/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#include <rte_windows.h>
#include <ethdev_pci.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
#include "mlx5_flow.h"
#include "mlx5_devx.h"

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* rte flow indexed pool configuration. */
static const struct mlx5_indexed_pool_config default_icfg[] = {
	{
		.size = sizeof(struct rte_flow),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 0,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 0,
		.type = "ctl_flow_ipool",
	},
	{
		.size = sizeof(struct rte_flow),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 1 << 14,
		.type = "rte_flow_ipool",
	},
	{
		.size = sizeof(struct rte_flow),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 0,
		.type = "mcp_flow_ipool",
	},
};
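
/*
 * Illustrative sketch (not part of the driver): the configurations above
 * feed the mlx5 indexed pool allocator. Assuming the mlx5_ipool_*()
 * helpers declared in mlx5_utils.h, a flow entry would be carved out of
 * such a pool roughly like this:
 *
 *	struct mlx5_indexed_pool *pool = mlx5_ipool_create(&default_icfg[1]);
 *	uint32_t idx = 0;
 *	struct rte_flow *flow = mlx5_ipool_zmalloc(pool, &idx);
 *
 *	...
 *	mlx5_ipool_free(pool, idx);
 *	claim_zero(mlx5_ipool_destroy(pool));
 *
 * Entries are addressed by the returned 32-bit index rather than by raw
 * pointer, which keeps references compact and makes the per-core cache
 * cheap to maintain.
 */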

/**
 * Allocate a DevX queue counter object backing the imissed statistic.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	void *ctx = priv->sh->cdev->ctx;

	priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
	if (!priv->q_counters) {
		DRV_LOG(ERR, "Port %d queue counter object cannot be created "
			"by DevX - imissed counter will be unavailable",
			dev->data->port_id);
		priv->q_counters_allocation_failure = 1;
		return;
	}
	priv->counter_set_id = priv->q_counters->id;
}
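
/*
 * Illustrative sketch (not part of the driver): the counter set id stored
 * above is attached to Rx queues created through DevX, and the aggregated
 * drop count is read back with the same DevX command used by
 * mlx5_os_read_dev_stat() below:
 *
 *	uint32_t out_of_buffer = 0;
 *
 *	if (priv->q_counters != NULL)
 *		mlx5_devx_cmd_queue_counter_query(priv->q_counters, 0,
 *						  &out_of_buffer);
 */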

/**
 * Initialize shared data between primary and secondary process.
 *
 * A memzone is reserved by the primary process and secondary processes
 * attach to it.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		/* Allocate shared memory. */
		mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
					 sizeof(*mlx5_shared_data),
					 SOCKET_ID_ANY, 0);
		if (mz == NULL) {
			DRV_LOG(ERR,
				"Cannot allocate mlx5 shared data");
			ret = -rte_errno;
			goto error;
		}
		mlx5_shared_data = mz->addr;
		memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
		rte_spinlock_init(&mlx5_shared_data->lock);
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}
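
/*
 * Illustrative sketch (not part of the driver): on platforms that support
 * secondary processes, the peer side of this handshake attaches to the
 * same memzone by name instead of reserving it:
 *
 *	const struct rte_memzone *mz =
 *		rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
 *
 *	if (mz != NULL)
 *		mlx5_shared_data = mz->addr;
 *
 * The Windows port rejects secondary processes in mlx5_os_net_probe(), so
 * only the reservation path above is exercised here.
 */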

/**
 * PMD global initialization.
 *
 * Independent of any individual device, this function initializes global
 * per-PMD data structures, distinguishing primary and secondary processes.
 * Hence, it is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_once(void)
{
	if (mlx5_init_shared_data())
		return -rte_errno;
	return 0;
}
/**
 * Get mlx5 device capabilities.
 *
 * @param sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
	struct mlx5_context *mlx5_ctx = sh->cdev->ctx;
	void *pv_iseg = NULL;
	u32 cb_iseg = 0;

	MLX5_ASSERT(sh->cdev->config.devx);
	MLX5_ASSERT(mlx5_dev_is_pci(sh->cdev->dev));
	pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
	if (pv_iseg == NULL) {
		DRV_LOG(ERR, "Failed to get device hca_iseg.");
		rte_errno = errno;
		return -rte_errno;
	}
	memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
	sh->dev_cap.vf = mlx5_dev_is_vf_pci(RTE_DEV_TO_PCI(sh->cdev->dev));
	sh->dev_cap.max_cq = 1 << hca_attr->log_max_cq;
	sh->dev_cap.max_qp = 1 << hca_attr->log_max_qp;
	sh->dev_cap.max_qp_wr = 1 << hca_attr->log_max_qp_sz;
	sh->dev_cap.dv_flow_en = 1;
	DRV_LOG(DEBUG, "MPW isn't supported.");
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is not supported.");
	sh->dev_cap.hw_csum = hca_attr->csum_cap;
	DRV_LOG(DEBUG, "Checksum offloading is %ssupported.",
		(sh->dev_cap.hw_csum ? "" : "not "));
	sh->dev_cap.hw_vlan_strip = hca_attr->vlan_cap;
	DRV_LOG(DEBUG, "VLAN stripping is %ssupported.",
		(sh->dev_cap.hw_vlan_strip ? "" : "not "));
	sh->dev_cap.hw_fcs_strip = hca_attr->scatter_fcs;
	sh->dev_cap.tso = ((1 << hca_attr->max_lso_cap) > 0);
	if (sh->dev_cap.tso)
		sh->dev_cap.tso_max_payload_sz = 1 << hca_attr->max_lso_cap;
	DRV_LOG(DEBUG, "Counters are not supported.");
	if (hca_attr->striding_rq) {
		sh->dev_cap.mprq.enabled = 1;
		sh->dev_cap.mprq.log_min_stride_size =
			MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		sh->dev_cap.mprq.log_max_stride_size =
			MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
		if (hca_attr->ext_stride_num_range)
			sh->dev_cap.mprq.log_min_stride_num =
				MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		else
			sh->dev_cap.mprq.log_min_stride_num =
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		sh->dev_cap.mprq.log_max_stride_num =
			MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %u",
			sh->dev_cap.mprq.log_min_stride_size);
		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %u",
			sh->dev_cap.mprq.log_max_stride_size);
		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %u",
			sh->dev_cap.mprq.log_min_stride_num);
		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %u",
			sh->dev_cap.mprq.log_max_stride_num);
		DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %u",
			sh->dev_cap.mprq.log_min_stride_wqe_size);
		DRV_LOG(DEBUG, "Device supports Multi-Packet RQ.");
	}
	if (hca_attr->rss_ind_tbl_cap) {
		/*
		 * DPDK doesn't support larger/variable indirection tables.
		 * Once DPDK supports it, take max size from device attr.
		 */
		sh->dev_cap.ind_table_max_size =
			RTE_MIN((uint32_t)1 << hca_attr->rss_ind_tbl_cap,
				(uint32_t)RTE_ETH_RSS_RETA_SIZE_512);
		DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u",
			sh->dev_cap.ind_table_max_size);
	}
	if (hca_attr->enhanced_multi_pkt_send_wqe)
		sh->dev_cap.mps = MLX5_MPW_ENHANCED;
	else if (hca_attr->multi_pkt_send_wqe &&
		 sh->dev_cap.mps != MLX5_ARG_UNSET)
		sh->dev_cap.mps = MLX5_MPW;
	else
		sh->dev_cap.mps = MLX5_MPW_DISABLED;
	sh->dev_cap.swp = mlx5_get_supported_sw_parsing_offloads(hca_attr);
	sh->dev_cap.tunnel_en = mlx5_get_supported_tunneling_offloads(hca_attr);
	if (sh->dev_cap.tunnel_en) {
		DRV_LOG(DEBUG, "Tunnel offloading is supported for %s%s%s",
			sh->dev_cap.tunnel_en &
			MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
			sh->dev_cap.tunnel_en &
			MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
			sh->dev_cap.tunnel_en &
			MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : "");
	} else {
		DRV_LOG(DEBUG, "Tunnel offloading is not supported.");
	}
	sh->dev_cap.cqe_comp = 0;
#if (RTE_CACHE_LINE_SIZE == 128)
	if (hca_attr->cqe_compression_128)
		sh->dev_cap.cqe_comp = 1;
	DRV_LOG(DEBUG, "Rx CQE 128B compression is %ssupported.",
		sh->dev_cap.cqe_comp ? "" : "not ");
#else
	if (hca_attr->cqe_compression)
		sh->dev_cap.cqe_comp = 1;
	DRV_LOG(DEBUG, "Rx CQE compression is %ssupported.",
		sh->dev_cap.cqe_comp ? "" : "not ");
#endif
	snprintf(sh->dev_cap.fw_ver, 64, "%x.%x.%04x",
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
	DRV_LOG(DEBUG, "Packet pacing is not supported.");
	mlx5_rt_timestamp_config(sh, hca_attr);
	return 0;
}

/**
 * Initialize DR related data within private structure.
 * Routine checks the reference counter and does the actual
 * resource creation/initialization only if the counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int err = 0;

	if (!sh->flow_tbls)
		err = mlx5_alloc_table_hash_list(priv);
	else
		DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse",
			(void *)sh->flow_tbls);
	return err;
}

/**
 * Destroy DR related data within private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_os_free_shared_dr(struct mlx5_priv *priv)
{
	mlx5_free_table_hash_list(priv);
}

/**
 * Set the completion channel file descriptor interrupt as non-blocking.
 * Currently it has no support under Windows.
 *
 * @param[in] fd
 *   The file descriptor (representing the interrupt) used in this channel,
 *   to be set as non-blocking.
 *
 * @return
 *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
 */
int
mlx5_os_set_nonblock_channel_fd(int fd)
{
	(void)fd;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}
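
/*
 * Illustrative sketch (not part of the driver): a Unix-style implementation
 * of the helper above would toggle O_NONBLOCK on the channel fd with
 * fcntl(), roughly:
 *
 *	int flags = fcntl(fd, F_GETFL);
 *
 *	if (flags == -1 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)
 *		return -errno;
 *	return 0;
 *
 * The Windows completion channel offers no such fd semantics, hence the
 * unconditional -ENOTSUP.
 */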

/**
 * Spawn an Ethernet device from DevX information.
 *
 * @param dpdk_dev
 *   Backing DPDK device.
 * @param spawn
 *   Device parameters (name, port, switch_info) to spawn.
 * @param mkvlist
 *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
 *
 * @return
 *   A valid Ethernet device object on success, NULL otherwise and rte_errno
 *   is set. The following errors are defined:
 *
 *   EEXIST: device is already spawned
 */
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
	       struct mlx5_dev_spawn_data *spawn,
	       struct mlx5_kvargs_ctrl *mkvlist)
{
	const struct mlx5_switch_info *switch_info = &spawn->info;
	struct mlx5_dev_ctx_shared *sh = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct mlx5_priv *priv = NULL;
	int err = 0;
	struct rte_ether_addr mac;
	char name[RTE_ETH_NAME_MAX_LEN];
	int own_domain_id = 0;
	uint16_t port_id;
	int i;
	struct mlx5_indexed_pool_config icfg[RTE_DIM(default_icfg)];

	memcpy(icfg, default_icfg, sizeof(icfg));
	/* Build device name. */
	strlcpy(name, dpdk_dev->name, sizeof(name));
	/* Check if the device is already spawned. */
	if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
		rte_errno = EEXIST;
		return NULL;
	}
	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
	sh = mlx5_alloc_shared_dev_ctx(spawn, mkvlist);
	if (!sh)
		return NULL;
	if (!sh->config.dv_flow_en) {
		DRV_LOG(ERR, "Windows flow mode requires DV flow to be enabled.");
		err = ENOTSUP;
		goto error;
	}
	if (sh->config.vf_nl_en) {
		DRV_LOG(DEBUG, "VF netlink isn't supported.");
		sh->config.vf_nl_en = 0;
	}
	/* Initialize the shutdown event in mlx5_dev_spawn to
	 * support mlx5_is_removed for Windows.
	 */
	err = mlx5_glue->devx_init_showdown_event(sh->cdev->ctx);
	if (err) {
		DRV_LOG(ERR, "failed to init shutdown event: %s",
			strerror(errno));
		goto error;
	}
	/* Allocate private eth device data. */
	priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			   sizeof(*priv),
			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (priv == NULL) {
		DRV_LOG(ERR, "priv allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv->sh = sh;
	priv->dev_port = spawn->phys_port;
	priv->pci_dev = spawn->pci_dev;
	priv->mtu = RTE_ETHER_MTU;
	priv->mp_id.port_id = port_id;
	strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
	priv->representor = !!switch_info->representor;
	priv->master = !!switch_info->master;
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	priv->vport_meta_tag = 0;
	priv->vport_meta_mask = 0;
	priv->pf_bond = spawn->pf_bond;
	priv->vport_id = -1;
	/* representor_id field keeps the unmodified VF index. */
	priv->representor_id = -1;
	/*
	 * Look for sibling devices in order to reuse their switch domain
	 * if any, otherwise allocate one.
	 */
	MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
		const struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;

		if (!opriv ||
		    opriv->sh != priv->sh ||
		    opriv->domain_id ==
		    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
			continue;
		priv->domain_id = opriv->domain_id;
		break;
	}
	if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		err = rte_eth_switch_domain_alloc(&priv->domain_id);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "unable to allocate switch domain: %s",
				strerror(rte_errno));
			goto error;
		}
		own_domain_id = 1;
	}
	/* Process parameters and store port configuration on priv structure. */
	err = mlx5_port_args_config(priv, mkvlist, &priv->config);
	if (err) {
		err = rte_errno;
		DRV_LOG(ERR, "Failed to process port configuration: %s",
			strerror(rte_errno));
		goto error;
	}
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		DRV_LOG(ERR, "can not allocate rte ethdev");
		err = ENOMEM;
		goto error;
	}
	if (priv->representor) {
		eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
		eth_dev->data->representor_id = priv->representor_id;
		MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;
			if (opriv &&
			    opriv->master &&
			    opriv->domain_id == priv->domain_id &&
			    opriv->sh == priv->sh) {
				eth_dev->data->backer_port_id = port_id;
				break;
			}
		}
		if (port_id >= RTE_MAX_ETHPORTS)
			eth_dev->data->backer_port_id = eth_dev->data->port_id;
	}
	/*
	 * Store associated network device interface index. This index
	 * is permanent throughout the lifetime of device. So, we may store
	 * the ifindex here and use the cached value further.
	 */
	MLX5_ASSERT(spawn->ifindex);
	priv->if_index = spawn->ifindex;
	eth_dev->data->dev_private = priv;
	priv->dev_data = eth_dev->data;
	eth_dev->data->mac_addrs = priv->mac;
	eth_dev->device = dpdk_dev;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	/* Configure the first MAC address by default. */
	if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s).",
			eth_dev->data->port_id, strerror(rte_errno));
		err = ENODEV;
		goto error;
	}
	DRV_LOG(INFO,
		"port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT,
		eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac));
#ifdef RTE_LIBRTE_MLX5_DEBUG
	{
		char ifname[MLX5_NAMESIZE];

		if (mlx5_get_ifname(eth_dev, &ifname) == 0)
			DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
				eth_dev->data->port_id, ifname);
		else
			DRV_LOG(DEBUG, "port %u ifname is unknown.",
				eth_dev->data->port_id);
	}
#endif
	/* Get actual MTU if possible. */
	err = mlx5_get_mtu(eth_dev, &priv->mtu);
	if (err) {
		err = rte_errno;
		goto error;
	}
	DRV_LOG(DEBUG, "port %u MTU is %u.", eth_dev->data->port_id,
		priv->mtu);
	/* Initialize burst functions to prevent crashes before link-up. */
	eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
	eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
	eth_dev->dev_ops = &mlx5_dev_ops;
	eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
	eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
	eth_dev->rx_queue_count = mlx5_rx_queue_count;
	/* Register MAC address. */
	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
	priv->ctrl_flows = 0;
	TAILQ_INIT(&priv->flow_meters);
	if (priv->mtr_en) {
		priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);
		if (!priv->mtr_profile_tbl)
			goto error;
	}
	/* Bring Ethernet device up. */
	DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.",
		eth_dev->data->port_id);
	/* nl calls are unsupported - set to -1 so as not to fail on release. */
	priv->nl_socket_rdma = -1;
	priv->nl_socket_route = -1;
	mlx5_set_link_up(eth_dev);
	/*
	 * Even though the interrupt handler is not installed yet,
	 * interrupts will still trigger on the async_fd from
	 * Verbs context returned by ibv_open_device().
	 */
	mlx5_link_update(eth_dev, 0);
	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
		icfg[i].release_mem_en = !!sh->config.reclaim_mode;
		if (sh->config.reclaim_mode)
			icfg[i].per_core_cache = 0;
#ifdef HAVE_MLX5_HWS_SUPPORT
		if (priv->sh->config.dv_flow_en == 2)
			icfg[i].size = sizeof(struct rte_flow_hw) + sizeof(struct rte_flow_nt2hws);
#endif
		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
		if (!priv->flows[i])
			goto error;
	}
	/* Create context for virtual machine VLAN workaround. */
	priv->vmwa_context = NULL;
	if (sh->config.dv_flow_en) {
		err = mlx5_alloc_shared_dr(priv);
		if (err)
			goto error;
	}
	/* No supported flow priority number detection. */
	priv->sh->flow_max_priority = -1;
	mlx5_set_metadata_mask(eth_dev);
	if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
	    !priv->sh->dv_regc0_mask) {
		DRV_LOG(ERR, "metadata mode %u is not supported "
			     "(no metadata reg_c[0] is available).",
			     sh->config.dv_xmeta_en);
		err = ENOTSUP;
		goto error;
	}
	priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
		mlx5_hrxq_create_cb, mlx5_hrxq_match_cb,
		mlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb,
		mlx5_hrxq_clone_free_cb);
	/* Query availability of metadata reg_c's. */
	if (!priv->sh->metadata_regc_check_flag) {
		err = mlx5_flow_discover_mreg_c(eth_dev);
		if (err < 0) {
			err = -err;
			goto error;
		}
	}
	if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
		DRV_LOG(DEBUG,
			"port %u extensive metadata register is not supported.",
			eth_dev->data->port_id);
		if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
			DRV_LOG(ERR, "metadata mode %u is not supported "
				     "(no metadata registers available).",
				     sh->config.dv_xmeta_en);
			err = ENOTSUP;
			goto error;
		}
	}
	if (sh->cdev->config.devx) {
		priv->obj_ops = devx_obj_ops;
	} else {
		DRV_LOG(ERR, "Flow support on Windows requires DevX.");
		err = ENOTSUP;
		goto error;
	}
	mlx5_flow_counter_mode_config(eth_dev);
	mlx5_queue_counter_id_prepare(eth_dev);
	rte_spinlock_init(&priv->hw_ctrl_lock);
	LIST_INIT(&priv->hw_ctrl_flows);
	LIST_INIT(&priv->hw_ext_ctrl_flows);
	return eth_dev;
error:
	if (priv) {
		if (priv->mtr_profile_tbl)
			mlx5_l3t_destroy(priv->mtr_profile_tbl);
		if (own_domain_id)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
		mlx5_free(priv);
		if (eth_dev != NULL)
			eth_dev->data->dev_private = NULL;
	}
	if (eth_dev != NULL) {
		/* mac_addrs must not be freed alone because it is part of
		 * dev_private.
		 */
		eth_dev->data->mac_addrs = NULL;
		rte_eth_dev_release_port(eth_dev);
	}
	if (sh)
		mlx5_free_shared_dev_ctx(sh);
	MLX5_ASSERT(err > 0);
	rte_errno = err;
	return NULL;
}

/**
 * This function should share events between multiple ports of a single IB
 * device. Currently it has no support under Windows.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
void
mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
{
	(void)sh;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * This function should share events between multiple ports of a single IB
 * device. Currently it has no support under Windows.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
void
mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
{
	(void)sh;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * Read statistics by a named counter.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 * @param[in] ctr_name
 *   Pointer to the name of the statistic counter to read.
 * @param[out] stat
 *   Pointer to read statistic value.
 * @return
 *   0 on success and stat is valid, non-zero if failed to read the value
 *   or counter is not supported.
 *   rte_errno is set.
 */
int
mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
		      uint64_t *stat)
{
	if (priv->q_counters != NULL && strcmp(ctr_name, "out_of_buffer") == 0)
		return mlx5_devx_cmd_queue_counter_query
				(priv->q_counters, 0, (uint32_t *)stat);
	DRV_LOG(WARNING, "%s: is not supported for the %s counter",
		__func__, ctr_name);
	return -ENOTSUP;
}
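
/*
 * Illustrative sketch (not part of the driver): the shared xstats code may
 * call the helper above with a counter name from the driver's counter
 * table; only "out_of_buffer" (backing the imissed statistic) is served on
 * Windows:
 *
 *	uint64_t drops = 0;
 *
 *	if (mlx5_os_read_dev_stat(priv, "out_of_buffer", &drops) == 0)
 *		DRV_LOG(DEBUG, "out_of_buffer: %" PRIu64, drops);
 */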

/**
 * Flush device MAC addresses.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * Remove a MAC address from the device.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address index.
 */
void
mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	(void)dev;
	(void)index;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * Add a MAC address to the device.
 * Under Windows only re-registering the device's own MAC address is
 * supported; adding any new address is rejected.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac
 *   MAC address to register.
 * @param index
 *   MAC address index.
 *
 * @return
 *   0 on success, a negative errno value otherwise
 */
int
mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
		     uint32_t index)
{
	(void)index;
	struct rte_ether_addr lmac;

	if (mlx5_get_mac(dev, &lmac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s)",
			dev->data->port_id, strerror(rte_errno));
		return rte_errno;
	}
	if (!rte_is_same_ether_addr(&lmac, mac)) {
		DRV_LOG(ERR,
			"adding new mac address to device is unsupported");
		return -ENOTSUP;
	}
	return 0;
}
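
/*
 * Illustrative sketch (not part of the driver): an application reaching the
 * callback above through the generic ethdev API can therefore only
 * re-register the address the port already owns; any other address fails
 * with -ENOTSUP:
 *
 *	struct rte_ether_addr addr;
 *
 *	rte_eth_macaddr_get(port_id, &addr);
 *	rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 */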

/**
 * Modify a VF MAC address.
 * Currently it has no support under Windows.
 *
 * @param priv
 *   Pointer to device private data.
 * @param mac_addr
 *   MAC address to modify into.
 * @param iface_idx
 *   Net device interface index.
 * @param vf_index
 *   VF index.
 *
 * @return
 *   0 on success, a negative errno value otherwise
 */
int
mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
			   unsigned int iface_idx,
			   struct rte_ether_addr *mac_addr,
			   int vf_index)
{
	(void)priv;
	(void)iface_idx;
	(void)mac_addr;
	(void)vf_index;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}

/**
 * Set device promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - promiscuous is disabled, otherwise - enabled.
 *
 * @return
 *   0 on success, a negative error value otherwise
 */
int
mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	return mlx5_glue->devx_set_promisc_vport(priv->sh->cdev->ctx, ALL_PROMISC, enable);
}

/**
 * Set device allmulti mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - all multicast is disabled, otherwise - enabled.
 *
 * @return
 *   0 on success, a negative error value otherwise
 */
int
mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	return mlx5_glue->devx_set_promisc_vport(priv->sh->cdev->ctx, MC_PROMISC, enable);
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns Ethernet devices out of a given device.
 *
 * @param[in] cdev
 *   Pointer to the common device.
 * @param[in, out] mkvlist
 *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_os_net_probe(struct mlx5_common_device *cdev,
		  struct mlx5_kvargs_ctrl *mkvlist)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
	struct mlx5_dev_spawn_data spawn = {
		.pf_bond = -1,
		.max_port = 1,
		.phys_port = 1,
		.phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx),
		.pci_dev = pci_dev,
		.cdev = cdev,
		.ifindex = -1, /* Spawn will assign */
		.info = (struct mlx5_switch_info){
			.name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
		},
	};
	int ret;
	uint32_t restore;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		DRV_LOG(ERR, "Secondary process is not supported on Windows.");
		return -ENOTSUP;
	}
	ret = mlx5_init_once();
	if (ret) {
		DRV_LOG(ERR, "unable to init PMD global data: %s",
			strerror(rte_errno));
		return -rte_errno;
	}
	spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, mkvlist);
	if (!spawn.eth_dev)
		return -rte_errno;
	restore = spawn.eth_dev->data->dev_flags;
	rte_eth_copy_pci_info(spawn.eth_dev, pci_dev);
	/* Restore non-PCI flags cleared by the above call. */
	spawn.eth_dev->data->dev_flags |= restore;
	rte_eth_dev_probing_finish(spawn.eth_dev);
	return 0;
}

/**
 * Cleanup resources when the last device is closed.
 */
void
mlx5_os_net_cleanup(void)
{
}

const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};