xref: /dpdk/drivers/net/intel/ice/ice_dcf_vf_representor.c (revision c1d145834f287aa8cf53de914618a7312f2c360e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4 
5 #include <errno.h>
6 #include <sys/types.h>
7 
8 #include <rte_ethdev.h>
9 
10 #include "ice_dcf_ethdev.h"
11 #include "ice_rxtx.h"
12 
13 static uint16_t
14 ice_dcf_vf_repr_rx_burst(__rte_unused void *rxq,
15 			 __rte_unused struct rte_mbuf **rx_pkts,
16 			 __rte_unused uint16_t nb_pkts)
17 {
18 	return 0;
19 }
20 
21 static uint16_t
22 ice_dcf_vf_repr_tx_burst(__rte_unused void *txq,
23 			 __rte_unused struct rte_mbuf **tx_pkts,
24 			 __rte_unused uint16_t nb_pkts)
25 {
26 	return 0;
27 }
28 
/* Apply the cached VLAN configuration when the representor is
 * (re)configured.
 *
 * @return 0 on success, negative errno on a real failure.
 *
 * ice_dcf_vf_repr_init_vlan() returns an error code which was previously
 * discarded, so a failed VLAN re-program went unnoticed by the
 * application. Propagate genuine failures, but tolerate -ENOTSUP so that
 * VFs without VLAN_V2 capability can still be configured.
 */
static int
ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
{
	int err;

	err = ice_dcf_vf_repr_init_vlan(dev);
	if (err && err != -ENOTSUP)
		return err;

	return 0;
}
36 
37 static int
38 ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
39 {
40 	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
41 
42 	return 0;
43 }
44 
45 static int
46 ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
47 {
48 	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
49 
50 	return 0;
51 }
52 
53 static void
54 ice_dcf_vf_repr_notify_one(struct rte_eth_dev *dev, bool valid)
55 {
56 	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
57 
58 	repr->dcf_valid = valid;
59 }
60 
61 static int
62 ice_dcf_vf_repr_dev_close(struct rte_eth_dev *dev)
63 {
64 	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
65 	struct ice_dcf_adapter *dcf_adapter;
66 	int err;
67 
68 	if (repr->dcf_valid) {
69 		dcf_adapter = repr->dcf_eth_dev->data->dev_private;
70 		err = ice_dcf_handle_vf_repr_close(dcf_adapter, repr->vf_id);
71 		if (err)
72 			PMD_DRV_LOG(ERR, "VF representor invalid");
73 	}
74 
75 	return ice_dcf_vf_repr_uninit(dev);
76 }
77 
78 static int
79 ice_dcf_vf_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
80 			       __rte_unused uint16_t queue_id,
81 			       __rte_unused uint16_t nb_desc,
82 			       __rte_unused unsigned int socket_id,
83 			       __rte_unused const struct rte_eth_rxconf *conf,
84 			       __rte_unused struct rte_mempool *pool)
85 {
86 	return 0;
87 }
88 
89 static int
90 ice_dcf_vf_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
91 			       __rte_unused uint16_t queue_id,
92 			       __rte_unused uint16_t nb_desc,
93 			       __rte_unused unsigned int socket_id,
94 			       __rte_unused const struct rte_eth_txconf *conf)
95 {
96 	return 0;
97 }
98 
99 static int
100 ice_dcf_vf_repr_promiscuous_enable(__rte_unused struct rte_eth_dev *ethdev)
101 {
102 	return 0;
103 }
104 
105 static int
106 ice_dcf_vf_repr_promiscuous_disable(__rte_unused struct rte_eth_dev *ethdev)
107 {
108 	return 0;
109 }
110 
111 static int
112 ice_dcf_vf_repr_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
113 {
114 	return 0;
115 }
116 
117 static int
118 ice_dcf_vf_repr_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
119 {
120 	return 0;
121 }
122 
123 static int
124 ice_dcf_vf_repr_link_update(__rte_unused struct rte_eth_dev *ethdev,
125 			    __rte_unused int wait_to_complete)
126 {
127 	return 0;
128 }
129 
130 static __rte_always_inline struct ice_dcf_hw *
131 ice_dcf_vf_repr_hw(struct ice_dcf_vf_repr *repr)
132 {
133 	struct ice_dcf_adapter *dcf_adapter;
134 
135 	if (!repr->dcf_valid) {
136 		PMD_DRV_LOG(ERR, "DCF for VF representor has been released");
137 		return NULL;
138 	}
139 
140 	dcf_adapter = repr->dcf_eth_dev->data->dev_private;
141 
142 	return &dcf_adapter->real_hw;
143 }
144 
145 static int
146 ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
147 			     struct rte_eth_dev_info *dev_info)
148 {
149 	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
150 	struct ice_dcf_hw *dcf_hw = ice_dcf_vf_repr_hw(repr);
151 
152 	if (!dcf_hw)
153 		return -EIO;
154 
155 	dev_info->device = dev->device;
156 	dev_info->max_mac_addrs = 1;
157 	dev_info->max_rx_queues = dcf_hw->vsi_res->num_queue_pairs;
158 	dev_info->max_tx_queues = dcf_hw->vsi_res->num_queue_pairs;
159 	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
160 	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
161 	dev_info->hash_key_size = dcf_hw->vf_res->rss_key_size;
162 	dev_info->reta_size = dcf_hw->vf_res->rss_lut_size;
163 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
164 
165 	dev_info->rx_offload_capa =
166 		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
167 		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
168 		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
169 		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
170 		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
171 		RTE_ETH_RX_OFFLOAD_SCATTER |
172 		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
173 		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
174 		RTE_ETH_RX_OFFLOAD_RSS_HASH;
175 	dev_info->tx_offload_capa =
176 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
177 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
178 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
179 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
180 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
181 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
182 		RTE_ETH_TX_OFFLOAD_TCP_TSO |
183 		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
184 		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
185 		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
186 		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
187 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
188 
189 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
190 		.rx_thresh = {
191 			.pthresh = ICE_DEFAULT_RX_PTHRESH,
192 			.hthresh = ICE_DEFAULT_RX_HTHRESH,
193 			.wthresh = ICE_DEFAULT_RX_WTHRESH,
194 		},
195 		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
196 		.rx_drop_en = 0,
197 		.offloads = 0,
198 	};
199 
200 	dev_info->default_txconf = (struct rte_eth_txconf) {
201 		.tx_thresh = {
202 			.pthresh = ICE_DEFAULT_TX_PTHRESH,
203 			.hthresh = ICE_DEFAULT_TX_HTHRESH,
204 			.wthresh = ICE_DEFAULT_TX_WTHRESH,
205 		},
206 		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
207 		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
208 		.offloads = 0,
209 	};
210 
211 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
212 		.nb_max = ICE_MAX_RING_DESC,
213 		.nb_min = ICE_MIN_RING_DESC,
214 		.nb_align = ICE_ALIGN_RING_DESC,
215 	};
216 
217 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
218 		.nb_max = ICE_MAX_RING_DESC,
219 		.nb_min = ICE_MIN_RING_DESC,
220 		.nb_align = ICE_ALIGN_RING_DESC,
221 	};
222 
223 	dev_info->switch_info.name = dcf_hw->eth_dev->device->name;
224 	dev_info->switch_info.domain_id = repr->switch_domain_id;
225 	dev_info->switch_info.port_id = repr->vf_id;
226 
227 	return 0;
228 }
229 
230 static __rte_always_inline bool
231 ice_dcf_vlan_offload_ena(struct ice_dcf_vf_repr *repr)
232 {
233 	return !!(ice_dcf_vf_repr_hw(repr)->vf_res->vf_cap_flags &
234 		  VIRTCHNL_VF_OFFLOAD_VLAN_V2);
235 }
236 
237 static int
238 ice_dcf_vlan_offload_config(struct ice_dcf_vf_repr *repr,
239 			    struct virtchnl_dcf_vlan_offload *vlan_offload)
240 {
241 	struct dcf_virtchnl_cmd args;
242 	int err;
243 
244 	memset(&args, 0, sizeof(args));
245 	args.v_op = VIRTCHNL_OP_DCF_VLAN_OFFLOAD;
246 	args.req_msg = (uint8_t *)vlan_offload;
247 	args.req_msglen = sizeof(*vlan_offload);
248 
249 	err = ice_dcf_execute_virtchnl_cmd(ice_dcf_vf_repr_hw(repr), &args);
250 	if (err)
251 		PMD_DRV_LOG(ERR,
252 			    "Failed to execute command of VIRTCHNL_OP_DCF_VLAN_OFFLOAD");
253 
254 	return err;
255 }
256 
257 static int
258 ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
259 {
260 	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
261 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
262 	struct virtchnl_dcf_vlan_offload vlan_offload;
263 	int err;
264 
265 	if (!ice_dcf_vlan_offload_ena(repr))
266 		return -ENOTSUP;
267 
268 	/* Vlan stripping setting */
269 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
270 		bool enable = !!(dev_conf->rxmode.offloads &
271 				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
272 
273 		if (enable && repr->outer_vlan_info.port_vlan_ena) {
274 			PMD_DRV_LOG(ERR,
275 				    "Disable the port VLAN firstly");
276 			return -EINVAL;
277 		}
278 
279 		memset(&vlan_offload, 0, sizeof(vlan_offload));
280 
281 		if (enable)
282 			vlan_offload.vlan_flags =
283 					VIRTCHNL_DCF_VLAN_STRIP_INTO_RX_DESC <<
284 					VIRTCHNL_DCF_VLAN_STRIP_MODE_S;
285 		else if (repr->outer_vlan_info.stripping_ena && !enable)
286 			vlan_offload.vlan_flags =
287 					VIRTCHNL_DCF_VLAN_STRIP_DISABLE <<
288 					VIRTCHNL_DCF_VLAN_STRIP_MODE_S;
289 
290 		if (vlan_offload.vlan_flags) {
291 			vlan_offload.vf_id = repr->vf_id;
292 			vlan_offload.tpid = repr->outer_vlan_info.tpid;
293 			vlan_offload.vlan_flags |=
294 					VIRTCHNL_DCF_VLAN_TYPE_OUTER <<
295 					VIRTCHNL_DCF_VLAN_TYPE_S;
296 
297 			err = ice_dcf_vlan_offload_config(repr, &vlan_offload);
298 			if (err)
299 				return -EIO;
300 
301 			repr->outer_vlan_info.stripping_ena = enable;
302 		}
303 	}
304 
305 	return 0;
306 }
307 
308 static int
309 ice_dcf_vf_repr_vlan_pvid_set(struct rte_eth_dev *dev,
310 			      uint16_t pvid, int on)
311 {
312 	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
313 	struct virtchnl_dcf_vlan_offload vlan_offload;
314 	int err;
315 
316 	if (!ice_dcf_vlan_offload_ena(repr))
317 		return -ENOTSUP;
318 
319 	if (repr->outer_vlan_info.stripping_ena) {
320 		PMD_DRV_LOG(ERR,
321 			    "Disable the VLAN stripping firstly");
322 		return -EINVAL;
323 	}
324 
325 	if (pvid > RTE_ETHER_MAX_VLAN_ID)
326 		return -EINVAL;
327 
328 	memset(&vlan_offload, 0, sizeof(vlan_offload));
329 
330 	if (on)
331 		vlan_offload.vlan_flags =
332 				(VIRTCHNL_DCF_VLAN_INSERT_PORT_BASED <<
333 				 VIRTCHNL_DCF_VLAN_INSERT_MODE_S);
334 	else
335 		vlan_offload.vlan_flags =
336 				(VIRTCHNL_DCF_VLAN_INSERT_DISABLE <<
337 				 VIRTCHNL_DCF_VLAN_INSERT_MODE_S);
338 
339 	vlan_offload.vf_id = repr->vf_id;
340 	vlan_offload.tpid = repr->outer_vlan_info.tpid;
341 	vlan_offload.vlan_flags |= (VIRTCHNL_DCF_VLAN_TYPE_OUTER <<
342 				    VIRTCHNL_DCF_VLAN_TYPE_S);
343 	vlan_offload.vlan_id = pvid;
344 
345 	err = ice_dcf_vlan_offload_config(repr, &vlan_offload);
346 	if (!err) {
347 		if (on) {
348 			repr->outer_vlan_info.port_vlan_ena = true;
349 			repr->outer_vlan_info.vid = pvid;
350 		} else {
351 			repr->outer_vlan_info.port_vlan_ena = false;
352 		}
353 	}
354 
355 	return err;
356 }
357 
358 static int
359 ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
360 			      enum rte_vlan_type vlan_type, uint16_t tpid)
361 {
362 	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
363 	int err = 0;
364 
365 	if (!ice_dcf_vlan_offload_ena(repr))
366 		return -ENOTSUP;
367 
368 	if (vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
369 		PMD_DRV_LOG(ERR,
370 			    "Can accelerate only outer VLAN in QinQ");
371 		return -EINVAL;
372 	}
373 
374 	if (tpid != RTE_ETHER_TYPE_QINQ &&
375 	    tpid != RTE_ETHER_TYPE_VLAN &&
376 	    tpid != RTE_ETHER_TYPE_QINQ1) {
377 		PMD_DRV_LOG(ERR,
378 			    "Invalid TPID: 0x%04x", tpid);
379 		return -EINVAL;
380 	}
381 
382 	repr->outer_vlan_info.tpid = tpid;
383 
384 	if (repr->outer_vlan_info.port_vlan_ena) {
385 		err = ice_dcf_vf_repr_vlan_pvid_set(dev,
386 						    repr->outer_vlan_info.vid,
387 						    true);
388 		if (err) {
389 			PMD_DRV_LOG(ERR,
390 				    "Failed to reset port VLAN : %d",
391 				    err);
392 			return err;
393 		}
394 	}
395 
396 	if (repr->outer_vlan_info.stripping_ena) {
397 		err = ice_dcf_vf_repr_vlan_offload_set(dev,
398 						       RTE_ETH_VLAN_STRIP_MASK);
399 		if (err) {
400 			PMD_DRV_LOG(ERR,
401 				    "Failed to reset VLAN stripping : %d",
402 				    err);
403 			return err;
404 		}
405 	}
406 
407 	return 0;
408 }
409 
410 static const struct eth_dev_ops ice_dcf_vf_repr_dev_ops = {
411 	.dev_configure        = ice_dcf_vf_repr_dev_configure,
412 	.dev_start            = ice_dcf_vf_repr_dev_start,
413 	.dev_stop             = ice_dcf_vf_repr_dev_stop,
414 	.dev_close            = ice_dcf_vf_repr_dev_close,
415 	.dev_infos_get        = ice_dcf_vf_repr_dev_info_get,
416 	.rx_queue_setup       = ice_dcf_vf_repr_rx_queue_setup,
417 	.tx_queue_setup       = ice_dcf_vf_repr_tx_queue_setup,
418 	.promiscuous_enable   = ice_dcf_vf_repr_promiscuous_enable,
419 	.promiscuous_disable  = ice_dcf_vf_repr_promiscuous_disable,
420 	.allmulticast_enable  = ice_dcf_vf_repr_allmulticast_enable,
421 	.allmulticast_disable = ice_dcf_vf_repr_allmulticast_disable,
422 	.link_update          = ice_dcf_vf_repr_link_update,
423 	.vlan_offload_set     = ice_dcf_vf_repr_vlan_offload_set,
424 	.vlan_pvid_set        = ice_dcf_vf_repr_vlan_pvid_set,
425 	.vlan_tpid_set        = ice_dcf_vf_repr_vlan_tpid_set,
426 };
427 
428 int
429 ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param)
430 {
431 	struct ice_dcf_vf_repr *repr = vf_rep_eth_dev->data->dev_private;
432 	struct ice_dcf_vf_repr_param *param = init_param;
433 
434 	repr->dcf_eth_dev = param->dcf_eth_dev;
435 	repr->switch_domain_id = param->switch_domain_id;
436 	repr->vf_id = param->vf_id;
437 	repr->dcf_valid = true;
438 	repr->outer_vlan_info.port_vlan_ena = false;
439 	repr->outer_vlan_info.stripping_ena = false;
440 	repr->outer_vlan_info.tpid = RTE_ETHER_TYPE_VLAN;
441 
442 	vf_rep_eth_dev->dev_ops = &ice_dcf_vf_repr_dev_ops;
443 
444 	vf_rep_eth_dev->rx_pkt_burst = ice_dcf_vf_repr_rx_burst;
445 	vf_rep_eth_dev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst;
446 
447 	vf_rep_eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
448 	vf_rep_eth_dev->data->representor_id = repr->vf_id;
449 	vf_rep_eth_dev->data->backer_port_id = repr->dcf_eth_dev->data->port_id;
450 
451 	vf_rep_eth_dev->data->mac_addrs = &repr->mac_addr;
452 
453 	rte_eth_random_addr(repr->mac_addr.addr_bytes);
454 
455 	return 0;
456 }
457 
458 int
459 ice_dcf_vf_repr_uninit(struct rte_eth_dev *vf_rep_eth_dev)
460 {
461 	vf_rep_eth_dev->data->mac_addrs = NULL;
462 
463 	return 0;
464 }
465 
466 int
467 ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev)
468 {
469 	struct ice_dcf_vf_repr *repr = vf_rep_eth_dev->data->dev_private;
470 	int err;
471 
472 	err = ice_dcf_vf_repr_vlan_offload_set(vf_rep_eth_dev,
473 					       RTE_ETH_VLAN_STRIP_MASK);
474 	if (err) {
475 		PMD_DRV_LOG(ERR, "Failed to set VLAN offload");
476 		return err;
477 	}
478 
479 	if (repr->outer_vlan_info.port_vlan_ena) {
480 		err = ice_dcf_vf_repr_vlan_pvid_set(vf_rep_eth_dev,
481 						    repr->outer_vlan_info.vid,
482 						    true);
483 		if (err) {
484 			PMD_DRV_LOG(ERR, "Failed to enable port VLAN");
485 			return err;
486 		}
487 	}
488 
489 	return 0;
490 }
491 
492 void
493 ice_dcf_vf_repr_stop_all(struct ice_dcf_adapter *dcf_adapter)
494 {
495 	uint16_t vf_id;
496 	int ret;
497 
498 	if (!dcf_adapter->repr_infos)
499 		return;
500 
501 	for (vf_id = 0; vf_id < dcf_adapter->real_hw.num_vfs; vf_id++) {
502 		struct rte_eth_dev *vf_rep_eth_dev =
503 				dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev;
504 		if (!vf_rep_eth_dev || vf_rep_eth_dev->data->dev_started == 0)
505 			continue;
506 
507 		ret = ice_dcf_vf_repr_dev_stop(vf_rep_eth_dev);
508 		if (!ret)
509 			vf_rep_eth_dev->data->dev_started = 0;
510 	}
511 }
512 
513 void
514 ice_dcf_vf_repr_notify_all(struct ice_dcf_adapter *dcf_adapter, bool valid)
515 {
516 	uint16_t vf_id;
517 	struct rte_eth_dev *vf_rep_eth_dev;
518 
519 	if (!dcf_adapter->repr_infos)
520 		return;
521 
522 	for (vf_id = 0; vf_id < dcf_adapter->real_hw.num_vfs; vf_id++) {
523 		vf_rep_eth_dev = dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev;
524 
525 		if (!vf_rep_eth_dev)
526 			continue;
527 
528 		ice_dcf_vf_repr_notify_one(vf_rep_eth_dev, valid);
529 	}
530 }
531