xref: /dpdk/drivers/net/intel/ice/ice_dcf_parent.c (revision c1d145834f287aa8cf53de914618a7312f2c360e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4 #include <stdlib.h>
5 #include <sys/types.h>
6 #include <sys/stat.h>
7 #include <unistd.h>
8 
9 #include <rte_spinlock.h>
10 
11 #include "ice_dcf_ethdev.h"
12 #include "ice_generic_flow.h"
13 
14 #define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL	100000 /* us */
15 static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER;
16 
17 struct ice_dcf_reset_event_param {
18 	struct ice_dcf_hw *dcf_hw;
19 
20 	bool vfr; /* VF reset event */
21 	uint16_t vf_id; /* The reset VF ID */
22 };
23 
24 static __rte_always_inline void
25 ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
26 		       uint16_t vsi_map)
27 {
28 	struct ice_vsi_ctx *vsi_ctx;
29 	bool first_update = false;
30 	uint16_t new_vsi_num;
31 
32 	if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
33 		PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
34 		return;
35 	}
36 
37 	vsi_ctx = hw->vsi_ctx[vsi_handle];
38 
39 	if (vsi_map & VIRTCHNL_DCF_VF_VSI_VALID) {
40 		if (!vsi_ctx) {
41 			vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
42 			if (!vsi_ctx) {
43 				PMD_DRV_LOG(ERR, "No memory for vsi context %u",
44 					    vsi_handle);
45 				return;
46 			}
47 			hw->vsi_ctx[vsi_handle] = vsi_ctx;
48 			first_update = true;
49 		}
50 
51 		new_vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
52 			VIRTCHNL_DCF_VF_VSI_ID_S;
53 
54 		/* Redirect rules if vsi mapping table changes. */
55 		if (!first_update) {
56 			struct ice_flow_redirect rd;
57 
58 			memset(&rd, 0, sizeof(struct ice_flow_redirect));
59 			rd.type = ICE_FLOW_REDIRECT_VSI;
60 			rd.vsi_handle = vsi_handle;
61 			rd.new_vsi_num = new_vsi_num;
62 			ice_flow_redirect((struct ice_adapter *)hw->back, &rd);
63 		} else {
64 			vsi_ctx->vsi_num = new_vsi_num;
65 		}
66 
67 		PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
68 			    vsi_handle, vsi_ctx->vsi_num);
69 	} else {
70 		hw->vsi_ctx[vsi_handle] = NULL;
71 
72 		ice_free(hw, vsi_ctx);
73 
74 		PMD_DRV_LOG(NOTICE, "VF%u is disabled", vsi_handle);
75 	}
76 }
77 
78 static void
79 ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs,
80 			  uint16_t *vf_vsi_map)
81 {
82 	uint16_t vf_id;
83 
84 	for (vf_id = 0; vf_id < num_vfs; vf_id++)
85 		ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
86 }
87 
88 static void
89 ice_dcf_update_pf_vsi_map(struct ice_hw *hw, uint16_t pf_vsi_idx,
90 			uint16_t pf_vsi_num)
91 {
92 	struct ice_vsi_ctx *vsi_ctx;
93 
94 	if (unlikely(pf_vsi_idx >= ICE_MAX_VSI)) {
95 		PMD_DRV_LOG(ERR, "Invalid vsi handle %u", pf_vsi_idx);
96 		return;
97 	}
98 
99 	vsi_ctx = hw->vsi_ctx[pf_vsi_idx];
100 
101 	if (!vsi_ctx)
102 		vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
103 
104 	if (!vsi_ctx) {
105 		PMD_DRV_LOG(ERR, "No memory for vsi context %u",
106 				pf_vsi_idx);
107 		return;
108 	}
109 
110 	vsi_ctx->vsi_num = pf_vsi_num;
111 	hw->vsi_ctx[pf_vsi_idx] = vsi_ctx;
112 
113 	PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
114 			pf_vsi_idx, vsi_ctx->vsi_num);
115 }
116 
117 static uint32_t
118 ice_dcf_vsi_update_service_handler(void *param)
119 {
120 	struct ice_dcf_reset_event_param *reset_param = param;
121 	struct ice_dcf_hw *hw = reset_param->dcf_hw;
122 	struct ice_dcf_adapter *adapter =
123 		container_of(hw, struct ice_dcf_adapter, real_hw);
124 	struct ice_adapter *parent_adapter = &adapter->parent;
125 
126 	rte_atomic_fetch_add_explicit(&hw->vsi_update_thread_num, 1,
127 		rte_memory_order_relaxed);
128 
129 	rte_thread_detach(rte_thread_self());
130 
131 	rte_delay_us(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
132 
133 	rte_spinlock_lock(&vsi_update_lock);
134 
135 	if (!ice_dcf_handle_vsi_update_event(hw)) {
136 		rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true,
137 				 rte_memory_order_relaxed);
138 		ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
139 					  hw->num_vfs, hw->vf_vsi_map);
140 	}
141 
142 	if (reset_param->vfr && adapter->repr_infos) {
143 		struct rte_eth_dev *vf_rep_eth_dev =
144 			adapter->repr_infos[reset_param->vf_id].vf_rep_eth_dev;
145 		if (vf_rep_eth_dev && vf_rep_eth_dev->data->dev_started) {
146 			PMD_DRV_LOG(DEBUG, "VF%u representor is resetting",
147 				    reset_param->vf_id);
148 			ice_dcf_vf_repr_init_vlan(vf_rep_eth_dev);
149 		}
150 	}
151 
152 	if (hw->tm_conf.committed)
153 		ice_dcf_replay_vf_bw(hw, reset_param->vf_id);
154 
155 	rte_spinlock_unlock(&vsi_update_lock);
156 
157 	free(param);
158 
159 	rte_atomic_fetch_sub_explicit(&hw->vsi_update_thread_num, 1,
160 		rte_memory_order_release);
161 
162 	return 0;
163 }
164 
165 static void
166 start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id)
167 {
168 	struct ice_dcf_reset_event_param *param;
169 	char name[RTE_THREAD_INTERNAL_NAME_SIZE];
170 	rte_thread_t thread;
171 	int ret;
172 
173 	param = malloc(sizeof(*param));
174 	if (!param) {
175 		PMD_DRV_LOG(ERR, "Failed to allocate the memory for reset handling");
176 		return;
177 	}
178 
179 	param->dcf_hw = dcf_hw;
180 	param->vfr = vfr;
181 	param->vf_id = vf_id;
182 
183 	snprintf(name, sizeof(name), "ice-rst%u", vf_id);
184 	ret = rte_thread_create_internal_control(&thread, name,
185 				     ice_dcf_vsi_update_service_handler, param);
186 	if (ret != 0) {
187 		PMD_DRV_LOG(ERR, "Failed to start the thread for reset handling");
188 		free(param);
189 	}
190 }
191 
192 static uint32_t
193 ice_dcf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
194 {
195 	uint32_t speed;
196 
197 	switch (virt_link_speed) {
198 	case VIRTCHNL_LINK_SPEED_100MB:
199 		speed = 100;
200 		break;
201 	case VIRTCHNL_LINK_SPEED_1GB:
202 		speed = 1000;
203 		break;
204 	case VIRTCHNL_LINK_SPEED_10GB:
205 		speed = 10000;
206 		break;
207 	case VIRTCHNL_LINK_SPEED_40GB:
208 		speed = 40000;
209 		break;
210 	case VIRTCHNL_LINK_SPEED_20GB:
211 		speed = 20000;
212 		break;
213 	case VIRTCHNL_LINK_SPEED_25GB:
214 		speed = 25000;
215 		break;
216 	case VIRTCHNL_LINK_SPEED_2_5GB:
217 		speed = 2500;
218 		break;
219 	case VIRTCHNL_LINK_SPEED_5GB:
220 		speed = 5000;
221 		break;
222 	default:
223 		speed = 0;
224 		break;
225 	}
226 
227 	return speed;
228 }
229 
230 void
231 ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
232 			    uint8_t *msg, uint16_t msglen)
233 {
234 	struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg;
235 	struct ice_dcf_adapter *adapter =
236 		container_of(dcf_hw, struct ice_dcf_adapter, real_hw);
237 	struct ice_adapter *parent_adapter = &adapter->parent;
238 
239 	if (msglen < sizeof(struct virtchnl_pf_event)) {
240 		PMD_DRV_LOG(DEBUG, "Invalid event message length : %u", msglen);
241 		return;
242 	}
243 
244 	switch (pf_msg->event) {
245 	case VIRTCHNL_EVENT_RESET_IMPENDING:
246 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
247 		dcf_hw->resetting = true;
248 		break;
249 	case VIRTCHNL_EVENT_LINK_CHANGE:
250 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
251 		dcf_hw->link_up = pf_msg->event_data.link_event.link_status;
252 		if (dcf_hw->vf_res->vf_cap_flags &
253 			VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
254 			dcf_hw->link_speed =
255 				pf_msg->event_data.link_event_adv.link_speed;
256 		} else {
257 			enum virtchnl_link_speed speed;
258 			speed = pf_msg->event_data.link_event.link_speed;
259 			dcf_hw->link_speed = ice_dcf_convert_link_speed(speed);
260 		}
261 		ice_dcf_link_update(dcf_hw->eth_dev, 0);
262 		rte_eth_dev_callback_process(dcf_hw->eth_dev,
263 			RTE_ETH_EVENT_INTR_LSC, NULL);
264 		break;
265 	case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
266 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
267 		break;
268 	case VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE:
269 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
270 			    pf_msg->event_data.vf_vsi_map.vf_id,
271 			    pf_msg->event_data.vf_vsi_map.vsi_id);
272 		rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
273 				 rte_memory_order_relaxed);
274 		start_vsi_reset_thread(dcf_hw, true,
275 				       pf_msg->event_data.vf_vsi_map.vf_id);
276 		break;
277 	default:
278 		PMD_DRV_LOG(ERR, "Unknown event received %u", pf_msg->event);
279 		break;
280 	}
281 }
282 
283 static int
284 ice_dcf_query_port_ets(struct ice_hw *parent_hw, struct ice_dcf_hw *real_hw)
285 {
286 	int ret;
287 
288 	real_hw->ets_config = (struct ice_aqc_port_ets_elem *)
289 			ice_malloc(real_hw, sizeof(*real_hw->ets_config));
290 	if (!real_hw->ets_config)
291 		return ICE_ERR_NO_MEMORY;
292 
293 	ret = ice_aq_query_port_ets(parent_hw->port_info,
294 			real_hw->ets_config, sizeof(*real_hw->ets_config),
295 			NULL);
296 	if (ret) {
297 		PMD_DRV_LOG(ERR, "DCF Query Port ETS failed");
298 		rte_free(real_hw->ets_config);
299 		real_hw->ets_config = NULL;
300 		return ret;
301 	}
302 
303 	return ICE_SUCCESS;
304 }
305 
306 static int
307 ice_dcf_init_parent_hw(struct ice_hw *hw)
308 {
309 	struct ice_aqc_get_phy_caps_data *pcaps;
310 	int status;
311 
312 	status = ice_aq_get_fw_ver(hw, NULL);
313 	if (status)
314 		return status;
315 
316 	status = ice_get_caps(hw);
317 	if (status)
318 		return status;
319 
320 	hw->port_info = (struct ice_port_info *)
321 			ice_malloc(hw, sizeof(*hw->port_info));
322 	if (!hw->port_info)
323 		return ICE_ERR_NO_MEMORY;
324 
325 	/* set the back pointer to HW */
326 	hw->port_info->hw = hw;
327 
328 	/* Initialize port_info struct with switch configuration data */
329 	status = ice_get_initial_sw_cfg(hw);
330 	if (status)
331 		goto err_unroll_alloc;
332 
333 	pcaps = (struct ice_aqc_get_phy_caps_data *)
334 		ice_malloc(hw, sizeof(*pcaps));
335 	if (!pcaps) {
336 		status = ICE_ERR_NO_MEMORY;
337 		goto err_unroll_alloc;
338 	}
339 
340 	/* Initialize port_info struct with PHY capabilities */
341 	status = ice_aq_get_phy_caps(hw->port_info, false,
342 				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
343 	ice_free(hw, pcaps);
344 	if (status)
345 		goto err_unroll_alloc;
346 
347 	/* Initialize port_info struct with link information */
348 	status = ice_aq_get_link_info(hw->port_info, true, NULL, NULL);
349 	if (status) {
350 		enum ice_mac_type type = hw->mac_type;
351 
352 		/* DCF uses ICE_MAC_GENERIC which can be talking to either
353 		 * E810 or E830. Retry with E830 mac type to ensure correct
354 		 * data length is used for IAVF communication with PF.
355 		 */
356 		hw->mac_type = ICE_MAC_E830;
357 		status = ice_aq_get_link_info(hw->port_info, true, NULL, NULL);
358 		hw->mac_type = type;
359 		if (status)
360 			goto err_unroll_alloc;
361 	}
362 
363 	status = ice_init_fltr_mgmt_struct(hw);
364 	if (status)
365 		goto err_unroll_alloc;
366 
367 	status = ice_init_hw_tbls(hw);
368 	if (status)
369 		goto err_unroll_fltr_mgmt_struct;
370 
371 	PMD_INIT_LOG(INFO,
372 		     "firmware %d.%d.%d api %d.%d.%d build 0x%08x",
373 		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
374 		     hw->api_maj_ver, hw->api_min_ver, hw->api_patch,
375 		     hw->fw_build);
376 
377 	return ICE_SUCCESS;
378 
379 err_unroll_fltr_mgmt_struct:
380 	ice_cleanup_fltr_mgmt_struct(hw);
381 err_unroll_alloc:
382 	ice_free(hw, hw->port_info);
383 	hw->port_info = NULL;
384 	hw->switch_info = NULL;
385 
386 	return status;
387 }
388 
389 static void ice_dcf_uninit_parent_hw(struct ice_hw *hw)
390 {
391 	ice_cleanup_fltr_mgmt_struct(hw);
392 
393 	ice_free_seg(hw);
394 	ice_free_hw_tbls(hw);
395 
396 	ice_free(hw, hw->port_info);
397 	hw->port_info = NULL;
398 	hw->switch_info = NULL;
399 
400 	ice_clear_all_vsi_ctx(hw);
401 }
402 
403 static int
404 ice_dcf_load_pkg(struct ice_adapter *adapter)
405 {
406 	struct ice_dcf_adapter *dcf_adapter =
407 			container_of(&adapter->hw, struct ice_dcf_adapter, parent.hw);
408 	struct virtchnl_pkg_info pkg_info;
409 	struct dcf_virtchnl_cmd vc_cmd;
410 	bool use_dsn;
411 	uint64_t dsn = 0;
412 
413 	vc_cmd.v_op = VIRTCHNL_OP_DCF_GET_PKG_INFO;
414 	vc_cmd.req_msglen = 0;
415 	vc_cmd.req_msg = NULL;
416 	vc_cmd.rsp_buflen = sizeof(pkg_info);
417 	vc_cmd.rsp_msgbuf = (uint8_t *)&pkg_info;
418 
419 	use_dsn = ice_dcf_execute_virtchnl_cmd(&dcf_adapter->real_hw, &vc_cmd) == 0;
420 	if (use_dsn)
421 		rte_memcpy(&dsn, pkg_info.dsn, sizeof(dsn));
422 
423 	return ice_load_pkg(adapter, use_dsn, dsn);
424 }
425 
426 int
427 ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
428 {
429 	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
430 	struct ice_adapter *parent_adapter = &adapter->parent;
431 	struct ice_hw *parent_hw = &parent_adapter->hw;
432 	struct ice_dcf_hw *hw = &adapter->real_hw;
433 	const struct rte_ether_addr *mac;
434 	int err;
435 
436 	parent_adapter->pf.adapter = parent_adapter;
437 	parent_adapter->pf.dev_data = eth_dev->data;
438 	/* create a dummy main_vsi */
439 	parent_adapter->pf.main_vsi =
440 		rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
441 	if (!parent_adapter->pf.main_vsi)
442 		return -ENOMEM;
443 	parent_adapter->pf.main_vsi->adapter = parent_adapter;
444 	parent_adapter->pf.adapter_stopped = 1;
445 
446 	parent_hw->back = parent_adapter;
447 	parent_hw->mac_type = ICE_MAC_GENERIC;
448 	parent_hw->vendor_id = ICE_INTEL_VENDOR_ID;
449 
450 	ice_init_lock(&parent_hw->adminq.sq_lock);
451 	ice_init_lock(&parent_hw->adminq.rq_lock);
452 	parent_hw->aq_send_cmd_fn = ice_dcf_send_aq_cmd;
453 	parent_hw->aq_send_cmd_param = &adapter->real_hw;
454 	parent_hw->dcf_enabled = true;
455 
456 	err = ice_dcf_init_parent_hw(parent_hw);
457 	if (err) {
458 		PMD_INIT_LOG(ERR, "failed to init the DCF parent hardware with error %d",
459 			     err);
460 		return err;
461 	}
462 
463 	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
464 		err = ice_dcf_query_port_ets(parent_hw, hw);
465 		if (err) {
466 			PMD_INIT_LOG(ERR, "failed to query port ets with error %d",
467 				     err);
468 			goto uninit_hw;
469 		}
470 	}
471 
472 	err = ice_dcf_load_pkg(parent_adapter);
473 	if (err) {
474 		PMD_INIT_LOG(ERR, "failed to load package with error %d",
475 			     err);
476 		goto uninit_hw;
477 	}
478 
479 	parent_adapter->pf.main_vsi->idx = hw->num_vfs;
480 	ice_dcf_update_pf_vsi_map(parent_hw,
481 			parent_adapter->pf.main_vsi->idx, hw->pf_vsi_id);
482 
483 	ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
484 
485 	if (ice_devargs_check(eth_dev->device->devargs, ICE_DCF_DEVARG_ACL))
486 		parent_adapter->disabled_engine_mask |= BIT(ICE_FLOW_ENGINE_ACL);
487 
488 	parent_adapter->disabled_engine_mask |= BIT(ICE_FLOW_ENGINE_FDIR);
489 	parent_adapter->disabled_engine_mask |= BIT(ICE_FLOW_ENGINE_HASH);
490 
491 	err = ice_flow_init(parent_adapter);
492 	if (err) {
493 		PMD_INIT_LOG(ERR, "Failed to initialize flow");
494 		goto uninit_hw;
495 	}
496 
497 	mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
498 	if (rte_is_valid_assigned_ether_addr(mac))
499 		rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr);
500 	else
501 		rte_eth_random_addr(parent_adapter->pf.dev_addr.addr_bytes);
502 
503 	eth_dev->data->mac_addrs = &parent_adapter->pf.dev_addr;
504 
505 	return 0;
506 
507 uninit_hw:
508 	ice_dcf_uninit_parent_hw(parent_hw);
509 	return err;
510 }
511 
512 void
513 ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev)
514 {
515 	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
516 	struct ice_adapter *parent_adapter = &adapter->parent;
517 	struct ice_hw *parent_hw = &parent_adapter->hw;
518 
519 	eth_dev->data->mac_addrs = NULL;
520 	rte_free(parent_adapter->pf.main_vsi);
521 	parent_adapter->pf.main_vsi = NULL;
522 
523 	ice_flow_uninit(parent_adapter);
524 	ice_dcf_uninit_parent_hw(parent_hw);
525 }
526