xref: /dpdk/drivers/net/hns3/hns3_mbx.c (revision e12a0166c80f65e35408f4715b2f3a60763c3741)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4 
5 #include <ethdev_driver.h>
6 #include <rte_io.h>
7 
8 #include "hns3_common.h"
9 #include "hns3_regs.h"
10 #include "hns3_logs.h"
11 #include "hns3_intr.h"
12 #include "hns3_rxtx.h"
13 
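/*
 * Maps response codes returned by the PF (standard Linux errno numbering)
 * to the negative errno values used by this driver. Any code not listed
 * here is translated to -EIO by hns3_resp_to_errno().
 */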
14 static const struct errno_respcode_map err_code_map[] = {
15 	{0, 0},
16 	{1, -EPERM},
17 	{2, -ENOENT},
18 	{5, -EIO},
19 	{11, -EAGAIN},
20 	{12, -ENOMEM},
21 	{16, -EBUSY},
22 	{22, -EINVAL},
23 	{28, -ENOSPC},
24 	{95, -EOPNOTSUPP},
25 };
26 
27 void
28 hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req, uint8_t code, uint8_t subcode)
29 {
30 	memset(req, 0, sizeof(struct hns3_vf_to_pf_msg));
31 	req->code = code;
32 	req->subcode = subcode;
33 }
34 
35 static int
36 hns3_resp_to_errno(uint16_t resp_code)
37 {
38 	uint32_t i, num;
39 
40 	num = sizeof(err_code_map) / sizeof(struct errno_respcode_map);
41 	for (i = 0; i < num; i++) {
42 		if (err_code_map[i].resp_code == resp_code)
43 			return err_code_map[i].err_no;
44 	}
45 
46 	return -EIO;
47 }
48 
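/*
 * Poll for the PF's response to a previously sent synchronous mailbox
 * request. The wait is bounded by hns->mbx_time_limit_ms and is aborted
 * early when the command interface is disabled or a reset is pending.
 */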
49 static int
50 hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
51 		  uint8_t *resp_data, uint16_t resp_len)
52 {
53 #define HNS3_WAIT_RESP_US	100
54 #define US_PER_MS		1000
55 	uint32_t mbx_time_limit;
56 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
57 	struct hns3_mbx_resp_status *mbx_resp;
58 	uint32_t wait_time = 0;
59 
60 	if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
61 		hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)",
62 			 resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
63 		return -EINVAL;
64 	}
65 
66 	mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
67 	while (wait_time < mbx_time_limit) {
68 		if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
69 			hns3_err(hw, "Don't wait for mbx response because of "
70 				 "disable_cmd");
71 			return -EBUSY;
72 		}
73 
74 		if (is_reset_pending(hns)) {
75 			hw->mbx_resp.req_msg_data = 0;
76 			hns3_err(hw, "Don't wait for mbx response because of "
77 				 "reset pending");
78 			return -EIO;
79 		}
80 
81 		hns3vf_handle_mbx_msg(hw);
82 		rte_delay_us(HNS3_WAIT_RESP_US);
83 
84 		if (hw->mbx_resp.received_match_resp)
85 			break;
86 
87 		wait_time += HNS3_WAIT_RESP_US;
88 	}
89 	hw->mbx_resp.req_msg_data = 0;
90 	if (wait_time >= mbx_time_limit) {
91 		hns3_err(hw, "VF could not get mbx(%u,%u) from PF", code, subcode);
92 		return -ETIME;
93 	}
94 	rte_io_rmb();
95 	mbx_resp = &hw->mbx_resp;
96 
97 	if (mbx_resp->resp_status)
98 		return mbx_resp->resp_status;
99 
100 	if (resp_data)
101 		memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);
102 
103 	return 0;
104 }
105 
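/*
 * Initialize the response-matching state for a synchronous request.
 * Called with hw->mbx_resp.lock held, before the request is posted.
 */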
106 static void
107 hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
108 {
109 	/*
110 	 * Init both matching scheme fields because we may not know which
111 	 * scheme will be used during the initial phase.
112 	 *
113 	 * It is also fine to init both matching scheme fields even after
114 	 * the exact scheme in use is known.
115 	 */
116 	hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
117 
118 	/* Update match_id and ensure the value of match_id is not zero */
119 	hw->mbx_resp.match_id++;
120 	if (hw->mbx_resp.match_id == 0)
121 		hw->mbx_resp.match_id = 1;
122 	hw->mbx_resp.received_match_resp = false;
123 
124 	hw->mbx_resp.resp_status = 0;
125 	memset(hw->mbx_resp.additional_info, 0, HNS3_MBX_MAX_RESP_DATA_SIZE);
126 }
127 
128 int
129 hns3vf_mbx_send(struct hns3_hw *hw,
130 		struct hns3_vf_to_pf_msg *req, bool need_resp,
131 		uint8_t *resp_data, uint16_t resp_len)
132 {
133 	struct hns3_mbx_vf_to_pf_cmd *cmd;
134 	struct hns3_cmd_desc desc;
135 	int ret;
136 
137 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
138 	cmd = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
139 	cmd->msg = *req;
140 
141 	/* synchronous send */
142 	if (need_resp) {
143 		cmd->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
144 		rte_spinlock_lock(&hw->mbx_resp.lock);
145 		hns3_mbx_prepare_resp(hw, req->code, req->subcode);
146 		cmd->match_id = hw->mbx_resp.match_id;
147 		ret = hns3_cmd_send(hw, &desc, 1);
148 		if (ret) {
149 			rte_spinlock_unlock(&hw->mbx_resp.lock);
150 			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
151 				 ret);
152 			return ret;
153 		}
154 
155 		ret = hns3_get_mbx_resp(hw, req->code, req->subcode,
156 					resp_data, resp_len);
157 		rte_spinlock_unlock(&hw->mbx_resp.lock);
158 	} else {
159 		/* asynchronous send */
160 		ret = hns3_cmd_send(hw, &desc, 1);
161 		if (ret) {
162 			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
163 				 ret);
164 			return ret;
165 		}
166 	}
167 
168 	return ret;
169 }
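
/*
 * Usage sketch (illustrative, not part of the upstream file): a caller
 * typically builds a request with hns3vf_mbx_setup() and sends it with
 * hns3vf_mbx_send(). "code" and "subcode" below are placeholders for one
 * of the HNS3_MBX_* request constants.
 *
 *	struct hns3_vf_to_pf_msg req;
 *	uint8_t resp[HNS3_MBX_MAX_RESP_DATA_SIZE];
 *	int ret;
 *
 *	hns3vf_mbx_setup(&req, code, subcode);
 *	ret = hns3vf_mbx_send(hw, &req, true, resp, sizeof(resp));
 *	if (ret)
 *		hns3_err(hw, "mbx request failed: %d", ret);
 */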
170 
171 static bool
172 hns3_cmd_crq_empty(struct hns3_hw *hw)
173 {
174 	uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);
175 
176 	return tail == hw->cmq.crq.next_to_use;
177 }
178 
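/*
 * Handle a link status change pushed by the PF: update the VF link state
 * and the "PF supports pushing link status change" capability flag.
 */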
179 static void
180 hns3vf_handle_link_change_event(struct hns3_hw *hw,
181 				struct hns3_mbx_pf_to_vf_cmd *req)
182 {
183 	struct hns3_mbx_link_status *link_info =
184 		(struct hns3_mbx_link_status *)req->msg.msg_data;
185 	uint8_t link_status, link_duplex;
186 	uint8_t support_push_lsc;
187 	uint32_t link_speed;
188 
189 	link_status = (uint8_t)rte_le_to_cpu_16(link_info->link_status);
190 	link_speed = rte_le_to_cpu_32(link_info->speed);
191 	link_duplex = (uint8_t)rte_le_to_cpu_16(link_info->duplex);
192 	hns3vf_update_link_status(hw, link_status, link_speed, link_duplex);
193 	support_push_lsc = (link_info->flag) & 1u;
194 	hns3vf_update_push_lsc_cap(hw, support_push_lsc);
195 }
196 
197 static void
198 hns3_handle_asserting_reset(struct hns3_hw *hw,
199 			    struct hns3_mbx_pf_to_vf_cmd *req)
200 {
201 	enum hns3_reset_level reset_level;
202 
203 	/*
204 	 * PF has asserted reset, hence the VF should go into the pending
205 	 * state and poll the hardware reset status until the reset has
206 	 * completed. After this, the stack should eventually be
207 	 * re-initialized.
208 	 */
209 	reset_level = rte_le_to_cpu_16(req->msg.reset_level);
210 	hns3_atomic_set_bit(reset_level, &hw->reset.pending);
211 
212 	hns3_warn(hw, "PF inform reset level %d", reset_level);
213 	hw->reset.stats.request_cnt++;
214 	hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
215 }
216 
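/*
 * Match a PF response against the pending request, either by the echoed
 * match_id (PFs that support it) or by the (code, subcode) tag carried in
 * req_msg_data (the original scheme), then record the response status and
 * data for the waiter in hns3_get_mbx_resp().
 */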
217 static void
218 hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req)
219 {
220 #define HNS3_MBX_RESP_CODE_OFFSET 16
221 	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
222 	uint32_t msg_data;
223 
224 	if (req->match_id != 0) {
225 		/*
226 		 * If match_id is not zero, the PF supports copying the request's
227 		 * match_id into its response, so the VF can use the match_id to
228 		 * match the response to its request.
229 		 */
230 		if (req->match_id == resp->match_id) {
231 			resp->resp_status =
232 				hns3_resp_to_errno(req->msg.resp_status);
233 			memcpy(resp->additional_info, &req->msg.resp_data,
234 			       HNS3_MBX_MAX_RESP_DATA_SIZE);
235 			rte_io_wmb();
236 			resp->received_match_resp = true;
237 		}
238 		return;
239 	}
240 
241 	/*
242 	 * Reaching this point means the PF does not support copying the
243 	 * request's match_id into its response, so the VF falls back to the
244 	 * original matching scheme.
245 	 */
246 	msg_data = (uint32_t)req->msg.vf_mbx_msg_code <<
247 			HNS3_MBX_RESP_CODE_OFFSET | req->msg.vf_mbx_msg_subcode;
248 	if (resp->req_msg_data != msg_data) {
249 		hns3_warn(hw,
250 			"received response tag (%u) is mismatched with requested tag (%u)",
251 			msg_data, resp->req_msg_data);
252 		return;
253 	}
254 
255 	resp->resp_status = hns3_resp_to_errno(req->msg.resp_status);
256 	memcpy(resp->additional_info, &req->msg.resp_data,
257 	       HNS3_MBX_MAX_RESP_DATA_SIZE);
258 	rte_io_wmb();
259 	resp->received_match_resp = true;
260 }
261 
262 static void
263 hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
264 {
265 	switch (link_fail_code) {
266 	case HNS3_MBX_LF_NORMAL:
267 		break;
268 	case HNS3_MBX_LF_REF_CLOCK_LOST:
269 		hns3_warn(hw, "Reference clock lost!");
270 		break;
271 	case HNS3_MBX_LF_XSFP_TX_DISABLE:
272 		hns3_warn(hw, "SFP tx is disabled!");
273 		break;
274 	case HNS3_MBX_LF_XSFP_ABSENT:
275 		hns3_warn(hw, "SFP is absent!");
276 		break;
277 	default:
278 		hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
279 		break;
280 	}
281 }
282 
283 static void
284 hns3pf_handle_link_change_event(struct hns3_hw *hw,
285 				struct hns3_mbx_vf_to_pf_cmd *req)
286 {
287 	if (!req->msg.link_status)
288 		hns3_link_fail_parse(hw, req->msg.link_fail_code);
289 
290 	hns3_update_linkstatus_and_event(hw, true);
291 }
292 
293 static void
294 hns3_update_port_base_vlan_info(struct hns3_hw *hw,
295 				struct hns3_mbx_pf_to_vf_cmd *req)
296 {
297 	uint16_t new_pvid_state = req->msg.pvid_state ?
298 		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
299 	/*
300 	 * Currently, the hns3 network engine hardware doesn't support more
301 	 * than two layers of VLAN offload, which would cause packet loss or
302 	 * incorrectly tagged packets for such traffic. If the hns3 PF kernel
303 	 * ethdev driver sets the PVID for a VF device after that VF device has
304 	 * been initialized, the PF driver notifies the VF driver to update the
305 	 * PVID configuration state. The VF driver updates this state
306 	 * immediately to keep the VLAN handling in Tx and Rx correct. But in
307 	 * the window period of this state transition, packet loss or packets
308 	 * with the wrong VLAN may occur.
309 	 */
310 	if (hw->port_base_vlan_cfg.state != new_pvid_state) {
311 		hw->port_base_vlan_cfg.state = new_pvid_state;
312 		hns3_update_all_queues_pvid_proc_en(hw);
313 	}
314 }
315 
316 static void
317 hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
318 {
319 	if (!promisc_en) {
320 		/*
321 		 * When promisc/allmulti mode is closed by the hns3 PF kernel
322 		 * ethdev driver for an untrusted VF, update the VF's related status.
323 		 */
324 		hns3_warn(hw, "Promisc mode will be closed by host for being "
325 			      "untrusted.");
326 		hw->data->promiscuous = 0;
327 		hw->data->all_multicast = 0;
328 	}
329 }
330 
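/*
 * Scan the CRQ from a non-intr thread: only HNS3_MBX_PF_VF_RESP messages
 * are consumed (their opcode is cleared), and crq->next_to_use is left
 * untouched so the intr thread can still process the remaining messages.
 */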
331 static void
332 hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw)
333 {
334 	struct hns3_cmq_ring *crq = &hw->cmq.crq;
335 	struct hns3_mbx_pf_to_vf_cmd *req;
336 	struct hns3_cmd_desc *desc;
337 	uint32_t tail, next_to_use;
338 	uint8_t opcode;
339 	uint16_t flag;
340 
341 	tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);
342 	next_to_use = crq->next_to_use;
343 	while (next_to_use != tail) {
344 		desc = &crq->desc[next_to_use];
345 		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
346 		opcode = req->msg.code & 0xff;
347 
348 		flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag);
349 		if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))
350 			goto scan_next;
351 
352 		if (crq->desc[next_to_use].opcode == 0)
353 			goto scan_next;
354 
355 		if (opcode == HNS3_MBX_PF_VF_RESP) {
356 			hns3_handle_mbx_response(hw, req);
357 			/*
358 			 * Clear the opcode so that the intr thread will not
359 			 * process this message again.
360 			 */
361 			crq->desc[next_to_use].opcode = 0;
362 		}
363 
364 scan_next:
365 		next_to_use = (next_to_use + 1) % hw->cmq.crq.desc_num;
366 	}
367 
368 	/*
369 	 * Note: the crq->next_to_use field should not be updated here; otherwise,
370 	 * mailbox messages may be discarded.
371 	 */
372 }
373 
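/*
 * PF-side mailbox handler: drain VF-to-PF messages from the CRQ. Only the
 * link status push message is expected here; anything else is logged as
 * unsupported.
 */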
374 void
375 hns3pf_handle_mbx_msg(struct hns3_hw *hw)
376 {
377 	struct hns3_cmq_ring *crq = &hw->cmq.crq;
378 	struct hns3_mbx_vf_to_pf_cmd *req;
379 	struct hns3_cmd_desc *desc;
380 	uint16_t flag;
381 
382 	rte_spinlock_lock(&hw->cmq.crq.lock);
383 
384 	while (!hns3_cmd_crq_empty(hw)) {
385 		if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
386 			rte_spinlock_unlock(&hw->cmq.crq.lock);
387 			return;
388 		}
389 		desc = &crq->desc[crq->next_to_use];
390 		req = (struct hns3_mbx_vf_to_pf_cmd *)desc->data;
391 
392 		flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
393 		if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
394 			hns3_warn(hw,
395 				  "dropped invalid mailbox message, code = %u",
396 				  req->msg.code);
397 
398 			/* dropping/not processing this invalid message */
399 			crq->desc[crq->next_to_use].flag = 0;
400 			hns3_mbx_ring_ptr_move_crq(crq);
401 			continue;
402 		}
403 
404 		switch (req->msg.code) {
405 		case HNS3_MBX_PUSH_LINK_STATUS:
406 			hns3pf_handle_link_change_event(hw, req);
407 			break;
408 		default:
409 			hns3_err(hw, "received unsupported(%u) mbx msg",
410 				 req->msg.code);
411 			break;
412 		}
413 		crq->desc[crq->next_to_use].flag = 0;
414 		hns3_mbx_ring_ptr_move_crq(crq);
415 	}
416 
417 	/* Write back CMDQ_RQ header pointer, IMP needs this pointer */
418 	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);
419 
420 	rte_spinlock_unlock(&hw->cmq.crq.lock);
421 }
422 
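/*
 * VF-side mailbox handler. In the intr thread of the primary process all
 * pending PF-to-VF messages are drained and dispatched; any other thread
 * only picks up its own HNS3_MBX_PF_VF_RESP via
 * hns3_handle_mbx_msg_out_intr().
 */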
423 void
424 hns3vf_handle_mbx_msg(struct hns3_hw *hw)
425 {
426 	struct hns3_cmq_ring *crq = &hw->cmq.crq;
427 	struct hns3_mbx_pf_to_vf_cmd *req;
428 	struct hns3_cmd_desc *desc;
429 	bool handle_out;
430 	uint8_t opcode;
431 	uint16_t flag;
432 
433 	rte_spinlock_lock(&hw->cmq.crq.lock);
434 
435 	handle_out = (rte_eal_process_type() != RTE_PROC_PRIMARY ||
436 		      !rte_thread_is_intr());
437 	if (handle_out) {
438 		/*
439 		 * Currently, any thread in the primary or secondary process may
440 		 * send a mailbox sync request, so it needs to process the CRQ
441 		 * response message (HNS3_MBX_PF_VF_RESP) in its own thread
442 		 * context. It may also process other messages because the
443 		 * policy is to handle all pending messages at once.
444 		 * However, some messages, such as HNS3_MBX_PUSH_LINK_STATUS,
445 		 * may only be processed in the intr thread of the primary
446 		 * process, otherwise an lsc event could be reported from a
447 		 * secondary process.
448 		 * Therefore, threads other than the intr thread of the primary
449 		 * process only process the HNS3_MBX_PF_VF_RESP message. Once a
450 		 * message is processed, its opcode is rewritten to zero so that
451 		 * the intr thread in the primary process will not process it
452 		 * again.
453 		 */
454 		hns3_handle_mbx_msg_out_intr(hw);
455 		rte_spinlock_unlock(&hw->cmq.crq.lock);
456 		return;
457 	}
458 
459 	while (!hns3_cmd_crq_empty(hw)) {
460 		if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
461 			rte_spinlock_unlock(&hw->cmq.crq.lock);
462 			return;
463 		}
464 
465 		desc = &crq->desc[crq->next_to_use];
466 		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
467 		opcode = req->msg.code & 0xff;
468 
469 		flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
470 		if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
471 			hns3_warn(hw,
472 				  "dropped invalid mailbox message, code = %u",
473 				  opcode);
474 
475 			/* dropping/not processing this invalid message */
476 			crq->desc[crq->next_to_use].flag = 0;
477 			hns3_mbx_ring_ptr_move_crq(crq);
478 			continue;
479 		}
480 
481 		if (desc->opcode == 0) {
482 			/* Message already processed by other thread */
483 			crq->desc[crq->next_to_use].flag = 0;
484 			hns3_mbx_ring_ptr_move_crq(crq);
485 			continue;
486 		}
487 
488 		switch (opcode) {
489 		case HNS3_MBX_PF_VF_RESP:
490 			hns3_handle_mbx_response(hw, req);
491 			break;
492 		case HNS3_MBX_LINK_STAT_CHANGE:
493 			hns3vf_handle_link_change_event(hw, req);
494 			break;
495 		case HNS3_MBX_ASSERTING_RESET:
496 			hns3_handle_asserting_reset(hw, req);
497 			break;
498 		case HNS3_MBX_PUSH_VLAN_INFO:
499 			/*
500 			 * When the PVID configuration status of VF device is
501 			 * When the PVID configuration status of a VF device is
502 			 * changed by the hns3 PF kernel driver, the VF driver
503 			 * will receive this mailbox message from the PF driver.
504 			hns3_update_port_base_vlan_info(hw, req);
505 			break;
506 		case HNS3_MBX_PUSH_PROMISC_INFO:
507 			/*
508 			 * When the trust status of a VF device is changed by the
509 			 * hns3 PF kernel driver, the VF driver will receive this
510 			 * mailbox message from the PF driver.
511 			 */
512 			hns3_handle_promisc_info(hw, req->msg.promisc_en);
513 			break;
514 		default:
515 			hns3_err(hw, "received unsupported(%u) mbx msg",
516 				 opcode);
517 			break;
518 		}
519 
520 		crq->desc[crq->next_to_use].flag = 0;
521 		hns3_mbx_ring_ptr_move_crq(crq);
522 	}
523 
524 	/* Write back CMDQ_RQ header pointer, IMP needs this pointer */
525 	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);
526 
527 	rte_spinlock_unlock(&hw->cmq.crq.lock);
528 }
529