xref: /dpdk/drivers/vdpa/nfp/nfp_vdpa_core.c (revision b6de43530dfa30cbf6b70857e3835099701063d4)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2023 Corigine, Inc.
 * All rights reserved.
 */

#include "nfp_vdpa_core.h"

#include <nfp_common.h>
#include <rte_vhost.h>

#include "nfp_vdpa_log.h"

/* Available and used descs are in same order */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER      35
#endif

#define NFP_QCP_NOTIFY_MAX_ADD    0x7f

enum nfp_qcp_notify_ptr {
	NFP_QCP_NOTIFY_WRITE_PTR = 0,
	NFP_QCP_NOTIFY_READ_PTR
};

/**
 * Add the value to the selected pointer of a queue.
 *
 * @param q
 *   Base address for the queue structure
 * @param ptr
 *   Add to the read or write pointer
 * @param val
 *   Value to add to the queue pointer
 */
static inline void
nfp_qcp_notify_ptr_add(uint8_t *q,
		enum nfp_qcp_notify_ptr ptr,
		uint32_t val)
{
	uint32_t off;

	if (ptr == NFP_QCP_NOTIFY_WRITE_PTR)
		off = NFP_QCP_QUEUE_ADD_WPTR;
	else
		off = NFP_QCP_QUEUE_ADD_RPTR;

	/*
	 * The QCP registers cannot take adds larger than
	 * NFP_QCP_NOTIFY_MAX_ADD, so split large values,
	 * e.g. val == 0x95 becomes 0x7f + 0x16.
	 */
	for (; val > NFP_QCP_NOTIFY_MAX_ADD; val -= NFP_QCP_NOTIFY_MAX_ADD)
		nn_writel(NFP_QCP_NOTIFY_MAX_ADD, q + off);

	/* The nn_writel() helper handles the CPU to little-endian conversion */
	nn_writel(val, q + off);
}

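/**
 * Initialize the vDPA hardware context.
 *
 * Maps the control BAR, computes the per-queue notify addresses,
 * locates the QCP config queue pointer via BAR2 and advertises the
 * supported virtio feature bits.
 *
 * @param vdpa_hw
 *   Pointer to the vDPA hardware structure
 * @param pci_dev
 *   PCI device backing this vDPA instance
 *
 * @return
 *   0 on success, negative errno value otherwise
 */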
int
nfp_vdpa_hw_init(struct nfp_vdpa_hw *vdpa_hw,
		struct rte_pci_device *pci_dev)
{
	uint32_t queue;
	uint8_t *tx_bar;
	uint32_t start_q;
	struct nfp_hw *hw;
	uint32_t tx_bar_off;
	uint8_t *notify_base;

	hw = &vdpa_hw->super;
	hw->ctrl_bar = pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		DRV_CORE_LOG(ERR, "The hw->ctrl_bar is NULL. BAR0 not configured.");
		return -ENODEV;
	}

	notify_base = hw->ctrl_bar + NFP_VDPA_NOTIFY_ADDR_BASE;
	for (queue = 0; queue < NFP_VDPA_MAX_QUEUES; queue++) {
		uint32_t idx = queue * 2;

		/* RX */
		vdpa_hw->notify_addr[idx] = notify_base;
		notify_base += NFP_VDPA_NOTIFY_ADDR_INTERVAL;
		/* TX */
		vdpa_hw->notify_addr[idx + 1] = notify_base;
		notify_base += NFP_VDPA_NOTIFY_ADDR_INTERVAL;

		vdpa_hw->notify_region = queue;
		DRV_CORE_LOG(DEBUG, "The notify_addr[%u] at %p, notify_addr[%u] at %p.",
				idx, vdpa_hw->notify_addr[idx],
				idx + 1, vdpa_hw->notify_addr[idx + 1]);
	}

	/* NFP vDPA cfg queue setup */
	start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
	tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
	tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
	hw->qcp_cfg = tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

	/* Default to the software live migration (SWLM) path */
	vdpa_hw->sw_lm = true;

	vdpa_hw->features = (1ULL << VIRTIO_F_VERSION_1) |
			(1ULL << VIRTIO_F_IN_ORDER) |
			(1ULL << VHOST_F_LOG_ALL) |
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);

	return 0;
}

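/**
 * Hand the initial ring indexes to the firmware.
 *
 * The TX (vring[1]) and RX (vring[0]) last used indexes are written
 * to the config BAR so the datapath starts from the current ring
 * positions.
 */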
static void
nfp_vdpa_hw_queue_init(struct nfp_vdpa_hw *vdpa_hw)
{
	/* Distribute ring information to firmware */
	nn_cfg_writel(&vdpa_hw->super, NFP_NET_CFG_TX_USED_INDEX,
			vdpa_hw->vring[1].last_used_idx);
	nn_cfg_writel(&vdpa_hw->super, NFP_NET_CFG_RX_USED_INDEX,
			vdpa_hw->vring[0].last_used_idx);
}

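/**
 * Return the control word bits for the offloads this driver enables:
 * the virtio datapath and in-order descriptor processing.
 */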
static uint32_t
nfp_vdpa_check_offloads(void)
{
	return NFP_NET_CFG_CTRL_VIRTIO |
			NFP_NET_CFG_CTRL_IN_ORDER;
}

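/**
 * Configure and enable the VF for vDPA.
 *
 * Sets the MTU and buffer size, programs a (temporary) fixed MAC
 * address, selects the live migration mode (relay or software) and
 * enables the device, signalling each change to the firmware.
 *
 * @param hw
 *   NFP hardware structure of the VF
 * @param vid
 *   Vhost device ID, also used to vary the MAC address
 * @param relay
 *   True to use the live migration relay datapath
 *
 * @return
 *   0 on success, -EIO if a firmware reconfig fails
 */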
static int
nfp_vdpa_vf_config(struct nfp_hw *hw,
		int vid,
		bool relay)
{
	int ret;
	uint32_t update;
	uint32_t new_ctrl;
	uint32_t new_ext_ctrl;
	struct timespec wait_tst;
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];

	/* Jumbo MTU with a free-list buffer size large enough to match */
	nn_cfg_writel(hw, NFP_NET_CFG_MTU, 9216);
	nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, 10240);

	/*
	 * TODO: Temporarily use the fixed MAC fe:1b:ac:05:a5:22, with the
	 * last octet offset by the vid to keep devices distinguishable.
	 */
	mac_addr[0] = 0xfe;
	mac_addr[1] = 0x1b;
	mac_addr[2] = 0xac;
	mac_addr[3] = 0x05;
	mac_addr[4] = 0xa5;
	mac_addr[5] = (0x22 + vid);

	/* Writing new MAC to the specific port BAR address */
	nfp_write_mac(hw, (uint8_t *)mac_addr);

	new_ext_ctrl = nfp_vdpa_check_offloads();
	if (relay)
		new_ext_ctrl |= NFP_NET_CFG_CTRL_LM_RELAY;
	else
		new_ext_ctrl |= NFP_NET_CFG_CTRL_SWLM;

	update = NFP_NET_CFG_UPDATE_GEN;
	ret = nfp_ext_reconfig(hw, new_ext_ctrl, update);
	if (ret != 0)
		return -EIO;

	hw->ctrl_ext = new_ext_ctrl;

	/* Enable device */
	new_ctrl = NFP_NET_CFG_CTRL_ENABLE;

	/* Signal the NIC about the change */
	update = NFP_NET_CFG_UPDATE_MACADDR |
			NFP_NET_CFG_UPDATE_GEN |
			NFP_NET_CFG_UPDATE_RING;

	if (relay) {
		update |= NFP_NET_CFG_UPDATE_MSIX;

		/* Enable MSI-X interrupt for vDPA relay */
		new_ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;

		nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 1);
	}

	ret = nfp_reconfig(hw, new_ctrl, update);
	if (ret < 0)
		return -EIO;

	hw->ctrl = new_ctrl;

	DRV_CORE_LOG(DEBUG, "Enabling the device, sleeping 1 second...");
	wait_tst.tv_sec = 1;
	wait_tst.tv_nsec = 0;
	nanosleep(&wait_tst, NULL);

	return 0;
}

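/**
 * Program the vring addresses and sizes into the config BAR.
 *
 * In relay mode only the RX used ring address is handed to the
 * firmware; the other rings are presumably mediated by the relay.
 * Ring sizes are written as log2 values.
 */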
static void
nfp_vdpa_queue_config(struct nfp_vdpa_hw *vdpa_hw,
		bool relay)
{
	struct nfp_hw *hw = &vdpa_hw->super;

	if (!relay) {
		/* TX vring: desc/avail/used go into ring address slots 0..2 */
		nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(0), vdpa_hw->vring[1].desc);
		nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(0),
				rte_log2_u32(vdpa_hw->vring[1].size));
		nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(1), vdpa_hw->vring[1].avail);
		nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(2), vdpa_hw->vring[1].used);

		/* RX vring */
		nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(0), vdpa_hw->vring[0].desc);
		nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(0),
				rte_log2_u32(vdpa_hw->vring[0].size));
		nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(1), vdpa_hw->vring[0].avail);
	}

	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(2), vdpa_hw->vring[0].used);

	if (!relay)
		nfp_vdpa_hw_queue_init(vdpa_hw);

	rte_wmb();
}

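/**
 * Start the vDPA datapath for the given vhost device.
 *
 * Programs the rings, restarts the queues and enables the VF with
 * the software live migration datapath.
 *
 * @return
 *   0 on success, negative errno value otherwise
 */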
int
nfp_vdpa_hw_start(struct nfp_vdpa_hw *vdpa_hw,
		int vid)
{
	struct nfp_hw *hw = &vdpa_hw->super;

	nfp_vdpa_queue_config(vdpa_hw, false);

	nfp_disable_queues(hw);
	nfp_enable_queues(hw, NFP_VDPA_MAX_QUEUES, NFP_VDPA_MAX_QUEUES);

	return nfp_vdpa_vf_config(hw, vid, false);
}

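/**
 * Start the vDPA datapath in live migration relay mode.
 *
 * Only the RX used ring is programmed here; the VF is enabled with
 * the relay datapath and MSI-X interrupts.
 */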
int
nfp_vdpa_relay_hw_start(struct nfp_vdpa_hw *vdpa_hw,
		int vid)
{
	struct nfp_hw *hw = &vdpa_hw->super;

	nfp_vdpa_queue_config(vdpa_hw, true);

	return nfp_vdpa_vf_config(hw, vid, true);
}

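/**
 * Stop the vDPA datapath by disabling all queues.
 */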
void
nfp_vdpa_hw_stop(struct nfp_vdpa_hw *vdpa_hw)
{
	nfp_disable_queues(&vdpa_hw->super);
}

/*
 * This offset is used for mmap()-ing the notify area, so it needs
 * to be a multiple of PAGE_SIZE.
 * For debugging, notify region 0 is used with an offset of 4K, which
 * should point into the config BAR.
 */
uint64_t
nfp_vdpa_get_queue_notify_offset(struct nfp_vdpa_hw *vdpa_hw __rte_unused,
		int qid)
{
	return NFP_VDPA_NOTIFY_ADDR_BASE + ((uint64_t)qid * NFP_VDPA_NOTIFY_ADDR_INTERVAL);
}

/*
 * With just one queue the increment is 0, which does not increment
 * the counter but still raises a queue event, since the queue is
 * configured for watermark events.
 */
void
nfp_vdpa_notify_queue(struct nfp_vdpa_hw *vdpa_hw,
		uint16_t qid)
{
	nfp_qcp_notify_ptr_add(vdpa_hw->notify_addr[qid],
			NFP_QCP_NOTIFY_WRITE_PTR, qid);
}

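/**
 * Re-arm the device interrupt by unmasking ICR entry 1.
 */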
void
nfp_vdpa_irq_unmask(struct nfp_vdpa_hw *vdpa_hw)
{
	struct nfp_hw *hw = &vdpa_hw->super;

	/* Make sure all updates are written before un-masking */
	rte_wmb();
	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(1), NFP_NET_CFG_ICR_UNMASKED);
}
283