/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation.
 */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#include <rte_io.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "ntb.h"
#include "ntb_hw_intel.h"

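/* The Xeon NTB exposes its memory windows through BAR2/3 and BAR4/5. */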
enum xeon_ntb_bar {
	XEON_NTB_BAR23 = 2,
	XEON_NTB_BAR45 = 4,
};

static enum xeon_ntb_bar intel_ntb_bar[] = {
	XEON_NTB_BAR23,
	XEON_NTB_BAR45,
};

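/*
 * Probe-time initialization: read the PPD (PCIe port definition) register,
 * accept only the B2B topology with no split BAR, record the USD/DSD role,
 * collect the memory window sizes from the BARs and reserve the last two
 * scratchpad registers for users.
 */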
static int
intel_ntb_dev_init(struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t reg_val, bar;
	int ret, i;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	ret = rte_pci_read_config(hw->pci_dev, &reg_val,
				  sizeof(reg_val), XEON_PPD_OFFSET);
	if (ret < 0) {
		NTB_LOG(ERR, "Cannot get NTB PPD (PCIe port definition).");
		return -EIO;
	}

	/* Check connection topology type. Only B2B is supported. */
	switch (reg_val & XEON_PPD_CONN_MASK) {
	case XEON_PPD_CONN_B2B:
		NTB_LOG(INFO, "Using B2B (back to back) topology.");
		break;
	case XEON_PPD_CONN_TRANSPARENT:
	case XEON_PPD_CONN_RP:
	default:
		NTB_LOG(ERR, "Unsupported connection topology. Please use B2B.");
		return -EINVAL;
	}

	/* Check device type. */
	if (reg_val & XEON_PPD_DEV_DSD) {
		NTB_LOG(INFO, "DSD, downstream device.");
		hw->topo = NTB_TOPO_B2B_DSD;
	} else {
		NTB_LOG(INFO, "USD, upstream device.");
		hw->topo = NTB_TOPO_B2B_USD;
	}

	/* Check if BAR4 is split. Split BAR is not supported. */
	if (reg_val & XEON_PPD_SPLIT_BAR_MASK) {
		NTB_LOG(ERR, "Split BAR is not supported.");
		return -EINVAL;
	}

	hw->hw_addr = (char *)hw->pci_dev->mem_resource[0].addr;

	hw->mw_cnt = XEON_MW_COUNT;
	hw->db_cnt = XEON_DB_COUNT;
	hw->spad_cnt = XEON_SPAD_COUNT;

	hw->mw_size = rte_zmalloc("uint64_t",
				  hw->mw_cnt * sizeof(uint64_t), 0);
	if (hw->mw_size == NULL) {
		NTB_LOG(ERR, "Cannot allocate memory for mw size.");
		return -ENOMEM;
	}
	for (i = 0; i < hw->mw_cnt; i++) {
		bar = intel_ntb_bar[i];
		hw->mw_size[i] = hw->pci_dev->mem_resource[bar].len;
	}

	/* Reserve the last 2 spad registers for users. */
	for (i = 0; i < NTB_SPAD_USER_MAX_NUM; i++)
		hw->spad_user_list[i] = hw->spad_cnt;
	hw->spad_user_list[0] = hw->spad_cnt - 2;
	hw->spad_user_list[1] = hw->spad_cnt - 1;

	return 0;
}

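/* Return the local virtual address of the BAR backing memory window mw_idx. */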
static void *
intel_ntb_get_peer_mw_addr(struct rte_rawdev *dev, int mw_idx)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t bar;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return NULL;
	}

	if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
		NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
			hw->mw_cnt - 1);
		return NULL;
	}

	bar = intel_ntb_bar[mw_idx];

	return hw->pci_dev->mem_resource[bar].addr;
}

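/*
 * Program the address translation for memory window mw_idx: peer accesses
 * to the window are translated to local address 'addr', limited to 'size'
 * bytes.
 */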
static int
intel_ntb_mw_set_trans(struct rte_rawdev *dev, int mw_idx,
		       uint64_t addr, uint64_t size)
{
	struct ntb_hw *hw = dev->dev_private;
	void *xlat_addr, *limit_addr;
	uint64_t xlat_off, limit_off;
	uint64_t base, limit;
	uint8_t bar;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
		NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
			hw->mw_cnt - 1);
		return -EINVAL;
	}

	bar = intel_ntb_bar[mw_idx];

	xlat_off = XEON_IMBAR1XBASE_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	limit_off = XEON_IMBAR1XLMT_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	xlat_addr = hw->hw_addr + xlat_off;
	limit_addr = hw->hw_addr + limit_off;

	/* Limit reg val should be EMBAR base address plus MW size. */
	base = addr;
	limit = hw->pci_dev->mem_resource[bar].phys_addr + size;
	rte_write64(base, xlat_addr);
	rte_write64(limit, limit_addr);

	/* Set up the external (EMBAR) side so that the remote can access. */
	xlat_off = XEON_EMBAR1_OFFSET + 8 * mw_idx;
	xlat_addr = hw->hw_addr + xlat_off;
	limit_off = XEON_EMBAR1XLMT_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	limit_addr = hw->hw_addr + limit_off;
	base = rte_read64(xlat_addr);
	base &= ~0xf;	/* Clear the low flag bits to get the BAR base. */
	limit = base + size;
	rte_write64(limit, limit_addr);

	return 0;
}

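/* Read the PCIe link status register and cache link state, speed and width. */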
static int
intel_ntb_get_link_status(struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint16_t reg_val;
	int ret;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	ret = rte_pci_read_config(hw->pci_dev, &reg_val,
				  sizeof(reg_val), XEON_LINK_STATUS_OFFSET);
	if (ret < 0) {
		NTB_LOG(ERR, "Unable to get link status.");
		return -EIO;
	}

	hw->link_status = NTB_LNK_STA_ACTIVE(reg_val);

	if (hw->link_status) {
		hw->link_speed = NTB_LNK_STA_SPEED(reg_val);
		hw->link_width = NTB_LNK_STA_WIDTH(reg_val);
	} else {
		hw->link_speed = NTB_SPEED_NONE;
		hw->link_width = NTB_WIDTH_NONE;
	}

	return 0;
}

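/*
 * Bring the NTB link up or down by toggling the disable/config-lock and
 * BAR snoop bits in the NTB control register.
 */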
static int
intel_ntb_set_link(struct rte_rawdev *dev, bool up)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t ntb_ctrl, reg_off;
	void *reg_addr;

	reg_off = XEON_NTBCNTL_OFFSET;
	reg_addr = hw->hw_addr + reg_off;
	ntb_ctrl = rte_read32(reg_addr);

	if (up) {
		ntb_ctrl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
		ntb_ctrl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
		ntb_ctrl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	} else {
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
		ntb_ctrl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	}

	rte_write32(ntb_ctrl, reg_addr);

	return 0;
}

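/* Read a 32-bit scratchpad register on the local or peer (B2B) side. */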
static uint32_t
intel_ntb_spad_read(struct rte_rawdev *dev, int spad, bool peer)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t spad_v, reg_off;
	void *reg_addr;

	if (spad < 0 || spad >= hw->spad_cnt) {
		NTB_LOG(ERR, "Invalid spad reg index.");
		return 0;
	}

	/* When peer is true, read the peer spad reg. */
	reg_off = peer ? XEON_B2B_SPAD_OFFSET : XEON_IM_SPAD_OFFSET;
	reg_addr = hw->hw_addr + reg_off + (spad << 2);
	spad_v = rte_read32(reg_addr);

	return spad_v;
}

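/* Write a 32-bit scratchpad register on the local or peer (B2B) side. */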
static int
intel_ntb_spad_write(struct rte_rawdev *dev, int spad,
		     bool peer, uint32_t spad_v)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t reg_off;
	void *reg_addr;

	if (spad < 0 || spad >= hw->spad_cnt) {
		NTB_LOG(ERR, "Invalid spad reg index.");
		return -EINVAL;
	}

	/* When peer is true, write the peer spad reg. */
	reg_off = peer ? XEON_B2B_SPAD_OFFSET : XEON_IM_SPAD_OFFSET;
	reg_addr = hw->hw_addr + reg_off + (spad << 2);

	rte_write32(spad_v, reg_addr);

	return 0;
}

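/* Return the pending doorbell bits from the interrupt status register. */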
static uint64_t
intel_ntb_db_read(struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_off, db_bits;
	void *db_addr;

	db_off = XEON_IM_INT_STATUS_OFFSET;
	db_addr = hw->hw_addr + db_off;

	db_bits = rte_read64(db_addr);

	return db_bits;
}

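/* Clear doorbell bits; the interrupt status register is write-1-to-clear. */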
static int
intel_ntb_db_clear(struct rte_rawdev *dev, uint64_t db_bits)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_off;
	void *db_addr;

	db_off = XEON_IM_INT_STATUS_OFFSET;
	db_addr = hw->hw_addr + db_off;

	rte_write64(db_bits, db_addr);

	return 0;
}

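/*
 * Mask (disable) doorbell interrupts. New bits are OR-ed into the stored
 * mask, so this only ever adds to the set of masked doorbells.
 */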
static int
intel_ntb_db_set_mask(struct rte_rawdev *dev, uint64_t db_mask)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_m_off;
	void *db_m_addr;

	db_m_off = XEON_IM_INT_DISABLE_OFFSET;
	db_m_addr = hw->hw_addr + db_m_off;

	db_mask |= hw->db_mask;

	rte_write64(db_mask, db_m_addr);

	hw->db_mask = db_mask;

	return 0;
}

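/* Ring the peer's doorbell db_idx by writing its doorbell register. */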
static int
intel_ntb_peer_db_set(struct rte_rawdev *dev, uint8_t db_idx)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t db_off;
	void *db_addr;

	if (((uint64_t)1 << db_idx) & ~hw->db_valid_mask) {
		NTB_LOG(ERR, "Invalid doorbell.");
		return -EINVAL;
	}

	db_off = XEON_IM_DOORBELL_OFFSET + db_idx * 4;
	db_addr = hw->hw_addr + db_off;

	rte_write32(1, db_addr);

	return 0;
}

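/* Bind doorbell interrupt source 'intr' to MSI-X vector 'msix'. */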
static int
intel_ntb_vector_bind(struct rte_rawdev *dev, uint8_t intr, uint8_t msix)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t reg_off;
	void *reg_addr;

	if (intr >= hw->db_cnt) {
		NTB_LOG(ERR, "Invalid intr source.");
		return -EINVAL;
	}

	/* Bind the intr source to the msix vector; one byte per source. */
	reg_off = XEON_INTVEC_OFFSET;
	reg_addr = hw->hw_addr + reg_off + intr;

	rte_write8(msix, reg_addr);

	return 0;
}

/* Operations for the primary side of the local NTB. */
const struct ntb_dev_ops intel_ntb_ops = {
	.ntb_dev_init       = intel_ntb_dev_init,
	.get_peer_mw_addr   = intel_ntb_get_peer_mw_addr,
	.mw_set_trans       = intel_ntb_mw_set_trans,
	.get_link_status    = intel_ntb_get_link_status,
	.set_link           = intel_ntb_set_link,
	.spad_read          = intel_ntb_spad_read,
	.spad_write         = intel_ntb_spad_write,
	.db_read            = intel_ntb_db_read,
	.db_clear           = intel_ntb_db_clear,
	.db_set_mask        = intel_ntb_db_set_mask,
	.peer_db_set        = intel_ntb_peer_db_set,
	.vector_bind        = intel_ntb_vector_bind,
};