/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation.
 */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#include <rte_io.h>
#include <rte_eal.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "ntb.h"
#include "ntb_hw_intel.h"

enum xeon_ntb_bar {
	XEON_NTB_BAR23 = 2,
	XEON_NTB_BAR45 = 4,
};

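/* PCI BAR backing each memory window: MW0 uses BAR2/3, MW1 uses BAR4/5. */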
static enum xeon_ntb_bar intel_ntb_bar[] = {
	XEON_NTB_BAR23,
	XEON_NTB_BAR45,
};

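/* Distinguish NTB generations by PCI device ID: SKX (Skylake) is Gen3,
 * ICX (Icelake) is Gen4.
 */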
static inline int
is_gen3_ntb(const struct ntb_hw *hw)
{
	if (hw->pci_dev->id.device_id == NTB_INTEL_DEV_ID_B2B_SKX)
		return 1;

	return 0;
}

static inline int
is_gen4_ntb(const struct ntb_hw *hw)
{
	if (hw->pci_dev->id.device_id == NTB_INTEL_DEV_ID_B2B_ICX)
		return 1;

	return 0;
}

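/*
 * Gen3: the PPD (PCIe port definition) register lives in PCI config space.
 * Require a back-to-back (B2B) topology, record the USD/DSD role, and
 * reject split-BAR configurations.
 */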
static int
intel_ntb3_check_ppd(struct ntb_hw *hw)
{
	uint8_t reg_val;
	int ret;

	ret = rte_pci_read_config(hw->pci_dev, &reg_val,
				  sizeof(reg_val), XEON_PPD_OFFSET);
	if (ret < 0) {
		NTB_LOG(ERR, "Cannot get NTB PPD (PCIe port definition).");
		return -EIO;
	}

	/* Check connection topo type. Only support B2B. */
	switch (reg_val & XEON_PPD_CONN_MASK) {
	case XEON_PPD_CONN_B2B:
		NTB_LOG(INFO, "Topo B2B (back to back) is in use.");
		break;
	case XEON_PPD_CONN_TRANSPARENT:
	case XEON_PPD_CONN_RP:
	default:
		NTB_LOG(ERR, "Unsupported connection topology. Only B2B is supported.");
		return -EINVAL;
	}

	/* Check device type. */
	if (reg_val & XEON_PPD_DEV_DSD) {
		NTB_LOG(INFO, "DSD, downstream device.");
		hw->topo = NTB_TOPO_B2B_DSD;
	} else {
		NTB_LOG(INFO, "USD, upstream device.");
		hw->topo = NTB_TOPO_B2B_USD;
	}

	/* Check if bar4 is split. Split BAR is not supported. */
	if (reg_val & XEON_PPD_SPLIT_BAR_MASK) {
		NTB_LOG(ERR, "Split BAR is not supported.");
		return -EINVAL;
	}

	return 0;
}

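/*
 * Gen4: the PPD register moved into MMIO (PPD1). Require a B2B topology
 * and record the USD/DSD role, as on Gen3.
 */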
static int
intel_ntb4_check_ppd(struct ntb_hw *hw)
{
	uint32_t reg_val;

	reg_val = rte_read32(hw->hw_addr + XEON_GEN4_PPD1_OFFSET);

	/* Check connection topo type. Only support B2B. */
	switch (reg_val & XEON_GEN4_PPD_CONN_MASK) {
	case XEON_GEN4_PPD_CONN_B2B:
		NTB_LOG(INFO, "Topo B2B (back to back) is in use.");
		break;
	default:
		NTB_LOG(ERR, "Unsupported connection topology. Only B2B is supported.");
		return -EINVAL;
	}

	/* Check device type. */
	if (reg_val & XEON_GEN4_PPD_DEV_DSD) {
		NTB_LOG(INFO, "DSD, downstream device.");
		hw->topo = NTB_TOPO_B2B_DSD;
	} else {
		NTB_LOG(INFO, "USD, upstream device.");
		hw->topo = NTB_TOPO_B2B_USD;
	}

	return 0;
}

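/*
 * One-time device init: map the BAR0 register space, validate the PPD for
 * the detected generation, publish the fixed memory window, doorbell and
 * scratchpad counts, record each memory window size from its BAR length,
 * and reserve the last two scratchpad registers for application use.
 */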
static int
intel_ntb_dev_init(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t bar;
	int ret, i;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	hw->hw_addr = (char *)hw->pci_dev->mem_resource[0].addr;

	if (is_gen3_ntb(hw))
		ret = intel_ntb3_check_ppd(hw);
	else if (is_gen4_ntb(hw))
		/* PPD is in MMIO but not config space for NTB Gen4 */
		ret = intel_ntb4_check_ppd(hw);
	else {
		NTB_LOG(ERR, "Cannot initialize unsupported device.");
		return -ENOTSUP;
	}

	if (ret)
		return ret;

	hw->mw_cnt = XEON_MW_COUNT;
	hw->db_cnt = XEON_DB_COUNT;
	hw->spad_cnt = XEON_SPAD_COUNT;

	hw->mw_size = rte_zmalloc("ntb_mw_size",
				  hw->mw_cnt * sizeof(uint64_t), 0);
	if (hw->mw_size == NULL) {
		NTB_LOG(ERR, "Cannot allocate memory window size array.");
		return -ENOMEM;
	}
	for (i = 0; i < hw->mw_cnt; i++) {
		bar = intel_ntb_bar[i];
		hw->mw_size[i] = hw->pci_dev->mem_resource[bar].len;
	}

	/* Reserve the last 2 spad registers for users. */
	for (i = 0; i < NTB_SPAD_USER_MAX_NUM; i++)
		hw->spad_user_list[i] = hw->spad_cnt;
	hw->spad_user_list[0] = hw->spad_cnt - 2;
	hw->spad_user_list[1] = hw->spad_cnt - 1;

	return 0;
}

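/* Return the local virtual address of the BAR backing memory window mw_idx. */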
static void *
intel_ntb_get_peer_mw_addr(const struct rte_rawdev *dev, int mw_idx)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t bar;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return NULL;
	}

	if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
		NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
			hw->mw_cnt - 1);
		return NULL;
	}

	bar = intel_ntb_bar[mw_idx];

	return hw->pci_dev->mem_resource[bar].addr;
}

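/*
 * Program the inbound translation so that peer accesses to memory window
 * mw_idx land at local address addr, and bound the window with a limit
 * register. Gen3 additionally updates the peer-facing EMBAR limit; Gen4
 * instead writes log2(size) into the base-index register.
 */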
static int
intel_ntb_mw_set_trans(const struct rte_rawdev *dev, int mw_idx,
		       uint64_t addr, uint64_t size)
{
	struct ntb_hw *hw = dev->dev_private;
	void *xlat_addr, *limit_addr;
	uint64_t xlat_off, limit_off;
	uint64_t base, limit;
	uint8_t bar;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
		NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
			hw->mw_cnt - 1);
		return -EINVAL;
	}

	bar = intel_ntb_bar[mw_idx];

	xlat_off = XEON_IMBAR1XBASE_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	limit_off = XEON_IMBAR1XLMT_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	xlat_addr = hw->hw_addr + xlat_off;
	limit_addr = hw->hw_addr + limit_off;

	/* Limit reg val should be EMBAR base address plus MW size. */
	base = addr;
	limit = hw->pci_dev->mem_resource[bar].phys_addr + size;
	rte_write64(base, xlat_addr);
	rte_write64(limit, limit_addr);

	if (is_gen3_ntb(hw)) {
		/* Set up the peer-facing EMBAR limit so the remote side
		 * can access the window.
		 */
		xlat_off = XEON_EMBAR1_OFFSET + 8 * mw_idx;
		xlat_addr = hw->hw_addr + xlat_off;
		limit_off = XEON_EMBAR1XLMT_OFFSET +
			    mw_idx * XEON_BAR_INTERVAL_OFFSET;
		limit_addr = hw->hw_addr + limit_off;
		base = rte_read64(xlat_addr);
		base &= ~0xf;
		limit = base + size;
		rte_write64(limit, limit_addr);
	} else if (is_gen4_ntb(hw)) {
		/* Set translate base address index register */
		xlat_off = XEON_GEN4_IM1XBASEIDX_OFFSET +
			   mw_idx * XEON_GEN4_XBASEIDX_INTERVAL;
		xlat_addr = hw->hw_addr + xlat_off;
		rte_write16(rte_log2_u64(size), xlat_addr);
	} else {
		NTB_LOG(ERR, "Cannot set translation of memory windows for unsupported device.");
		rte_write64(base, limit_addr);
		rte_write64(0, xlat_addr);
		return -ENOTSUP;
	}

	return 0;
}

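/*
 * Translate a peer bus address into a local virtual address by locating
 * the peer memory window that contains it; return NULL if none does.
 */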
static void *
intel_ntb_ioremap(const struct rte_rawdev *dev, uint64_t addr)
{
	struct ntb_hw *hw = dev->dev_private;
	void *mapped = NULL;
	void *base;
	int i;

	for (i = 0; i < hw->peer_used_mws; i++) {
		if (addr >= hw->peer_mw_base[i] &&
		    addr <= hw->peer_mw_base[i] + hw->mw_size[i]) {
			base = intel_ntb_get_peer_mw_addr(dev, i);
			mapped = (void *)(size_t)(addr - hw->peer_mw_base[i] +
				 (size_t)base);
			break;
		}
	}

	return mapped;
}

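/*
 * Refresh cached link state. Gen3 exposes link status in PCI config space,
 * Gen4 in MMIO; decode link up/down, speed and width from the register.
 */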
static int
intel_ntb_get_link_status(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint16_t reg_val, reg_off;
	int ret;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	if (is_gen3_ntb(hw)) {
		reg_off = XEON_GEN3_LINK_STATUS_OFFSET;
		ret = rte_pci_read_config(hw->pci_dev, &reg_val,
					  sizeof(reg_val), reg_off);
		if (ret < 0) {
			NTB_LOG(ERR, "Unable to get link status.");
			return -EIO;
		}
	} else if (is_gen4_ntb(hw)) {
		reg_off = XEON_GEN4_LINK_STATUS_OFFSET;
		reg_val = rte_read16(hw->hw_addr + reg_off);
	} else {
		NTB_LOG(ERR, "Cannot get link status for unsupported device.");
		return -ENOTSUP;
	}

	hw->link_status = NTB_LNK_STA_ACTIVE(reg_val);

	if (hw->link_status) {
		hw->link_speed = NTB_LNK_STA_SPEED(reg_val);
		hw->link_width = NTB_LNK_STA_WIDTH(reg_val);
	} else {
		hw->link_speed = NTB_SPEED_NONE;
		hw->link_width = NTB_WIDTH_NONE;
	}

	return 0;
}

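/* Bring the Gen3 link up by enabling BAR2/BAR4 snoop and clearing the
 * disable and config-lock bits in the NTB control register; reverse the
 * bits to take the link down.
 */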
static int
intel_ntb_gen3_set_link(const struct ntb_hw *hw, bool up)
{
	uint32_t ntb_ctrl, reg_off;
	void *reg_addr;

	reg_off = XEON_NTBCNTL_OFFSET;
	reg_addr = hw->hw_addr + reg_off;
	ntb_ctrl = rte_read32(reg_addr);

	if (up) {
		ntb_ctrl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
		ntb_ctrl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
		ntb_ctrl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	} else {
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
		ntb_ctrl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	}

	rte_write32(ntb_ctrl, reg_addr);

	return 0;
}

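/* Bring the Gen4 link up by enabling BAR snoop, clearing the link-disable
 * bit and kicking off link training via PPD0; taking it down disables
 * snoop and sets link-disable.
 */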
static int
intel_ntb_gen4_set_link(const struct ntb_hw *hw, bool up)
{
	uint32_t ntb_ctrl, ppd0;
	uint16_t link_ctrl;
	void *reg_addr;

	if (up) {
		reg_addr = hw->hw_addr + XEON_NTBCNTL_OFFSET;
		ntb_ctrl = NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
		ntb_ctrl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
		rte_write32(ntb_ctrl, reg_addr);

		reg_addr = hw->hw_addr + XEON_GEN4_LINK_CTRL_OFFSET;
		link_ctrl = rte_read16(reg_addr);
		link_ctrl &= ~XEON_GEN4_LINK_CTRL_LINK_DIS;
		rte_write16(link_ctrl, reg_addr);

		/* start link training */
		reg_addr = hw->hw_addr + XEON_GEN4_PPD0_OFFSET;
		ppd0 = rte_read32(reg_addr);
		ppd0 |= XEON_GEN4_PPD_LINKTRN;
		rte_write32(ppd0, reg_addr);

		/* make sure link training has started */
		ppd0 = rte_read32(reg_addr);
		if (!(ppd0 & XEON_GEN4_PPD_LINKTRN)) {
			NTB_LOG(ERR, "Link is not training.");
			return -EINVAL;
		}
	} else {
		reg_addr = hw->hw_addr + XEON_NTBCNTL_OFFSET;
		ntb_ctrl = rte_read32(reg_addr);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
		rte_write32(ntb_ctrl, reg_addr);

		reg_addr = hw->hw_addr + XEON_GEN4_LINK_CTRL_OFFSET;
		link_ctrl = rte_read16(reg_addr);
		link_ctrl |= XEON_GEN4_LINK_CTRL_LINK_DIS;
		rte_write16(link_ctrl, reg_addr);
	}

	return 0;
}

static int
intel_ntb_set_link(const struct rte_rawdev *dev, bool up)
{
	struct ntb_hw *hw = dev->dev_private;
	int ret = 0;

	if (is_gen3_ntb(hw))
		ret = intel_ntb_gen3_set_link(hw, up);
	else if (is_gen4_ntb(hw))
		ret = intel_ntb_gen4_set_link(hw, up);
	else {
		NTB_LOG(ERR, "Cannot set link for unsupported device.");
		ret = -ENOTSUP;
	}

	return ret;
}

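/* Read a 4-byte scratchpad register, either local or, when peer is true,
 * the peer's B2B-mirrored copy.
 */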
static uint32_t
intel_ntb_spad_read(const struct rte_rawdev *dev, int spad, bool peer)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t spad_v, reg_off;
	void *reg_addr;

	if (spad < 0 || spad >= hw->spad_cnt) {
		NTB_LOG(ERR, "Invalid spad reg index.");
		return 0;
	}

	/* When peer is true, read peer spad reg */
	if (is_gen3_ntb(hw))
		reg_off = peer ? XEON_GEN3_B2B_SPAD_OFFSET :
				XEON_IM_SPAD_OFFSET;
	else if (is_gen4_ntb(hw))
		reg_off = peer ? XEON_GEN4_B2B_SPAD_OFFSET :
				XEON_IM_SPAD_OFFSET;
	else {
		NTB_LOG(ERR, "Cannot read spad for unsupported device.");
		/* Return type is unsigned; use 0 as the error sentinel. */
		return 0;
	}
	reg_addr = hw->hw_addr + reg_off + (spad << 2);
	spad_v = rte_read32(reg_addr);

	return spad_v;
}

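/* Write a 4-byte scratchpad register, either local or, when peer is true,
 * the peer's B2B-mirrored copy.
 */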
static int
intel_ntb_spad_write(const struct rte_rawdev *dev, int spad,
		     bool peer, uint32_t spad_v)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t reg_off;
	void *reg_addr;

	if (spad < 0 || spad >= hw->spad_cnt) {
		NTB_LOG(ERR, "Invalid spad reg index.");
		return -EINVAL;
	}

	/* When peer is true, write peer spad reg */
	if (is_gen3_ntb(hw))
		reg_off = peer ? XEON_GEN3_B2B_SPAD_OFFSET :
				XEON_IM_SPAD_OFFSET;
	else if (is_gen4_ntb(hw))
		reg_off = peer ? XEON_GEN4_B2B_SPAD_OFFSET :
				XEON_IM_SPAD_OFFSET;
	else {
		NTB_LOG(ERR, "Cannot write spad for unsupported device.");
		return -ENOTSUP;
	}
	reg_addr = hw->hw_addr + reg_off + (spad << 2);

	rte_write32(spad_v, reg_addr);

	return 0;
}

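/* Return the 64-bit doorbell status bits from the interrupt status register. */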
static uint64_t
intel_ntb_db_read(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_off, db_bits;
	void *db_addr;

	db_off = XEON_IM_INT_STATUS_OFFSET;
	db_addr = hw->hw_addr + db_off;

	db_bits = rte_read64(db_addr);

	return db_bits;
}

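/* Clear the given doorbell bits by writing them back to the status register.
 * Gen4 first clears the DLL state-change bit in the slot status register.
 */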
static int
intel_ntb_db_clear(const struct rte_rawdev *dev, uint64_t db_bits)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_off;
	void *db_addr;

	db_off = XEON_IM_INT_STATUS_OFFSET;
	db_addr = hw->hw_addr + db_off;

	if (is_gen4_ntb(hw))
		rte_write16(XEON_GEN4_SLOTSTS_DLLSCS,
			    hw->hw_addr + XEON_GEN4_SLOTSTS);
	rte_write64(db_bits, db_addr);

	return 0;
}

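/* Accumulate db_mask into the doorbell interrupt-disable register; masked
 * bits no longer raise interrupts.
 */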
static int
intel_ntb_db_set_mask(const struct rte_rawdev *dev, uint64_t db_mask)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_m_off;
	void *db_m_addr;

	db_m_off = XEON_IM_INT_DISABLE_OFFSET;
	db_m_addr = hw->hw_addr + db_m_off;

	db_mask |= hw->db_mask;

	rte_write64(db_mask, db_m_addr);

	hw->db_mask = db_mask;

	return 0;
}

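/* Ring a single peer doorbell by writing to its 4-byte doorbell register. */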
static int
intel_ntb_peer_db_set(const struct rte_rawdev *dev, uint8_t db_idx)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t db_off;
	void *db_addr;

	if (((uint64_t)1 << db_idx) & ~hw->db_valid_mask) {
		NTB_LOG(ERR, "Invalid doorbell.");
		return -EINVAL;
	}

	db_off = XEON_IM_DOORBELL_OFFSET + db_idx * 4;
	db_addr = hw->hw_addr + db_off;

	rte_write32(1, db_addr);

	return 0;
}

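/* Route doorbell interrupt source intr to MSI-X vector msix via the
 * byte-wide interrupt vector table.
 */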
static int
intel_ntb_vector_bind(const struct rte_rawdev *dev, uint8_t intr, uint8_t msix)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t reg_off;
	void *reg_addr;

	if (intr >= hw->db_cnt) {
		NTB_LOG(ERR, "Invalid intr source.");
		return -EINVAL;
	}

	/* Bind intr source to msix vector */
	if (is_gen3_ntb(hw))
		reg_off = XEON_GEN3_INTVEC_OFFSET;
	else if (is_gen4_ntb(hw))
		reg_off = XEON_GEN4_INTVEC_OFFSET;
	else {
		NTB_LOG(ERR, "Cannot bind vectors for unsupported device.");
		return -ENOTSUP;
	}
	reg_addr = hw->hw_addr + reg_off + intr;

	rte_write8(msix, reg_addr);

	return 0;
}

/* operations for primary side of local ntb */
const struct ntb_dev_ops intel_ntb_ops = {
	.ntb_dev_init       = intel_ntb_dev_init,
	.get_peer_mw_addr   = intel_ntb_get_peer_mw_addr,
	.mw_set_trans       = intel_ntb_mw_set_trans,
	.ioremap            = intel_ntb_ioremap,
	.get_link_status    = intel_ntb_get_link_status,
	.set_link           = intel_ntb_set_link,
	.spad_read          = intel_ntb_spad_read,
	.spad_write         = intel_ntb_spad_write,
	.db_read            = intel_ntb_db_read,
	.db_clear           = intel_ntb_db_clear,
	.db_set_mask        = intel_ntb_db_set_mask,
	.peer_db_set        = intel_ntb_peer_db_set,
	.vector_bind        = intel_ntb_vector_bind,
};