/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation.
 */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#include <rte_io.h>
#include <rte_eal.h>
#include <rte_pci.h>
#include <rte_malloc.h>
#include <bus_pci_driver.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "ntb.h"
#include "ntb_hw_intel.h"

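/* Each memory window is backed by a 64-bit BAR pair (BAR2/3 or BAR4/5),
 * so a window index maps directly to a PCI BAR number.
 */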
enum xeon_ntb_bar {
	XEON_NTB_BAR23 = 2,
	XEON_NTB_BAR45 = 4,
};

static enum xeon_ntb_bar intel_ntb_bar[] = {
	XEON_NTB_BAR23,
	XEON_NTB_BAR45,
};

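/* Generation detection by PCI device ID: Gen3 is the Skylake-SP NTB,
 * Gen4 covers the Icelake-SP and Sapphire Rapids NTBs.
 */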
static inline int
is_gen3_ntb(const struct ntb_hw *hw)
{
	if (hw->pci_dev->id.device_id == NTB_INTEL_DEV_ID_B2B_SKX)
		return 1;

	return 0;
}

static inline int
is_gen4_ntb(const struct ntb_hw *hw)
{
	if (hw->pci_dev->id.device_id == NTB_INTEL_DEV_ID_B2B_ICX ||
	    hw->pci_dev->id.device_id == NTB_INTEL_DEV_ID_B2B_SPR)
		return 1;

	return 0;
}

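/* On Gen3 the PPD (PCIe port definition) register sits in PCI config
 * space; it reports the connection topology, the USD/DSD role, and
 * whether BAR4 is split.
 */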
static int
intel_ntb3_check_ppd(struct ntb_hw *hw)
{
	uint8_t reg_val;
	int ret;

	ret = rte_pci_read_config(hw->pci_dev, &reg_val,
				  sizeof(reg_val), XEON_PPD_OFFSET);
	if (ret < 0) {
		NTB_LOG(ERR, "Cannot get NTB PPD (PCIe port definition).");
		return -EIO;
	}

	/* Check connection topology type. Only B2B is supported. */
	switch (reg_val & XEON_PPD_CONN_MASK) {
	case XEON_PPD_CONN_B2B:
		NTB_LOG(INFO, "Using B2B (back-to-back) topology.");
		break;
	case XEON_PPD_CONN_TRANSPARENT:
	case XEON_PPD_CONN_RP:
	default:
		NTB_LOG(ERR, "Unsupported connection topology. Please use B2B.");
		return -EINVAL;
	}

	/* Check device type. */
	if (reg_val & XEON_PPD_DEV_DSD) {
		NTB_LOG(INFO, "DSD (downstream device).");
		hw->topo = NTB_TOPO_B2B_DSD;
	} else {
		NTB_LOG(INFO, "USD (upstream device).");
		hw->topo = NTB_TOPO_B2B_USD;
	}

	/* Check whether BAR4 is split. Split BAR is not supported. */
	if (reg_val & XEON_PPD_SPLIT_BAR_MASK) {
		NTB_LOG(ERR, "Split BAR is not supported.");
		return -EINVAL;
	}

	return 0;
}

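/* The Gen4 PPD bit layout differs between ICX and SPR, hence the two
 * helpers below; intel_ntb4_check_ppd() picks one via the PCI revision ID.
 */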
static int
intel_ntb4_check_ppd_for_ICX(struct ntb_hw *hw, uint32_t reg_val)
{
	/* Check connection topology type. Only B2B is supported. */
	switch (reg_val & XEON_GEN4_PPD_CONN_MASK) {
	case XEON_GEN4_PPD_CONN_B2B:
		NTB_LOG(INFO, "Using B2B (back-to-back) topology.");
		break;
	default:
		NTB_LOG(ERR, "Unsupported connection topology. Please use B2B.");
		return -EINVAL;
	}

	/* Check device type. */
	if (reg_val & XEON_GEN4_PPD_DEV_DSD) {
		NTB_LOG(INFO, "DSD (downstream device).");
		hw->topo = NTB_TOPO_B2B_DSD;
	} else {
		NTB_LOG(INFO, "USD (upstream device).");
		hw->topo = NTB_TOPO_B2B_USD;
	}

	return 0;
}

static int
intel_ntb4_check_ppd_for_SPR(struct ntb_hw *hw, uint32_t reg_val)
{
	/* Check connection topology type. Only B2B is supported. */
	switch (reg_val & XEON_SPR_PPD_CONN_MASK) {
	case XEON_SPR_PPD_CONN_B2B:
		NTB_LOG(INFO, "Using B2B (back-to-back) topology.");
		break;
	default:
		NTB_LOG(ERR, "Unsupported connection topology. Please use B2B.");
		return -EINVAL;
	}

	/* Check device type. */
	if (reg_val & XEON_SPR_PPD_DEV_DSD) {
		NTB_LOG(INFO, "DSD (downstream device).");
		hw->topo = NTB_TOPO_B2B_DSD;
	} else {
		NTB_LOG(INFO, "USD (upstream device).");
		hw->topo = NTB_TOPO_B2B_USD;
	}

	return 0;
}

static int
intel_ntb4_check_ppd(struct ntb_hw *hw)
{
	uint8_t revision_id;
	uint32_t reg_val;
	int ret;

	ret = rte_pci_read_config(hw->pci_dev, &revision_id,
				  NTB_PCI_DEV_REVISION_ID_LEN,
				  NTB_PCI_DEV_REVISION_ID_REG);
	if (ret != NTB_PCI_DEV_REVISION_ID_LEN) {
		NTB_LOG(ERR, "Cannot get NTB PCI Device Revision ID.");
		return -EIO;
	}

	reg_val = rte_read32(hw->hw_addr + XEON_GEN4_PPD1_OFFSET);

	/* Distinguish the HW platform (ICX/SPR) via the PCI revision ID. */
	if (revision_id > NTB_PCI_DEV_REVISION_ICX_MAX) {
		ret = intel_ntb4_check_ppd_for_SPR(hw, reg_val);
	} else if (revision_id >= NTB_PCI_DEV_REVISION_ICX_MIN) {
		ret = intel_ntb4_check_ppd_for_ICX(hw, reg_val);
	} else {
		NTB_LOG(ERR, "Invalid NTB PCI Device Revision ID.");
		return -EIO;
	}

	return ret;
}

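/* Device init: validate the topology, record MW/DB/spad counts, size the
 * memory windows from the backing BARs, and reserve user scratchpads.
 */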
static int
intel_ntb_dev_init(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t bar;
	int ret, i;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	hw->hw_addr = (char *)hw->pci_dev->mem_resource[0].addr;

	if (is_gen3_ntb(hw)) {
		ret = intel_ntb3_check_ppd(hw);
	} else if (is_gen4_ntb(hw)) {
		/* On NTB Gen4 the PPD is in MMIO space, not config space. */
		ret = intel_ntb4_check_ppd(hw);
	} else {
		NTB_LOG(ERR, "Cannot init unsupported device.");
		return -ENOTSUP;
	}

	if (ret)
		return ret;

	hw->mw_cnt = XEON_MW_COUNT;
	hw->db_cnt = XEON_DB_COUNT;
	hw->spad_cnt = XEON_SPAD_COUNT;

	hw->mw_size = rte_zmalloc("ntb_mw_size",
				  hw->mw_cnt * sizeof(uint64_t), 0);
	if (hw->mw_size == NULL) {
		NTB_LOG(ERR, "Cannot allocate memory for mw size.");
		return -ENOMEM;
	}

	/* Memory window sizes come from the lengths of the backing BARs. */
	for (i = 0; i < hw->mw_cnt; i++) {
		bar = intel_ntb_bar[i];
		hw->mw_size[i] = hw->pci_dev->mem_resource[bar].len;
	}

	/* Reserve the last 2 spad registers for users; the out-of-range
	 * index marks the remaining user slots as unused.
	 */
	for (i = 0; i < NTB_SPAD_USER_MAX_NUM; i++)
		hw->spad_user_list[i] = hw->spad_cnt;
	hw->spad_user_list[0] = hw->spad_cnt - 2;
	hw->spad_user_list[1] = hw->spad_cnt - 1;

	return 0;
}

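/* The peer's memory window is reached through our own BAR mapping. */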
static void *
intel_ntb_get_peer_mw_addr(const struct rte_rawdev *dev, int mw_idx)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t bar;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return NULL;
	}

	if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
		NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
			hw->mw_cnt - 1);
		return NULL;
	}

	bar = intel_ntb_bar[mw_idx];

	return hw->pci_dev->mem_resource[bar].addr;
}

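/* Program the inbound translation: XBASE gets the local DMA address the
 * window should hit, XLMT the end of the window. Gen3 additionally
 * programs the external-side limit; Gen4 instead encodes the window size
 * as log2 in the base-index register.
 */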
static int
intel_ntb_mw_set_trans(const struct rte_rawdev *dev, int mw_idx,
		       uint64_t addr, uint64_t size)
{
	struct ntb_hw *hw = dev->dev_private;
	void *xlat_addr, *limit_addr;
	uint64_t xlat_off, limit_off;
	uint64_t base, limit;
	uint8_t bar;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
		NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
			hw->mw_cnt - 1);
		return -EINVAL;
	}

	bar = intel_ntb_bar[mw_idx];

	xlat_off = XEON_IMBAR1XBASE_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	limit_off = XEON_IMBAR1XLMT_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	xlat_addr = hw->hw_addr + xlat_off;
	limit_addr = hw->hw_addr + limit_off;

	/* The limit register holds the EMBAR base address plus the MW size. */
	base = addr;
	limit = hw->pci_dev->mem_resource[bar].phys_addr + size;
	rte_write64(base, xlat_addr);
	rte_write64(limit, limit_addr);

	if (is_gen3_ntb(hw)) {
		/* Set up the external side so the remote peer can access us. */
		xlat_off = XEON_EMBAR1_OFFSET + 8 * mw_idx;
		xlat_addr = hw->hw_addr + xlat_off;
		limit_off = XEON_EMBAR1XLMT_OFFSET +
			    mw_idx * XEON_BAR_INTERVAL_OFFSET;
		limit_addr = hw->hw_addr + limit_off;
		base = rte_read64(xlat_addr);
		base &= ~0xf;
		limit = base + size;
		rte_write64(limit, limit_addr);
	} else if (is_gen4_ntb(hw)) {
		/* Set the translate base address index register. */
		xlat_off = XEON_GEN4_IM1XBASEIDX_OFFSET +
			   mw_idx * XEON_GEN4_XBASEIDX_INTERVAL;
		xlat_addr = hw->hw_addr + xlat_off;
		rte_write16(rte_log2_u64(size), xlat_addr);
	} else {
		NTB_LOG(ERR, "Cannot set memory window translation for unsupported device.");
		/* Undo the inbound setup: limit == base disables the window. */
		rte_write64(base, limit_addr);
		rte_write64(0, xlat_addr);
		return -ENOTSUP;
	}

	return 0;
}

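/* Translate a peer bus address into a local virtual address by finding
 * the peer memory window that contains it.
 */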
static void *
intel_ntb_ioremap(const struct rte_rawdev *dev, uint64_t addr)
{
	struct ntb_hw *hw = dev->dev_private;
	void *mapped = NULL;
	void *base;
	int i;

	for (i = 0; i < hw->peer_used_mws; i++) {
		if (addr >= hw->peer_mw_base[i] &&
		    addr <= hw->peer_mw_base[i] + hw->mw_size[i]) {
			base = intel_ntb_get_peer_mw_addr(dev, i);
			mapped = (void *)(size_t)(addr - hw->peer_mw_base[i] +
				 (size_t)base);
			break;
		}
	}

	return mapped;
}

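/* Gen3 exposes link status in PCI config space, Gen4 in MMIO; either way
 * the register is decoded into link active, speed, and width.
 */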
static int
intel_ntb_get_link_status(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint16_t reg_val, reg_off;
	int ret;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	if (is_gen3_ntb(hw)) {
		reg_off = XEON_GEN3_LINK_STATUS_OFFSET;
		ret = rte_pci_read_config(hw->pci_dev, &reg_val,
					  sizeof(reg_val), reg_off);
		if (ret < 0) {
			NTB_LOG(ERR, "Unable to get link status.");
			return -EIO;
		}
	} else if (is_gen4_ntb(hw)) {
		reg_off = XEON_GEN4_LINK_STATUS_OFFSET;
		reg_val = rte_read16(hw->hw_addr + reg_off);
	} else {
		NTB_LOG(ERR, "Cannot get link status for unsupported device.");
		return -ENOTSUP;
	}

	hw->link_status = NTB_LNK_STA_ACTIVE(reg_val);

	if (hw->link_status) {
		hw->link_speed = NTB_LNK_STA_SPEED(reg_val);
		hw->link_width = NTB_LNK_STA_WIDTH(reg_val);
	} else {
		hw->link_speed = NTB_SPEED_NONE;
		hw->link_width = NTB_WIDTH_NONE;
	}

	return 0;
}

static int
intel_ntb_gen3_set_link(const struct ntb_hw *hw, bool up)
{
	uint32_t ntb_ctrl, reg_off;
	void *reg_addr;

	reg_off = XEON_NTBCNTL_OFFSET;
	reg_addr = hw->hw_addr + reg_off;
	ntb_ctrl = rte_read32(reg_addr);

	if (up) {
		ntb_ctrl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
		ntb_ctrl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
		ntb_ctrl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	} else {
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
		ntb_ctrl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	}

	rte_write32(ntb_ctrl, reg_addr);

	return 0;
}

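/* Gen4 link-up is a three-step sequence: enable BAR snooping, clear the
 * link-disable bit, then kick link training via PPD0 and verify that it
 * actually started.
 */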
static int
intel_ntb_gen4_set_link(const struct ntb_hw *hw, bool up)
{
	uint32_t ntb_ctrl, ppd0;
	uint16_t link_ctrl;
	void *reg_addr;

	if (up) {
		reg_addr = hw->hw_addr + XEON_NTBCNTL_OFFSET;
		ntb_ctrl = NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
		ntb_ctrl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
		rte_write32(ntb_ctrl, reg_addr);

		reg_addr = hw->hw_addr + XEON_GEN4_LINK_CTRL_OFFSET;
		link_ctrl = rte_read16(reg_addr);
		link_ctrl &= ~XEON_GEN4_LINK_CTRL_LINK_DIS;
		rte_write16(link_ctrl, reg_addr);

		/* Start link training. */
		reg_addr = hw->hw_addr + XEON_GEN4_PPD0_OFFSET;
		ppd0 = rte_read32(reg_addr);
		ppd0 |= XEON_GEN4_PPD_LINKTRN;
		rte_write32(ppd0, reg_addr);

		/* Make sure link training has started. */
		ppd0 = rte_read32(reg_addr);
		if (!(ppd0 & XEON_GEN4_PPD_LINKTRN)) {
			NTB_LOG(ERR, "Link is not training.");
			return -EINVAL;
		}
	} else {
		reg_addr = hw->hw_addr + XEON_NTBCNTL_OFFSET;
		ntb_ctrl = rte_read32(reg_addr);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
		rte_write32(ntb_ctrl, reg_addr);

		reg_addr = hw->hw_addr + XEON_GEN4_LINK_CTRL_OFFSET;
		link_ctrl = rte_read16(reg_addr);
		link_ctrl |= XEON_GEN4_LINK_CTRL_LINK_DIS;
		rte_write16(link_ctrl, reg_addr);
	}

	return 0;
}

static int
intel_ntb_set_link(const struct rte_rawdev *dev, bool up)
{
	struct ntb_hw *hw = dev->dev_private;
	int ret = 0;

	if (is_gen3_ntb(hw)) {
		ret = intel_ntb_gen3_set_link(hw, up);
	} else if (is_gen4_ntb(hw)) {
		ret = intel_ntb_gen4_set_link(hw, up);
	} else {
		NTB_LOG(ERR, "Cannot set link for unsupported device.");
		ret = -ENOTSUP;
	}

	return ret;
}

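/* Scratchpad registers are 32 bits wide at a 4-byte stride; 'peer'
 * selects the B2B window into the remote side's scratchpads.
 */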
static uint32_t
intel_ntb_spad_read(const struct rte_rawdev *dev, int spad, bool peer)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t spad_v, reg_off;
	void *reg_addr;

	if (spad < 0 || spad >= hw->spad_cnt) {
		NTB_LOG(ERR, "Invalid spad reg index.");
		return 0;
	}

	/* When peer is true, read the peer spad reg. */
	if (is_gen3_ntb(hw)) {
		reg_off = peer ? XEON_GEN3_B2B_SPAD_OFFSET :
				XEON_IM_SPAD_OFFSET;
	} else if (is_gen4_ntb(hw)) {
		reg_off = peer ? XEON_GEN4_B2B_SPAD_OFFSET :
				XEON_IM_SPAD_OFFSET;
	} else {
		/* The return type is unsigned, so an errno cannot be
		 * signalled; return 0 as in the invalid-index case.
		 */
		NTB_LOG(ERR, "Cannot read spad for unsupported device.");
		return 0;
	}
	reg_addr = hw->hw_addr + reg_off + (spad << 2);
	spad_v = rte_read32(reg_addr);

	return spad_v;
}

static int
intel_ntb_spad_write(const struct rte_rawdev *dev, int spad,
		     bool peer, uint32_t spad_v)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t reg_off;
	void *reg_addr;

	if (spad < 0 || spad >= hw->spad_cnt) {
		NTB_LOG(ERR, "Invalid spad reg index.");
		return -EINVAL;
	}

	/* When peer is true, write the peer spad reg. */
	if (is_gen3_ntb(hw)) {
		reg_off = peer ? XEON_GEN3_B2B_SPAD_OFFSET :
				XEON_IM_SPAD_OFFSET;
	} else if (is_gen4_ntb(hw)) {
		reg_off = peer ? XEON_GEN4_B2B_SPAD_OFFSET :
				XEON_IM_SPAD_OFFSET;
	} else {
		NTB_LOG(ERR, "Cannot write spad for unsupported device.");
		return -ENOTSUP;
	}
	reg_addr = hw->hw_addr + reg_off + (spad << 2);

	rte_write32(spad_v, reg_addr);

	return 0;
}

static uint64_t
intel_ntb_db_read(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_off, db_bits;
	void *db_addr;

	db_off = XEON_IM_INT_STATUS_OFFSET;
	db_addr = hw->hw_addr + db_off;

	db_bits = rte_read64(db_addr);

	return db_bits;
}

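/* On Gen4, a doorbell clear also acknowledges the data link layer
 * state-change status (DLLSCS) in the slot status register.
 */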
static int
intel_ntb_db_clear(const struct rte_rawdev *dev, uint64_t db_bits)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_off;
	void *db_addr;

	db_off = XEON_IM_INT_STATUS_OFFSET;
	db_addr = hw->hw_addr + db_off;

	if (is_gen4_ntb(hw))
		rte_write16(XEON_GEN4_SLOTSTS_DLLSCS,
			    hw->hw_addr + XEON_GEN4_SLOTSTS);
	rte_write64(db_bits, db_addr);

	return 0;
}

static int
intel_ntb_db_set_mask(const struct rte_rawdev *dev, uint64_t db_mask)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_m_off;
	void *db_m_addr;

	db_m_off = XEON_IM_INT_DISABLE_OFFSET;
	db_m_addr = hw->hw_addr + db_m_off;

	/* Masks accumulate: new bits are OR-ed into the stored mask. */
	db_mask |= hw->db_mask;

	rte_write64(db_mask, db_m_addr);

	hw->db_mask = db_mask;

	return 0;
}

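/* Ring one peer doorbell by writing 1 to its 32-bit register in the
 * inbound doorbell array.
 */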
static int
intel_ntb_peer_db_set(const struct rte_rawdev *dev, uint8_t db_idx)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t db_off;
	void *db_addr;

	if (((uint64_t)1 << db_idx) & ~hw->db_valid_mask) {
		NTB_LOG(ERR, "Invalid doorbell.");
		return -EINVAL;
	}

	db_off = XEON_IM_DOORBELL_OFFSET + db_idx * 4;
	db_addr = hw->hw_addr + db_off;

	rte_write32(1, db_addr);

	return 0;
}

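/* Each doorbell interrupt source maps to an MSI-X entry through a
 * byte-wide entry in the INTVEC register array.
 */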
static int
intel_ntb_vector_bind(const struct rte_rawdev *dev, uint8_t intr, uint8_t msix)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t reg_off;
	void *reg_addr;

	if (intr >= hw->db_cnt) {
		NTB_LOG(ERR, "Invalid intr source.");
		return -EINVAL;
	}

	/* Bind the intr source to the msix vector. */
	if (is_gen3_ntb(hw)) {
		reg_off = XEON_GEN3_INTVEC_OFFSET;
	} else if (is_gen4_ntb(hw)) {
		reg_off = XEON_GEN4_INTVEC_OFFSET;
	} else {
		NTB_LOG(ERR, "Cannot bind vectors for unsupported device.");
		return -ENOTSUP;
	}
	reg_addr = hw->hw_addr + reg_off + intr;

	rte_write8(msix, reg_addr);

	return 0;
}

/* Operations for the primary side of the local NTB. */
const struct ntb_dev_ops intel_ntb_ops = {
	.ntb_dev_init       = intel_ntb_dev_init,
	.get_peer_mw_addr   = intel_ntb_get_peer_mw_addr,
	.mw_set_trans       = intel_ntb_mw_set_trans,
	.ioremap            = intel_ntb_ioremap,
	.get_link_status    = intel_ntb_get_link_status,
	.set_link           = intel_ntb_set_link,
	.spad_read          = intel_ntb_spad_read,
	.spad_write         = intel_ntb_spad_write,
	.db_read            = intel_ntb_db_read,
	.db_clear           = intel_ntb_db_clear,
	.db_set_mask        = intel_ntb_db_set_mask,
	.peer_db_set        = intel_ntb_peer_db_set,
	.vector_bind        = intel_ntb_vector_bind,
};