xref: /dpdk/drivers/raw/ntb/ntb.h (revision 2b843cac232eb3f2fa79e4254e21766817e2019f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation.
3  */
4 
5 #ifndef _NTB_H_
6 #define _NTB_H_
7 
8 #include <stdbool.h>
9 
extern int ntb_logtype;
#define RTE_LOGTYPE_NTB ntb_logtype

/* Driver log helper: prefixes every message with the calling function name. */
#define NTB_LOG(level, ...) \
	RTE_LOG_LINE_PREFIX(level, NTB, "%s(): ", __func__, __VA_ARGS__)

/* Vendor ID */
#define NTB_INTEL_VENDOR_ID         0x8086

/* Device IDs */
#define NTB_INTEL_DEV_ID_B2B_SKX    0x201C
#define NTB_INTEL_DEV_ID_B2B_ICX    0x347E
/* Not a typo: SPR shares the ICX NTB device ID. */
#define NTB_INTEL_DEV_ID_B2B_SPR    0x347E

/* Reserved to app to use. */
#define NTB_SPAD_USER               "spad_user_"
#define NTB_SPAD_USER_LEN           (sizeof(NTB_SPAD_USER) - 1) /* excl. NUL */
#define NTB_SPAD_USER_MAX_NUM       4
#define NTB_ATTR_NAME_LEN           30

/* Default tx_free_thresh (descriptors); see struct ntb_tx_queue. */
#define NTB_DFLT_TX_FREE_THRESH     256
31 
/* Indices into the per-queue xstats arrays (ntb_xstats / ntb_xstats_off). */
enum ntb_xstats_idx {
	NTB_TX_PKTS_ID = 0,	/* TX packet count */
	NTB_TX_BYTES_ID,	/* TX byte count */
	NTB_TX_ERRS_ID,		/* TX error count */
	NTB_RX_PKTS_ID,		/* RX packet count */
	NTB_RX_BYTES_ID,	/* RX byte count */
	NTB_RX_MISS_ID,		/* RX missed-packet count */
};
40 
/* NTB topology: back-to-back (B2B), upstream- or downstream-side device. */
enum ntb_topo {
	NTB_TOPO_NONE = 0,	/* topology not detected/supported */
	NTB_TOPO_B2B_USD,	/* back-to-back, upstream side */
	NTB_TOPO_B2B_DSD,	/* back-to-back, downstream side */
};
46 
/* Link state as reported by get_link_status / set by set_link. */
enum ntb_link {
	NTB_LINK_DOWN = 0,
	NTB_LINK_UP,
};
51 
/* Negotiated link speed; values map to the PCIe generation number. */
enum ntb_speed {
	NTB_SPEED_NONE = 0,
	NTB_SPEED_GEN1 = 1,
	NTB_SPEED_GEN2 = 2,
	NTB_SPEED_GEN3 = 3,
	NTB_SPEED_GEN4 = 4,
};
59 
/* Negotiated link width; values map to the PCIe lane count. */
enum ntb_width {
	NTB_WIDTH_NONE = 0,
	NTB_WIDTH_1 = 1,
	NTB_WIDTH_2 = 2,
	NTB_WIDTH_4 = 4,
	NTB_WIDTH_8 = 8,
	NTB_WIDTH_12 = 12,
	NTB_WIDTH_16 = 16,
	NTB_WIDTH_32 = 32,
};
70 
/* Define spad registers usage. 0 is reserved.
 * Scratchpads are 32-bit (see spad_read/spad_write), so 64-bit memory
 * window sizes and base addresses are split into _H/_L halves.
 */
enum ntb_spad_idx {
	SPAD_NUM_MWS = 1,	/* number of memory windows */
	SPAD_NUM_QPS,		/* number of queue pairs */
	SPAD_Q_SZ,		/* queue size */
	SPAD_USED_MWS,		/* number of memory windows in use */
	SPAD_MW0_SZ_H,		/* memory window 0 size, high 32 bits */
	SPAD_MW0_SZ_L,		/* memory window 0 size, low 32 bits */
	SPAD_MW1_SZ_H,		/* memory window 1 size, high 32 bits */
	SPAD_MW1_SZ_L,		/* memory window 1 size, low 32 bits */
	SPAD_MW0_BA_H,		/* memory window 0 base address, high 32 bits */
	SPAD_MW0_BA_L,		/* memory window 0 base address, low 32 bits */
	SPAD_MW1_BA_H,		/* memory window 1 base address, high 32 bits */
	SPAD_MW1_BA_L,		/* memory window 1 base address, low 32 bits */
};
86 
/**
 * NTB device operations. Implemented per hardware generation; all callbacks
 * take the owning rte_rawdev.
 * @ntb_dev_init: Init ntb dev.
 * @get_peer_mw_addr: Get the address of peer mw[mw_idx].
 * @mw_set_trans: Set translation of internal memory that remote can access.
 * @ioremap: Translate the remote host address to a bar address.
 * @get_link_status: Get link status, link speed and link width.
 * @set_link: Set local side up/down.
 * @spad_read: Read local/peer spad register value.
 * @spad_write: Write value to local/peer spad register.
 * @db_read: Read doorbells status.
 * @db_clear: Clear local doorbells.
 * @db_set_mask: Set bits in db mask, preventing db interrupts generated
 * for those db bits.
 * @peer_db_set: Set doorbell bit to generate peer interrupt for that bit.
 * @vector_bind: Bind vector source [intr] to msix vector [msix].
 */
struct ntb_dev_ops {
	int (*ntb_dev_init)(const struct rte_rawdev *dev);
	void *(*get_peer_mw_addr)(const struct rte_rawdev *dev, int mw_idx);
	int (*mw_set_trans)(const struct rte_rawdev *dev, int mw_idx,
			    uint64_t addr, uint64_t size);
	void *(*ioremap)(const struct rte_rawdev *dev, uint64_t addr);
	int (*get_link_status)(const struct rte_rawdev *dev);
	int (*set_link)(const struct rte_rawdev *dev, bool up);
	uint32_t (*spad_read)(const struct rte_rawdev *dev, int spad,
			      bool peer);
	int (*spad_write)(const struct rte_rawdev *dev, int spad,
			  bool peer, uint32_t spad_v);
	uint64_t (*db_read)(const struct rte_rawdev *dev);
	int (*db_clear)(const struct rte_rawdev *dev, uint64_t db_bits);
	int (*db_set_mask)(const struct rte_rawdev *dev, uint64_t db_mask);
	int (*peer_db_set)(const struct rte_rawdev *dev, uint8_t db_bit);
	int (*vector_bind)(const struct rte_rawdev *dev, uint8_t intr,
			   uint8_t msix);
};
123 
/* One ring descriptor (16 bytes); rsv fields pad to a fixed layout. */
struct ntb_desc {
	uint64_t addr; /* buffer addr */
	uint16_t len;  /* buffer length */
	uint16_t rsv1; /* reserved */
	uint32_t rsv2; /* reserved */
};
130 
#define NTB_FLAG_EOP    1 /* end of packet */
/* Completion entry for a consumed descriptor. */
struct ntb_used {
	uint16_t len;     /* buffer length */
	uint16_t flags;   /* flags (NTB_FLAG_EOP, ...) */
};
136 
/* Software shadow of one RX descriptor: the mbuf posted for it. */
struct ntb_rx_entry {
	struct rte_mbuf *mbuf;
};
140 
/* Per-queue RX state.
 * NOTE(review): volatile fields appear to be written by the peer through
 * the memory window — confirm against the datapath code.
 */
struct ntb_rx_queue {
	struct ntb_desc *rx_desc_ring;            /* descriptors posted locally */
	volatile struct ntb_used *rx_used_ring;   /* completions, peer-written */
	uint16_t *avail_cnt;                      /* local avail index */
	volatile uint16_t *used_cnt;              /* used index, peer-written */
	uint16_t last_avail;
	uint16_t last_used;
	uint16_t nb_rx_desc;       /* ring size in descriptors */

	uint16_t rx_free_thresh;   /* refill threshold */

	struct rte_mempool *mpool; /* mempool for mbuf allocation */
	struct ntb_rx_entry *sw_ring; /* mbuf shadow, one entry per descriptor */

	uint16_t queue_id;         /* DPDK queue index. */
	uint16_t port_id;          /* Device port identifier. */

	struct ntb_hw *hw;         /* back-pointer to owning device */
};
160 
/* Software shadow of one TX descriptor. */
struct ntb_tx_entry {
	struct rte_mbuf *mbuf; /* mbuf associated with this descriptor */
	uint16_t next_id;      /* index of the next descriptor */
	uint16_t last_id;      /* index of the packet's last descriptor */
};
166 
/* Per-queue TX state — mirror of ntb_rx_queue with roles reversed.
 * NOTE(review): volatile fields appear to be written by the peer through
 * the memory window — confirm against the datapath code.
 */
struct ntb_tx_queue {
	volatile struct ntb_desc *tx_desc_ring;   /* descriptors, peer-written */
	struct ntb_used *tx_used_ring;            /* completions written locally */
	volatile uint16_t *avail_cnt;             /* avail index, peer-written */
	uint16_t *used_cnt;                       /* local used index */
	uint16_t last_avail;          /* Next need to be free. */
	uint16_t last_used;           /* Next need to be sent. */
	uint16_t nb_tx_desc;          /* ring size in descriptors */

	/* Total number of TX descriptors ready to be allocated. */
	uint16_t nb_tx_free;
	uint16_t tx_free_thresh;      /* free descriptors below this -> clean up */

	struct ntb_tx_entry *sw_ring; /* mbuf shadow, one entry per descriptor */

	uint16_t queue_id;            /* DPDK queue index. */
	uint16_t port_id;             /* Device port identifier. */

	struct ntb_hw *hw;            /* back-pointer to owning device */
};
187 
/* Per-queue header laid out in the shared memory window. Each field sits on
 * its own cache line (alignas) — presumably to avoid false sharing between
 * the two sides; the descriptor ring follows as a flexible array member.
 */
struct ntb_header {
	alignas(RTE_CACHE_LINE_SIZE) uint16_t avail_cnt;
	alignas(RTE_CACHE_LINE_SIZE) uint16_t used_cnt;
	alignas(RTE_CACHE_LINE_SIZE) struct ntb_desc desc_ring[];
};
193 
/* ntb private data: per-device state shared by control and data paths. */
struct ntb_hw {
	uint8_t mw_cnt;            /* number of memory windows */
	uint8_t db_cnt;            /* number of doorbell bits */
	uint8_t spad_cnt;          /* number of scratchpad registers */

	uint64_t db_valid_mask;    /* bits backed by real doorbells */
	uint64_t db_mask;          /* currently masked doorbell bits */

	enum ntb_topo topo;        /* detected topology */

	enum ntb_link link_status; /* cached link state */
	enum ntb_speed link_speed; /* cached link speed (PCIe gen) */
	enum ntb_width link_width; /* cached link width (lanes) */

	const struct ntb_dev_ops *ntb_ops; /* hw-generation callbacks */

	struct rte_pci_device *pci_dev;
	char *hw_addr;             /* mapped device register base */

	uint8_t peer_dev_up;       /* nonzero once the peer reports up */
	uint64_t *mw_size;         /* local memory window sizes, mw_cnt entries */
	/* remote mem base addr */
	uint64_t *peer_mw_base;

	uint16_t queue_pairs;      /* number of configured queue pairs */
	uint16_t queue_size;       /* descriptors per queue */
	uint32_t hdr_size_per_queue; /* bytes of ntb_header + rings per queue */

	struct ntb_rx_queue **rx_queues;
	struct ntb_tx_queue **tx_queues;

	/* memzone to populate RX ring. */
	const struct rte_memzone **mz;
	uint8_t used_mw_num;       /* memory windows in use locally */

	uint8_t peer_used_mws;     /* memory windows in use by the peer */

	uint64_t *ntb_xstats;      /* counters, indexed by enum ntb_xstats_idx */
	uint64_t *ntb_xstats_off;  /* offsets for xstats reset */

	/* Reserve several spad for app to use. */
	int spad_user_list[NTB_SPAD_USER_MAX_NUM];
};
238 
239 #endif /* _NTB_H_ */
240