xref: /dpdk/drivers/common/mlx5/mlx5_common.h (revision b9a87346b05c562dd6005ee025eca67a1a80bea8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2019 Mellanox Technologies, Ltd
3  */
4 
5 #ifndef RTE_PMD_MLX5_COMMON_H_
6 #define RTE_PMD_MLX5_COMMON_H_
7 
8 #include <stdio.h>
9 
10 #include <rte_compat.h>
11 #include <rte_pci.h>
12 #include <bus_pci_driver.h>
13 #include <rte_debug.h>
14 #include <rte_atomic.h>
15 #include <rte_rwlock.h>
16 #include <rte_log.h>
17 #include <rte_kvargs.h>
18 #include <rte_devargs.h>
19 #include <rte_bitops.h>
20 #include <rte_lcore.h>
21 #include <rte_spinlock.h>
22 #include <rte_os_shim.h>
23 
24 #include "mlx5_prm.h"
25 #include "mlx5_devx_cmds.h"
26 #include "mlx5_common_os.h"
27 #include "mlx5_common_mr.h"
28 
29 /* Reported driver name. */
30 #define MLX5_PCI_DRIVER_NAME "mlx5_pci"
31 #define MLX5_AUXILIARY_DRIVER_NAME "mlx5_auxiliary"
32 
33 /* Bit-field manipulation. */
34 #define BITFIELD_DECLARE(bf, type, size) \
35 	type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
36 		!!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
37 #define BITFIELD_DEFINE(bf, type, size) \
38 	BITFIELD_DECLARE((bf), type, (size)) = { 0 }
39 #define BITFIELD_SET(bf, b) \
40 	(void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
41 		((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))
42 #define BITFIELD_RESET(bf, b) \
43 	(void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
44 		~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))
45 #define BITFIELD_ISSET(bf, b) \
46 	!!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
47 		((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
48 
49 /*
50  * Helper macros to work around __VA_ARGS__ limitations in a C99 compliant
51  * manner.
52  */
53 #define PMD_DRV_LOG_STRIP(a, b) a
54 #define PMD_DRV_LOG_OPAREN (
55 #define PMD_DRV_LOG_CPAREN )
56 #define PMD_DRV_LOG_COMMA ,
57 
/* Return the file name part of a path (everything after the last '/'). */
static inline const char *
pmd_drv_log_basename(const char *s)
{
	const char *last = s;
	const char *p;

	for (p = s; *p != '\0'; ++p) {
		if (*p == '/')
			last = p + 1;
	}
	return last;
}
69 
70 #define PMD_DRV_LOG___(level, type, name, ...) \
71 	rte_log(RTE_LOG_ ## level, \
72 		type, \
73 		RTE_FMT(name ": " \
74 			RTE_FMT_HEAD(__VA_ARGS__,), \
75 		RTE_FMT_TAIL(__VA_ARGS__,)))
76 
77 #ifdef RTE_LIBRTE_MLX5_DEBUG
78 
79 #define PMD_DRV_LOG__(level, type, name, ...) \
80 	PMD_DRV_LOG___(level, type, name, "%s:%u: %s(): " __VA_ARGS__)
81 #define PMD_DRV_LOG_(level, type, name, s, ...) \
82 	PMD_DRV_LOG__(level, type, name,\
83 		s "\n" PMD_DRV_LOG_COMMA \
84 		pmd_drv_log_basename(__FILE__) PMD_DRV_LOG_COMMA \
85 		__LINE__ PMD_DRV_LOG_COMMA \
86 		__func__, \
87 		__VA_ARGS__)
88 
89 #else /* RTE_LIBRTE_MLX5_DEBUG */
90 #define PMD_DRV_LOG__(level, type, name, ...) \
91 	PMD_DRV_LOG___(level, type, name, __VA_ARGS__)
92 #define PMD_DRV_LOG_(level, type, name, s, ...) \
93 	PMD_DRV_LOG__(level, type, name, s "\n", __VA_ARGS__)
94 
95 #endif /* RTE_LIBRTE_MLX5_DEBUG */
96 
97 /* claim_zero() does not perform any check when debugging is disabled. */
98 #ifdef RTE_LIBRTE_MLX5_DEBUG
99 
100 #define MLX5_ASSERT(exp) RTE_VERIFY(exp)
101 #define claim_zero(...) MLX5_ASSERT((__VA_ARGS__) == 0)
102 #define claim_nonzero(...) MLX5_ASSERT((__VA_ARGS__) != 0)
103 
104 #else /* RTE_LIBRTE_MLX5_DEBUG */
105 
106 #define MLX5_ASSERT(exp) RTE_ASSERT(exp)
107 #define claim_zero(...) (__VA_ARGS__)
108 #define claim_nonzero(...) (__VA_ARGS__)
109 
110 #endif /* RTE_LIBRTE_MLX5_DEBUG */
111 
/* Allocate a buffer on the stack and fill it with a printf format string.
 * NOTE: expands to multiple declarations and statements, so it must not be
 * used where a single statement is required (e.g. an unbraced if body).
 * The "" prefix concatenates with the first variadic argument, forcing it
 * to be a string literal.
 */
#define MKSTR(name, ...) \
	int mkstr_size_##name = snprintf(NULL, 0, "" __VA_ARGS__); \
	char name[mkstr_size_##name + 1]; \
	\
	memset(name, 0, mkstr_size_##name + 1); \
	snprintf(name, sizeof(name), "" __VA_ARGS__)
119 
120 enum {
121 	PCI_VENDOR_ID_MELLANOX = 0x15b3,
122 };
123 
/* Mellanox/NVIDIA PCI device IDs handled by the mlx5 drivers. */
enum {
	PCI_DEVICE_ID_MELLANOX_CONNECTX4 = 0x1013,
	PCI_DEVICE_ID_MELLANOX_CONNECTX4VF = 0x1014,
	PCI_DEVICE_ID_MELLANOX_CONNECTX4LX = 0x1015,
	PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5 = 0x1017,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
	PCI_DEVICE_ID_MELLANOX_BLUEFIELD = 0xa2d2,
	PCI_DEVICE_ID_MELLANOX_BLUEFIELDVF = 0xa2d3,
	PCI_DEVICE_ID_MELLANOX_CONNECTX6 = 0x101b,
	PCI_DEVICE_ID_MELLANOX_CONNECTX6VF = 0x101c,
	PCI_DEVICE_ID_MELLANOX_CONNECTX6DX = 0x101d,
	PCI_DEVICE_ID_MELLANOX_CONNECTXVF = 0x101e,
	PCI_DEVICE_ID_MELLANOX_BLUEFIELD2 = 0xa2d6,
	PCI_DEVICE_ID_MELLANOX_CONNECTX6LX = 0x101f,
	PCI_DEVICE_ID_MELLANOX_CONNECTX7 = 0x1021,
	/* Was "0Xa2dc"; lowercase hex prefix for consistency with the rest. */
	PCI_DEVICE_ID_MELLANOX_BLUEFIELD3 = 0xa2dc,
};
144 
145 /* Maximum number of simultaneous unicast MAC addresses. */
146 #define MLX5_MAX_UC_MAC_ADDRESSES 128
147 /* Maximum number of simultaneous Multicast MAC addresses. */
148 #define MLX5_MAX_MC_MAC_ADDRESSES 128
149 /* Maximum number of simultaneous MAC addresses. */
150 #define MLX5_MAX_MAC_ADDRESSES \
151 	(MLX5_MAX_UC_MAC_ADDRESSES + MLX5_MAX_MC_MAC_ADDRESSES)
152 
153 /* Recognized Infiniband device physical port name types. */
154 enum mlx5_nl_phys_port_name_type {
155 	MLX5_PHYS_PORT_NAME_TYPE_NOTSET = 0, /* Not set. */
156 	MLX5_PHYS_PORT_NAME_TYPE_LEGACY, /* before kernel ver < 5.0 */
157 	MLX5_PHYS_PORT_NAME_TYPE_UPLINK, /* p0, kernel ver >= 5.0 */
158 	MLX5_PHYS_PORT_NAME_TYPE_PFVF, /* pf0vf0, kernel ver >= 5.0 */
159 	MLX5_PHYS_PORT_NAME_TYPE_PFHPF, /* pf0, kernel ver >= 5.7, HPF rep */
160 	MLX5_PHYS_PORT_NAME_TYPE_PFSF, /* pf0sf0, kernel ver >= 5.0 */
161 	MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN, /* Unrecognized. */
162 };
163 
/** Switch information returned by mlx5_nl_switch_info(). */
struct mlx5_switch_info {
	uint32_t master:1; /**< Master device. */
	uint32_t representor:1; /**< Representor device. */
	enum mlx5_nl_phys_port_name_type name_type; /**< Port name type. */
	int32_t ctrl_num; /**< Controller number (valid for c#pf#vf# format). */
	int32_t pf_num; /**< PF number (valid for pfxvfx format only). */
	int32_t port_name; /**< Representor port name. */
	int32_t mpesw_owner; /**< MPESW owner port number. */
	uint64_t switch_id; /**< Switch identifier. */
};
175 
/* CQE status returned by the check_cqe*() helpers below. */
enum mlx5_cqe_status {
	MLX5_CQE_STATUS_SW_OWN = -1, /* CQE is valid and owned by software. */
	MLX5_CQE_STATUS_HW_OWN = -2, /* CQE is still owned by hardware. */
	MLX5_CQE_STATUS_ERR = -3, /* CQE carries an error opcode. */
};
182 
183 /**
184  * Check whether CQE has an error opcode.
185  *
186  * @param op_code
187  *   Opcode to check.
188  *
189  * @return
190  *   The CQE status.
191  */
192 static __rte_always_inline enum mlx5_cqe_status
193 check_cqe_error(const uint8_t op_code)
194 {
195 	/* Prevent speculative reading of other fields in CQE until
196 	 * CQE is valid.
197 	 */
198 	rte_atomic_thread_fence(rte_memory_order_acquire);
199 
200 	if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
201 		     op_code == MLX5_CQE_REQ_ERR))
202 		return MLX5_CQE_STATUS_ERR;
203 	return MLX5_CQE_STATUS_SW_OWN;
204 }
205 
206 /**
207  * Check whether CQE is valid using owner bit.
208  *
209  * @param cqe
210  *   Pointer to CQE.
211  * @param cqes_n
212  *   Size of completion queue.
213  * @param ci
214  *   Consumer index.
215  *
216  * @return
217  *   The CQE status.
218  */
219 static __rte_always_inline enum mlx5_cqe_status
220 check_cqe(volatile struct mlx5_cqe *cqe, const uint16_t cqes_n,
221 	  const uint16_t ci)
222 {
223 	const uint16_t idx = ci & cqes_n;
224 	const uint8_t op_own = cqe->op_own;
225 	const uint8_t op_owner = MLX5_CQE_OWNER(op_own);
226 	const uint8_t op_code = MLX5_CQE_OPCODE(op_own);
227 
228 	if (unlikely((op_owner != (!!(idx))) ||
229 		     (op_code == MLX5_CQE_INVALID)))
230 		return MLX5_CQE_STATUS_HW_OWN;
231 	return check_cqe_error(op_code);
232 }
233 
234 /**
235  * Check whether CQE is valid using validity iteration count.
236  *
237  * @param cqe
238  *   Pointer to CQE.
239  * @param cqes_n
240  *   Log 2 of completion queue size.
241  * @param ci
242  *   Consumer index.
243  *
244  * @return
245  *   The CQE status.
246  */
247 static __rte_always_inline enum mlx5_cqe_status
248 check_cqe_iteration(volatile struct mlx5_cqe *cqe, const uint16_t cqes_n,
249 		    const uint32_t ci)
250 {
251 	const uint8_t op_own = cqe->op_own;
252 	const uint8_t op_code = MLX5_CQE_OPCODE(op_own);
253 	const uint8_t vic = ci >> cqes_n;
254 
255 	if (unlikely((cqe->validity_iteration_count != vic) ||
256 		     (op_code == MLX5_CQE_INVALID)))
257 		return MLX5_CQE_STATUS_HW_OWN;
258 	return check_cqe_error(op_code);
259 }
260 
261 /*
262  * Get PCI address <DBDF> string from EAL device.
263  *
264  * @param[out] addr
265  *	The output address buffer string
266  * @param[in] size
267  *	The output buffer size
268  * @return
269  *   - 0 on success.
270  *   - Negative value and rte_errno is set otherwise.
271  */
272 __rte_internal
273 int mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size);
274 
275 /*
276  * Get PCI address from sysfs of a PCI-related device.
277  *
278  * @param[in] dev_path
279  *   The sysfs path should not point to the direct plain PCI device.
280  *   Instead, the node "/device/" is used to access the real device.
281  * @param[out] pci_addr
282  *   Parsed PCI address.
283  *
284  * @return
285  *   - 0 on success.
286  *   - Negative value and rte_errno is set otherwise.
287  */
288 __rte_internal
289 int mlx5_get_pci_addr(const char *dev_path, struct rte_pci_addr *pci_addr);
290 
291 /*
292  * Get kernel network interface name from sysfs IB device path.
293  *
294  * @param[in] ibdev_path
295  *   The sysfs path to IB device.
296  * @param[out] ifname
297  *   Interface name output of size IF_NAMESIZE.
298  *
299  * @return
300  *   - 0 on success.
301  *   - Negative value and rte_errno is set otherwise.
302  */
303 __rte_internal
304 int mlx5_get_ifname_sysfs(const char *ibdev_path, char *ifname);
305 
306 __rte_internal
307 int mlx5_auxiliary_get_child_name(const char *dev, const char *node,
308 				  char *child, size_t size);
309 
/* Device classes built on the common driver; distinct bit values (RTE_BIT64)
 * so several classes can be combined in a single mask.
 */
enum mlx5_class {
	MLX5_CLASS_INVALID, /* 0: no class. */
	MLX5_CLASS_ETH = RTE_BIT64(0),
	MLX5_CLASS_VDPA = RTE_BIT64(1),
	MLX5_CLASS_REGEX = RTE_BIT64(2),
	MLX5_CLASS_COMPRESS = RTE_BIT64(3),
	MLX5_CLASS_CRYPTO = RTE_BIT64(4),
};
318 
319 #define MLX5_DBR_SIZE RTE_CACHE_LINE_SIZE
320 
321 /* devX creation object */
322 struct mlx5_devx_obj {
323 	void *obj; /* The DV object. */
324 	int id; /* The object ID. */
325 };
326 
327 /* UMR memory buffer used to define 1 entry in indirect mkey. */
328 struct mlx5_klm {
329 	uint32_t byte_count;
330 	uint32_t mkey;
331 	uint64_t address;
332 };
333 
334 /** Control for key/values list. */
335 struct mlx5_kvargs_ctrl {
336 	struct rte_kvargs *kvlist; /* Structure containing list of key/values.*/
337 	bool is_used[RTE_KVARGS_MAX]; /* Indicator which devargs were used. */
338 };
339 
340 /**
341  * Call a handler function for each key/value in the list of keys.
342  *
343  * For each key/value association that matches the given key, calls the
344  * handler function with the for a given arg_name passing the value on the
345  * dictionary for that key and a given extra argument.
346  *
347  * @param mkvlist
348  *   The mlx5_kvargs structure.
349  * @param keys
350  *   A list of keys to process (table of const char *, the last must be NULL).
351  * @param handler
352  *   The function to call for each matching key.
353  * @param opaque_arg
354  *   A pointer passed unchanged to the handler.
355  *
356  * @return
357  *   - 0 on success
358  *   - Negative on error
359  */
360 __rte_internal
361 int
362 mlx5_kvargs_process(struct mlx5_kvargs_ctrl *mkvlist, const char *const keys[],
363 		    arg_handler_t handler, void *opaque_arg);
364 
365 /* All UAR arguments using doorbell register in datapath. */
366 struct mlx5_uar_data {
367 	uint64_t *db;
368 	/* The doorbell's virtual address mapped to the relevant HW UAR space.*/
369 #ifndef RTE_ARCH_64
370 	rte_spinlock_t *sl_p;
371 	/* Pointer to UAR access lock required for 32bit implementations. */
372 #endif /* RTE_ARCH_64 */
373 };
374 
375 /* DevX UAR control structure. */
376 struct mlx5_uar {
377 	struct mlx5_uar_data bf_db; /* UAR data for Blueflame register. */
378 	struct mlx5_uar_data cq_db; /* UAR data for CQ arm db register. */
379 	void *obj; /* DevX UAR object. */
380 	bool dbnc; /* Doorbell mapped to non-cached region. */
381 #ifndef RTE_ARCH_64
382 	rte_spinlock_t bf_sl;
383 	rte_spinlock_t cq_sl;
384 	/* UAR access locks required for 32bit implementations. */
385 #endif /* RTE_ARCH_64 */
386 };
387 
/**
 * Ring a doorbell and flush the update if requested.
 *
 * @param uar
 *   Pointer to UAR data structure.
 * @param val
 *   64-bit doorbell payload to write, in big endian format.
 * @param index
 *   Index of doorbell record.
 * @param db_rec
 *   Address of doorbell record.
 * @param flash
 *   Decide whether to flush the DB writing using a memory barrier.
 *   NOTE(review): presumably a typo for "flush"; name kept as-is.
 */
static __rte_always_inline void
mlx5_doorbell_ring(struct mlx5_uar_data *uar, uint64_t val, uint32_t index,
		   volatile uint32_t *db_rec, bool flash)
{
	/* Order prior stores before the doorbell record update. */
	rte_io_wmb();
	*db_rec = rte_cpu_to_be_32(index);
	/* Ensure ordering between DB record actual update and UAR access. */
	rte_wmb();
#ifdef RTE_ARCH_64
	/* 64-bit architectures write the doorbell in one store. */
	*uar->db = val;
#else /* !RTE_ARCH_64 */
	/* 32-bit architectures split the write into two 32-bit stores; the
	 * spinlock keeps the pair atomic with respect to other writers.
	 */
	rte_spinlock_lock(uar->sl_p);
	*(volatile uint32_t *)uar->db = val;
	rte_io_wmb();
	*((volatile uint32_t *)uar->db + 1) = val >> 32;
	rte_spinlock_unlock(uar->sl_p);
#endif
	if (flash)
		rte_wmb();
}
422 
423 /**
424  * Get the doorbell register mapping type.
425  *
426  * @param uar_mmap_offset
427  *   Mmap offset of Verbs/DevX UAR.
428  * @param page_size
429  *   System page size
430  *
431  * @return
432  *   1 for non-cached, 0 otherwise.
433  */
434 static inline uint16_t
435 mlx5_db_map_type_get(off_t uar_mmap_offset, size_t page_size)
436 {
437 	off_t cmd = uar_mmap_offset / page_size;
438 
439 	cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
440 	cmd &= MLX5_UAR_MMAP_CMD_MASK;
441 	if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
442 		return 1;
443 	return 0;
444 }
445 
446 __rte_internal
447 void mlx5_translate_port_name(const char *port_name_in,
448 			      struct mlx5_switch_info *port_info_out);
449 void mlx5_glue_constructor(void);
450 extern uint8_t haswell_broadwell_cpu;
451 
452 __rte_internal
453 void mlx5_common_init(void);
454 
455 /*
456  * Common Driver Interface
457  *
458  * ConnectX common driver supports multiple classes: net, vDPA, regex, crypto
459  * and compress devices. This layer enables creating such multiple classes
460  * on a single device by allowing to bind multiple class-specific device
461  * drivers to attach to the common driver.
462  *
463  * ------------  -------------  --------------  -----------------  ------------
464  * | mlx5 net |  | mlx5 vdpa |  | mlx5 regex |  | mlx5 compress |  | mlx5 ... |
465  * |  driver  |  |  driver   |  |   driver   |  |     driver    |  |  drivers |
466  * ------------  -------------  --------------  -----------------  ------------
467  *                               ||
468  *                        -----------------
469  *                        |     mlx5      |
470  *                        | common driver |
471  *                        -----------------
472  *                          |          |
473  *                 -----------        -----------------
474  *                 |   mlx5  |        |   mlx5        |
475  *                 | pci dev |        | auxiliary dev |
476  *                 -----------        -----------------
477  *
478  * - mlx5 PCI bus driver binds to mlx5 PCI devices defined by PCI ID table
479  *   of all related devices.
480  * - mlx5 class driver such as net, vDPA, regex defines its specific
481  *   PCI ID table and mlx5 bus driver probes matching class drivers.
482  * - mlx5 common driver is central place that validates supported
483  *   class combinations.
484  * - mlx5 common driver hides bus difference by resolving device address
485  *   from devargs, locating target RDMA device and probing with it.
486  */
487 
488 /*
489  * Device configuration structure.
490  *
491  * Merged configuration from:
492  *
493  *  - Device capabilities,
494  *  - User device parameters disabled features.
495  */
496 struct mlx5_common_dev_config {
497 	struct mlx5_hca_attr hca_attr; /* HCA attributes. */
498 	int dbnc; /* Skip doorbell register write barrier. */
499 	int device_fd; /* Device file descriptor for importation. */
500 	int pd_handle; /* Protection Domain handle for importation.  */
501 	unsigned int devx:1; /* Whether devx interface is available or not. */
502 	unsigned int sys_mem_en:1; /* The default memory allocator. */
503 	unsigned int mr_mempool_reg_en:1;
504 	/* Allow/prevent implicit mempool memory registration. */
505 	unsigned int mr_ext_memseg_en:1;
506 	/* Whether memseg should be extended for MR creation. */
507 };
508 
509 struct mlx5_common_device {
510 	struct rte_device *dev;
511 	TAILQ_ENTRY(mlx5_common_device) next;
512 	uint32_t classes_loaded;
513 	void *ctx; /* Verbs/DV/DevX context. */
514 	void *pd; /* Protection Domain. */
515 	uint32_t pdn; /* Protection Domain Number. */
516 	struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
517 	struct mlx5_common_dev_config config; /* Device configuration. */
518 };
519 
520 /**
521  * Indicates whether PD and CTX are imported from another process,
522  * or created by this process.
523  *
524  * @param cdev
525  *   Pointer to common device.
526  *
527  * @return
528  *   True if PD and CTX are imported from another process, False otherwise.
529  */
530 static inline bool
531 mlx5_imported_pd_and_ctx(struct mlx5_common_device *cdev)
532 {
533 	return cdev->config.device_fd != MLX5_ARG_UNSET &&
534 	       cdev->config.pd_handle != MLX5_ARG_UNSET;
535 }
536 
537 /**
538  * Initialization function for the driver called during device probing.
539  */
540 typedef int (mlx5_class_driver_probe_t)(struct mlx5_common_device *cdev,
541 					struct mlx5_kvargs_ctrl *mkvlist);
542 
543 /**
544  * Uninitialization function for the driver called during hot-unplugging.
545  */
546 typedef int (mlx5_class_driver_remove_t)(struct mlx5_common_device *cdev);
547 
548 /** Device already probed can be probed again to check for new ports. */
549 #define MLX5_DRV_PROBE_AGAIN 0x0004
550 
551 /**
552  * A structure describing a mlx5 common class driver.
553  */
554 struct mlx5_class_driver {
555 	TAILQ_ENTRY(mlx5_class_driver) next;
556 	enum mlx5_class drv_class;            /**< Class of this driver. */
557 	const char *name;                     /**< Driver name. */
558 	mlx5_class_driver_probe_t *probe;     /**< Device probe function. */
559 	mlx5_class_driver_remove_t *remove;   /**< Device remove function. */
560 	const struct rte_pci_id *id_table;    /**< ID table, NULL terminated. */
561 	uint32_t probe_again:1;
562 	/**< Device already probed can be probed again to check new device. */
563 	uint32_t intr_lsc:1; /**< Supports link state interrupt. */
564 	uint32_t intr_rmv:1; /**< Supports device remove interrupt. */
565 };
566 
567 /**
568  * Register a mlx5 device driver.
569  *
570  * @param driver
571  *   A pointer to a mlx5_driver structure describing the driver
572  *   to be registered.
573  */
574 __rte_internal
575 void
576 mlx5_class_driver_register(struct mlx5_class_driver *driver);
577 
578 /**
579  * Test device is a PCI bus device.
580  *
581  * @param dev
582  *   Pointer to device.
583  *
584  * @return
585  *   - True on device devargs is a PCI bus device.
586  *   - False otherwise.
587  */
588 __rte_internal
589 bool
590 mlx5_dev_is_pci(const struct rte_device *dev);
591 
592 /**
593  * Test PCI device is a VF device.
594  *
595  * @param pci_dev
596  *   Pointer to PCI device.
597  *
598  * @return
599  *   - True on PCI device is a VF device.
600  *   - False otherwise.
601  */
602 __rte_internal
603 bool
604 mlx5_dev_is_vf_pci(const struct rte_pci_device *pci_dev);
605 
606 __rte_internal
607 int
608 mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev);
609 
610 __rte_internal
611 void
612 mlx5_dev_mempool_unregister(struct mlx5_common_device *cdev,
613 			    struct rte_mempool *mp);
614 
615 __rte_internal
616 int
617 mlx5_devx_uar_prepare(struct mlx5_common_device *cdev, struct mlx5_uar *uar);
618 
619 __rte_internal
620 void
621 mlx5_devx_uar_release(struct mlx5_uar *uar);
622 
623 /* mlx5_common_os.c */
624 
625 int mlx5_os_open_device(struct mlx5_common_device *cdev, uint32_t classes);
626 int mlx5_os_pd_prepare(struct mlx5_common_device *cdev);
627 int mlx5_os_pd_release(struct mlx5_common_device *cdev);
628 int mlx5_os_remote_pd_and_ctx_validate(struct mlx5_common_dev_config *config);
629 
630 /* mlx5 PMD wrapped MR struct. */
631 struct mlx5_pmd_wrapped_mr {
632 	uint32_t	     lkey;
633 	void		     *addr;
634 	size_t		     len;
635 	void		     *obj; /* verbs mr object or devx umem object. */
636 	void		     *imkey; /* DevX indirect mkey object. */
637 };
638 
639 __rte_internal
640 int
641 mlx5_os_wrapped_mkey_create(void *ctx, void *pd, uint32_t pdn, void *addr,
642 			    size_t length, struct mlx5_pmd_wrapped_mr *pmd_mr);
643 
644 __rte_internal
645 void
646 mlx5_os_wrapped_mkey_destroy(struct mlx5_pmd_wrapped_mr *pmd_mr);
647 
648 #endif /* RTE_PMD_MLX5_COMMON_H_ */
649