/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_MEMORY_H_
#define _RTE_MEMORY_H_

/**
 * @file
 *
 * Memory-related RTE API.
 */

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_fbarray.h>

#ifdef __cplusplus
extern "C" {
#endif

#define RTE_PGSIZE_4K   (1ULL << 12)
#define RTE_PGSIZE_64K  (1ULL << 16)
#define RTE_PGSIZE_256K (1ULL << 18)
#define RTE_PGSIZE_2M   (1ULL << 21)
#define RTE_PGSIZE_16M  (1ULL << 24)
#define RTE_PGSIZE_256M (1ULL << 28)
#define RTE_PGSIZE_512M (1ULL << 29)
#define RTE_PGSIZE_1G   (1ULL << 30)
#define RTE_PGSIZE_4G   (1ULL << 32)
#define RTE_PGSIZE_16G  (1ULL << 34)

#define SOCKET_ID_ANY -1                    /**< Any NUMA socket. */

/** Prevent this segment from being freed back to the OS. */
#define RTE_MEMSEG_FLAG_DO_NOT_FREE RTE_BIT32(0)
/** This segment is not filled with zeros. */
#define RTE_MEMSEG_FLAG_DIRTY RTE_BIT32(1)

/**
 * Physical memory segment descriptor.
 */
struct __rte_packed_begin rte_memseg {
	rte_iova_t iova;            /**< Start IO address. */
	union {
		void *addr;         /**< Start virtual address. */
		uint64_t addr_64;   /**< Makes sure addr is always 64 bits. */
	};
	size_t len;                 /**< Length of the segment. */
	uint64_t hugepage_sz;       /**< The page size of the underlying memory. */
	int32_t socket_id;          /**< NUMA socket ID. */
	uint32_t nchannel;          /**< Number of channels. */
	uint32_t nrank;             /**< Number of ranks. */
	uint32_t flags;             /**< Memseg-specific flags. */
} __rte_packed_end;

/**
 * A memseg list is a special case, as we need to store a bunch of other data
 * together with the array itself.
 */
struct rte_memseg_list {
	union {
		void *base_va;
		/**< Base virtual address for this memseg list. */
		uint64_t addr_64;
		/**< Makes sure addr is always 64 bits. */
	};
	uint64_t page_sz; /**< Page size for all memsegs in this list. */
	int socket_id; /**< Socket ID for all memsegs in this list. */
	volatile uint32_t version; /**< Version number for multiprocess sync. */
	size_t len; /**< Length of memory area covered by this memseg list. */
	unsigned int external; /**< 1 if this list points to external memory. */
	unsigned int heap; /**< 1 if this list points to a heap. */
	struct rte_fbarray memseg_arr;
};

/**
 * Lock a page in physical memory and prevent it from being swapped out.
 *
 * @param virt
 *   The virtual address.
 * @return
 *   0 on success, negative on error.
 */
int rte_mem_lock_page(const void *virt);

/**
 * Get physical address of any mapped virtual address in the current process.
 * It is found by browsing the /proc/self/pagemap special file.
 * The page must be locked.
 *
 * @param virt
 *   The virtual address.
 * @return
 *   The physical address or RTE_BAD_IOVA on error.
 */
phys_addr_t rte_mem_virt2phy(const void *virt);

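/*
 * Usage sketch (illustrative, not part of this API): lock a buffer's page,
 * then resolve its physical address. The buffer name is hypothetical.
 *
 *	static char buf[4096];
 *	phys_addr_t pa;
 *
 *	if (rte_mem_lock_page(buf) < 0)
 *		return -1;
 *	pa = rte_mem_virt2phy(buf);
 *	if (pa == RTE_BAD_IOVA)
 *		return -1; // translation failed (e.g. no pagemap access)
 */
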
/**
 * Get IO virtual address of any mapped virtual address in the current process.
 *
 * @note This function will not check the internal page table. Instead, in IOVA
 *       as PA mode, it will fall back to getting the real physical address
 *       (which may not match the expected IOVA, such as what was specified for
 *       external memory).
 *
 * @param virt
 *   The virtual address.
 * @return
 *   The IO address or RTE_BAD_IOVA on error.
 */
rte_iova_t rte_mem_virt2iova(const void *virt);

/**
 * Get virtual memory address corresponding to iova address.
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @param iova
 *   The iova address.
 * @return
 *   Virtual address corresponding to iova address (or NULL if address does not
 *   exist within DPDK memory map).
 */
void *
rte_mem_iova2virt(rte_iova_t iova);

/**
 * Get memseg to which a particular virtual address belongs.
 *
 * @param virt
 *   The virtual address.
 * @param msl
 *   The memseg list in which to look up based on ``virt`` address
 *   (can be NULL).
 * @return
 *   Memseg pointer on success, or NULL on error.
 */
struct rte_memseg *
rte_mem_virt2memseg(const void *virt, const struct rte_memseg_list *msl);

/**
 * Get memseg list corresponding to virtual memory address.
 *
 * @param virt
 *   The virtual address.
 * @return
 *   Memseg list to which this virtual address belongs.
 */
struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *virt);

/**
 * Memseg walk function prototype.
 *
 * Returning 0 will continue the walk.
 * Returning 1 will stop the walk.
 * Returning -1 will stop the walk and report an error.
 */
typedef int (*rte_memseg_walk_t)(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, void *arg);

/**
 * Memseg contig walk function prototype. This will trigger a callback on every
 * VA-contiguous area starting at memseg ``ms``, so total valid VA space at each
 * callback call will be [``ms->addr``, ``ms->addr + len``).
 *
 * Returning 0 will continue the walk.
 * Returning 1 will stop the walk.
 * Returning -1 will stop the walk and report an error.
 */
typedef int (*rte_memseg_contig_walk_t)(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg);

/**
 * Memseg list walk function prototype. This will trigger a callback on every
 * allocated memseg list.
 *
 * Returning 0 will continue the walk.
 * Returning 1 will stop the walk.
 * Returning -1 will stop the walk and report an error.
 */
typedef int (*rte_memseg_list_walk_t)(const struct rte_memseg_list *msl,
		void *arg);

/**
 * Walk list of all memsegs.
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @note This function will also walk through externally allocated segments. It
 *       is up to the user to decide whether to skip these segments.
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 * @return
 *   0 if walked over the entire list
 *   1 if stopped by the user
 *   -1 if user function reported error
 */
int
rte_memseg_walk(rte_memseg_walk_t func, void *arg)
	__rte_locks_excluded(rte_mcfg_mem_get_lock());

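/*
 * Usage sketch (illustrative; the callback and counter names are
 * hypothetical): counting allocated segments with rte_memseg_walk().
 *
 *	static int
 *	count_cb(const struct rte_memseg_list *msl __rte_unused,
 *			const struct rte_memseg *ms __rte_unused, void *arg)
 *	{
 *		unsigned int *n = arg;
 *
 *		(*n)++;
 *		return 0; // 0 continues the walk
 *	}
 *
 *	unsigned int n = 0;
 *
 *	if (rte_memseg_walk(count_cb, &n) < 0)
 *		return -1; // a callback reported an error
 */
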
/**
 * Walk each VA-contiguous area.
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @note This function will also walk through externally allocated segments. It
 *       is up to the user to decide whether to skip these segments.
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 * @return
 *   0 if walked over the entire list
 *   1 if stopped by the user
 *   -1 if user function reported error
 */
int
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
	__rte_locks_excluded(rte_mcfg_mem_get_lock());

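/*
 * Usage sketch (illustrative; names are hypothetical): finding the largest
 * VA-contiguous area with rte_memseg_contig_walk().
 *
 *	static int
 *	max_contig_cb(const struct rte_memseg_list *msl __rte_unused,
 *			const struct rte_memseg *ms __rte_unused,
 *			size_t len, void *arg)
 *	{
 *		size_t *max = arg;
 *
 *		if (len > *max)
 *			*max = len;
 *		return 0;
 *	}
 */
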
/**
 * Walk each allocated memseg list.
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @note This function will also walk through externally allocated segments. It
 *       is up to the user to decide whether to skip these segments.
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 * @return
 *   0 if walked over the entire list
 *   1 if stopped by the user
 *   -1 if user function reported error
 */
int
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
	__rte_locks_excluded(rte_mcfg_mem_get_lock());

/**
 * Walk list of all memsegs without performing any locking.
 *
 * @note This function does not perform any locking, and is only safe to call
 *       from within memory-related callback functions.
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 * @return
 *   0 if walked over the entire list
 *   1 if stopped by the user
 *   -1 if user function reported error
 */
int
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg);

/**
 * Walk each VA-contiguous area without performing any locking.
 *
 * @note This function does not perform any locking, and is only safe to call
 *       from within memory-related callback functions.
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 * @return
 *   0 if walked over the entire list
 *   1 if stopped by the user
 *   -1 if user function reported error
 */
int
rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg);

/**
 * Walk each allocated memseg list without performing any locking.
 *
 * @note This function does not perform any locking, and is only safe to call
 *       from within memory-related callback functions.
 *
 * @param func
 *   Iterator function
 * @param arg
 *   Argument passed to iterator
 * @return
 *   0 if walked over the entire list
 *   1 if stopped by the user
 *   -1 if user function reported error
 */
int
rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg);

/**
 * Return file descriptor associated with a particular memseg (if available).
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @note This returns an internal file descriptor. Performing any operations on
 *       this file descriptor is inherently dangerous, so it should be treated
 *       as read-only for all intents and purposes.
 *
 * @param ms
 *   A pointer to memseg for which to get file descriptor.
 *
 * @return
 *   Valid file descriptor in case of success.
 *   -1 in case of error, with ``rte_errno`` set to the following values:
 *     - EINVAL  - ``ms`` pointer was NULL or did not point to a valid memseg
 *     - ENODEV  - ``ms`` fd is not available
 *     - ENOENT  - ``ms`` is an unused segment
 *     - ENOTSUP - segment fds are not supported
 */
int
rte_memseg_get_fd(const struct rte_memseg *ms);

/**
 * Return file descriptor associated with a particular memseg (if available).
 *
 * @note This function does not perform any locking, and is only safe to call
 *       from within memory-related callback functions.
 *
 * @note This returns an internal file descriptor. Performing any operations on
 *       this file descriptor is inherently dangerous, so it should be treated
 *       as read-only for all intents and purposes.
 *
 * @param ms
 *   A pointer to memseg for which to get file descriptor.
 *
 * @return
 *   Valid file descriptor in case of success.
 *   -1 in case of error, with ``rte_errno`` set to the following values:
 *     - EINVAL  - ``ms`` pointer was NULL or did not point to a valid memseg
 *     - ENODEV  - ``ms`` fd is not available
 *     - ENOENT  - ``ms`` is an unused segment
 *     - ENOTSUP - segment fds are not supported
 */
int
rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms);

/**
 * Get offset into segment file descriptor associated with a particular memseg
 * (if available).
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @param ms
 *   A pointer to memseg for which to get file descriptor.
 * @param offset
 *   A pointer to offset value where the result will be stored.
 *
 * @return
 *   0 in case of success.
 *   -1 in case of error, with ``rte_errno`` set to the following values:
 *     - EINVAL  - ``ms`` pointer was NULL or did not point to a valid memseg
 *     - EINVAL  - ``offset`` pointer was NULL
 *     - ENODEV  - ``ms`` fd is not available
 *     - ENOENT  - ``ms`` is an unused segment
 *     - ENOTSUP - segment fds are not supported
 */
int
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset);

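/*
 * Usage sketch (illustrative): retrieving a segment's backing file
 * descriptor and the segment's offset within that file. Per the notes
 * above, treat the descriptor as read-only.
 *
 *	size_t offset;
 *	int fd = rte_memseg_get_fd(ms);
 *
 *	if (fd < 0)
 *		return -1; // rte_errno holds the cause (e.g. ENOTSUP)
 *	if (rte_memseg_get_fd_offset(ms, &offset) < 0)
 *		return -1;
 */
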
/**
 * Get offset into segment file descriptor associated with a particular memseg
 * (if available).
 *
 * @note This function does not perform any locking, and is only safe to call
 *       from within memory-related callback functions.
 *
 * @param ms
 *   A pointer to memseg for which to get file descriptor.
 * @param offset
 *   A pointer to offset value where the result will be stored.
 *
 * @return
 *   0 in case of success.
 *   -1 in case of error, with ``rte_errno`` set to the following values:
 *     - EINVAL  - ``ms`` pointer was NULL or did not point to a valid memseg
 *     - EINVAL  - ``offset`` pointer was NULL
 *     - ENODEV  - ``ms`` fd is not available
 *     - ENOENT  - ``ms`` is an unused segment
 *     - ENOTSUP - segment fds are not supported
 */
int
rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
		size_t *offset);

/**
 * Register external memory chunk with DPDK.
 *
 * @note Using this API is mutually exclusive with the ``rte_malloc`` family
 *   of APIs.
 *
 * @note This API will not perform any DMA mapping. It is expected that the
 *   user will do that themselves.
 *
 * @note Before accessing this memory in other processes, it needs to be
 *   attached in each of those processes by calling ``rte_extmem_attach``.
 *
 * @param va_addr
 *   Start of virtual area to register. Must be aligned by ``page_sz``.
 * @param len
 *   Length of virtual area to register. Must be aligned by ``page_sz``.
 * @param iova_addrs
 *   Array of page IOVA addresses corresponding to each page in this memory
 *   area. Can be NULL, in which case page IOVA addresses will be set to
 *   RTE_BAD_IOVA.
 * @param n_pages
 *   Number of elements in the iova_addrs array. Ignored if ``iova_addrs``
 *   is NULL.
 * @param page_sz
 *   Page size of the underlying memory.
 *
 * @return
 *   - 0 on success
 *   - -1 in case of error, with rte_errno set to one of the following:
 *     EINVAL - one of the parameters was invalid
 *     EEXIST - memory chunk is already registered
 *     ENOSPC - no more space in internal config to store a new memory chunk
 */
int
rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
		unsigned int n_pages, size_t page_sz);

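/*
 * Usage sketch (illustrative; assumes ``len`` is page-aligned and that page
 * IOVAs are not needed up front; requires <sys/mman.h> and <unistd.h>):
 * registering an anonymous mapping as external memory. DMA mapping, if
 * needed, remains the user's responsibility.
 *
 *	size_t page_sz = (size_t)sysconf(_SC_PAGESIZE);
 *	void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	if (va == MAP_FAILED)
 *		return -1;
 *	if (rte_extmem_register(va, len, NULL, 0, page_sz) < 0)
 *		return -1; // rte_errno: EINVAL, EEXIST or ENOSPC
 */
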
/**
 * Unregister external memory chunk with DPDK.
 *
 * @note Using this API is mutually exclusive with the ``rte_malloc`` family
 *   of APIs.
 *
 * @note This API will not perform any DMA unmapping. It is expected that the
 *   user will do that themselves.
 *
 * @note Before calling this function, all other processes must call
 *   ``rte_extmem_detach`` to detach from the memory area.
 *
 * @param va_addr
 *   Start of virtual area to unregister
 * @param len
 *   Length of virtual area to unregister
 *
 * @return
 *   - 0 on success
 *   - -1 in case of error, with rte_errno set to one of the following:
 *     EINVAL - one of the parameters was invalid
 *     ENOENT - memory chunk was not found
 */
int
rte_extmem_unregister(void *va_addr, size_t len);

/**
 * Attach to external memory chunk registered in another process.
 *
 * @note Using this API is mutually exclusive with the ``rte_malloc`` family
 *   of APIs.
 *
 * @note This API will not perform any DMA mapping. It is expected that the
 *   user will do that themselves.
 *
 * @param va_addr
 *   Start of virtual area to register
 * @param len
 *   Length of virtual area to register
 *
 * @return
 *   - 0 on success
 *   - -1 in case of error, with rte_errno set to one of the following:
 *     EINVAL - one of the parameters was invalid
 *     ENOENT - memory chunk was not found
 */
int
rte_extmem_attach(void *va_addr, size_t len);

/**
 * Detach from external memory chunk registered in another process.
 *
 * @note Using this API is mutually exclusive with the ``rte_malloc`` family
 *   of APIs.
 *
 * @note This API will not perform any DMA unmapping. It is expected that the
 *   user will do that themselves.
 *
 * @param va_addr
 *   Start of virtual area to unregister
 * @param len
 *   Length of virtual area to unregister
 *
 * @return
 *   - 0 on success
 *   - -1 in case of error, with rte_errno set to one of the following:
 *     EINVAL - one of the parameters was invalid
 *     ENOENT - memory chunk was not found
 */
int
rte_extmem_detach(void *va_addr, size_t len);

/**
 * Dump the physical memory layout to a file.
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @param f
 *   A pointer to a file for output
 */
void rte_dump_physmem_layout(FILE *f);

/**
 * Get the total amount of available physical memory.
 *
 * @note This function read-locks the memory hotplug subsystem, and thus cannot
 *       be used within memory-related callback functions.
 *
 * @return
 *    The total amount of available physical memory in bytes.
 */
uint64_t rte_eal_get_physmem_size(void);

/**
 * Get the number of memory channels.
 *
 * @return
 *   The number of memory channels on the system. The value is 0 if unknown
 *   or not the same on all devices.
 */
unsigned rte_memory_get_nchannel(void);

/**
 * Get the number of memory ranks.
 *
 * @return
 *   The number of memory ranks on the system. The value is 0 if unknown or
 *   not the same on all devices.
 */
unsigned rte_memory_get_nrank(void);

/**
 * Check if all currently allocated memory segments are compliant with the
 * supplied DMA address width.
 *
 *  @param maskbits
 *    Address width to check against.
 *  @return
 *    0 if all currently allocated memory segments fit within the mask,
 *    -1 otherwise.
 */
int rte_mem_check_dma_mask(uint8_t maskbits);

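/*
 * Usage sketch (illustrative): a driver for a device limited to 48-bit DMA
 * addressing can verify that all allocated segments are reachable before
 * programming descriptors.
 *
 *	if (rte_mem_check_dma_mask(48) != 0)
 *		return -1; // some segment lies above the 48-bit boundary
 */
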
/**
 * Check if all currently allocated memory segments are compliant with the
 * supplied DMA address width. This function uses
 * rte_memseg_walk_thread_unsafe instead of rte_memseg_walk, meaning that the
 * memory_hotplug_lock will not be acquired, which avoids deadlock during
 * memory initialization.
 *
 * This function is just for EAL core memory internal use. Drivers should
 * use rte_mem_check_dma_mask instead.
 *
 *  @param maskbits
 *    Address width to check against.
 *  @return
 *    0 if all currently allocated memory segments fit within the mask,
 *    -1 otherwise.
 */
int rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits);

/**
 *  Set the DMA mask to use once memory initialization is done. The functions
 *  rte_mem_check_dma_mask and rte_mem_check_dma_mask_thread_unsafe cannot be
 *  used safely until memory has been initialized.
 */
void rte_mem_set_dma_mask(uint8_t maskbits);

/**
 * Drivers based on uio will not load unless physical
 * addresses are obtainable. It is only possible to get
 * physical addresses when running as a privileged user.
 *
 * @return
 *   1 if the system is able to obtain physical addresses.
 *   0 if using DMA addresses through an IOMMU.
 */
int rte_eal_using_phys_addrs(void);

/**
 * Enum indicating which kind of memory event has happened. Used by callbacks to
 * distinguish between memory allocations and deallocations.
 */
enum rte_mem_event {
	RTE_MEM_EVENT_ALLOC = 0, /**< Allocation event. */
	RTE_MEM_EVENT_FREE,      /**< Deallocation event. */
};
#define RTE_MEM_EVENT_CALLBACK_NAME_LEN 64
/**< maximum length of callback name */

/**
 * Function typedef used to register callbacks for memory events.
 */
typedef void (*rte_mem_event_callback_t)(enum rte_mem_event event_type,
		const void *addr, size_t len, void *arg);

/**
 * Function used to register callbacks for memory events.
 *
 * @note Callbacks will happen while the memory hotplug subsystem is
 *       write-locked, therefore some functions (e.g. `rte_memseg_walk()`)
 *       will cause a deadlock when called from within such callbacks.
 *
 * @note Mem event callbacks not being supported is an expected error
 *       condition, so user code needs to handle this situation. In these
 *       cases, the return value will be -1, and rte_errno will be set to
 *       ENOTSUP.
 *
 * @param name
 *   Name associated with specified callback to be added to the list.
 *
 * @param clb
 *   Callback function pointer.
 *
 * @param arg
 *   Argument to pass to the callback.
 *
 * @return
 *   0 on successful callback register
 *   -1 on unsuccessful callback register, with rte_errno value indicating
 *   reason for failure.
 */
int
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
		void *arg);

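/*
 * Usage sketch (illustrative; ``mem_event_cb`` and the callback name are
 * hypothetical): reacting to hotplug events, e.g. to keep a driver's DMA
 * mappings current. The hotplug lock is write-held inside the callback, so
 * only the *_thread_unsafe walk variants may be called from it.
 *
 *	static void
 *	mem_event_cb(enum rte_mem_event type, const void *addr, size_t len,
 *			void *arg __rte_unused)
 *	{
 *		if (type == RTE_MEM_EVENT_ALLOC)
 *			; // map [addr, addr + len) for DMA
 *		else
 *			; // unmap [addr, addr + len)
 *	}
 *
 *	if (rte_mem_event_callback_register("my-driver", mem_event_cb,
 *			NULL) < 0)
 *		return -1; // rte_errno == ENOTSUP: events unsupported here
 */
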
/**
 * Function used to unregister callbacks for memory events.
 *
 * @param name
 *   Name associated with specified callback to be removed from the list.
 *
 * @param arg
 *   Argument to look for among callbacks with specified callback name.
 *
 * @return
 *   0 on successful callback unregister
 *   -1 on unsuccessful callback unregister, with rte_errno value indicating
 *   reason for failure.
 */
int
rte_mem_event_callback_unregister(const char *name, void *arg);

#define RTE_MEM_ALLOC_VALIDATOR_NAME_LEN 64
/**< maximum length of alloc validator name */
/**
 * Function typedef used to register memory allocation validation callbacks.
 *
 * Returning 0 will allow the allocation attempt to continue. Returning -1 will
 * prevent the allocation from succeeding.
 */
typedef int (*rte_mem_alloc_validator_t)(int socket_id,
		size_t cur_limit, size_t new_len);

/**
 * @brief Register validator callback for memory allocations.
 *
 * Callbacks registered by this function will be called right before the memory
 * allocator is about to trigger allocation of more pages from the system, if
 * said allocation would bring total memory usage above the specified limit on
 * the specified socket. The user will be able to cancel the pending allocation
 * if the callback returns -1.
 *
 * @note Callbacks will happen while the memory hotplug subsystem is
 *       write-locked, therefore some functions (e.g. `rte_memseg_walk()`)
 *       will cause a deadlock when called from within such callbacks.
 *
 * @note Validator callbacks not being supported is an expected error
 *       condition, so user code needs to handle this situation. In these
 *       cases, the return value will be -1, and rte_errno will be set to
 *       ENOTSUP.
 *
 * @param name
 *   Name associated with specified callback to be added to the list.
 *
 * @param clb
 *   Callback function pointer.
 *
 * @param socket_id
 *   Socket ID on which to watch for allocations.
 *
 * @param limit
 *   Limit above which to trigger callbacks.
 *
 * @return
 *   0 on successful callback register
 *   -1 on unsuccessful callback register, with rte_errno value indicating
 *   reason for failure.
 */
int
rte_mem_alloc_validator_register(const char *name,
		rte_mem_alloc_validator_t clb, int socket_id, size_t limit);

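/*
 * Usage sketch (illustrative; the name and 1 GiB limit are hypothetical):
 * capping DPDK memory on socket 0. ``cur_limit`` is the limit given at
 * registration, and ``new_len`` is the total usage the pending allocation
 * would reach.
 *
 *	static int
 *	limit_cb(int socket_id __rte_unused, size_t cur_limit, size_t new_len)
 *	{
 *		return new_len > cur_limit ? -1 : 0; // -1 cancels the allocation
 *	}
 *
 *	if (rte_mem_alloc_validator_register("my-limit", limit_cb, 0,
 *			RTE_PGSIZE_1G) < 0)
 *		return -1; // rte_errno == ENOTSUP: validators unsupported here
 */
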
/**
 * @brief Unregister validator callback for memory allocations.
 *
 * @param name
 *   Name associated with specified callback to be removed from the list.
 *
 * @param socket_id
 *   Socket ID on which to watch for allocations.
 *
 * @return
 *   0 on successful callback unregister
 *   -1 on unsuccessful callback unregister, with rte_errno value indicating
 *   reason for failure.
 */
int
rte_mem_alloc_validator_unregister(const char *name, int socket_id);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMORY_H_ */