/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 NXP
 */

#ifndef _DPAAX_IOVA_TABLE_H_
#define _DPAAX_IOVA_TABLE_H_

#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#include <fcntl.h>
#include <glob.h>
#include <errno.h>
#include <arpa/inet.h>

#include <rte_eal.h>
#include <rte_branch_prediction.h>
#include <rte_compat.h>
#include <rte_memory.h>
#include <rte_malloc.h>

struct dpaax_iovat_element {
	phys_addr_t start; /**< Start address of block of physical pages */
	size_t len;        /**< Difference of end-start for quick access */
	uint64_t *pages;   /**< VA for each physical page in this block */
};

struct dpaax_iova_table {
	unsigned int count; /**< No. of blocks of contiguous physical pages */
	struct dpaax_iovat_element entries[0]; /**< Per-block translation entries */
};

/* Pointer to the table, which is common to DPAA/DPAA2 and of which only a
 * single instance is required across the net/crypto/event drivers. The table
 * is populated only if devices are found on the bus.
 */
extern struct dpaax_iova_table *dpaax_iova_table_p;

/* The device-tree node describing the memory layout is named 'memory@<addr>',
 * where 'addr' is SoC dependent and may even depend on U-Boot fixups.
 */
#define MEM_NODE_PATH_GLOB "/proc/device-tree/memory[@0-9]*/reg"
/* For virtual machines the memory node is at a different path (below) */
#define MEM_NODE_PATH_GLOB_VM "/proc/device-tree/memory/reg"
/* The 'reg' file is a multiple of 16 bytes: each entry holds an 8-byte address
 * followed by its 8-byte length. A maximum of 5 entries is assumed.
 */
#define MEM_NODE_FILE_LEN ((16 * 5) + 1)

/* The table splits each contiguous zone into DPAAX_MEM_SPLIT sized elements.
 * This avoids separate handling for cases where more than one hugepage size
 * is supported.
 */
#define DPAAX_MEM_SPLIT (1 << 21)
#define DPAAX_MEM_SPLIT_MASK ~(DPAAX_MEM_SPLIT - 1) /**< Floor aligned */
#define DPAAX_MEM_SPLIT_MASK_OFF (DPAAX_MEM_SPLIT - 1) /**< Offset */

/* APIs exposed */
__rte_internal
int dpaax_iova_table_populate(void);
__rte_internal
void dpaax_iova_table_depopulate(void);
__rte_internal
int dpaax_iova_table_update(phys_addr_t paddr, void *vaddr, size_t length);
__rte_internal
void dpaax_iova_table_dump(void);

static inline void *dpaax_iova_table_get_va(phys_addr_t paddr) __rte_hot;

static inline void *
dpaax_iova_table_get_va(phys_addr_t paddr)
{
	unsigned int i = 0, index;
	void *vaddr = NULL;
	phys_addr_t paddr_align = paddr & DPAAX_MEM_SPLIT_MASK;
	size_t offset = paddr & DPAAX_MEM_SPLIT_MASK_OFF;
	struct dpaax_iovat_element *entry;

	if (unlikely(dpaax_iova_table_p == NULL))
		return NULL;

	entry = dpaax_iova_table_p->entries;

	do {
		/* Entries are sorted by start address; stop once exhausted. */
		if (unlikely(i >= dpaax_iova_table_p->count))
			break;

		if (paddr_align < entry[i].start) {
			/* Incorrect paddr; not in any memory range */
			return NULL;
		}

		if (paddr_align >= (entry[i].start + entry[i].len)) {
			i++;
			continue;
		}

		/* paddr >= entry->start && paddr < entry->(start + len) */
		index = (paddr_align - entry[i].start) / DPAAX_MEM_SPLIT;
		vaddr = (void *)((uintptr_t)entry[i].pages[index] + offset);
		break;
	} while (1);

	return vaddr;
}

#endif /* _DPAAX_IOVA_TABLE_H_ */
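
/*
 * Worked example of the split arithmetic above (the physical addresses are
 * illustrative values only, not taken from any real memory map): with
 * DPAAX_MEM_SPLIT = 1 << 21 (2 MB), looking up paddr = 0x80201000 inside a
 * block whose start is 0x80000000 resolves as
 *
 *   paddr_align = 0x80201000 & DPAAX_MEM_SPLIT_MASK     = 0x80200000
 *   offset      = 0x80201000 & DPAAX_MEM_SPLIT_MASK_OFF = 0x00001000
 *   index       = (0x80200000 - 0x80000000) / DPAAX_MEM_SPLIT = 1
 *
 * so dpaax_iova_table_get_va() returns entry->pages[1] + 0x1000.
 */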
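
/*
 * Minimal usage sketch (compiled out): the example_dpaax_attach() wrapper and
 * its pa/va/len arguments are hypothetical illustrations; only the
 * dpaax_iova_table_*() calls are part of this header's API, and the exact
 * error-handling policy is up to the calling driver.
 */
#if 0
static int
example_dpaax_attach(phys_addr_t pa, void *va, size_t len)
{
	/* Build the PA->VA table from the device-tree memory node(s). */
	if (dpaax_iova_table_populate() < 0)
		return -1;

	/* Record an additional, externally mapped physical region. */
	if (dpaax_iova_table_update(pa, va, len) < 0) {
		dpaax_iova_table_depopulate();
		return -1;
	}

	/* Fast-path translation; NULL means 'pa' is not covered. */
	if (dpaax_iova_table_get_va(pa) == NULL) {
		dpaax_iova_table_dump(); /* debug aid */
		dpaax_iova_table_depopulate();
		return -1;
	}

	return 0;
}
#endif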