/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2011-2016 Freescale Semiconductor Inc.
 * Copyright 2017 NXP
 *
 */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include "process.h"

#include <fsl_usd.h>

/* As higher-level drivers will be built on top of this (dma_mem, qbman, ...),
 * it's preferable that the process driver itself not provide any exported API.
 * As such, combined with the fact that none of these operations are
 * performance critical, it is justified to use lazy initialisation, so that's
 * what the lock is for.
 */
static int fd = -1;
static pthread_mutex_t fd_init_lock = PTHREAD_MUTEX_INITIALIZER;

static int check_fd(void)
{
	int ret;

	if (fd >= 0)
		return 0;
	ret = pthread_mutex_lock(&fd_init_lock);
	assert(!ret);
	/* check again with the lock held */
	if (fd < 0)
		fd = open(PROCESS_PATH, O_RDWR);
	ret = pthread_mutex_unlock(&fd_init_lock);
	assert(!ret);
	return (fd >= 0) ? 0 : -ENODEV;
}

#define DPAA_IOCTL_MAGIC 'u'
struct dpaa_ioctl_id_alloc {
	uint32_t base; /* Return value, the start of the allocated range */
	enum dpaa_id_type id_type; /* what kind of resource(s) to allocate */
	uint32_t num; /* how many IDs to allocate (and return value) */
	uint32_t align; /* must be a power of 2, 0 is treated like 1 */
	int partial; /* whether to allow less than 'num' */
};

struct dpaa_ioctl_id_release {
	/* Input: */
	enum dpaa_id_type id_type;
	uint32_t base;
	uint32_t num;
};

struct dpaa_ioctl_id_reserve {
	enum dpaa_id_type id_type;
	uint32_t base;
	uint32_t num;
};

#define DPAA_IOCTL_ID_ALLOC \
	_IOWR(DPAA_IOCTL_MAGIC, 0x01, struct dpaa_ioctl_id_alloc)
#define DPAA_IOCTL_ID_RELEASE \
	_IOW(DPAA_IOCTL_MAGIC, 0x02, struct dpaa_ioctl_id_release)
#define DPAA_IOCTL_ID_RESERVE \
	_IOW(DPAA_IOCTL_MAGIC, 0x0A, struct dpaa_ioctl_id_reserve)

int process_alloc(enum dpaa_id_type id_type, uint32_t *base, uint32_t num,
		  uint32_t align, int partial)
{
	struct dpaa_ioctl_id_alloc id = {
		.id_type = id_type,
		.num = num,
		.align = align,
		.partial = partial
	};
	int ret = check_fd();

	if (ret)
		return ret;
	ret = ioctl(fd, DPAA_IOCTL_ID_ALLOC, &id);
	if (ret)
		return ret;
	for (ret = 0; ret < (int)id.num; ret++)
		base[ret] = id.base + ret;
	return id.num;
}

void process_release(enum dpaa_id_type id_type, uint32_t base, uint32_t num)
{
	struct dpaa_ioctl_id_release id = {
		.id_type = id_type,
		.base = base,
		.num = num
	};
	int ret = check_fd();

	if (ret) {
		fprintf(stderr, "Process FD failure\n");
		return;
	}
	ret = ioctl(fd, DPAA_IOCTL_ID_RELEASE, &id);
	if (ret)
		fprintf(stderr, "Process FD ioctl failure type %d base 0x%x num %d\n",
			id_type, base, num);
}

int process_reserve(enum dpaa_id_type id_type, uint32_t base, uint32_t num)
{
	struct dpaa_ioctl_id_reserve id = {
		.id_type = id_type,
		.base = base,
		.num = num
	};
	int ret = check_fd();

	if (ret)
		return ret;
	return ioctl(fd, DPAA_IOCTL_ID_RESERVE, &id);
}
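/* Usage sketch (illustrative only, compiled out unless the hypothetical
 * DPAA_PROCESS_USAGE_SKETCH macro is defined): one way a caller might drive
 * the ID-range API above. The enumerator dpaa_id_fqid is assumed to be
 * provided by the headers; substitute whichever dpaa_id_type value the
 * application actually needs.
 */
#ifdef DPAA_PROCESS_USAGE_SKETCH
static int example_alloc_fqid_range(void)
{
	uint32_t fqids[8];
	int n;

	/* Ask for 8 IDs, any alignment, and allow a partial allocation */
	n = process_alloc(dpaa_id_fqid, fqids, 8, 0, 1);
	if (n <= 0)
		return -1;

	/* ... use fqids[0..n-1]; they form one contiguous range ... */

	/* Release the whole range, starting at its base */
	process_release(dpaa_id_fqid, fqids[0], (uint32_t)n);
	return 0;
}
#endif /* DPAA_PROCESS_USAGE_SKETCH */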
/***************************************/
/* Mapping and using QMan/BMan portals */
/***************************************/

#define DPAA_IOCTL_PORTAL_MAP \
	_IOWR(DPAA_IOCTL_MAGIC, 0x07, struct dpaa_ioctl_portal_map)
#define DPAA_IOCTL_PORTAL_UNMAP \
	_IOW(DPAA_IOCTL_MAGIC, 0x08, struct dpaa_portal_map)

int process_portal_map(struct dpaa_ioctl_portal_map *params)
{
	int ret = check_fd();

	if (ret)
		return ret;

	ret = ioctl(fd, DPAA_IOCTL_PORTAL_MAP, params);
	if (ret) {
		perror("ioctl(DPAA_IOCTL_PORTAL_MAP)");
		return ret;
	}
	return 0;
}

int process_portal_unmap(struct dpaa_portal_map *map)
{
	int ret = check_fd();

	if (ret)
		return ret;

	ret = ioctl(fd, DPAA_IOCTL_PORTAL_UNMAP, map);
	if (ret) {
		perror("ioctl(DPAA_IOCTL_PORTAL_UNMAP)");
		return ret;
	}
	return 0;
}

#define DPAA_IOCTL_PORTAL_IRQ_MAP \
	_IOW(DPAA_IOCTL_MAGIC, 0x09, struct dpaa_ioctl_irq_map)

int process_portal_irq_map(int ifd, struct dpaa_ioctl_irq_map *map)
{
	map->fd = fd;
	return ioctl(ifd, DPAA_IOCTL_PORTAL_IRQ_MAP, map);
}

int process_portal_irq_unmap(int ifd)
{
	return close(ifd);
}

struct dpaa_ioctl_raw_portal {
	/* inputs */
	enum dpaa_portal_type type; /* Type of portal to allocate */

	uint8_t enable_stash; /* set to non zero to turn on stashing */
	/* Stashing attributes for the portal */
	uint32_t cpu;
	uint32_t cache;
	uint32_t window;
	/* Specifies the stash request queue this portal should use */
	uint8_t sdest;

	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
	 * for don't care. The portal index will be populated by the
	 * driver when the ioctl() successfully completes.
	 */
	uint32_t index;

	/* outputs */
	uint64_t cinh;
	uint64_t cena;
};

#define DPAA_IOCTL_ALLOC_RAW_PORTAL \
	_IOWR(DPAA_IOCTL_MAGIC, 0x0C, struct dpaa_ioctl_raw_portal)

#define DPAA_IOCTL_FREE_RAW_PORTAL \
	_IOR(DPAA_IOCTL_MAGIC, 0x0D, struct dpaa_ioctl_raw_portal)

static int process_portal_allocate(struct dpaa_ioctl_raw_portal *portal)
{
	int ret = check_fd();

	if (ret)
		return ret;

	ret = ioctl(fd, DPAA_IOCTL_ALLOC_RAW_PORTAL, portal);
	if (ret) {
		perror("ioctl(DPAA_IOCTL_ALLOC_RAW_PORTAL)");
		return ret;
	}
	return 0;
}

static int process_portal_free(struct dpaa_ioctl_raw_portal *portal)
{
	int ret = check_fd();

	if (ret)
		return ret;

	ret = ioctl(fd, DPAA_IOCTL_FREE_RAW_PORTAL, portal);
	if (ret) {
		perror("ioctl(DPAA_IOCTL_FREE_RAW_PORTAL)");
		return ret;
	}
	return 0;
}

int qman_allocate_raw_portal(struct dpaa_raw_portal *portal)
{
	struct dpaa_ioctl_raw_portal input;
	int ret;

	input.type = dpaa_portal_qman;
	input.index = portal->index;
	input.enable_stash = portal->enable_stash;
	input.cpu = portal->cpu;
	input.cache = portal->cache;
	input.window = portal->window;
	input.sdest = portal->sdest;

	ret = process_portal_allocate(&input);
	if (ret)
		return ret;
	portal->index = input.index;
	portal->cinh = input.cinh;
	portal->cena = input.cena;
	return 0;
}

int qman_free_raw_portal(struct dpaa_raw_portal *portal)
{
	struct dpaa_ioctl_raw_portal input;

	input.type = dpaa_portal_qman;
	input.index = portal->index;
	input.cinh = portal->cinh;
	input.cena = portal->cena;

	return process_portal_free(&input);
}
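/* Usage sketch (illustrative only, compiled out unless the hypothetical
 * DPAA_PROCESS_USAGE_SKETCH macro is defined): one way a caller might obtain
 * and release a raw QMan portal with stashing enabled. QBMAN_ANY_PORTAL_IDX
 * and struct dpaa_raw_portal are expected to come from the headers; the
 * stashing attribute values below are placeholders, not recommendations.
 */
#ifdef DPAA_PROCESS_USAGE_SKETCH
static int example_qman_raw_portal(void)
{
	struct dpaa_raw_portal portal = {
		.index = QBMAN_ANY_PORTAL_IDX,	/* let the driver choose */
		.enable_stash = 1,
		.cpu = 0,	/* placeholder stashing attributes */
		.cache = 0,
		.window = 0,
		.sdest = 0,
	};
	int ret;

	ret = qman_allocate_raw_portal(&portal);
	if (ret)
		return ret;

	/* On success the driver has filled in portal.index, portal.cinh
	 * and portal.cena for the caller to map and use.
	 */

	return qman_free_raw_portal(&portal);
}
#endif /* DPAA_PROCESS_USAGE_SKETCH */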
int bman_allocate_raw_portal(struct dpaa_raw_portal *portal)
{
	struct dpaa_ioctl_raw_portal input;
	int ret;

	input.type = dpaa_portal_bman;
	input.index = portal->index;
	input.enable_stash = 0;

	ret = process_portal_allocate(&input);
	if (ret)
		return ret;
	portal->index = input.index;
	portal->cinh = input.cinh;
	portal->cena = input.cena;
	return 0;
}

int bman_free_raw_portal(struct dpaa_raw_portal *portal)
{
	struct dpaa_ioctl_raw_portal input;

	input.type = dpaa_portal_bman;
	input.index = portal->index;
	input.cinh = portal->cinh;
	input.cena = portal->cena;

	return process_portal_free(&input);
}
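/* Usage sketch (illustrative only, compiled out unless the hypothetical
 * DPAA_PROCESS_USAGE_SKETCH macro is defined): the BMan variant of the
 * raw-portal helpers above. bman_allocate_raw_portal() forces stashing off
 * itself, so the caller only chooses an index (or QBMAN_ANY_PORTAL_IDX).
 */
#ifdef DPAA_PROCESS_USAGE_SKETCH
static int example_bman_raw_portal(void)
{
	struct dpaa_raw_portal portal = {
		.index = QBMAN_ANY_PORTAL_IDX,	/* any free BMan portal */
	};
	int ret;

	ret = bman_allocate_raw_portal(&portal);
	if (ret)
		return ret;

	/* portal.index, portal.cinh and portal.cena are now populated */

	return bman_free_raw_portal(&portal);
}
#endif /* DPAA_PROCESS_USAGE_SKETCH */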