/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2016 Freescale Semiconductor Inc.
 * Copyright 2017,2019 NXP
 *
 */

#include <fsl_usd.h>
#include <process.h>
#include "qman_priv.h"
#include <sys/ioctl.h>
#include <err.h>

#include <rte_branch_prediction.h>

/* Global variable containing revision id (even on non-control plane systems
 * where CCSR isn't available).
 */
u16 qman_ip_rev;
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
u16 qm_channel_pme = QMAN_CHANNEL_PME;

/* CCSR map address to access CCSR-based registers */
static void *qman_ccsr_map;
/* The qman clock frequency */
static u32 qman_clk;

static __thread int qmfd = -1;
static __thread struct qm_portal_config qpcfg;
static __thread struct dpaa_ioctl_portal_map map = {
	.type = dpaa_portal_qman
};

u16 dpaa_get_qm_channel_caam(void)
{
	return qm_channel_caam;
}

u16 dpaa_get_qm_channel_pool(void)
{
	return qm_channel_pool1;
}

static int fsl_qman_portal_init(uint32_t index, int is_shared)
{
	struct qman_portal *portal;
	struct dpaa_ioctl_irq_map irq_map;
	int ret;

	/* Allocate and map a qman portal */
	map.index = index;
	ret = process_portal_map(&map);
	if (ret) {
		errno = ret;
		err(0, "process_portal_map()");
		return ret;
	}
	qpcfg.channel = map.channel;
	qpcfg.pools = map.pools;
	qpcfg.index = map.index;

	/* Make the portal's cache-[enabled|inhibited] regions */
	qpcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
	qpcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;

	qmfd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
	if (qmfd == -1) {
		pr_err("QMan irq init failed\n");
		process_portal_unmap(&map.addr);
		return -EBUSY;
	}

	qpcfg.is_shared = is_shared;
	qpcfg.node = NULL;
	qpcfg.irq = qmfd;

	portal = qman_create_affine_portal(&qpcfg, NULL);
	if (!portal) {
		pr_err("Qman portal initialisation failed (%d)\n",
		       qpcfg.cpu);
		process_portal_unmap(&map.addr);
		return -EBUSY;
	}

	irq_map.type = dpaa_portal_qman;
	irq_map.portal_cinh = map.addr.cinh;
	process_portal_irq_map(qmfd, &irq_map);
	return 0;
}

static int fsl_qman_portal_finish(void)
{
	__maybe_unused const struct qm_portal_config *cfg;
	int ret;

	process_portal_irq_unmap(qmfd);

	cfg = qman_destroy_affine_portal(NULL);
	DPAA_BUG_ON(cfg != &qpcfg);
	ret = process_portal_unmap(&map.addr);
	if (ret) {
		errno = ret;
		err(0, "process_portal_unmap()");
	}
	return ret;
}

int qman_thread_fd(void)
{
	return qmfd;
}

int qman_thread_init(void)
{
	/* Convert from contiguous/virtual cpu numbering to real cpu when
	 * calling into the code that is dependent on the device naming.
	 */
	return fsl_qman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
}

int qman_thread_finish(void)
{
	return fsl_qman_portal_finish();
}

void qman_thread_irq(void)
{
	qbman_invoke_irq(qpcfg.irq);

	/* Now we need to uninhibit interrupts. This is the only code outside
	 * the regular portal driver that manipulates any portal register, so
	 * rather than breaking that encapsulation I am simply hard-coding the
	 * offset to the inhibit register here.
	 */
	out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0x36C0, 0);
}
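/*
 * Illustrative usage sketch (not part of the driver): a typical per-thread
 * flow built only from the entry points defined above. The poll() loop and
 * error handling are simplified assumptions, not the actual DPDK PMD event
 * path.
 *
 *	int fd;
 *
 *	if (qman_thread_init())		// map and create this thread's portal
 *		return -1;
 *	fd = qman_thread_fd();		// irq fd backing this thread's portal
 *	for (;;) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *		if (poll(&pfd, 1, -1) > 0)
 *			qman_thread_irq();	// ack the irq, uninhibit portal
 *	}
 *	qman_thread_finish();		// tear the portal back down
 */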
void qman_fq_portal_thread_irq(struct qman_portal *qp)
{
	qman_portal_uninhibit_isr(qp);
}

struct qman_portal *fsl_qman_fq_portal_create(int *fd)
{
	struct qman_portal *portal = NULL;
	struct qm_portal_config *q_pcfg;
	struct dpaa_ioctl_irq_map irq_map;
	struct dpaa_ioctl_portal_map q_map = {0};
	int q_fd, ret;

	q_pcfg = kzalloc((sizeof(struct qm_portal_config)), 0);
	if (!q_pcfg) {
		/* kzalloc sets errno */
		err(0, "q_pcfg kzalloc failed");
		return NULL;
	}

	/* Allocate and map a qman portal */
	q_map.type = dpaa_portal_qman;
	q_map.index = QBMAN_ANY_PORTAL_IDX;
	ret = process_portal_map(&q_map);
	if (ret) {
		errno = ret;
		err(0, "process_portal_map()");
		kfree(q_pcfg);
		return NULL;
	}
	q_pcfg->channel = q_map.channel;
	q_pcfg->pools = q_map.pools;
	q_pcfg->index = q_map.index;

	/* Make the portal's cache-[enabled|inhibited] regions */
	q_pcfg->addr_virt[DPAA_PORTAL_CE] = q_map.addr.cena;
	q_pcfg->addr_virt[DPAA_PORTAL_CI] = q_map.addr.cinh;

	q_fd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
	if (q_fd == -1) {
		pr_err("QMan irq init failed\n");
		goto err;
	}

	q_pcfg->irq = q_fd;

	portal = qman_alloc_global_portal(q_pcfg);
	if (!portal) {
		pr_err("Qman portal initialisation failed (%d)\n",
		       q_pcfg->cpu);
		goto err_alloc;
	}

	irq_map.type = dpaa_portal_qman;
	irq_map.portal_cinh = q_map.addr.cinh;
	process_portal_irq_map(q_fd, &irq_map);

	*fd = q_fd;
	return portal;
err_alloc:
	close(q_fd);
err:
	process_portal_unmap(&q_map.addr);
	kfree(q_pcfg);
	return NULL;
}

int fsl_qman_fq_portal_init(struct qman_portal *qp)
{
	struct qman_portal *res;

	res = qman_init_portal(qp, NULL, NULL);
	if (!res) {
		pr_err("Qman portal initialisation failed\n");
		return -1;
	}

	return 0;
}

int fsl_qman_fq_portal_destroy(struct qman_portal *qp)
{
	const struct qm_portal_config *cfg;
	struct dpaa_portal_map addr;
	int ret;

	cfg = qman_destroy_affine_portal(qp);

	ret = qman_free_global_portal(qp);
	if (ret)
		pr_err("qman_free_global_portal() (%d)\n", ret);

	kfree(qp);

	process_portal_irq_unmap(cfg->irq);

	addr.cena = cfg->addr_virt[DPAA_PORTAL_CE];
	addr.cinh = cfg->addr_virt[DPAA_PORTAL_CI];

	ret = process_portal_unmap(&addr);
	if (ret)
		pr_err("process_portal_unmap() (%d)\n", ret);

	kfree((void *)cfg);

	return ret;
}
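/*
 * Illustrative usage sketch (not part of the driver): creating and tearing
 * down a dedicated FQ portal with the helpers above. How the returned fd is
 * waited on is only an assumption.
 *
 *	struct qman_portal *qp;
 *	int fd;
 *
 *	qp = fsl_qman_fq_portal_create(&fd);
 *	if (!qp || fsl_qman_fq_portal_init(qp))
 *		return -1;
 *	...
 *	// once the fd signals, service and re-enable the portal ISR
 *	qman_fq_portal_thread_irq(qp);
 *	...
 *	fsl_qman_fq_portal_destroy(qp);	// unmaps the portal, frees its config
 */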
int qman_global_init(void)
{
	const struct device_node *dt_node;
	size_t lenp;
	const u32 *chanid;
	static int ccsr_map_fd;
	const uint32_t *qman_addr;
	uint64_t phys_addr;
	uint64_t regs_size;
	const u32 *clk;

	static int done;

	if (done)
		return -EBUSY;

	/* Use the device-tree to determine IP revision until something better
	 * is devised.
	 */
	dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman-portal");
	if (!dt_node) {
		pr_err("No qman portals available for any CPU\n");
		return -ENODEV;
	}
	if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.0") ||
	    of_device_is_compatible(dt_node, "fsl,qman-portal-1.0.0"))
		pr_err("QMan rev1.0 on P4080 rev1 is not supported!\n");
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.1") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-1.1.0"))
		qman_ip_rev = QMAN_REV11;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.2") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-1.2.0"))
		qman_ip_rev = QMAN_REV12;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-2.0") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-2.0.0"))
		qman_ip_rev = QMAN_REV20;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.0") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.1"))
		qman_ip_rev = QMAN_REV30;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.0") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.1") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.2") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.3"))
		qman_ip_rev = QMAN_REV31;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.0") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.1"))
		qman_ip_rev = QMAN_REV32;
	else
		qman_ip_rev = QMAN_REV11;

	if (!qman_ip_rev) {
		pr_err("Unknown qman portal version\n");
		return -ENODEV;
	}
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
		qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
		qm_channel_pme = QMAN_CHANNEL_PME_REV3;
	}

	dt_node = of_find_compatible_node(NULL, NULL, "fsl,pool-channel-range");
	if (!dt_node) {
		pr_err("No qman pool channel range available\n");
		return -ENODEV;
	}
	chanid = of_get_property(dt_node, "fsl,pool-channel-range", &lenp);
	if (!chanid) {
		pr_err("Can not get pool-channel-range property\n");
		return -EINVAL;
	}

	/* get ccsr base */
	dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman");
	if (!dt_node) {
		pr_err("No qman device node available\n");
		return -ENODEV;
	}
	qman_addr = of_get_address(dt_node, 0, &regs_size, NULL);
	if (!qman_addr) {
		pr_err("of_get_address cannot return qman address\n");
		return -EINVAL;
	}
	phys_addr = of_translate_address(dt_node, qman_addr);
	if (!phys_addr) {
		pr_err("of_translate_address failed\n");
		return -EINVAL;
	}

	ccsr_map_fd = open("/dev/mem", O_RDWR);
	if (unlikely(ccsr_map_fd < 0)) {
		pr_err("Can not open /dev/mem for qman ccsr map\n");
		return ccsr_map_fd;
	}

	qman_ccsr_map = mmap(NULL, regs_size, PROT_READ | PROT_WRITE,
			     MAP_SHARED, ccsr_map_fd, phys_addr);
	if (qman_ccsr_map == MAP_FAILED) {
		pr_err("Can not map qman ccsr base\n");
		return -EINVAL;
	}

	clk = of_get_property(dt_node, "clock-frequency", NULL);
	if (!clk)
		pr_warn("Can't find Qman clock frequency\n");
	else
		qman_clk = be32_to_cpu(*clk);

#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	return qman_setup_fq_lookup_table(CONFIG_FSL_QMAN_FQ_LOOKUP_MAX);
#endif
	return 0;
}
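/*
 * Illustrative ordering note (an assumption drawn from the code above, not a
 * documented contract): qman_global_init() is expected to run once per
 * process, before any qman_thread_init() or fsl_qman_fq_portal_create()
 * call, since it resolves qman_ip_rev and the REV3 channel IDs that the
 * portal setup code relies on.
 */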