1 /* $NetBSD: sun6i_dma.c,v 1.5 2018/05/10 00:07:08 jmcneill Exp $ */ 2 3 /*- 4 * Copyright (c) 2014-2017 Jared McNeill <jmcneill@invisible.ca> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include "opt_ddb.h" 30 31 #include <sys/cdefs.h> 32 __KERNEL_RCSID(0, "$NetBSD: sun6i_dma.c,v 1.5 2018/05/10 00:07:08 jmcneill Exp $"); 33 34 #include <sys/param.h> 35 #include <sys/bus.h> 36 #include <sys/device.h> 37 #include <sys/intr.h> 38 #include <sys/systm.h> 39 #include <sys/mutex.h> 40 #include <sys/bitops.h> 41 #include <sys/kmem.h> 42 43 #include <dev/fdt/fdtvar.h> 44 45 #define DMA_IRQ_EN_REG0_REG 0x0000 46 #define DMA_IRQ_EN_REG1_REG 0x0004 47 #define DMA_IRQ_EN_REG0_QUEUE_IRQ_EN(n) __BIT(n * 4 + 2) 48 #define DMA_IRQ_EN_REG0_PKG_IRQ_EN(n) __BIT(n * 4 + 1) 49 #define DMA_IRQ_EN_REG0_HLAF_IRQ_EN(n) __BIT(n * 4 + 0) 50 #define DMA_IRQ_EN_REG1_QUEUE_IRQ_EN(n) __BIT((n - 8) * 4 + 2) 51 #define DMA_IRQ_EN_REG1_PKG_IRQ_EN(n) __BIT((n - 8) * 4 + 1) 52 #define DMA_IRQ_EN_REG1_HLAF_IRQ_EN(n) __BIT((n - 8) * 4 + 0) 53 #define DMA_IRQ_PEND_REG0_REG 0x0010 54 #define DMA_IRQ_PEND_REG1_REG 0x0014 55 #define DMA_IRQ_QUEUE_MASK 0x4444444444444444ULL 56 #define DMA_IRQ_PKG_MASK 0x2222222222222222ULL 57 #define DMA_IRQ_HF_MASK 0x1111111111111111ULL 58 #define DMA_STA_REG 0x0030 59 #define DMA_EN_REG(n) (0x0100 + (n) * 0x40 + 0x00) 60 #define DMA_EN_EN __BIT(0) 61 #define DMA_PAU_REG(n) (0x0100 + (n) * 0x40 + 0x04) 62 #define DMA_PAU_PAUSE __BIT(0) 63 #define DMA_START_ADDR_REG(n) (0x0100 + (n) * 0x40 + 0x08) 64 #define DMA_CFG_REG(n) (0x0100 + (n) * 0x40 + 0x0C) 65 #define DMA_CFG_DEST_DATA_WIDTH __BITS(26,25) 66 #define DMA_CFG_DATA_WIDTH(n) ((n) >> 4) 67 #define DMA_CFG_DEST_BST_LEN __BITS(24,23) 68 #define DMA_CFG_BST_LEN(n) ((n) == 1 ? 0 : (((n) >> 3) + 1)) 69 #define DMA_CFG_DEST_ADDR_MODE __BITS(22,21) 70 #define DMA_CFG_ADDR_MODE_LINEAR 0 71 #define DMA_CFG_ADDR_MODE_IO 1 72 #define DMA_CFG_DEST_DRQ_TYPE __BITS(20,16) 73 #define DMA_CFG_DRQ_TYPE_SDRAM 1 74 #define DMA_CFG_SRC_DATA_WIDTH __BITS(10,9) 75 #define DMA_CFG_SRC_BST_LEN __BITS(8,7) 76 #define DMA_CFG_SRC_ADDR_MODE __BITS(6,5) 77 #define DMA_CFG_SRC_DRQ_TYPE __BITS(4,0) 78 #define DMA_CUR_SRC_REG(n) (0x0100 + (n) * 0x40 + 0x10) 79 #define DMA_CUR_DEST_REG(n) (0x0100 + (n) * 0x40 + 0x14) 80 #define DMA_BCNT_LEFT_REG(n) (0x0100 + (n) * 0x40 + 0x18) 81 #define DMA_PARA_REG(n) (0x0100 + (n) * 0x40 + 0x1C) 82 #define DMA_PARA_DATA_BLK_SIZE __BITS(15,8) 83 #define DMA_PARA_WAIT_CYC __BITS(7,0) 84 85 struct sun6idma_desc { 86 uint32_t dma_config; 87 uint32_t dma_srcaddr; 88 uint32_t dma_dstaddr; 89 uint32_t dma_bcnt; 90 uint32_t dma_para; 91 uint32_t dma_next; 92 #define DMA_NULL 0xfffff800 93 }; 94 95 struct sun6idma_config { 96 u_int num_channels; 97 bool autogate; 98 bus_size_t autogate_reg; 99 uint32_t autogate_mask; 100 }; 101 102 static const struct sun6idma_config sun6i_a31_dma_config = { 103 .num_channels = 16 104 }; 105 106 static const struct sun6idma_config sun8i_a83t_dma_config = { 107 .num_channels = 8, 108 .autogate = true, 109 .autogate_reg = 0x20, 110 .autogate_mask = 0x4, 111 }; 112 113 static const struct sun6idma_config sun8i_h3_dma_config = { 114 .num_channels = 12, 115 .autogate = true, 116 .autogate_reg = 0x28, 117 .autogate_mask = 0x4, 118 }; 119 120 static const struct sun6idma_config sun50i_a64_dma_config = { 121 .num_channels = 8, 122 .autogate = true, 123 .autogate_reg = 0x28, 124 .autogate_mask = 0x4, 125 }; 126 127 static const struct of_compat_data compat_data[] = { 128 { "allwinner,sun6i-a31-dma", (uintptr_t)&sun6i_a31_dma_config }, 129 { "allwinner,sun8i-a83t-dma", (uintptr_t)&sun8i_a83t_dma_config }, 130 { "allwinner,sun8i-h3-dma", (uintptr_t)&sun8i_h3_dma_config }, 131 { "allwinner,sun50i-a64-dma", (uintptr_t)&sun50i_a64_dma_config }, 132 { NULL } 133 }; 134 135 struct sun6idma_channel { 136 uint8_t ch_index; 137 void (*ch_callback)(void *); 138 void *ch_callbackarg; 139 u_int ch_portid; 140 141 bus_dma_segment_t ch_dmasegs[1]; 142 bus_dmamap_t ch_dmamap; 143 void *ch_dmadesc; 144 bus_size_t ch_dmadesclen; 145 }; 146 147 struct sun6idma_softc { 148 device_t sc_dev; 149 bus_space_tag_t sc_bst; 150 bus_space_handle_t sc_bsh; 151 bus_dma_tag_t sc_dmat; 152 int sc_phandle; 153 void *sc_ih; 154 155 kmutex_t sc_lock; 156 157 struct sun6idma_channel *sc_chan; 158 u_int sc_nchan; 159 }; 160 161 #define DMA_READ(sc, reg) \ 162 bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg)) 163 #define DMA_WRITE(sc, reg, val) \ 164 bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) 165 166 static void * 167 sun6idma_acquire(device_t dev, const void *data, size_t len, 168 void (*cb)(void *), void *cbarg) 169 { 170 struct sun6idma_softc *sc = device_private(dev); 171 struct sun6idma_channel *ch = NULL; 172 uint32_t irqen; 173 uint8_t index; 174 175 if (len != 4) 176 return NULL; 177 178 const u_int portid = be32dec(data); 179 if (portid > __SHIFTOUT_MASK(DMA_CFG_SRC_DRQ_TYPE)) 180 return NULL; 181 182 mutex_enter(&sc->sc_lock); 183 184 for (index = 0; index < sc->sc_nchan; index++) { 185 if (sc->sc_chan[index].ch_callback == NULL) { 186 ch = &sc->sc_chan[index]; 187 ch->ch_callback = cb; 188 ch->ch_callbackarg = cbarg; 189 ch->ch_portid = portid; 190 191 irqen = DMA_READ(sc, index < 8 ? 192 DMA_IRQ_EN_REG0_REG : 193 DMA_IRQ_EN_REG1_REG); 194 irqen |= (index < 8 ? 195 DMA_IRQ_EN_REG0_PKG_IRQ_EN(index) : 196 DMA_IRQ_EN_REG1_PKG_IRQ_EN(index)); 197 DMA_WRITE(sc, index < 8 ? 198 DMA_IRQ_EN_REG0_REG : 199 DMA_IRQ_EN_REG1_REG, irqen); 200 201 break; 202 } 203 } 204 205 mutex_exit(&sc->sc_lock); 206 207 return ch; 208 } 209 210 static void 211 sun6idma_release(device_t dev, void *priv) 212 { 213 struct sun6idma_softc *sc = device_private(dev); 214 struct sun6idma_channel *ch = priv; 215 uint32_t irqen; 216 uint8_t index = ch->ch_index; 217 218 mutex_enter(&sc->sc_lock); 219 220 irqen = DMA_READ(sc, index < 8 ? 221 DMA_IRQ_EN_REG0_REG : 222 DMA_IRQ_EN_REG1_REG); 223 irqen &= ~(index < 8 ? 224 DMA_IRQ_EN_REG0_PKG_IRQ_EN(index) : 225 DMA_IRQ_EN_REG1_PKG_IRQ_EN(index)); 226 DMA_WRITE(sc, index < 8 ? 227 DMA_IRQ_EN_REG0_REG : 228 DMA_IRQ_EN_REG1_REG, irqen); 229 230 ch->ch_callback = NULL; 231 ch->ch_callbackarg = NULL; 232 233 mutex_exit(&sc->sc_lock); 234 } 235 236 static int 237 sun6idma_transfer(device_t dev, void *priv, struct fdtbus_dma_req *req) 238 { 239 struct sun6idma_softc *sc = device_private(dev); 240 struct sun6idma_channel *ch = priv; 241 struct sun6idma_desc *desc = ch->ch_dmadesc; 242 uint32_t src, dst, len, cfg, mem_cfg, dev_cfg; 243 uint32_t mem_width, dev_width, mem_burst, dev_burst; 244 245 if (req->dreq_nsegs != 1) 246 return EINVAL; 247 248 mem_width = DMA_CFG_DATA_WIDTH(req->dreq_mem_opt.opt_bus_width); 249 dev_width = DMA_CFG_DATA_WIDTH(req->dreq_dev_opt.opt_bus_width); 250 mem_burst = DMA_CFG_BST_LEN(req->dreq_mem_opt.opt_burst_len); 251 dev_burst = DMA_CFG_BST_LEN(req->dreq_dev_opt.opt_burst_len); 252 253 mem_cfg = __SHIFTIN(mem_width, DMA_CFG_SRC_DATA_WIDTH) | 254 __SHIFTIN(mem_burst, DMA_CFG_SRC_BST_LEN) | 255 __SHIFTIN(DMA_CFG_ADDR_MODE_LINEAR, DMA_CFG_SRC_ADDR_MODE) | 256 __SHIFTIN(DMA_CFG_DRQ_TYPE_SDRAM, DMA_CFG_SRC_DRQ_TYPE); 257 dev_cfg = __SHIFTIN(dev_width, DMA_CFG_SRC_DATA_WIDTH) | 258 __SHIFTIN(dev_burst, DMA_CFG_SRC_BST_LEN) | 259 __SHIFTIN(DMA_CFG_ADDR_MODE_IO, DMA_CFG_SRC_ADDR_MODE) | 260 __SHIFTIN(ch->ch_portid, DMA_CFG_SRC_DRQ_TYPE); 261 262 if (req->dreq_dir == FDT_DMA_READ) { 263 src = req->dreq_dev_phys; 264 dst = req->dreq_segs[0].ds_addr; 265 cfg = mem_cfg << 16 | dev_cfg; 266 } else { 267 src = req->dreq_segs[0].ds_addr; 268 dst = req->dreq_dev_phys; 269 cfg = dev_cfg << 16 | mem_cfg; 270 } 271 len = req->dreq_segs[0].ds_len; 272 273 desc->dma_config = htole32(cfg); 274 desc->dma_srcaddr = htole32(src); 275 desc->dma_dstaddr = htole32(dst); 276 desc->dma_bcnt = htole32(len); 277 desc->dma_para = htole32(0); 278 desc->dma_next = htole32(DMA_NULL); 279 280 bus_dmamap_sync(sc->sc_dmat, ch->ch_dmamap, 0, ch->ch_dmadesclen, 281 BUS_DMASYNC_PREWRITE); 282 283 DMA_WRITE(sc, DMA_START_ADDR_REG(ch->ch_index), 284 ch->ch_dmamap->dm_segs[0].ds_addr); 285 DMA_WRITE(sc, DMA_EN_REG(ch->ch_index), DMA_EN_EN); 286 287 if ((DMA_READ(sc, DMA_EN_REG(ch->ch_index)) & DMA_EN_EN) == 0) { 288 aprint_error_dev(sc->sc_dev, 289 "DMA Channel %u failed to start\n", ch->ch_index); 290 return EIO; 291 } 292 293 return 0; 294 } 295 296 static void 297 sun6idma_halt(device_t dev, void *priv) 298 { 299 struct sun6idma_softc *sc = device_private(dev); 300 struct sun6idma_channel *ch = priv; 301 302 DMA_WRITE(sc, DMA_EN_REG(ch->ch_index), 0); 303 } 304 305 static const struct fdtbus_dma_controller_func sun6idma_funcs = { 306 .acquire = sun6idma_acquire, 307 .release = sun6idma_release, 308 .transfer = sun6idma_transfer, 309 .halt = sun6idma_halt 310 }; 311 312 static int 313 sun6idma_intr(void *priv) 314 { 315 struct sun6idma_softc *sc = priv; 316 uint32_t pend0, pend1, bit; 317 uint64_t pend, mask; 318 uint8_t index; 319 320 pend0 = DMA_READ(sc, DMA_IRQ_PEND_REG0_REG); 321 pend1 = DMA_READ(sc, DMA_IRQ_PEND_REG1_REG); 322 if (!pend0 && !pend1) 323 return 0; 324 325 DMA_WRITE(sc, DMA_IRQ_PEND_REG0_REG, pend0); 326 DMA_WRITE(sc, DMA_IRQ_PEND_REG1_REG, pend1); 327 328 pend = pend0 | ((uint64_t)pend1 << 32); 329 330 while ((bit = ffs64(pend & DMA_IRQ_PKG_MASK)) != 0) { 331 mask = __BIT(bit - 1); 332 pend &= ~mask; 333 index = (bit - 1) / 4; 334 335 if (sc->sc_chan[index].ch_callback == NULL) 336 continue; 337 sc->sc_chan[index].ch_callback( 338 sc->sc_chan[index].ch_callbackarg); 339 } 340 341 return 1; 342 } 343 344 static int 345 sun6idma_match(device_t parent, cfdata_t cf, void *aux) 346 { 347 struct fdt_attach_args * const faa = aux; 348 349 return of_match_compat_data(faa->faa_phandle, compat_data); 350 } 351 352 static void 353 sun6idma_attach(device_t parent, device_t self, void *aux) 354 { 355 struct sun6idma_softc * const sc = device_private(self); 356 struct fdt_attach_args * const faa = aux; 357 const int phandle = faa->faa_phandle; 358 const size_t desclen = sizeof(struct sun6idma_desc); 359 const struct sun6idma_config *conf; 360 struct fdtbus_reset *rst; 361 struct clk *clk; 362 char intrstr[128]; 363 bus_addr_t addr; 364 bus_size_t size; 365 int error, nsegs; 366 u_int index; 367 368 if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) { 369 aprint_error(": couldn't get registers\n"); 370 return; 371 } 372 373 if ((clk = fdtbus_clock_get_index(phandle, 0)) == NULL || 374 clk_enable(clk) != 0) { 375 aprint_error(": couldn't enable clock\n"); 376 return; 377 } 378 if ((rst = fdtbus_reset_get_index(phandle, 0)) == NULL || 379 fdtbus_reset_deassert(rst) != 0) { 380 aprint_error(": couldn't de-assert reset\n"); 381 return; 382 } 383 384 sc->sc_dev = self; 385 sc->sc_phandle = phandle; 386 sc->sc_dmat = faa->faa_dmat; 387 sc->sc_bst = faa->faa_bst; 388 if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) { 389 aprint_error(": couldn't map registers\n"); 390 return; 391 } 392 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SCHED); 393 394 if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) { 395 aprint_error(": failed to decode interrupt\n"); 396 return; 397 } 398 399 conf = (void *)of_search_compatible(phandle, compat_data)->data; 400 401 sc->sc_nchan = conf->num_channels; 402 sc->sc_chan = kmem_alloc(sizeof(*sc->sc_chan) * sc->sc_nchan, KM_SLEEP); 403 404 aprint_naive("\n"); 405 aprint_normal(": DMA controller (%u channels)\n", sc->sc_nchan); 406 407 DMA_WRITE(sc, DMA_IRQ_EN_REG0_REG, 0); 408 DMA_WRITE(sc, DMA_IRQ_EN_REG1_REG, 0); 409 DMA_WRITE(sc, DMA_IRQ_PEND_REG0_REG, ~0); 410 DMA_WRITE(sc, DMA_IRQ_PEND_REG1_REG, ~0); 411 412 for (index = 0; index < sc->sc_nchan; index++) { 413 struct sun6idma_channel *ch = &sc->sc_chan[index]; 414 ch->ch_index = index; 415 ch->ch_callback = NULL; 416 ch->ch_callbackarg = NULL; 417 ch->ch_dmadesclen = desclen; 418 419 error = bus_dmamem_alloc(sc->sc_dmat, desclen, 0, 0, 420 ch->ch_dmasegs, 1, &nsegs, BUS_DMA_WAITOK); 421 if (error) 422 panic("bus_dmamem_alloc failed: %d", error); 423 error = bus_dmamem_map(sc->sc_dmat, ch->ch_dmasegs, nsegs, 424 desclen, &ch->ch_dmadesc, BUS_DMA_WAITOK); 425 if (error) 426 panic("bus_dmamem_map failed: %d", error); 427 error = bus_dmamap_create(sc->sc_dmat, desclen, 1, desclen, 0, 428 BUS_DMA_WAITOK, &ch->ch_dmamap); 429 if (error) 430 panic("bus_dmamap_create failed: %d", error); 431 error = bus_dmamap_load(sc->sc_dmat, ch->ch_dmamap, 432 ch->ch_dmadesc, desclen, NULL, BUS_DMA_WAITOK); 433 if (error) 434 panic("bus_dmamap_load failed: %d", error); 435 436 DMA_WRITE(sc, DMA_EN_REG(index), 0); 437 } 438 439 if (conf->autogate) 440 DMA_WRITE(sc, conf->autogate_reg, conf->autogate_mask); 441 442 sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_SCHED, FDT_INTR_MPSAFE, 443 sun6idma_intr, sc); 444 if (sc->sc_ih == NULL) { 445 aprint_error_dev(sc->sc_dev, 446 "couldn't establish interrupt on %s\n", intrstr); 447 return; 448 } 449 aprint_normal_dev(sc->sc_dev, "interrupting on %s\n", intrstr); 450 451 fdtbus_register_dma_controller(self, phandle, &sun6idma_funcs); 452 } 453 454 CFATTACH_DECL_NEW(sun6i_dma, sizeof(struct sun6idma_softc), 455 sun6idma_match, sun6idma_attach, NULL, NULL); 456 457 #ifdef DDB 458 void sun6idma_dump(void); 459 460 void 461 sun6idma_dump(void) 462 { 463 struct sun6idma_softc *sc; 464 device_t dev; 465 u_int index; 466 467 dev = device_find_by_driver_unit("sun6idma", 0); 468 if (dev == NULL) 469 return; 470 sc = device_private(dev); 471 472 device_printf(dev, "DMA_IRQ_EN_REG0_REG: %08x\n", DMA_READ(sc, DMA_IRQ_EN_REG0_REG)); 473 device_printf(dev, "DMA_IRQ_EN_REG1_REG: %08x\n", DMA_READ(sc, DMA_IRQ_EN_REG1_REG)); 474 device_printf(dev, "DMA_IRQ_PEND_REG0_REG: %08x\n", DMA_READ(sc, DMA_IRQ_PEND_REG0_REG)); 475 device_printf(dev, "DMA_IRQ_PEND_REG1_REG: %08x\n", DMA_READ(sc, DMA_IRQ_PEND_REG1_REG)); 476 device_printf(dev, "DMA_STA_REG: %08x\n", DMA_READ(sc, DMA_STA_REG)); 477 478 for (index = 0; index < sc->sc_nchan; index++) { 479 struct sun6idma_channel *ch = &sc->sc_chan[index]; 480 if (ch->ch_callback == NULL) 481 continue; 482 device_printf(dev, " %2d: DMA_EN_REG: %08x\n", index, DMA_READ(sc, DMA_EN_REG(index))); 483 device_printf(dev, " %2d: DMA_PAU_REG: %08x\n", index, DMA_READ(sc, DMA_PAU_REG(index))); 484 device_printf(dev, " %2d: DMA_START_ADDR_REG: %08x\n", index, DMA_READ(sc, DMA_START_ADDR_REG(index))); 485 device_printf(dev, " %2d: DMA_CFG_REG: %08x\n", index, DMA_READ(sc, DMA_CFG_REG(index))); 486 device_printf(dev, " %2d: DMA_CUR_SRC_REG: %08x\n", index, DMA_READ(sc, DMA_CUR_SRC_REG(index))); 487 device_printf(dev, " %2d: DMA_CUR_DEST_REG: %08x\n", index, DMA_READ(sc, DMA_CUR_DEST_REG(index))); 488 device_printf(dev, " %2d: DMA_BCNT_LEFT_REG: %08x\n", index, DMA_READ(sc, DMA_BCNT_LEFT_REG(index))); 489 device_printf(dev, " %2d: DMA_PARA_REG: %08x\n", index, DMA_READ(sc, DMA_PARA_REG(index))); 490 } 491 } 492 #endif 493