1 /* $NetBSD: sun6i_dma.c,v 1.16 2024/08/13 07:20:23 skrll Exp $ */ 2 3 /*- 4 * Copyright (c) 2014-2017 Jared McNeill <jmcneill@invisible.ca> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include "opt_ddb.h" 30 31 #include <sys/cdefs.h> 32 __KERNEL_RCSID(0, "$NetBSD: sun6i_dma.c,v 1.16 2024/08/13 07:20:23 skrll Exp $"); 33 34 #include <sys/param.h> 35 #include <sys/bus.h> 36 #include <sys/device.h> 37 #include <sys/intr.h> 38 #include <sys/systm.h> 39 #include <sys/mutex.h> 40 #include <sys/bitops.h> 41 #include <sys/kmem.h> 42 43 #include <dev/fdt/fdtvar.h> 44 45 #define DMA_IRQ_EN_REG0_REG 0x0000 46 #define DMA_IRQ_EN_REG1_REG 0x0004 47 #define DMA_IRQ_EN_REG0_QUEUE_IRQ_EN(n) __BIT(n * 4 + 2) 48 #define DMA_IRQ_EN_REG0_PKG_IRQ_EN(n) __BIT(n * 4 + 1) 49 #define DMA_IRQ_EN_REG0_HLAF_IRQ_EN(n) __BIT(n * 4 + 0) 50 #define DMA_IRQ_EN_REG1_QUEUE_IRQ_EN(n) __BIT((n - 8) * 4 + 2) 51 #define DMA_IRQ_EN_REG1_PKG_IRQ_EN(n) __BIT((n - 8) * 4 + 1) 52 #define DMA_IRQ_EN_REG1_HLAF_IRQ_EN(n) __BIT((n - 8) * 4 + 0) 53 #define DMA_IRQ_PEND_REG0_REG 0x0010 54 #define DMA_IRQ_PEND_REG1_REG 0x0014 55 #define DMA_IRQ_QUEUE_MASK 0x4444444444444444ULL 56 #define DMA_IRQ_PKG_MASK 0x2222222222222222ULL 57 #define DMA_IRQ_HF_MASK 0x1111111111111111ULL 58 #define DMA_STA_REG 0x0030 59 #define DMA_EN_REG(n) (0x0100 + (n) * 0x40 + 0x00) 60 #define DMA_EN_EN __BIT(0) 61 #define DMA_PAU_REG(n) (0x0100 + (n) * 0x40 + 0x04) 62 #define DMA_PAU_PAUSE __BIT(0) 63 #define DMA_START_ADDR_REG(n) (0x0100 + (n) * 0x40 + 0x08) 64 #define DMA_CFG_REG(n) (0x0100 + (n) * 0x40 + 0x0C) 65 #define DMA_CFG_DEST_DATA_WIDTH __BITS(26,25) 66 #define DMA_CFG_DATA_WIDTH(n) ((n) >> 4) 67 #define DMA_CFG_BST_LEN(n) ((n) == 1 ? 0 : (((n) >> 3) + 1)) 68 #define DMA_CFG_DEST_ADDR_MODE __BITS(22,21) 69 #define DMA_CFG_ADDR_MODE_LINEAR 0 70 #define DMA_CFG_ADDR_MODE_IO 1 71 #define DMA_CFG_DEST_DRQ_TYPE __BITS(20,16) 72 #define DMA_CFG_DRQ_TYPE_SDRAM 1 73 #define DMA_CFG_SRC_DATA_WIDTH __BITS(10,9) 74 #define DMA_CFG_SRC_ADDR_MODE __BITS(6,5) 75 #define DMA_CFG_SRC_DRQ_TYPE __BITS(4,0) 76 #define DMA_CUR_SRC_REG(n) (0x0100 + (n) * 0x40 + 0x10) 77 #define DMA_CUR_DEST_REG(n) (0x0100 + (n) * 0x40 + 0x14) 78 #define DMA_BCNT_LEFT_REG(n) (0x0100 + (n) * 0x40 + 0x18) 79 #define DMA_PARA_REG(n) (0x0100 + (n) * 0x40 + 0x1C) 80 #define DMA_PARA_DATA_BLK_SIZE __BITS(15,8) 81 #define DMA_PARA_WAIT_CYC __BITS(7,0) 82 #define DMA_MODE_REG(n) (0x0100 + (n) * 0x40 + 0x28) 83 #define MODE_WAIT 0b0 84 #define MODE_HANDSHAKE 0b1 85 #define DMA_MODE_DST(m) __SHIFTIN((m), __BIT(3)) 86 #define DMA_MODE_SRC(m) __SHIFTIN((m), __BIT(2)) 87 #define DMA_FDESC_ADDR_REG(n) (0x0100 + (n) * 0x40 + 0x2C) 88 #define DMA_PKG_NUM_REG(n) (0x0100 + (n) * 0x40 + 0x30) 89 90 struct sun6idma_desc { 91 uint32_t dma_config; 92 uint32_t dma_srcaddr; 93 uint32_t dma_dstaddr; 94 uint32_t dma_bcnt; 95 uint32_t dma_para; 96 uint32_t dma_next; 97 #define DMA_NULL 0xfffff800 98 }; 99 100 struct sun6idma_config { 101 u_int num_channels; 102 bool autogate; 103 uint8_t bursts; 104 uint8_t widths; 105 bus_size_t autogate_reg; 106 uint32_t autogate_mask; 107 uint32_t burst_mask; 108 }; 109 110 #define IL2B(x) __BIT(ilog2(x)) 111 #define IL2B_RANGE(x, y) __BITS(ilog2(x), ilog2(y)) 112 #define WIDTHS_1_2_4 IL2B_RANGE(4, 1) 113 #define WIDTHS_1_2_4_8 IL2B_RANGE(8, 1) 114 #define BURSTS_1_8 (IL2B(8)|IL2B(1)) 115 #define BURSTS_1_4_8_16 (IL2B(16)|IL2B(8)|IL2B(4)|IL2B(1)) 116 117 static const struct sun6idma_config sun6i_a31_dma_config = { 118 .num_channels = 16, 119 .burst_mask = __BITS(8,7), 120 .bursts = BURSTS_1_8, 121 .widths = WIDTHS_1_2_4, 122 }; 123 124 static const struct sun6idma_config sun8i_a83t_dma_config = { 125 .num_channels = 8, 126 .autogate = true, 127 .autogate_reg = 0x20, 128 .autogate_mask = 0x4, 129 .burst_mask = __BITS(8,7), 130 .bursts = BURSTS_1_8, 131 .widths = WIDTHS_1_2_4, 132 }; 133 134 static const struct sun6idma_config sun8i_h3_dma_config = { 135 .num_channels = 12, 136 .autogate = true, 137 .autogate_reg = 0x28, 138 .autogate_mask = 0x4, 139 .burst_mask = __BITS(7,6), 140 .bursts = BURSTS_1_4_8_16, 141 .widths = WIDTHS_1_2_4_8, 142 }; 143 144 static const struct sun6idma_config sun8i_v3s_dma_config = { 145 .num_channels = 8, 146 .autogate = true, 147 .autogate_reg = 0x20, 148 .autogate_mask = 0x4, 149 .burst_mask = __BITS(8,7), 150 .bursts = BURSTS_1_8, 151 .widths = WIDTHS_1_2_4, 152 }; 153 154 static const struct sun6idma_config sun20i_d1_dma_config = { 155 .num_channels = 16, 156 .autogate = true, 157 .autogate_reg = 0x28, 158 .autogate_mask = 0x4, 159 .burst_mask = __BITS(7,6), 160 .bursts = BURSTS_1_4_8_16, 161 .widths = WIDTHS_1_2_4_8, 162 }; 163 164 static const struct sun6idma_config sun50i_a64_dma_config = { 165 .num_channels = 8, 166 .autogate = true, 167 .autogate_reg = 0x28, 168 .autogate_mask = 0x4, 169 .burst_mask = __BITS(7,6), 170 .bursts = BURSTS_1_4_8_16, 171 .widths = WIDTHS_1_2_4_8, 172 }; 173 174 static const struct device_compatible_entry compat_data[] = { 175 { .compat = "allwinner,sun6i-a31-dma", 176 .data = &sun6i_a31_dma_config }, 177 { .compat = "allwinner,sun8i-a83t-dma", 178 .data = &sun8i_a83t_dma_config }, 179 { .compat = "allwinner,sun8i-h3-dma", 180 .data = &sun8i_h3_dma_config }, 181 { .compat = "allwinner,sun8i-v3s-dma", 182 .data = &sun8i_v3s_dma_config }, 183 { .compat = "allwinner,sun20i-d1-dma", 184 .data = &sun20i_d1_dma_config }, 185 { .compat = "allwinner,sun50i-a64-dma", 186 .data = &sun50i_a64_dma_config }, 187 188 DEVICE_COMPAT_EOL 189 }; 190 191 struct sun6idma_channel { 192 uint8_t ch_index; 193 void (*ch_callback)(void *); 194 void *ch_callbackarg; 195 u_int ch_portid; 196 void *ch_dmadesc; 197 }; 198 199 struct sun6idma_softc { 200 device_t sc_dev; 201 bus_space_tag_t sc_bst; 202 bus_space_handle_t sc_bsh; 203 bus_dma_tag_t sc_dmat; 204 int sc_phandle; 205 void *sc_ih; 206 207 uint32_t sc_burst_mask; 208 209 kmutex_t sc_lock; 210 211 struct sun6idma_channel *sc_chan; 212 u_int sc_nchan; 213 u_int sc_ndesc_ch; 214 uint8_t sc_widths; 215 uint8_t sc_bursts; 216 217 bus_dma_segment_t sc_dmasegs[1]; 218 bus_dmamap_t sc_dmamap; 219 void *sc_dmadescs; 220 }; 221 222 #define DMA_READ(sc, reg) \ 223 bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg)) 224 #define DMA_WRITE(sc, reg, val) \ 225 bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) 226 227 #define DESC_NUM ((MAXPHYS / MIN_PAGE_SIZE + 1) + 1) 228 #define DESC_LEN(n) \ 229 (sizeof(struct sun6idma_desc) * (n)) 230 #define DESC_OFFS(ch, n) \ 231 ((ch) * roundup2(DESC_LEN(DESC_NUM), COHERENCY_UNIT) + DESC_LEN(n)) 232 #define DESC_ADDR(sc, chp, n) \ 233 ((sc)->sc_dmamap->dm_segs[0].ds_addr + DESC_OFFS((chp)->ch_index, (n))) 234 235 static void * 236 sun6idma_acquire(device_t dev, const void *data, size_t len, 237 void (*cb)(void *), void *cbarg) 238 { 239 struct sun6idma_softc *sc = device_private(dev); 240 struct sun6idma_channel *ch = NULL; 241 uint32_t irqen; 242 uint8_t index; 243 244 if (len != 4) 245 return NULL; 246 247 const u_int portid = be32dec(data); 248 if (portid > __SHIFTOUT_MASK(DMA_CFG_SRC_DRQ_TYPE)) 249 return NULL; 250 251 mutex_enter(&sc->sc_lock); 252 253 for (index = 0; index < sc->sc_nchan; index++) { 254 if (sc->sc_chan[index].ch_callback == NULL) { 255 ch = &sc->sc_chan[index]; 256 ch->ch_callback = cb; 257 ch->ch_callbackarg = cbarg; 258 ch->ch_portid = portid; 259 260 irqen = DMA_READ(sc, index < 8 ? 261 DMA_IRQ_EN_REG0_REG : 262 DMA_IRQ_EN_REG1_REG); 263 irqen |= (index < 8 ? 264 DMA_IRQ_EN_REG0_PKG_IRQ_EN(index) : 265 DMA_IRQ_EN_REG1_PKG_IRQ_EN(index)); 266 DMA_WRITE(sc, index < 8 ? 267 DMA_IRQ_EN_REG0_REG : 268 DMA_IRQ_EN_REG1_REG, irqen); 269 270 break; 271 } 272 } 273 274 mutex_exit(&sc->sc_lock); 275 276 return ch; 277 } 278 279 static void 280 sun6idma_release(device_t dev, void *priv) 281 { 282 struct sun6idma_softc *sc = device_private(dev); 283 struct sun6idma_channel *ch = priv; 284 uint32_t irqen; 285 uint8_t index = ch->ch_index; 286 287 mutex_enter(&sc->sc_lock); 288 289 irqen = DMA_READ(sc, index < 8 ? 290 DMA_IRQ_EN_REG0_REG : 291 DMA_IRQ_EN_REG1_REG); 292 irqen &= ~(index < 8 ? 293 DMA_IRQ_EN_REG0_PKG_IRQ_EN(index) : 294 DMA_IRQ_EN_REG1_PKG_IRQ_EN(index)); 295 DMA_WRITE(sc, index < 8 ? 296 DMA_IRQ_EN_REG0_REG : 297 DMA_IRQ_EN_REG1_REG, irqen); 298 299 ch->ch_callback = NULL; 300 ch->ch_callbackarg = NULL; 301 302 mutex_exit(&sc->sc_lock); 303 } 304 305 static int 306 sun6idma_transfer(device_t dev, void *priv, struct fdtbus_dma_req *req) 307 { 308 struct sun6idma_softc *sc = device_private(dev); 309 struct sun6idma_channel *ch = priv; 310 struct sun6idma_desc *desc = ch->ch_dmadesc; 311 uint32_t src, dst, len, cfg, mem_cfg, dev_cfg; 312 uint32_t mem_width, dev_width, mem_burst, dev_burst; 313 314 if (req->dreq_nsegs > sc->sc_ndesc_ch) 315 return EINVAL; 316 317 if ((sc->sc_widths & 318 IL2B(req->dreq_mem_opt.opt_bus_width/NBBY)) == 0) 319 return EINVAL; 320 if ((sc->sc_widths & 321 IL2B(req->dreq_dev_opt.opt_bus_width/NBBY)) == 0) 322 return EINVAL; 323 if ((sc->sc_bursts & 324 IL2B(req->dreq_mem_opt.opt_burst_len)) == 0) 325 return EINVAL; 326 if ((sc->sc_bursts & 327 IL2B(req->dreq_dev_opt.opt_burst_len)) == 0) 328 return EINVAL; 329 330 mem_width = DMA_CFG_DATA_WIDTH(req->dreq_mem_opt.opt_bus_width); 331 dev_width = DMA_CFG_DATA_WIDTH(req->dreq_dev_opt.opt_bus_width); 332 mem_burst = DMA_CFG_BST_LEN(req->dreq_mem_opt.opt_burst_len); 333 dev_burst = DMA_CFG_BST_LEN(req->dreq_dev_opt.opt_burst_len); 334 335 mem_cfg = __SHIFTIN(mem_width, DMA_CFG_SRC_DATA_WIDTH) | 336 __SHIFTIN(mem_burst, sc->sc_burst_mask) | 337 __SHIFTIN(DMA_CFG_ADDR_MODE_LINEAR, DMA_CFG_SRC_ADDR_MODE) | 338 __SHIFTIN(DMA_CFG_DRQ_TYPE_SDRAM, DMA_CFG_SRC_DRQ_TYPE); 339 dev_cfg = __SHIFTIN(dev_width, DMA_CFG_SRC_DATA_WIDTH) | 340 __SHIFTIN(dev_burst, sc->sc_burst_mask) | 341 __SHIFTIN(DMA_CFG_ADDR_MODE_IO, DMA_CFG_SRC_ADDR_MODE) | 342 __SHIFTIN(ch->ch_portid, DMA_CFG_SRC_DRQ_TYPE); 343 344 for (size_t j = 0; j < req->dreq_nsegs; j++) { 345 if (req->dreq_dir == FDT_DMA_READ) { 346 src = req->dreq_dev_phys; 347 dst = req->dreq_segs[j].ds_addr; 348 cfg = mem_cfg << 16 | dev_cfg; 349 } else { 350 src = req->dreq_segs[j].ds_addr; 351 dst = req->dreq_dev_phys; 352 cfg = dev_cfg << 16 | mem_cfg; 353 } 354 len = req->dreq_segs[j].ds_len; 355 356 desc[j].dma_config = htole32(cfg); 357 desc[j].dma_srcaddr = htole32(src); 358 desc[j].dma_dstaddr = htole32(dst); 359 desc[j].dma_bcnt = htole32(len); 360 desc[j].dma_para = htole32(0); 361 if (j < req->dreq_nsegs - 1) 362 desc[j].dma_next = htole32(DESC_ADDR(sc, ch, j + 1)); 363 else 364 desc[j].dma_next = htole32(DMA_NULL); 365 } 366 367 #if notyet && maybenever 368 DMA_WRITE(sc, DMA_MODE_REG(ch->ch_index), 369 DMA_MODE_DST(MODE_HANDSHAKE)|DMA_MODE_SRC(MODE_HANDSHAKE)); 370 #endif 371 372 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, DESC_OFFS(ch->ch_index, 0), 373 DESC_LEN(req->dreq_nsegs), BUS_DMASYNC_PREWRITE); 374 375 DMA_WRITE(sc, DMA_START_ADDR_REG(ch->ch_index), 376 DESC_ADDR(sc, ch, 0)); 377 DMA_WRITE(sc, DMA_EN_REG(ch->ch_index), DMA_EN_EN); 378 379 if ((DMA_READ(sc, DMA_EN_REG(ch->ch_index)) & DMA_EN_EN) == 0) { 380 aprint_error_dev(sc->sc_dev, 381 "DMA Channel %u failed to start\n", ch->ch_index); 382 return EIO; 383 } 384 385 return 0; 386 } 387 388 static void 389 sun6idma_halt(device_t dev, void *priv) 390 { 391 struct sun6idma_softc *sc = device_private(dev); 392 struct sun6idma_channel *ch = priv; 393 394 DMA_WRITE(sc, DMA_EN_REG(ch->ch_index), 0); 395 } 396 397 static const struct fdtbus_dma_controller_func sun6idma_funcs = { 398 .acquire = sun6idma_acquire, 399 .release = sun6idma_release, 400 .transfer = sun6idma_transfer, 401 .halt = sun6idma_halt 402 }; 403 404 static int 405 sun6idma_intr(void *priv) 406 { 407 struct sun6idma_softc *sc = priv; 408 uint32_t pend0, pend1, bit; 409 uint64_t pend, mask; 410 uint8_t index; 411 412 pend0 = DMA_READ(sc, DMA_IRQ_PEND_REG0_REG); 413 pend1 = DMA_READ(sc, DMA_IRQ_PEND_REG1_REG); 414 if (!pend0 && !pend1) 415 return 0; 416 417 DMA_WRITE(sc, DMA_IRQ_PEND_REG0_REG, pend0); 418 DMA_WRITE(sc, DMA_IRQ_PEND_REG1_REG, pend1); 419 420 pend = pend0 | ((uint64_t)pend1 << 32); 421 422 while ((bit = ffs64(pend & DMA_IRQ_PKG_MASK)) != 0) { 423 mask = __BIT(bit - 1); 424 pend &= ~mask; 425 index = (bit - 1) / 4; 426 427 if (sc->sc_chan[index].ch_callback == NULL) 428 continue; 429 sc->sc_chan[index].ch_callback( 430 sc->sc_chan[index].ch_callbackarg); 431 } 432 433 return 1; 434 } 435 436 static int 437 sun6idma_match(device_t parent, cfdata_t cf, void *aux) 438 { 439 struct fdt_attach_args * const faa = aux; 440 441 return of_compatible_match(faa->faa_phandle, compat_data); 442 } 443 444 static void 445 sun6idma_attach(device_t parent, device_t self, void *aux) 446 { 447 struct sun6idma_softc * const sc = device_private(self); 448 struct fdt_attach_args * const faa = aux; 449 const int phandle = faa->faa_phandle; 450 size_t desclen; 451 const struct sun6idma_config *conf; 452 struct fdtbus_reset *rst; 453 struct clk *clk; 454 char intrstr[128]; 455 bus_addr_t addr; 456 bus_size_t size; 457 int error, nsegs; 458 u_int index; 459 460 if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) { 461 aprint_error(": couldn't get registers\n"); 462 return; 463 } 464 465 if ((clk = fdtbus_clock_get_index(phandle, 0)) == NULL || 466 clk_enable(clk) != 0) { 467 aprint_error(": couldn't enable clock\n"); 468 return; 469 } 470 if ((rst = fdtbus_reset_get_index(phandle, 0)) == NULL || 471 fdtbus_reset_deassert(rst) != 0) { 472 aprint_error(": couldn't de-assert reset\n"); 473 return; 474 } 475 476 sc->sc_dev = self; 477 sc->sc_phandle = phandle; 478 sc->sc_dmat = faa->faa_dmat; 479 sc->sc_bst = faa->faa_bst; 480 if (bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh) != 0) { 481 aprint_error(": couldn't map registers\n"); 482 return; 483 } 484 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SCHED); 485 486 if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) { 487 aprint_error(": failed to decode interrupt\n"); 488 return; 489 } 490 491 conf = of_compatible_lookup(phandle, compat_data)->data; 492 493 sc->sc_burst_mask = conf->burst_mask; 494 sc->sc_nchan = conf->num_channels; 495 sc->sc_widths = conf->widths; 496 sc->sc_bursts = conf->bursts; 497 sc->sc_chan = kmem_alloc(sizeof(*sc->sc_chan) * sc->sc_nchan, KM_SLEEP); 498 desclen = DESC_OFFS(sc->sc_nchan, 0); 499 sc->sc_ndesc_ch = DESC_OFFS(1, 0) / sizeof(struct sun6idma_desc); 500 501 aprint_naive("\n"); 502 aprint_normal(": DMA controller (%u channels)\n", sc->sc_nchan); 503 504 DMA_WRITE(sc, DMA_IRQ_EN_REG0_REG, 0); 505 DMA_WRITE(sc, DMA_IRQ_EN_REG1_REG, 0); 506 DMA_WRITE(sc, DMA_IRQ_PEND_REG0_REG, ~0); 507 DMA_WRITE(sc, DMA_IRQ_PEND_REG1_REG, ~0); 508 509 error = bus_dmamem_alloc(sc->sc_dmat, desclen, 0, 0, 510 sc->sc_dmasegs, 1, &nsegs, BUS_DMA_WAITOK); 511 if (error) 512 panic("bus_dmamem_alloc failed: %d", error); 513 error = bus_dmamem_map(sc->sc_dmat, sc->sc_dmasegs, nsegs, 514 desclen, (void **)&sc->sc_dmadescs, BUS_DMA_WAITOK); 515 if (error) 516 panic("bus_dmamem_map failed: %d", error); 517 error = bus_dmamap_create(sc->sc_dmat, desclen, 1, desclen, 0, 518 BUS_DMA_WAITOK, &sc->sc_dmamap); 519 if (error) 520 panic("bus_dmamap_create failed: %d", error); 521 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, 522 sc->sc_dmadescs, desclen, NULL, BUS_DMA_WAITOK); 523 if (error) 524 panic("bus_dmamap_load failed: %d", error); 525 526 for (index = 0; index < sc->sc_nchan; index++) { 527 struct sun6idma_channel *ch = &sc->sc_chan[index]; 528 ch->ch_index = index; 529 ch->ch_dmadesc = (void *)((uintptr_t)sc->sc_dmadescs + DESC_OFFS(index, 0)); 530 ch->ch_callback = NULL; 531 ch->ch_callbackarg = NULL; 532 533 DMA_WRITE(sc, DMA_EN_REG(index), 0); 534 } 535 536 if (conf->autogate) 537 DMA_WRITE(sc, conf->autogate_reg, conf->autogate_mask); 538 539 sc->sc_ih = fdtbus_intr_establish_xname(phandle, 0, IPL_SCHED, 540 FDT_INTR_MPSAFE, sun6idma_intr, sc, device_xname(sc->sc_dev)); 541 if (sc->sc_ih == NULL) { 542 aprint_error_dev(sc->sc_dev, 543 "couldn't establish interrupt on %s\n", intrstr); 544 return; 545 } 546 aprint_normal_dev(sc->sc_dev, "interrupting on %s\n", intrstr); 547 548 fdtbus_register_dma_controller(self, phandle, &sun6idma_funcs); 549 } 550 551 CFATTACH_DECL_NEW(sun6i_dma, sizeof(struct sun6idma_softc), 552 sun6idma_match, sun6idma_attach, NULL, NULL); 553 554 #ifdef DDB 555 void sun6idma_dump(void); 556 557 void 558 sun6idma_dump(void) 559 { 560 struct sun6idma_softc *sc; 561 device_t dev; 562 u_int index; 563 564 dev = device_find_by_driver_unit("sun6idma", 0); 565 if (dev == NULL) 566 return; 567 sc = device_private(dev); 568 569 device_printf(dev, "DMA_IRQ_EN_REG0_REG: %08x\n", DMA_READ(sc, DMA_IRQ_EN_REG0_REG)); 570 device_printf(dev, "DMA_IRQ_EN_REG1_REG: %08x\n", DMA_READ(sc, DMA_IRQ_EN_REG1_REG)); 571 device_printf(dev, "DMA_IRQ_PEND_REG0_REG: %08x\n", DMA_READ(sc, DMA_IRQ_PEND_REG0_REG)); 572 device_printf(dev, "DMA_IRQ_PEND_REG1_REG: %08x\n", DMA_READ(sc, DMA_IRQ_PEND_REG1_REG)); 573 device_printf(dev, "DMA_STA_REG: %08x\n", DMA_READ(sc, DMA_STA_REG)); 574 575 for (index = 0; index < sc->sc_nchan; index++) { 576 struct sun6idma_channel *ch = &sc->sc_chan[index]; 577 if (ch->ch_callback == NULL) 578 continue; 579 device_printf(dev, " %2d: DMA_EN_REG: %08x\n", index, DMA_READ(sc, DMA_EN_REG(index))); 580 device_printf(dev, " %2d: DMA_PAU_REG: %08x\n", index, DMA_READ(sc, DMA_PAU_REG(index))); 581 device_printf(dev, " %2d: DMA_START_ADDR_REG: %08x\n", index, DMA_READ(sc, DMA_START_ADDR_REG(index))); 582 device_printf(dev, " %2d: DMA_CFG_REG: %08x\n", index, DMA_READ(sc, DMA_CFG_REG(index))); 583 device_printf(dev, " %2d: DMA_CUR_SRC_REG: %08x\n", index, DMA_READ(sc, DMA_CUR_SRC_REG(index))); 584 device_printf(dev, " %2d: DMA_CUR_DEST_REG: %08x\n", index, DMA_READ(sc, DMA_CUR_DEST_REG(index))); 585 device_printf(dev, " %2d: DMA_BCNT_LEFT_REG: %08x\n", index, DMA_READ(sc, DMA_BCNT_LEFT_REG(index))); 586 device_printf(dev, " %2d: DMA_PARA_REG: %08x\n", index, DMA_READ(sc, DMA_PARA_REG(index))); 587 device_printf(dev, " %2d: DMA_MODE_REG: %08x\n", index, DMA_READ(sc, DMA_MODE_REG(index))); 588 device_printf(dev, " %2d: DMA_FDESC_ADDR_REG: %08x\n", index, DMA_READ(sc, DMA_FDESC_ADDR_REG(index))); 589 device_printf(dev, " %2d: DMA_PKG_NUM_REG: %08x\n", index, DMA_READ(sc, DMA_PKG_NUM_REG(index))); 590 } 591 } 592 #endif 593