/* $NetBSD: acpi_machdep.c,v 1.28 2024/12/30 12:19:21 jmcneill Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill@invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "pci.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_machdep.c,v 1.28 2024/12/30 12:19:21 jmcneill Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

#include <dev/fdt/fdtvar.h>

#include <dev/acpi/acpica.h>
#include <dev/acpi/acpivar.h>
#if NPCI > 0
#include <dev/acpi/acpi_mcfg.h>
#endif
#include <arm/acpi/acpi_iort.h>

#include <arm/arm/efi_runtime.h>

#include <arm/pic/picvar.h>

#include <arm/locore.h>

#include <machine/acpi_machdep.h>

extern struct bus_space arm_generic_bs_tag;
extern struct arm32_bus_dma_tag acpi_coherent_dma_tag;
extern struct arm32_bus_dma_tag arm_generic_dma_tag;

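/*
 * ACPI interrupt dispatch. Several handlers may be attached to the same
 * IRQ: each established vector (struct acpi_intrvec) keeps a list of
 * handlers (struct acpi_intrhandler) which acpi_md_intr() calls in turn.
 */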
struct acpi_intrhandler {
	int				(*ah_fn)(void *);
	void				*ah_arg;
	TAILQ_ENTRY(acpi_intrhandler)	ah_list;
};

struct acpi_intrvec {
	int				ai_irq;
	int				ai_ipl;
	int				ai_type;
	bool				ai_mpsafe;
	int				ai_refcnt;
	void				*ai_arg;
	void				*ai_ih;
	TAILQ_HEAD(, acpi_intrhandler)	ai_handlers;
	TAILQ_ENTRY(acpi_intrvec)	ai_list;
};

static TAILQ_HEAD(, acpi_intrvec) acpi_intrvecs =
    TAILQ_HEAD_INITIALIZER(acpi_intrvecs);

bus_dma_tag_t	arm_acpi_dma32_tag(struct acpi_softc *, struct acpi_devnode *);
bus_dma_tag_t	arm_acpi_dma64_tag(struct acpi_softc *, struct acpi_devnode *);

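/*
 * Select pmap flags for a physical address based on the UEFI memory map
 * that the boot loader passes in the "netbsd,uefi-memmap" property of
 * /chosen. Each map entry is 28 bytes: type, physical start, number of
 * pages, and attributes. ACPI reclaim memory is mapped write-back; I/O
 * regions and addresses not covered by the map are treated as device
 * memory.
 */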
static int
acpi_md_pmapflags(paddr_t pa)
{
	int len;

	const int chosen = OF_finddevice("/chosen");
	if (chosen == -1)
		return 0;

	const uint32_t *map = fdtbus_get_prop(chosen, "netbsd,uefi-memmap", &len);
	if (map == NULL)
		return 0;

	while (len >= 28) {
		const uint32_t type = be32dec(&map[0]);
		const uint64_t phys_start = be64dec(&map[1]);
		const uint64_t num_pages = be64dec(&map[3]);
		const uint64_t attr = be64dec(&map[5]);

		if (pa >= phys_start && pa < phys_start + (num_pages * EFI_PAGE_SIZE)) {
			switch (type) {
			case EFI_MD_TYPE_RECLAIM:
				/* ACPI table memory */
				return PMAP_WRITE_BACK;

			case EFI_MD_TYPE_IOMEM:
			case EFI_MD_TYPE_IOPORT:
				return PMAP_DEV_NP;

			default:
				if ((attr & EFI_MD_ATTR_WB) != 0)
					return PMAP_WRITE_BACK;
				else if ((attr & EFI_MD_ATTR_WC) != 0)
					return PMAP_WRITE_COMBINE;
				else if ((attr & EFI_MD_ATTR_WT) != 0)
					return 0;	/* XXX */

				return PMAP_DEV_NP;
			}
		}

		map += 7;
		len -= 28;
	}

	/* Not found; assume device memory */
	return PMAP_DEV;
}

ACPI_STATUS
acpi_md_OsInitialize(void)
{
	return AE_OK;
}

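/*
 * The boot loader publishes the physical address of the ACPI root table
 * pointer (RSDP) in the "netbsd,acpi-root-table" property of /chosen.
 */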
ACPI_PHYSICAL_ADDRESS
acpi_md_OsGetRootPointer(void)
{
	uint64_t pa;

	const int chosen = OF_finddevice("/chosen");
	if (chosen == -1)
		return 0;

	if (of_getprop_uint64(chosen, "netbsd,acpi-root-table", &pa) != 0)
		return 0;

	return (ACPI_PHYSICAL_ADDRESS)pa;
}

ACPI_STATUS
acpi_md_OsInstallInterruptHandler(UINT32 irq, ACPI_OSD_HANDLER handler, void *context,
    void **cookiep, const char *xname)
{
	return AE_NOT_IMPLEMENTED;
}

void
acpi_md_OsRemoveInterruptHandler(void *cookie)
{
	intr_disestablish(cookie);
}

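/*
 * Map a physical region for ACPICA. Mapping attributes are derived from
 * the UEFI memory map (see acpi_md_pmapflags()) so that ACPI tables are
 * mapped cacheable and MMIO regions as device memory.
 */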
ACPI_STATUS
acpi_md_OsMapMemory(ACPI_PHYSICAL_ADDRESS pa, UINT32 size, void **vap)
{
	paddr_t spa, epa, curpa;
	vaddr_t va, curva;

	spa = trunc_page(pa);
	epa = round_page(pa + size);

	va = uvm_km_alloc(kernel_map, epa - spa, 0, UVM_KMF_VAONLY);
	if (va == 0)
		return AE_NO_MEMORY;

	const int pmapflags = acpi_md_pmapflags(spa);

	aprint_debug("%s: 0x%lx 0x%x flags = %#x\n", __func__, pa, size, pmapflags);

	for (curpa = spa, curva = va; curpa < epa; curpa += PAGE_SIZE, curva += PAGE_SIZE)
		pmap_kenter_pa(curva, curpa, VM_PROT_READ | VM_PROT_WRITE, pmapflags);
	pmap_update(pmap_kernel());

	*vap = (void *)(va + (pa - spa));

	return AE_OK;
}

void
acpi_md_OsUnmapMemory(void *va, UINT32 size)
{
	vaddr_t ova;
	vsize_t osz;

	ova = trunc_page((vaddr_t)va);
	osz = round_page((vaddr_t)va + size) - ova;

	pmap_kremove(ova, osz);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, ova, osz, UVM_KMF_VAONLY);
}

ACPI_STATUS
acpi_md_OsGetPhysicalAddress(void *va, ACPI_PHYSICAL_ADDRESS *pap)
{
	paddr_t pa;

	if (!pmap_extract(pmap_kernel(), (vaddr_t)va, &pa))
		return AE_ERROR;

	*pap = pa;

	return AE_OK;
}

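/*
 * acpi_md_OsReadable()/acpi_md_OsWritable(): verify that every page in
 * the range is mapped in the kernel pmap with the access flag set and
 * the expected access permission bits in its PTE.
 */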
BOOLEAN
acpi_md_OsReadable(void *va, UINT32 len)
{
	vaddr_t sva, eva;
	pt_entry_t *pte;

	sva = trunc_page((vaddr_t)va);
	eva = round_page((vaddr_t)va + len);

	if (sva < VM_MIN_KERNEL_ADDRESS)
		return FALSE;

	for (; sva < eva; sva += PAGE_SIZE) {
		pte = kvtopte(sva);
		if ((*pte & (LX_BLKPAG_AF|LX_BLKPAG_AP)) != (LX_BLKPAG_AF|LX_BLKPAG_AP_RO))
			return FALSE;
	}

	return TRUE;
}

BOOLEAN
acpi_md_OsWritable(void *va, UINT32 len)
{
	vaddr_t sva, eva;
	pt_entry_t *pte;

	sva = trunc_page((vaddr_t)va);
	eva = round_page((vaddr_t)va + len);

	if (sva < VM_MIN_KERNEL_ADDRESS)
		return FALSE;

	for (; sva < eva; sva += PAGE_SIZE) {
		pte = kvtopte(sva);
		if ((*pte & (LX_BLKPAG_AF|LX_BLKPAG_AP)) != (LX_BLKPAG_AF|LX_BLKPAG_AP_RW))
			return FALSE;
	}

	return TRUE;
}

void
acpi_md_OsEnableInterrupt(void)
{
	cpsie(I32_bit);
}

void
acpi_md_OsDisableInterrupt(void)
{
	cpsid(I32_bit);
}

static struct acpi_intrvec *
acpi_md_intr_lookup(int irq)
{
	struct acpi_intrvec *ai;

	TAILQ_FOREACH(ai, &acpi_intrvecs, ai_list) {
		if (ai->ai_irq == irq) {
			return ai;
		}
	}

	return NULL;
}

static int
acpi_md_intr(void *arg)
{
	struct acpi_intrvec *ai = arg;
	struct acpi_intrhandler *ah;
	int rv = 0;

	TAILQ_FOREACH(ah, &ai->ai_handlers, ah_list) {
		rv += ah->ah_fn(ah->ah_arg);
	}

	return rv;
}

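/*
 * Establish an interrupt handler. An IRQ may be shared only when the
 * ipl, trigger type, and MP-safeness of the new handler match the
 * existing vector and that vector was established with a non-NULL
 * argument; a NULL argument installs the handler directly, bypassing
 * the acpi_md_intr() dispatch wrapper.
 */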
void *
acpi_md_intr_establish(uint32_t irq, int ipl, int type, int (*handler)(void *), void *arg, bool mpsafe, const char *xname)
{
	struct acpi_intrvec *ai;
	struct acpi_intrhandler *ah;

	ai = acpi_md_intr_lookup(irq);
	if (ai == NULL) {
		ai = kmem_zalloc(sizeof(*ai), KM_SLEEP);
		ai->ai_refcnt = 0;
		ai->ai_irq = irq;
		ai->ai_ipl = ipl;
		ai->ai_type = type;
		ai->ai_mpsafe = mpsafe;
		ai->ai_arg = arg;
		TAILQ_INIT(&ai->ai_handlers);
		if (arg == NULL) {
			ai->ai_ih = intr_establish_xname(irq, ipl,
			    type | (mpsafe ? IST_MPSAFE : 0), handler, NULL,
			    xname);
		} else {
			ai->ai_ih = intr_establish_xname(irq, ipl,
			    type | (mpsafe ? IST_MPSAFE : 0), acpi_md_intr, ai,
			    xname);
		}
		if (ai->ai_ih == NULL) {
			kmem_free(ai, sizeof(*ai));
			return NULL;
		}
		TAILQ_INSERT_TAIL(&acpi_intrvecs, ai, ai_list);
	} else {
		if (ai->ai_arg == NULL) {
			printf("ACPI: cannot share irq with NULL arg\n");
			return NULL;
		}
		if (ai->ai_ipl != ipl) {
			printf("ACPI: cannot share irq with different ipl\n");
			return NULL;
		}
		if (ai->ai_type != type) {
			printf("ACPI: cannot share edge and level interrupts\n");
			return NULL;
		}
		if (ai->ai_mpsafe != mpsafe) {
			printf("ACPI: cannot share between mpsafe/non-mpsafe\n");
			return NULL;
		}
	}

	ai->ai_refcnt++;

	ah = kmem_zalloc(sizeof(*ah), KM_SLEEP);
	ah->ah_fn = handler;
	ah->ah_arg = arg;
	TAILQ_INSERT_TAIL(&ai->ai_handlers, ah, ah_list);

	return ai->ai_ih;
}

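/*
 * Tear down an interrupt handler. Disestablishing a vector that is
 * still shared by more than one handler is not supported.
 */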
void
acpi_md_intr_disestablish(void *ih)
{
	struct acpi_intrvec *ai;
	struct acpi_intrhandler *ah;

	TAILQ_FOREACH(ai, &acpi_intrvecs, ai_list) {
		if (ai->ai_ih == ih) {
			KASSERT(ai->ai_refcnt > 0);
			if (ai->ai_refcnt > 1) {
				panic("%s: cannot disestablish shared irq", __func__);
			}

			TAILQ_REMOVE(&acpi_intrvecs, ai, ai_list);
			ah = TAILQ_FIRST(&ai->ai_handlers);
			kmem_free(ah, sizeof(*ah));
			intr_disestablish(ai->ai_ih);
			kmem_free(ai, sizeof(*ai));
			return;
		}
	}

	panic("%s: interrupt not established", __func__);
}

void
acpi_md_intr_mask(void *ih)
{
	intr_mask(ih);
}

void
acpi_md_intr_unmask(void *ih)
{
	intr_unmask(ih);
}

int
acpi_md_sleep(int state)
{
	printf("ERROR: ACPI sleep not implemented on this platform\n");
	return -1;
}

uint32_t
acpi_md_pdc(void)
{
	return 0;
}

uint32_t
acpi_md_ncpus(void)
{
	return kcpuset_countset(kcpuset_attached);
}

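/*
 * MADT/GTDT walkers: attach CPUs (GICC entries), the GIC distributor,
 * and the generic timer through autoconf using the "acpimadtbus" and
 * "acpigtdtbus" interface attributes.
 */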
static ACPI_STATUS
acpi_md_madt_probe_cpu(ACPI_SUBTABLE_HEADER *hdrp, void *aux)
{
	struct acpi_softc * const sc = aux;

	if (hdrp->Type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
		config_found(sc->sc_dev, hdrp, NULL,
		    CFARGS(.iattr = "acpimadtbus"));

	return AE_OK;
}

static ACPI_STATUS
acpi_md_madt_probe_gic(ACPI_SUBTABLE_HEADER *hdrp, void *aux)
{
	struct acpi_softc * const sc = aux;

	if (hdrp->Type == ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR)
		config_found(sc->sc_dev, hdrp, NULL,
		    CFARGS(.iattr = "acpimadtbus"));

	return AE_OK;
}

static ACPI_STATUS
acpi_md_gtdt_probe(ACPI_GTDT_HEADER *hdrp, void *aux)
{
	struct acpi_softc * const sc = aux;

	config_found(sc->sc_dev, hdrp, NULL,
	    CFARGS(.iattr = "acpigtdtbus"));

	return AE_OK;
}

#if NPCI > 0
static struct bus_space acpi_md_mcfg_bs_tag;

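/*
 * Wrap the generic bus_space map method so that PCI configuration space
 * (MCFG/ECAM) is always mapped with BUS_SPACE_MAP_NONPOSTED.
 */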
static int
acpi_md_mcfg_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flag,
    bus_space_handle_t *bshp)
{
	return arm_generic_bs_tag.bs_map(t, bpa, size,
	    flag | BUS_SPACE_MAP_NONPOSTED, bshp);
}
#endif

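/*
 * Machine-dependent hook called from the MI ACPI driver: set up
 * MCFG-based PCI configuration space access and probe CPUs, the
 * interrupt controller, and the timer from the MADT and GTDT.
 */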
void
acpi_md_callback(struct acpi_softc *sc)
{
#if NPCI > 0
	acpi_md_mcfg_bs_tag = arm_generic_bs_tag;
	acpi_md_mcfg_bs_tag.bs_map = acpi_md_mcfg_bs_map;
	acpimcfg_init(&acpi_md_mcfg_bs_tag, NULL);
#endif

	if (acpi_madt_map() != AE_OK)
		panic("Failed to map MADT");
	acpi_madt_walk(acpi_md_madt_probe_cpu, sc);
	acpi_madt_walk(acpi_md_madt_probe_gic, sc);
	acpi_madt_unmap();

	if (acpi_gtdt_map() != AE_OK)
		panic("Failed to map GTDT");
	acpi_gtdt_walk(acpi_md_gtdt_probe, sc);
	acpi_gtdt_unmap();
}

static const char * const module_hid[] = {
	"ACPI0004",	/* Module device */
	NULL
};

static ACPI_HANDLE
arm_acpi_dma_module(struct acpi_softc *sc, struct acpi_devnode *ad)
{
	ACPI_HANDLE tmp;
	ACPI_STATUS rv;

	/*
	 * Search up the tree for a module device with a _DMA method.
	 */
	for (; ad != NULL; ad = ad->ad_parent) {
		if (ad->ad_devinfo->Type != ACPI_TYPE_DEVICE)
			continue;
		if (!acpi_match_hid(ad->ad_devinfo, module_hid))
			continue;
		rv = AcpiGetHandle(ad->ad_handle, "_DMA", &tmp);
		if (ACPI_SUCCESS(rv))
			return ad->ad_handle;
	}

	return NULL;
}

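/*
 * Build the bus_dma ranges for a device. If an enclosing module device
 * (ACPI0004) provides a _DMA method its translations are used; otherwise
 * fall back to a flat 1:1 range, limited by the IORT named component's
 * MemoryAddressLimit when one is present.
 */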
static void
arm_acpi_dma_init_ranges(struct acpi_softc *sc, struct acpi_devnode *ad,
    struct arm32_bus_dma_tag *dmat, uint32_t flags)
{
	struct acpi_resources res;
	struct acpi_mem *mem;
	ACPI_HANDLE module;
	ACPI_IORT_NAMED_COMPONENT *nc;
	ACPI_STATUS rv;
	uintptr_t dma_mask;
	int n;

	module = arm_acpi_dma_module(sc, ad->ad_parent);
	if (module == NULL) {
default_tag:
		rv = acpi_iort_named_component(ad, &nc);
		if (ACPI_SUCCESS(rv) && nc->MemoryAddressLimit != 0) {
			dma_mask = __BITS(nc->MemoryAddressLimit - 1, 0);
		} else {
			dma_mask = UINTPTR_MAX;
		}

		/* No translation required */
		dmat->_nranges = 1;
		dmat->_ranges = kmem_zalloc(sizeof(*dmat->_ranges), KM_SLEEP);
		dmat->_ranges[0].dr_sysbase = 0;
		dmat->_ranges[0].dr_busbase = 0;
		dmat->_ranges[0].dr_len = dma_mask;
		dmat->_ranges[0].dr_flags = flags;
		return;
	}

	rv = acpi_resource_parse_any(sc->sc_dev, module, "_DMA", &res,
	    &acpi_resource_parse_ops_quiet);
	if (ACPI_FAILURE(rv)) {
		aprint_error_dev(sc->sc_dev,
		    "failed to parse _DMA on %s: %s\n",
		    acpi_name(module), AcpiFormatException(rv));
		goto default_tag;
	}
	if (res.ar_nmem == 0) {
		acpi_resource_cleanup(&res);
		goto default_tag;
	}

	dmat->_nranges = res.ar_nmem;
	dmat->_ranges = kmem_zalloc(sizeof(*dmat->_ranges) * res.ar_nmem,
	    KM_SLEEP);

	for (n = 0; n < res.ar_nmem; n++) {
		mem = acpi_res_mem(&res, n);
		dmat->_ranges[n].dr_busbase = mem->ar_base;
		dmat->_ranges[n].dr_sysbase = mem->ar_xbase;
		dmat->_ranges[n].dr_len = mem->ar_length;
		dmat->_ranges[n].dr_flags = flags;

		aprint_debug_dev(sc->sc_dev,
		    "%s: DMA sys %#lx-%#lx bus %#lx-%#lx%s\n",
		    acpi_name(ad->ad_handle),
		    dmat->_ranges[n].dr_sysbase,
		    dmat->_ranges[n].dr_sysbase + dmat->_ranges[n].dr_len - 1,
		    dmat->_ranges[n].dr_busbase,
		    dmat->_ranges[n].dr_busbase + dmat->_ranges[n].dr_len - 1,
		    flags ? " (coherent)" : "");
	}

	acpi_resource_cleanup(&res);
}

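/*
 * Determine cache coherency by evaluating _CCA on the device or its
 * nearest ancestor that provides one; devices are assumed coherent when
 * no _CCA is found.
 */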
static uint32_t
arm_acpi_dma_flags(struct acpi_softc *sc, struct acpi_devnode *ad)
{
	ACPI_INTEGER cca = 1;	/* default cache coherent */
	ACPI_STATUS rv;

	for (; ad != NULL; ad = ad->ad_parent) {
		if (ad->ad_devinfo->Type != ACPI_TYPE_DEVICE)
			continue;

		rv = acpi_eval_integer(ad->ad_handle, "_CCA", &cca);
		if (ACPI_SUCCESS(rv))
			break;
	}

	return cca ? _BUS_DMAMAP_COHERENT : 0;
}

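/*
 * acpi_get_dma_tag(): the default DMA tag is the 64-bit tag constrained
 * to the low 4GB with bus_dmatag_subregion().
 */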
bus_dma_tag_t
arm_acpi_dma32_tag(struct acpi_softc *sc, struct acpi_devnode *ad)
{
	bus_dma_tag_t dmat64, dmat32;
	int error;

	if (ad->ad_dmat != NULL)
		return ad->ad_dmat;

	dmat64 = arm_acpi_dma64_tag(sc, ad);

	const uint32_t flags = arm_acpi_dma_flags(sc, ad);
	error = bus_dmatag_subregion(dmat64, 0, UINT32_MAX, &dmat32, flags);
	if (error != 0)
		panic("arm_acpi_dma32_tag: bus_dmatag_subregion returned %d",
		    error);

	return dmat32;
}
__strong_alias(acpi_get_dma_tag,arm_acpi_dma32_tag);

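/*
 * acpi_get_dma64_tag(): full 64-bit DMA tag with ranges and coherency
 * derived from _DMA, _CCA, and the IORT.
 */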
bus_dma_tag_t
arm_acpi_dma64_tag(struct acpi_softc *sc, struct acpi_devnode *ad)
{
	struct arm32_bus_dma_tag *dmat;

	if (ad->ad_dmat64 != NULL)
		return ad->ad_dmat64;

	dmat = kmem_alloc(sizeof(*dmat), KM_SLEEP);
	*dmat = arm_generic_dma_tag;

	const uint32_t flags = arm_acpi_dma_flags(sc, ad);
	arm_acpi_dma_init_ranges(sc, ad, dmat, flags);

	return dmat;
}
__strong_alias(acpi_get_dma64_tag,arm_acpi_dma64_tag);
644