/**
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <interface/compat/vchi_bsd.h>

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/kmem.h>

#include <linux/completion.h>

#include <uvm/uvm_extern.h>

#include <arm/cpufunc.h>

#include <arch/arm/broadcom/bcm2835_mbox.h>
#include <arch/arm/broadcom/bcm2835var.h>

#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0

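/*
 * Classify a virtual address as user or kernel space, so the copy and
 * wiring paths below can choose between copyin()/copyout()/uvm_vslock()
 * and plain kcopy().
 */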
#define IS_USER_ADDRESS(va)	\
	((vaddr_t)(va) >= VM_MIN_ADDRESS && (vaddr_t)(va) < VM_MAX_ADDRESS)

#include "vchiq_arm.h"
#include "vchiq_2835.h"
#include "vchiq_netbsd.h"
#include "vchiq_connected.h"

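/*
 * The VideoCore views bulk buffers in units of 4KB pages: VCPAGE_OFFSET
 * masks the byte offset within such a page and VCPAGE_SHIFT converts
 * between byte and page counts when building pagelist entries.
 */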
#define VCPAGE_OFFSET 0x0fff
#define VCPAGE_SHIFT  12

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

typedef struct vchiq_2835_state_struct {
	int inited;
	VCHIQ_ARM_STATE_T arm_state;
} VCHIQ_2835_ARM_STATE_T;

/* BSD DMA */
static bus_dma_tag_t dma_tag;
static bus_dmamap_t dma_map;

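/*
 * Bulk-read fragment bookkeeping.  g_free_fragments heads a singly
 * linked free list threaded through the first pointer-sized word of
 * each unused fragment buffer; g_free_fragments_sema counts the free
 * fragments and g_free_fragments_mutex serialises list manipulation.
 */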
static unsigned int g_cache_line_size = CACHE_LINE_SIZE;
static unsigned int g_fragments_size;
static char *g_fragments_base;
static char *g_free_fragments;

struct semaphore g_free_fragments_sema;
static struct semaphore g_free_fragments_mutex;

void
vchiq_platform_attach(bus_dma_tag_t tag)
{
	dma_tag = tag;
}

int __init
vchiq_platform_init(VCHIQ_STATE_T *state)
{
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	bus_dma_segment_t dma_segs[1];
	int dma_nsegs;
	void *slot_mem;
	bus_addr_t slot_phys;
	int slot_mem_size, frag_mem_size;
	int err;
	int i;

	_sema_init(&g_free_fragments_mutex, 1);

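	/* The ARM1176 core in the BCM2835 uses 32-byte cache lines. */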
	g_cache_line_size = 32;

	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	dma_nsegs = __arraycount(dma_segs);
	err = bus_dmamem_alloc(dma_tag,
	    slot_mem_size + frag_mem_size, PAGE_SIZE, 0,
	    dma_segs, dma_nsegs, &dma_nsegs, BUS_DMA_WAITOK);
	if (err) {
		vchiq_log_error(vchiq_core_log_level, "Unable to allocate channel memory");
		err = -ENOMEM;
		goto failed_alloc;
	}

	err = bus_dmamem_map(dma_tag,
	    dma_segs, dma_nsegs, slot_mem_size + frag_mem_size,
	    (void **)&slot_mem, BUS_DMA_COHERENT | BUS_DMA_WAITOK);
	if (err) {
		vchiq_log_error(vchiq_core_log_level, "Unable to map channel memory");
		err = -ENOMEM;
		goto failed_alloc;
	}

	err = bus_dmamap_create(dma_tag,
	    slot_mem_size + frag_mem_size, 1,	/* maxsize, nsegments */
	    slot_mem_size + frag_mem_size, 0,	/* maxsegsize, boundary */
	    BUS_DMA_WAITOK,
	    &dma_map);
	if (err) {
		vchiq_log_error(vchiq_core_log_level, "Unable to create DMA map");
		err = -ENOMEM;
		goto failed_alloc;
	}

	err = bus_dmamap_load(dma_tag, dma_map, slot_mem,
	    slot_mem_size + frag_mem_size, NULL, BUS_DMA_WAITOK);
	if (err) {
		vchiq_log_error(vchiq_core_log_level, "cannot load DMA map (%d)", err);
		err = -ENOMEM;
		goto failed_load;
	}
	slot_phys = dma_map->dm_segs[0].ds_addr;

	vchiq_log_info(vchiq_arm_log_level,
	    "%s: slot_phys = %lx\n", __func__, slot_phys);

	WARN_ON(((uintptr_t)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero) {
		err = -EINVAL;
		goto failed_init_slots;
	}

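	/*
	 * Tell the VideoCore where the fragment area starts (as a bus
	 * address) and how many fragments it contains.
	 */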
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;
	slot_mem_size += frag_mem_size;

	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i*g_fragments_size] =
			&g_fragments_base[(i + 1)*g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;

	_sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
		VCHIQ_SUCCESS) {
		err = -EINVAL;
		goto failed_vchiq_init;
	}

	/* Send the base address of the slots to VideoCore */
	dsb(sy); /* Ensure all writes have completed */

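	/*
	 * Flush the freshly initialised slots out to memory before the
	 * VideoCore learns their address, and invalidate afterwards so
	 * the ARM observes whatever the VideoCore writes back.
	 */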
	bus_dmamap_sync(dma_tag, dma_map, 0, slot_mem_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bcm_mbox_write(BCM2835_MBOX_CHAN_VCHIQ, (unsigned int)slot_phys);
	bus_dmamap_sync(dma_tag, dma_map, 0, slot_mem_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq_init - done (slots %p, phys %x)",
		vchiq_slot_zero, (unsigned int)slot_phys);

	vchiq_call_connected_callbacks();

	return 0;

failed_vchiq_init:
failed_init_slots:
failed_load:
	bus_dmamap_unload(dma_tag, dma_map);
	bus_dmamap_destroy(dma_tag, dma_map);
failed_alloc:

	return err;
}

void __exit
vchiq_platform_exit(VCHIQ_STATE_T *state)
{

	bus_dmamap_unload(dma_tag, dma_map);
	bus_dmamap_destroy(dma_tag, dma_map);
}


VCHIQ_STATUS_T
vchiq_platform_init_state(VCHIQ_STATE_T *state)
{
	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;

	state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T),
	    GFP_KERNEL);
	((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 1;
	status = vchiq_arm_init_state(state,
	    &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state);
	if (status != VCHIQ_SUCCESS)
		((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 0;
	return status;
}

VCHIQ_ARM_STATE_T *
vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
{
	if (!((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited)
		BUG();
	return &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state;
}

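/*
 * Copy a message payload from either a user or a kernel virtual
 * address; the VCHIQ core calls this without knowing where the
 * payload lives.
 */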
int
vchiq_copy_from_user(void *dst, const void *src, int size)
{
	vaddr_t va = (vaddr_t)src;

	if (IS_USER_ADDRESS(va)) {
		int error = copyin(src, dst, size);
		return error ? VCHIQ_ERROR : VCHIQ_SUCCESS;
	} else {
		kcopy(src, dst, size);
		return VCHIQ_SUCCESS;
	}
}

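/*
 * Per-bulk-transfer bookkeeping: the pagelist handed to the VideoCore,
 * the DMA resources behind it, and the (wired) user or kernel buffer
 * that the transfer describes.
 */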
typedef struct bulkinfo_struct {
	void		*pagelist;
	bus_dma_segment_t pagelist_sgs[1];
	bus_size_t	pagelist_size;
	bus_dmamap_t	pagelist_map;
	bus_dmamap_t	dmamap;
	struct proc	*proc;
	void		*buf;
	int		size;
} BULKINFO_T;

/* There is a potential problem with partial cache lines (pages?)
** at the ends of the block when reading. If the CPU accessed anything in
** the same line (page?) then it may have pulled old data into the cache,
** obscuring the new data underneath. We can solve this by transferring the
** partial cache lines separately, and allowing the ARM to copy into the
** cached area.
*/
VCHIQ_STATUS_T
vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
	void *buf, int size, int dir)
{
	PAGELIST_T *pagelist;
	BULKINFO_T *bi;
	int nsegs;
	int ret;

	vchiq_log_info(vchiq_arm_log_level,
	    "%s: buf %p size %08x dir %s", __func__, buf, size,
	    dir == VCHIQ_BULK_RECEIVE ? "read" : "write");

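	/*
	 * The pagelist needs one entry for every VideoCore page the
	 * buffer can touch, so size it from the page-rounded span of
	 * [buf, buf + size).
	 */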
	vaddr_t va = (vaddr_t)buf;
	const size_t maxsegs = atop(round_page(va + size) - trunc_page(va));
	const int uvmflags = (dir == VCHIQ_BULK_RECEIVE ?
	    VM_PROT_READ : VM_PROT_WRITE);
	const int dmaflags = (dir == VCHIQ_BULK_RECEIVE ?
	    BUS_DMA_READ : BUS_DMA_WRITE);

	WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);

	bi = kmem_alloc(sizeof(*bi), KM_SLEEP);
	bi->buf = buf;
	bi->size = size;
	bi->pagelist_size = sizeof(PAGELIST_T) +
	    (maxsegs * sizeof(unsigned long));
	bi->proc = curproc;

	ret = bus_dmamem_alloc(dma_tag, bi->pagelist_size,
	    0 /*CACHE_LINE_SIZE*/, 0, bi->pagelist_sgs,
	    __arraycount(bi->pagelist_sgs), &nsegs, BUS_DMA_WAITOK);

	if (ret != 0)
		goto fail1;

	ret = bus_dmamem_map(dma_tag, bi->pagelist_sgs, nsegs,
	    bi->pagelist_size, &bi->pagelist, BUS_DMA_COHERENT | BUS_DMA_WAITOK);
	if (ret != 0)
		goto fail2;

	pagelist = bi->pagelist;

	ret = bus_dmamap_create(dma_tag, bi->pagelist_size,
	    nsegs, bi->pagelist_size, 0, BUS_DMA_WAITOK, &bi->pagelist_map);
	if (ret != 0)
		goto fail3;

	ret = bus_dmamap_load(dma_tag, bi->pagelist_map, pagelist,
	    bi->pagelist_size, NULL, BUS_DMA_WAITOK | BUS_DMA_WRITE);
	if (ret != 0)
		goto fail4;

	/*
	 * Need to wire the buffer pages in.
	 */
	if (IS_USER_ADDRESS(buf)) {
		ret = uvm_vslock(bi->proc->p_vmspace, buf, size, uvmflags);
		if (ret != 0) {
			printf("%s: uvm_vslock failed (%d)\n", __func__, ret);
			goto fail5;
		}
	}

	ret = bus_dmamap_create(dma_tag, size, maxsegs, size, 0,
	    BUS_DMA_WAITOK, &bi->dmamap);

	if (ret != 0)
		goto fail6;

	ret = bus_dmamap_load(dma_tag, bi->dmamap, buf, size,
	    curproc, BUS_DMA_WAITOK | dmaflags);

	if (ret != 0)
		goto fail7;

	bulk->handle = memhandle;
	/*
	 * We've now got the bus_addr_t for the pagelist we want the transfer
	 * to use.
	 */
	bulk->data = (void *)bi->pagelist_map->dm_segs[0].ds_addr;

	pagelist->type = (dir == VCHIQ_BULK_RECEIVE) ?
	    PAGELIST_READ : PAGELIST_WRITE;
	pagelist->length = size;
	pagelist->offset = va & VCPAGE_OFFSET;

	/*
	 * busdma already coalesces contiguous pages for us
	 */
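	/*
	 * Each entry encodes a run of physically contiguous VideoCore
	 * pages: the page-aligned bus address in the upper bits and
	 * (page count - 1) in the low 12 bits, which are free because
	 * the address is 4KB-aligned.  A 3-page run at 0x1f000, for
	 * example, is stored as 0x1f002.
	 */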
	for (int i = 0; i < bi->dmamap->dm_nsegs; i++) {
		bus_addr_t addr = bi->dmamap->dm_segs[i].ds_addr;
		bus_size_t len = bi->dmamap->dm_segs[i].ds_len;
		bus_size_t off = addr & VCPAGE_OFFSET;
		int npgs = ((off + len + VCPAGE_OFFSET) >> VCPAGE_SHIFT);

		pagelist->addrs[i] = addr & ~VCPAGE_OFFSET;
		pagelist->addrs[i] |= npgs - 1;
	}

	/* Partial cache lines (fragments) require special measures */
	if ((pagelist->type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	    ((pagelist->offset + pagelist->length) & (g_cache_line_size - 1)))) {
		char *fragments;

		if (down_interruptible(&g_free_fragments_sema) != 0) {
			goto fail7;
		}

		WARN_ON(g_free_fragments == NULL);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(fragments == NULL);
		g_free_fragments = *(char **) g_free_fragments;
		up(&g_free_fragments_mutex);
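		/*
		 * Encode which fragment pair was taken as an offset from
		 * PAGELIST_READ_WITH_FRAGMENTS; vchiq_complete_bulk()
		 * recovers the index from pagelist->type.
		 */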
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
		    (fragments - g_fragments_base) / g_fragments_size;
		bus_dmamap_sync(dma_tag, dma_map,
		    (char *)fragments - g_fragments_base, g_fragments_size,
		    BUS_DMASYNC_PREREAD);
	}

	/*
	 * Store the BULKINFO_T address in remote_data, which isn't used by the
	 * slave.
	 */
	bulk->remote_data = bi;

	bus_dmamap_sync(dma_tag, bi->pagelist_map, 0,
	    bi->pagelist_size, BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(dma_tag, bi->dmamap, 0, bi->size,
	    pagelist->type == PAGELIST_WRITE ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return VCHIQ_SUCCESS;

fail7:
	bus_dmamap_destroy(dma_tag, bi->dmamap);

fail6:
	if (IS_USER_ADDRESS(bi->buf))
		uvm_vsunlock(curproc->p_vmspace, bi->buf, bi->size);

fail5:
	bus_dmamap_unload(dma_tag, bi->pagelist_map);

fail4:
	bus_dmamap_destroy(dma_tag, bi->pagelist_map);

fail3:
	bus_dmamem_unmap(dma_tag, bi->pagelist, bi->pagelist_size);

fail2:
	bus_dmamem_free(dma_tag, bi->pagelist_sgs,
	    __arraycount(bi->pagelist_sgs));

fail1:
	kmem_free(bi, sizeof(*bi));
	return VCHIQ_ERROR;
}

void
vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
{
	if (bulk && bulk->remote_data && bulk->actual) {
		int actual = bulk->actual;
		BULKINFO_T *bi = bulk->remote_data;
		PAGELIST_T *pagelist = bi->pagelist;

		vchiq_log_trace(vchiq_arm_log_level,
			"free_pagelist - %p, %d", pagelist, actual);

		bus_dmamap_sync(dma_tag, bi->pagelist_map, 0,
		    bi->pagelist_size, BUS_DMASYNC_POSTWRITE);

		bus_dmamap_sync(dma_tag, bi->dmamap, 0, bi->size,
		    pagelist->type == PAGELIST_WRITE ?
		    BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

		/* Deal with any partial cache lines (fragments) */
		if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
			char *fragments = g_fragments_base +
			    (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			    g_fragments_size;
			int head_bytes, tail_bytes;

			bus_dmamap_sync(dma_tag, dma_map,
			    (char *)fragments - g_fragments_base, g_fragments_size,
			    BUS_DMASYNC_POSTREAD);

			head_bytes = (g_cache_line_size - pagelist->offset) &
				(g_cache_line_size - 1);
			tail_bytes = (pagelist->offset + actual) &
				(g_cache_line_size - 1);

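			/*
			 * head_bytes covers the buffer up to the first
			 * cache-line boundary, tail_bytes the remainder
			 * after the last one; the VideoCore delivered
			 * those bytes via the fragment buffer (head in
			 * the first half, tail at g_cache_line_size)
			 * rather than by direct DMA.
			 */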
			if ((actual >= 0) && (head_bytes != 0)) {
				if (head_bytes > actual)
					head_bytes = actual;

				if (IS_USER_ADDRESS(bi->buf)) {
					copyout_proc(bi->proc, fragments,
					    bi->buf, head_bytes);
				} else {
					kcopy(fragments, bi->buf, head_bytes);
				}
			}
			if ((actual >= 0) && (head_bytes < actual) &&
			    (tail_bytes != 0)) {
				void *t = (char *)bi->buf + bi->size -
				    tail_bytes;

				if (IS_USER_ADDRESS(bi->buf)) {
					copyout_proc(bi->proc,
					    fragments + g_cache_line_size, t,
					    tail_bytes);
				} else {
					kcopy(fragments + g_cache_line_size, t,
					    tail_bytes);
				}
			}

			down(&g_free_fragments_mutex);
			*(char **)fragments = g_free_fragments;
			g_free_fragments = fragments;
			up(&g_free_fragments_mutex);
			up(&g_free_fragments_sema);
		}
		bus_dmamap_unload(dma_tag, bi->dmamap);
		bus_dmamap_destroy(dma_tag, bi->dmamap);
		if (IS_USER_ADDRESS(bi->buf))
			uvm_vsunlock(bi->proc->p_vmspace, bi->buf, bi->size);

		bus_dmamap_unload(dma_tag, bi->pagelist_map);
		bus_dmamap_destroy(dma_tag, bi->pagelist_map);
		bus_dmamem_unmap(dma_tag, bi->pagelist,
		    bi->pagelist_size);
		bus_dmamem_free(dma_tag, bi->pagelist_sgs,
		    __arraycount(bi->pagelist_sgs));
		kmem_free(bi, sizeof(*bi));
	}
}

void
vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
{
	/*
	 * This should only be called on the master (VideoCore) side, but
	 * provide an implementation to avoid the need for ifdefery.
	 */
	BUG();
}

void
vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		"  Platform: 2835 (VC master)");
	vchiq_dump(dump_context, buf, len + 1);
}

VCHIQ_STATUS_T
vchiq_platform_suspend(VCHIQ_STATE_T *state)
{
	return VCHIQ_ERROR;
}

VCHIQ_STATUS_T
vchiq_platform_resume(VCHIQ_STATE_T *state)
{
	return VCHIQ_SUCCESS;
}

void
vchiq_platform_paused(VCHIQ_STATE_T *state)
{
}

void
vchiq_platform_resumed(VCHIQ_STATE_T *state)
{
}

int
vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state)
{
	/* Autosuspend is not supported; the VideoCore is always wanted. */
	return 1;
}

int
vchiq_platform_use_suspend_timer(void)
{
	return 0;
}

void
vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
{
	vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
}

void
vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
{
	(void)state;
}