/*	$NetBSD: amdgpu_object.h,v 1.5 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
};

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	unsigned			prime_shared_count;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif

	struct list_head		shadow_list;

	struct kgd_mem			*kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
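
/*
 * Illustrative usage sketch (not part of the original header): the usual
 * pattern is reserve, touch reservation-protected state, unreserve; "bo"
 * is assumed to be a valid, referenced amdgpu BO.
 *
 *	int r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;	/+ -ERESTARTSYS if interrupted by a signal +/
 *	/+ ... access fields protected by the reservation ... +/
 *	amdgpu_bo_unreserve(bo);
 *
 * ("/+ +/" stands in for nested comment delimiters.)
 */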

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}
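
/*
 * Worked example (illustrative, not from the original source): with a
 * 4 KiB CPU PAGE_SIZE and a 4 KiB AMDGPU_GPU_PAGE_SIZE, a BO backed by
 * two CPU pages gives
 *
 *	amdgpu_bo_size(bo)       == 2 << 12   == 8192 bytes
 *	amdgpu_bo_ngpu_pages(bo) == 8192/4096 == 2
 *
 * On configurations where the CPU page size exceeds the GPU page size,
 * the GPU page count is correspondingly larger than tbo.num_pages.
 */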

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
		return false;

	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++)
		if (node->start < fpfn)
			return true;

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
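
/*
 * Illustrative sketch (not part of the original header): a kernel-owned,
 * pinned, CPU-mapped scratch BO created once and freed on the exit path;
 * "adev" is assumed to be an initialized amdgpu device.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	...	use cpu_ptr for CPU writes, gpu_addr in command buffers
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */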
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
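
/*
 * Illustrative kmap sketch (not part of the original header); the BO is
 * assumed to be reserved (and typically pinned) by the caller.
 *
 *	void *ptr;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (r)
 *		return r;
 *	memset(ptr, 0, amdgpu_bo_size(bo));
 *	amdgpu_bo_kunmap(bo);
 */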
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
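
/*
 * Illustrative pin/unpin sketch (not part of the original header).
 * Pinning is done with the BO reservation held and keeps the BO resident
 * in the given domain until the matching unpin.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (r == 0) {
 *		...	the BO cannot be evicted or moved here
 *		amdgpu_bo_unpin(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */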
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
#ifndef __NetBSD__
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
				struct vm_area_struct *vma);
#endif
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
			    uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return (char *)sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
				     struct amdgpu_sa_manager *sa_manager,
				     unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
				      struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
				      struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
			      struct amdgpu_sa_bo **sa_bo,
			      struct dma_fence *fence);
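
/*
 * Illustrative sub-allocation sketch (not part of the original header);
 * "sa_manager" is assumed to be an initialized, started manager and
 * "fence" a fence signalled when the GPU is done with the range.
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256, 16);
 *	if (r)
 *		return r;
 *	...	address the sub-range via amdgpu_sa_bo_gpu_addr(sa_bo)
 *	...	and amdgpu_sa_bo_cpu_addr(sa_bo), then submit work
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */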
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
					 struct seq_file *m);
#endif

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif