/*	$NetBSD: mmu.h,v 1.4 2021/12/19 10:51:56 riastradh Exp $	*/

/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>

struct nvkm_vma {
	struct list_head head; /* Node in the VMM's address-ordered VMA list. */
	struct rb_node tree; /* Node in the VMM's free/allocated region trees. */
	u64 addr; /* Region base address. */
	u64 size:50; /* Region size, in bytes. */
	bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
	bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
	u8   page:3; /* Requested page type (index, or NONE for automatic). */
	u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
	bool used:1; /* Region allocated. */
	bool part:1; /* Region was split from an allocated region by map(). */
	bool user:1; /* Region user-allocated. */
	bool busy:1; /* Region busy (for temporarily preventing user access). */
	bool mapped:1; /* Region contains valid pages. */
	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
	struct nvkm_tags *tags; /* Compression tag reference. */
};
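
/*
 * A small illustrative helper (not part of the original header): per the
 * field comments above, NVKM_VMA_PAGE_NONE in "page" means the page size
 * is chosen automatically at map time rather than being requested by the
 * caller.
 */
static inline bool
nvkm_vma_example_page_auto(const struct nvkm_vma *vma)
{
	return vma->page == NVKM_VMA_PAGE_NONE;
}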

struct nvkm_vmm {
	const struct nvkm_vmm_func *func;
	struct nvkm_mmu *mmu; /* MMU subdev this VMM belongs to. */
	const char *name;
	u32 debug; /* Debug verbosity level. */
	struct kref kref; /* Reference count. */
	struct mutex mutex; /* Serialises VMA allocation and mapping. */

	u64 start; /* Lowest address managed by this VMM. */
	u64 limit; /* One past the highest address managed by this VMM. */

	struct nvkm_vmm_pt *pd; /* Top-level page directory. */
	struct list_head join; /* Instance blocks joined to this VMM. */

	struct list_head list; /* All VMAs, in address order. */
#ifdef __NetBSD__
	struct rb_tree free; /* Free regions, for allocation. */
	struct rb_tree root; /* Allocated regions, for lookup by address. */
#else
	struct rb_root free;
	struct rb_root root;
#endif

	bool bootstrapped; /* Page tables pre-allocated by nvkm_vmm_boot(). */
	atomic_t engref[NVKM_SUBDEV_NR]; /* Per-engine usage references. */

#ifdef __NetBSD__
	bus_dma_segment_t nullseg;
	bus_dmamap_t nullmap;
#endif
	dma_addr_t null; /* DMA address of the scratch ("null") page. */
	void *nullp; /* Kernel mapping of the scratch page. */

	bool replay; /* MMU supports fault replay. */
};

int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
		 struct lock_class_key *, const char *name, struct nvkm_vmm **);
struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
void nvkm_vmm_unref(struct nvkm_vmm **);
int nvkm_vmm_boot(struct nvkm_vmm *);
int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
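
/*
 * A minimal usage sketch (not part of the original header), assuming a
 * valid, already-referenced VMM: allocate an address-space region with
 * nvkm_vmm_get() and release it with nvkm_vmm_put().  The page index
 * and size below are illustrative only.
 */
static inline int
nvkm_vmm_example_get_put(struct nvkm_vmm *vmm)
{
	struct nvkm_vma *vma = NULL;
	int ret;

	/* Request a 1 MiB region; page index 0 selects a page type. */
	ret = nvkm_vmm_get(vmm, 0, 0x100000, &vma);
	if (ret)
		return ret;

	/* ... back the region via nvkm_vmm_map(), use it, then ... */

	nvkm_vmm_put(vmm, &vma); /* Frees the region; sets vma to NULL. */
	return 0;
}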

struct nvkm_vmm_map {
	struct nvkm_memory *memory; /* Memory object being mapped. */
	u64 offset; /* Offset into the memory object. */

	/* One of the following describes the backing pages. */
	struct nvkm_mm_node *mem;
#ifndef __NetBSD__
	struct scatterlist *sgl;
#endif
	dma_addr_t *dma;
	u64 *pfn;
	u64 off; /* Offset state while walking the backing pages. */

	const struct nvkm_vmm_page *page; /* Page size/table description used. */

	struct nvkm_tags *tags; /* Compression tag reference. */
	u64 next; /* Per-PTE address step. */
	u64 type; /* PTE type/attribute bits. */
	u64 ctag; /* Per-PTE compression-tag increment. */
};

int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
		 struct nvkm_vmm_map *);
void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
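
/*
 * A hedged sketch (not in the original header) of mapping a memory
 * object into a previously allocated VMA.  Callers describe the source
 * in struct nvkm_vmm_map; argv/argc carry GPU-specific map arguments
 * and are passed as NULL/0 here purely for illustration.
 */
static inline int
nvkm_vmm_example_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
		     struct nvkm_memory *memory)
{
	struct nvkm_vmm_map map = {
		.memory = memory, /* Map the whole object... */
		.offset = 0,      /* ...starting at its first byte. */
	};

	return nvkm_vmm_map(vmm, vma, NULL, 0, &map);
}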

/* Look up client-owned memory/VMM objects by object handle. */
struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);

struct nvkm_mmu {
	const struct nvkm_mmu_func *func;
	struct nvkm_subdev subdev;

	u8  dma_bits; /* Width of supported DMA addresses, in bits. */

	int heap_nr; /* Number of valid entries in heap[]. */
	struct {
#define NVKM_MEM_VRAM                                                      0x01
#define NVKM_MEM_HOST                                                      0x02
#define NVKM_MEM_COMP                                                      0x04
#define NVKM_MEM_DISP                                                      0x08
		u8  type; /* NVKM_MEM_* location/capability flags. */
		u64 size; /* Heap size, in bytes. */
	} heap[4];

	int type_nr; /* Number of valid entries in type[]. */
	struct {
#define NVKM_MEM_KIND                                                      0x10
#define NVKM_MEM_MAPPABLE                                                  0x20
#define NVKM_MEM_COHERENT                                                  0x40
#define NVKM_MEM_UNCACHED                                                  0x80
		u8 type; /* NVKM_MEM_* flags for this memory type. */
		u8 heap; /* Index into heap[] this type allocates from. */
	} type[16];

	struct nvkm_vmm *vmm; /* Device-global VMM, when the MMU provides one. */

	struct {
		struct mutex mutex;
		struct list_head list;
	} ptc, ptp; /* Page-table cache and sub-page page-table pages. */

	struct nvkm_device_oclass user; /* Userspace MMU object class. */
};
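
/*
 * Hedged helper sketch (not in the original header): test whether a
 * memory type index advertises CPU mappability, using the type[] table
 * and the NVKM_MEM_MAPPABLE flag defined above.
 */
static inline bool
nvkm_mmu_example_type_mappable(const struct nvkm_mmu *mmu, int type)
{
	if (type < 0 || type >= mmu->type_nr) /* Outside populated range. */
		return false;
	return (mmu->type[type].type & NVKM_MEM_MAPPABLE) != 0;
}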

/* Per-generation MMU constructors. */
int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int tu102_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
#endif