/*	$NetBSD: nouveau_nvkm_subdev_bar_nv50.c,v 1.3 2021/12/18 23:45:38 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_bar_nv50.c,v 1.3 2021/12/18 23:45:38 riastradh Exp $");

#include "nv50.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

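/*
 * Trigger a BAR flush by writing 0x00330c and poll, for up to two
 * seconds, for bit 1 to clear, which presumably signals completion.
 * The bar lock serializes concurrent flush requests.
 */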
static void
nv50_bar_flush(struct nvkm_bar *base)
{
	struct nv50_bar *bar = nv50_bar(base);
	struct nvkm_device *device = bar->base.subdev.device;
	unsigned long flags;
	spin_lock_irqsave(&bar->base.lock, flags);
	nvkm_wr32(device, 0x00330c, 0x00000001);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
			break;
	);
	spin_unlock_irqrestore(&bar->base.lock, flags);
}

struct nvkm_vmm *
nv50_bar_bar1_vmm(struct nvkm_bar *base)
{
	return nv50_bar(base)->bar1_vmm;
}

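/* Waiting on BAR1 is implemented as a full BAR flush on nv50. */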
void
nv50_bar_bar1_wait(struct nvkm_bar *base)
{
	nvkm_bar_flush(base);
}

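/* Clearing 0x001708 unbinds the BAR1 DMA object. */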
void
nv50_bar_bar1_fini(struct nvkm_bar *bar)
{
	nvkm_wr32(bar->subdev.device, 0x001708, 0x00000000);
}

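/*
 * Bind BAR1 to its DMA object: bit 31 marks the binding valid, and the
 * low bits carry the object's instance offset in 16-byte units.
 */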
void
nv50_bar_bar1_init(struct nvkm_bar *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct nv50_bar *bar = nv50_bar(base);
	nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
}

struct nvkm_vmm *
nv50_bar_bar2_vmm(struct nvkm_bar *base)
{
	return nv50_bar(base)->bar2_vmm;
}

void
nv50_bar_bar2_fini(struct nvkm_bar *bar)
{
	nvkm_wr32(bar->subdev.device, 0x00170c, 0x00000000);
}

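/*
 * Program 0x001704 with the instance block address in 4KiB units,
 * first without and then with what appears to be a valid bit
 * (0x40000000), then bind the BAR2 DMA object as done for BAR1.
 */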
void
nv50_bar_bar2_init(struct nvkm_bar *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct nv50_bar *bar = nv50_bar(base);
	nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
	nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
	nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar2->node->offset >> 4);
}

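/* Zero the eight registers at 0x001900-0x00191c on (re)init. */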
void
nv50_bar_init(struct nvkm_bar *base)
{
	struct nv50_bar *bar = nv50_bar(base);
	struct nvkm_device *device = bar->base.subdev.device;
	int i;

	for (i = 0; i < 8; i++)
		nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
}

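/*
 * One-time setup: allocate the instance block, padding, and page
 * directory, then build a VMM and a DMA object for each PCI BAR.
 * BAR2 is brought up first, presumably so that the instance-memory
 * writes for the BAR1 objects below can go through it.
 */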
int
nv50_bar_oneinit(struct nvkm_bar *base)
{
	struct nv50_bar *bar = nv50_bar(base);
	struct nvkm_device *device = bar->base.subdev.device;
	static struct lock_class_key bar1_lock;
	static struct lock_class_key bar2_lock;
	u64 start, limit, size;
	int ret;

	ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, bar->pgd_addr, 0, false, bar->mem,
			      &bar->pad);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
	if (ret)
		return ret;

	/* BAR2 */
	start = 0x0100000000ULL;
	size = device->func->resource_size(device, 3);
	if (!size)
		return -ENOMEM;
	limit = start + size;

	/* The VMM spans [start, start + size); the post-decrement leaves
	 * "limit" as the inclusive end used by the DMA object below. */
	ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
			   &bar2_lock, "bar2", &bar->bar2_vmm);
	if (ret)
		return ret;

	atomic_inc(&bar->bar2_vmm->engref[NVKM_SUBDEV_BAR]);
	bar->bar2_vmm->debug = bar->base.subdev.debug;

	ret = nvkm_vmm_boot(bar->bar2_vmm);
	if (ret)
		return ret;

	ret = nvkm_vmm_join(bar->bar2_vmm, bar->mem->memory);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar2);
	if (ret)
		return ret;

	/* Six-word DMA object: flags/class word, low limit, low base, the
	 * high bits of limit and base packed together, two zero words. */
	nvkm_kmap(bar->bar2);
	nvkm_wo32(bar->bar2, 0x00, 0x7fc00000);
	nvkm_wo32(bar->bar2, 0x04, lower_32_bits(limit));
	nvkm_wo32(bar->bar2, 0x08, lower_32_bits(start));
	nvkm_wo32(bar->bar2, 0x0c, upper_32_bits(limit) << 24 |
				   upper_32_bits(start));
	nvkm_wo32(bar->bar2, 0x10, 0x00000000);
	nvkm_wo32(bar->bar2, 0x14, 0x00000000);
	nvkm_done(bar->bar2);

	bar->base.subdev.oneinit = true;
	nvkm_bar_bar2_init(device);

	/* BAR1 */
	start = 0x0000000000ULL;
	size = device->func->resource_size(device, 1);
	if (!size)
		return -ENOMEM;
	limit = start + size;

	ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
			   &bar1_lock, "bar1", &bar->bar1_vmm);
	if (ret)
		return ret;

	atomic_inc(&bar->bar1_vmm->engref[NVKM_SUBDEV_BAR]);
	bar->bar1_vmm->debug = bar->base.subdev.debug;

	ret = nvkm_vmm_join(bar->bar1_vmm, bar->mem->memory);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
	if (ret)
		return ret;

	nvkm_kmap(bar->bar1);
	nvkm_wo32(bar->bar1, 0x00, 0x7fc00000);
	nvkm_wo32(bar->bar1, 0x04, lower_32_bits(limit));
	nvkm_wo32(bar->bar1, 0x08, lower_32_bits(start));
	nvkm_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
				   upper_32_bits(start));
	nvkm_wo32(bar->bar1, 0x10, 0x00000000);
	nvkm_wo32(bar->bar1, 0x14, 0x00000000);
	nvkm_done(bar->bar1);
	return 0;
}

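/*
 * Destructor: tear down the oneinit allocations in reverse order.
 * bar->mem being non-NULL indicates oneinit got far enough to need
 * any cleanup at all.
 */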
void *
nv50_bar_dtor(struct nvkm_bar *base)
{
	struct nv50_bar *bar = nv50_bar(base);
	if (bar->mem) {
		nvkm_gpuobj_del(&bar->bar1);
		nvkm_vmm_part(bar->bar1_vmm, bar->mem->memory);
		nvkm_vmm_unref(&bar->bar1_vmm);
		nvkm_gpuobj_del(&bar->bar2);
		nvkm_vmm_part(bar->bar2_vmm, bar->mem->memory);
		nvkm_vmm_unref(&bar->bar2_vmm);
		nvkm_gpuobj_del(&bar->pgd);
		nvkm_gpuobj_del(&bar->pad);
		nvkm_gpuobj_del(&bar->mem);
	}
	return bar;
}

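/*
 * Common constructor: allocate the nv50_bar wrapper and record the
 * chipset-specific padding size, which by all appearances fixes the
 * page directory's offset within the instance block in oneinit.
 */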
int
nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
	      int index, u32 pgd_addr, struct nvkm_bar **pbar)
{
	struct nv50_bar *bar;
	if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_bar_ctor(func, device, index, &bar->base);
	bar->pgd_addr = pgd_addr;
	*pbar = &bar->base;
	return 0;
}

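/*
 * nv50 hook table.  Note that bar2.wait reuses nv50_bar_bar1_wait;
 * both waits reduce to the same global flush.
 */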
static const struct nvkm_bar_func
nv50_bar_func = {
	.dtor = nv50_bar_dtor,
	.oneinit = nv50_bar_oneinit,
	.init = nv50_bar_init,
	.bar1.init = nv50_bar_bar1_init,
	.bar1.fini = nv50_bar_bar1_fini,
	.bar1.wait = nv50_bar_bar1_wait,
	.bar1.vmm = nv50_bar_bar1_vmm,
	.bar2.init = nv50_bar_bar2_init,
	.bar2.fini = nv50_bar_bar2_fini,
	.bar2.wait = nv50_bar_bar1_wait,
	.bar2.vmm = nv50_bar_bar2_vmm,
	.flush = nv50_bar_flush,
};

int
nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
{
	return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
}