/* $NetBSD: nouveau_nvkm_subdev_instmem_nv50.c,v 1.7 2021/12/19 10:51:58 riastradh Exp $ */

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_instmem_nv50.c,v 1.7 2021/12/19 10:51:58 riastradh Exp $");

#define nv50_instmem(p) container_of((p), struct nv50_instmem, base)
#include "priv.h"

#include <core/memory.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>

#ifdef __NetBSD__
# define __iomem __nvkm_memory_iomem
#endif

struct nv50_instmem {
	struct nvkm_instmem base;
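	/* Base address of the PRAMIN window last selected via 0x001700,
	 * cached to avoid redundant register writes on the slow path.
	 */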
	u64 addr;

	/* Mappings that can be evicted when BAR2 space has been exhausted. */
	struct list_head lru;
};

/******************************************************************************
 * instmem object implementation
 *****************************************************************************/
#define nv50_instobj(p) container_of((p), struct nv50_instobj, base.memory)

struct nv50_instobj {
	struct nvkm_instobj base;
	struct nv50_instmem *imem;
	struct nvkm_memory *ram;
	struct nvkm_vma *bar;
#ifdef __NetBSD__
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
#endif
	refcount_t maps;
	void *map;
	struct list_head lru;
};

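/* Slow-path accessors: with no BAR2 mapping available, VRAM is reached
 * through the legacy 1MiB PRAMIN aperture at 0x700000.  The aperture is
 * repositioned by writing the window base (the target address with its
 * low 20 bits cleared, shifted right by 16) to 0x001700, so accesses
 * are serialised under the instmem lock.
 */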
static void
nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nv50_instmem *imem = iobj->imem;
	struct nvkm_device *device = imem->base.subdev.device;
	u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL;
	u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL;
	unsigned long flags;

	spin_lock_irqsave(&imem->base.lock, flags);
	if (unlikely(imem->addr != base)) {
		nvkm_wr32(device, 0x001700, base >> 16);
		imem->addr = base;
	}
	nvkm_wr32(device, 0x700000 + addr, data);
	spin_unlock_irqrestore(&imem->base.lock, flags);
}

static u32
nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nv50_instmem *imem = iobj->imem;
	struct nvkm_device *device = imem->base.subdev.device;
	u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL;
	u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL;
	u32 data;
	unsigned long flags;

	spin_lock_irqsave(&imem->base.lock, flags);
	if (unlikely(imem->addr != base)) {
		nvkm_wr32(device, 0x001700, base >> 16);
		imem->addr = base;
	}
	data = nvkm_rd32(device, 0x700000 + addr);
	spin_unlock_irqrestore(&imem->base.lock, flags);
	return data;
}

static const struct nvkm_memory_ptrs
nv50_instobj_slow = {
	.rd32 = nv50_instobj_rd32_slow,
	.wr32 = nv50_instobj_wr32_slow,
};

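/* Fast-path accessors: the object has a live BAR2 mapping, so reads and
 * writes go straight through the CPU mapping set up by nv50_instobj_kmap().
 */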
static void
nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
#ifdef __NetBSD__
	struct nv50_instobj *iobj = nv50_instobj(memory);
	bus_space_write_stream_4(iobj->bst, iobj->bsh, offset, data);
#else
	iowrite32_native(data, nv50_instobj(memory)->map + offset);
#endif
}

static u32
nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
#ifdef __NetBSD__
	struct nv50_instobj *iobj = nv50_instobj(memory);
	return bus_space_read_stream_4(iobj->bst, iobj->bsh, offset);
#else
	return ioread32_native(nv50_instobj(memory)->map + offset);
#endif
}

static const struct nvkm_memory_ptrs
nv50_instobj_fast = {
	.rd32 = nv50_instobj_rd32,
	.wr32 = nv50_instobj_wr32,
};

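/* Map an object into BAR2 and set up a CPU mapping of it, evicting idle
 * mappings from the LRU if BAR2 address-space is exhausted.  Called with
 * the subdev mutex held; the mutex is dropped and retaken internally.
 */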
static void
nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
{
	struct nv50_instmem *imem = iobj->imem;
	struct nv50_instobj *eobj;
	struct nvkm_memory *memory = &iobj->base.memory;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_vma *bar = NULL, *ebar;
	u64 size = nvkm_memory_size(memory);
	void *emap;
#ifdef __NetBSD__
	bus_space_tag_t ebst;
	bus_space_handle_t ebsh;
	bus_size_t esize;
#endif
	int ret;

	/* Attempt to allocate BAR2 address-space and map the object
	 * into it. The lock has to be dropped while doing this due
	 * to the possibility of recursion for page table allocation.
	 */
	mutex_unlock(&subdev->mutex);
	while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
		/* Evict unused mappings, and keep retrying until we either
		 * succeed, or there are no more objects left on the LRU.
		 */
		mutex_lock(&subdev->mutex);
		eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
		if (eobj) {
			nvkm_debug(subdev, "evict %016"PRIx64" %016"PRIx64" @ %016"PRIx64"\n",
				   nvkm_memory_addr(&eobj->base.memory),
				   nvkm_memory_size(&eobj->base.memory),
				   eobj->bar->addr);
			list_del_init(&eobj->lru);
			ebar = eobj->bar;
			eobj->bar = NULL;
			emap = eobj->map;
			eobj->map = NULL;
#ifdef __NetBSD__
			ebst = eobj->bst;
			ebsh = eobj->bsh;
			esize = nvkm_memory_size(&eobj->base.memory);
#endif
		}
		mutex_unlock(&subdev->mutex);
		if (!eobj)
			break;
#ifdef __NetBSD__
		__USE(emap);
		bus_space_unmap(ebst, ebsh, esize);
#else
		iounmap(emap);
#endif
		nvkm_vmm_put(vmm, &ebar);
	}

	if (ret == 0)
		ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
	mutex_lock(&subdev->mutex);
	if (ret || iobj->bar) {
		/* We either failed, or another thread beat us. */
		mutex_unlock(&subdev->mutex);
		nvkm_vmm_put(vmm, &bar);
		mutex_lock(&subdev->mutex);
		return;
	}

	/* Make the mapping visible to the host. */
	iobj->bar = bar;
#ifdef __NetBSD__
	iobj->bst = device->func->resource_tag(device, 3);
	if (bus_space_map(iobj->bst,
	    device->func->resource_addr(device, 3) + (u32)iobj->bar->addr,
	    size, BUS_SPACE_MAP_PREFETCHABLE|BUS_SPACE_MAP_LINEAR,
	    &iobj->bsh)) {
		nvkm_warn(subdev, "PRAMIN ioremap failed\n");
		nvkm_vmm_put(vmm, &iobj->bar);
		/* Don't touch the invalid handle; leave iobj->map NULL so
		 * callers fall back to the slow path.
		 */
		iobj->map = NULL;
	} else {
		iobj->map = bus_space_vaddr(iobj->bst, iobj->bsh);
	}
#else
	iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
			       (u32)iobj->bar->addr, size);
	if (!iobj->map) {
		nvkm_warn(subdev, "PRAMIN ioremap failed\n");
		nvkm_vmm_put(vmm, &iobj->bar);
	}
#endif
}

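/* Mapping into a client VMM is delegated to the backing VRAM allocation. */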
static int
nv50_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
		 struct nvkm_vma *vma, void *argv, u32 argc)
{
	memory = nv50_instobj(memory)->ram;
	return nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
}

static void
nv50_instobj_release(struct nvkm_memory *memory)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nv50_instmem *imem = iobj->imem;
	struct nvkm_subdev *subdev = &imem->base.subdev;

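	/* Make sure CPU writes through the BAR2 mapping have reached VRAM
	 * before the GPU is allowed to consume the object.
	 */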
	wmb();
	nvkm_bar_flush(subdev->device->bar);

	if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) {
		/* Add the now-unused mapping to the LRU instead of directly
		 * unmapping it here, in case we need to map it again later.
		 */
		if (likely(iobj->lru.next) && iobj->map) {
			BUG_ON(!list_empty(&iobj->lru));
			list_add_tail(&iobj->lru, &imem->lru);
		}

		/* Switch back to NULL accessors when last map is gone. */
		iobj->base.memory.ptrs = NULL;
		mutex_unlock(&subdev->mutex);
	}
}

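/* Return a CPU pointer for direct access to the object, mapping it into
 * BAR2 on first use and selecting the fast or slow accessors accordingly.
 * Returns NULL when only the slow PRAMIN-window path is available.
 */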
static void __iomem *
nv50_instobj_acquire(struct nvkm_memory *memory)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nvkm_instmem *imem = &iobj->imem->base;
	struct nvkm_vmm *vmm;
	void __iomem *map = NULL;

	/* Already mapped? */
	if (refcount_inc_not_zero(&iobj->maps))
		return iobj->map;

	/* Take the lock, and re-check that another thread hasn't
	 * already mapped the object in the meantime.
	 */
	mutex_lock(&imem->subdev.mutex);
	if (refcount_inc_not_zero(&iobj->maps)) {
		mutex_unlock(&imem->subdev.mutex);
		return iobj->map;
	}

	/* Attempt to get a direct CPU mapping of the object. */
	if ((vmm = nvkm_bar_bar2_vmm(imem->subdev.device))) {
		if (!iobj->map)
			nv50_instobj_kmap(iobj, vmm);
		map = iobj->map;
	}

	if (!refcount_inc_not_zero(&iobj->maps)) {
		/* Exclude object from eviction while it's being accessed. */
		if (likely(iobj->lru.next))
			list_del_init(&iobj->lru);

		if (map)
			iobj->base.memory.ptrs = &nv50_instobj_fast;
		else
			iobj->base.memory.ptrs = &nv50_instobj_slow;
		refcount_set(&iobj->maps, 1);
	}

	mutex_unlock(&imem->subdev.mutex);
	return map;
}

static void
nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nvkm_instmem *imem = &iobj->imem->base;

	/* Exclude bootstrapped objects (ie. the page tables for the
	 * instmem BAR itself) from eviction.
	 */
	mutex_lock(&imem->subdev.mutex);
	if (likely(iobj->lru.next)) {
		list_del_init(&iobj->lru);
		iobj->lru.next = NULL;
	}

	nv50_instobj_kmap(iobj, vmm);
	nvkm_instmem_boot(imem);
	mutex_unlock(&imem->subdev.mutex);
}

static u64
nv50_instobj_size(struct nvkm_memory *memory)
{
	return nvkm_memory_size(nv50_instobj(memory)->ram);
}

static u64
nv50_instobj_addr(struct nvkm_memory *memory)
{
	return nvkm_memory_addr(nv50_instobj(memory)->ram);
}

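/* Report the object's BAR2 offset, forcing a mapping to exist and pinning
 * it there permanently; ~0ULL is returned if no mapping could be made.
 */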
static u64
nv50_instobj_bar2(struct nvkm_memory *memory)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	u64 addr = ~0ULL;
	if (nv50_instobj_acquire(&iobj->base.memory)) {
		iobj->lru.next = NULL; /* Exclude from eviction. */
		addr = iobj->bar->addr;
	}
	nv50_instobj_release(&iobj->base.memory);
	return addr;
}

static enum nvkm_memory_target
nv50_instobj_target(struct nvkm_memory *memory)
{
	return nvkm_memory_target(nv50_instobj(memory)->ram);
}

static void *
nv50_instobj_dtor(struct nvkm_memory *memory)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nvkm_instmem *imem = &iobj->imem->base;
	struct nvkm_vma *bar;
	void *map;

	mutex_lock(&imem->subdev.mutex);
	if (likely(iobj->lru.next))
		list_del(&iobj->lru);
	map = iobj->map;
	bar = iobj->bar;
	mutex_unlock(&imem->subdev.mutex);

	if (map) {
		struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
#ifdef __NetBSD__
		bus_space_unmap(iobj->bst, iobj->bsh,
		    nvkm_memory_size(&iobj->base.memory));
		iobj->map = NULL;
#else
		iounmap(map);
#endif
		if (likely(vmm)) /* Can be NULL during BAR destructor. */
			nvkm_vmm_put(vmm, &bar);
	}

	nvkm_memory_unref(&iobj->ram);
	nvkm_instobj_dtor(imem, &iobj->base);
	return iobj;
}

static const struct nvkm_memory_func
nv50_instobj_func = {
	.dtor = nv50_instobj_dtor,
	.target = nv50_instobj_target,
	.bar2 = nv50_instobj_bar2,
	.addr = nv50_instobj_addr,
	.size = nv50_instobj_size,
	.boot = nv50_instobj_boot,
	.acquire = nv50_instobj_acquire,
	.release = nv50_instobj_release,
	.map = nv50_instobj_map,
};

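/* Allocate a new instance object backed by VRAM.  The page shift is taken
 * from the requested alignment, with a 4KiB (1 << 12) minimum.
 */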
static int
nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		 struct nvkm_memory **pmemory)
{
	struct nv50_instmem *imem = nv50_instmem(base);
	struct nv50_instobj *iobj;
	struct nvkm_device *device = imem->base.subdev.device;
	u8 page = max(order_base_2(align), 12);

	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
		return -ENOMEM;
	*pmemory = &iobj->base.memory;

	nvkm_instobj_ctor(&nv50_instobj_func, &imem->base, &iobj->base);
	iobj->imem = imem;
	refcount_set(&iobj->maps, 0);
	INIT_LIST_HEAD(&iobj->lru);

	return nvkm_ram_get(device, 0, 1, page, size, true, true, &iobj->ram);
}

/******************************************************************************
 * instmem subdev implementation
 *****************************************************************************/

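/* Invalidate the cached PRAMIN window base when the subdev is shut down,
 * so the first slow-path access afterwards reprograms 0x001700.
 */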
static void
nv50_instmem_fini(struct nvkm_instmem *base)
{
	nv50_instmem(base)->addr = ~0ULL;
}

static const struct nvkm_instmem_func
nv50_instmem = {
	.fini = nv50_instmem_fini,
	.memory_new = nv50_instobj_new,
	.zero = false,
};

int
nv50_instmem_new(struct nvkm_device *device, int index,
		 struct nvkm_instmem **pimem)
{
	struct nv50_instmem *imem;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
	INIT_LIST_HEAD(&imem->lru);
	*pimem = &imem->base;
	return 0;
}