/*	$NetBSD: nouveau_nvkm_engine_fifo_chan.c,v 1.10 2021/12/18 23:45:35 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_fifo_chan.c,v 1.10 2021/12/18 23:45:35 riastradh Exp $");

#include "chan.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

struct nvkm_fifo_chan_object {
	struct nvkm_oproxy oproxy;
	struct nvkm_fifo_chan *chan;
	int hash;
};

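/*
 * Suspend a channel's context on an engine when the last object using
 * that engine is finalized: ask the channel implementation to detach
 * the engine, then finalize the engine context object.
 */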
static int
nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
	const char *name = nvkm_subdev_name[engine->subdev.index];
	int ret = 0;

	if (--engn->usecount)
		return 0;

	if (chan->func->engine_fini) {
		ret = chan->func->engine_fini(chan, engine, suspend);
		if (ret) {
			nvif_error(&chan->object,
				   "detach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	if (engn->object) {
		ret = nvkm_object_fini(engn->object, suspend);
		if (ret && suspend)
			return ret;
	}

	nvif_trace(&chan->object, "detached %s\n", name);
	return ret;
}

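/*
 * Bring up a channel's context on an engine when the first object
 * using that engine is initialized: initialize the engine context
 * object, then ask the channel implementation to attach the engine.
 */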
static int
nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
	const char *name = nvkm_subdev_name[engine->subdev.index];
	int ret;

	if (engn->usecount++)
		return 0;

	if (engn->object) {
		ret = nvkm_object_init(engn->object);
		if (ret)
			return ret;
	}

	if (chan->func->engine_init) {
		ret = chan->func->engine_init(chan, engine);
		if (ret) {
			nvif_error(&chan->object,
				   "attach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	nvif_trace(&chan->object, "attached %s\n", name);
	return 0;
}

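/*
 * Destroy a child object: remove it from the channel's object hash,
 * and when the last reference to the engine context drops, destroy
 * the context and release the VMM's engine reference.
 */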
static void
nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.base.engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];

	if (chan->func->object_dtor)
		chan->func->object_dtor(chan, object->hash);

	if (!--engn->refcount) {
		if (chan->func->engine_dtor)
			chan->func->engine_dtor(chan, engine);
		nvkm_object_del(&engn->object);
		if (chan->vmm)
			atomic_dec(&chan->vmm->engref[engine->subdev.index]);
	}
}

static const struct nvkm_oproxy_func
nvkm_fifo_chan_child_func = {
	.dtor[0] = nvkm_fifo_chan_child_del,
	.init[0] = nvkm_fifo_chan_child_init,
	.fini[0] = nvkm_fifo_chan_child_fini,
};

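/*
 * Create a child object on one of the channel's engines, allocating
 * the per-engine context on first use and wrapping the object in an
 * oproxy so init/fini/dtor can maintain the engine bindings above.
 */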
static int
nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
			 struct nvkm_object **pobject)
{
	struct nvkm_engine *engine = oclass->engine;
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
	struct nvkm_fifo_chan_object *object;
	int ret = 0;

	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
	object->chan = chan;
	*pobject = &object->oproxy.base;

	if (!engn->refcount++) {
		struct nvkm_oclass cclass = {
			.client = oclass->client,
			.engine = oclass->engine,
		};

		if (chan->vmm)
			atomic_inc(&chan->vmm->engref[engine->subdev.index]);

		if (engine->func->fifo.cclass) {
			ret = engine->func->fifo.cclass(chan, &cclass,
							&engn->object);
		} else
		if (engine->func->cclass) {
			ret = nvkm_object_new_(engine->func->cclass, &cclass,
					       NULL, 0, &engn->object);
		}
		if (ret)
			return ret;

		if (chan->func->engine_ctor) {
			ret = chan->func->engine_ctor(chan, oclass->engine,
						      engn->object);
			if (ret)
				return ret;
		}
	}

	ret = oclass->base.ctor(&(const struct nvkm_oclass) {
					.base = oclass->base,
					.engn = oclass->engn,
					.handle = oclass->handle,
					.object = oclass->object,
					.client = oclass->client,
					.parent = engn->object ?
						  engn->object :
						  oclass->parent,
					.engine = engine,
				}, data, size, &object->oproxy.object);
	if (ret)
		return ret;

	if (chan->func->object_ctor) {
		object->hash =
			chan->func->object_ctor(chan, object->oproxy.object);
		if (object->hash < 0)
			return object->hash;
	}

	return 0;
}

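/*
 * Enumerate the object classes available on this channel by walking
 * the engines in chan->engines and counting off each engine's sclass
 * entries until the requested index is reached.
 */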
static int
nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
			 struct nvkm_oclass *oclass)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_engine *engine;
	u64 mask = chan->engines;
	int ret, i, c;

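	/* Visit each set bit (engine index) in the mask, lowest first. */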
	for (; c = 0, mask && (i = __ffs64(mask), 1); mask &= ~(1ULL << i)) {
		if (!(engine = nvkm_device_engine(device, i)))
			continue;
		oclass->engine = engine;
		oclass->base.oclass = 0;

		if (engine->func->fifo.sclass) {
			ret = engine->func->fifo.sclass(oclass, index);
			if (oclass->base.oclass) {
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_fifo_chan_child_new;
				return 0;
			}

			index -= ret;
			continue;
		}

		while (engine->func->sclass[c].oclass) {
			if (c++ == index) {
				oclass->base = engine->func->sclass[index];
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_fifo_chan_child_new;
				return 0;
			}
		}
		index -= c;
	}

	return -EINVAL;
}

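/* Look up a channel event source by type, if the implementation has one. */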
static int
nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
		    struct nvkm_event **pevent)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (chan->func->ntfy)
		return chan->func->ntfy(chan, type, pevent);
	return -ENODEV;
}

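/*
 * Describe the channel's user-register window for mapping: chan->size
 * bytes of I/O memory at chan->addr (plus the bus space tag on NetBSD).
 */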
static int
#ifdef __NetBSD__
nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
    enum nvkm_object_map *type, bus_space_tag_t *tagp,
    u64 *addr, u64 *size)
#else
nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
		   enum nvkm_object_map *type, u64 *addr, u64 *size)
#endif
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
#ifdef __NetBSD__
	/* XXX Uh oh.  Can't map this more than once.  OK?  */
	*tagp = chan->bst;
#endif
	*type = NVKM_OBJECT_MAP_IO;
	*addr = chan->addr;
	*size = chan->size;
	return 0;
}

#ifdef __NetBSD__
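/*
 * Map the channel's user registers on first use, mirroring the lazy
 * ioremap in the Linux rd32/wr32 paths below.
 */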
static int
nvkm_fifo_chan_ensure_mapped(struct nvkm_fifo_chan *chan)
{
	int ret;

	if (likely(chan->mapped))
		goto out;

	/* XXX errno NetBSD->Linux */
	ret = -bus_space_map(chan->bst, chan->addr, chan->size, 0,
	    &chan->bsh);
	if (ret)
		return ret;
	chan->mapped = true;

out:	KASSERT(chan->mapped);
	return 0;
}
#endif

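/* Read a 32-bit user register at byte offset addr, mapping lazily. */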
static int
nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
#ifdef __NetBSD__
	int ret = nvkm_fifo_chan_ensure_mapped(chan);
	if (ret)
		return ret;
#else
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (!chan->user)
			return -ENOMEM;
	}
#endif
	if (unlikely(addr + 4 > chan->size))
		return -EINVAL;
#ifdef __NetBSD__
	*data = bus_space_read_stream_4(chan->bst, chan->bsh, addr);
#else
	*data = ioread32_native(chan->user + addr);
#endif
	return 0;
}

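/* Write a 32-bit user register at byte offset addr, mapping lazily. */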
static int
nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
#ifdef __NetBSD__
	int ret = nvkm_fifo_chan_ensure_mapped(chan);
	if (ret)
		return ret;
#else
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (!chan->user)
			return -ENOMEM;
	}
#endif
	if (unlikely(addr + 4 > chan->size))
		return -EINVAL;
#ifdef __NetBSD__
	bus_space_write_stream_4(chan->bst, chan->bsh, addr, data);
#else
	iowrite32_native(data, chan->user + addr);
#endif
	return 0;
}

static int
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->fini(chan);
	return 0;
}

static int
nvkm_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->init(chan);
	return 0;
}

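/*
 * Destroy the channel: release its channel id, unmap the user
 * registers, detach and release the VMM, and free the push buffer
 * ctxdma and instance memory.
 */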
static void *
nvkm_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	void *data = chan->func->dtor(chan);
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	if (!list_empty(&chan->head)) {
		__clear_bit(chan->chid, fifo->mask);
		list_del(&chan->head);
	}
	spin_unlock_irqrestore(&fifo->lock, flags);

#ifdef __NetBSD__
	if (!chan->subregion && chan->mapped) {
		bus_space_unmap(chan->bst, chan->bsh, chan->size);
		chan->mapped = false;
	}
#else
	if (chan->user)
		iounmap(chan->user);
#endif

	if (chan->vmm) {
		nvkm_vmm_part(chan->vmm, chan->inst->memory);
		nvkm_vmm_unref(&chan->vmm);
	}

	nvkm_gpuobj_del(&chan->push);
	nvkm_gpuobj_del(&chan->inst);
	return data;
}

static const struct nvkm_object_func
nvkm_fifo_chan_func = {
	.dtor = nvkm_fifo_chan_dtor,
	.init = nvkm_fifo_chan_init,
	.fini = nvkm_fifo_chan_fini,
	.ntfy = nvkm_fifo_chan_ntfy,
	.map = nvkm_fifo_chan_map,
	.rd32 = nvkm_fifo_chan_rd32,
	.wr32 = nvkm_fifo_chan_wr32,
	.sclass = nvkm_fifo_chan_child_get,
};

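/*
 * Common channel constructor: allocate instance memory, bind the push
 * buffer ctxdma, join the channel's address space, allocate a channel
 * id, and locate this channel's user registers within the given BAR.
 */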
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
		    u64 hvmm, u64 push, u64 engines, int bar, u32 base,
		    u32 user, const struct nvkm_oclass *oclass,
		    struct nvkm_fifo_chan *chan)
{
	struct nvkm_client *client = oclass->client;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_dmaobj *dmaobj;
	unsigned long flags;
	int ret;

	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
	chan->func = func;
	chan->fifo = fifo;
	chan->engines = engines;
	INIT_LIST_HEAD(&chan->head);

	/* instance memory */
	ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
	if (ret)
		return ret;

	/* allocate push buffer ctxdma instance */
	if (push) {
		dmaobj = nvkm_dmaobj_search(client, push);
		if (IS_ERR(dmaobj))
			return PTR_ERR(dmaobj);

		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
				       &chan->push);
		if (ret)
			return ret;
	}

	/* channel address space */
	if (hvmm) {
		struct nvkm_vmm *vmm = nvkm_uvmm_search(client, hvmm);
		if (IS_ERR(vmm))
			return PTR_ERR(vmm);

		if (vmm->mmu != device->mmu)
			return -EINVAL;

		ret = nvkm_vmm_join(vmm, chan->inst->memory);
		if (ret)
			return ret;

		chan->vmm = nvkm_vmm_ref(vmm);
	}

	/* allocate channel id */
	spin_lock_irqsave(&fifo->lock, flags);
	chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
	if (chan->chid >= NVKM_FIFO_CHID_NR) {
		spin_unlock_irqrestore(&fifo->lock, flags);
		return -ENOSPC;
	}
	list_add(&chan->head, &fifo->chan);
	__set_bit(chan->chid, fifo->mask);
	spin_unlock_irqrestore(&fifo->lock, flags);

	/* determine address of this channel's user registers */
	chan->addr = device->func->resource_addr(device, bar) +
		     base + user * chan->chid;
	chan->size = user;
#ifdef __NetBSD__
	if (bar == 0) {
		/*
		 * We already map BAR 0 in the engine device base, so
		 * grab a subregion of that.
		 */
		bus_space_tag_t mmiot = device->mmiot;
		bus_space_handle_t mmioh = device->mmioh;
		bus_size_t mmiosz = device->mmiosz;
		__diagused bus_addr_t mmioaddr =
		    device->func->resource_addr(device, bar);

		/* Check whether it lies inside the region.  */
		if (mmiosz < base ||
		    mmiosz - base < user * chan->chid ||
		    mmiosz - base - user * chan->chid < user) {
			nvif_error(&chan->object, "fifo channel out of range:"
			    " base 0x%jx chid 0x%jx user 0x%jx mmiosz 0x%jx\n",
			    (uintmax_t)base,
			    (uintmax_t)chan->chid, (uintmax_t)user,
			    (uintmax_t)mmiosz);
			return -EIO;
		}
		KASSERT(mmioaddr <= chan->addr);
		KASSERT(base + user * chan->chid <= mmiosz - user);
		KASSERT(chan->addr <= mmioaddr + (mmiosz - user));
		KASSERT(chan->addr - mmioaddr == base + user * chan->chid);
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_subregion(mmiot, mmioh,
		    base + user * chan->chid, user, &chan->bsh);
		if (ret) {
			nvif_error(&chan->object, "bus_space_subregion failed:"
			    " %d\n", ret);
			return ret;
		}
		chan->bst = mmiot;
		chan->mapped = true;
		chan->subregion = true;
	} else {
		/* XXX Why does nouveau map this lazily? */
		chan->bst = device->func->resource_tag(device, bar);
		chan->mapped = false;
		chan->subregion = false;
	}
#endif

	nvkm_fifo_cevent(fifo);
	return 0;
}