/*	$NetBSD: nouveau_nvkm_engine_dma_user.c,v 1.4 2021/12/18 23:45:35 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_dma_user.c,v 1.4 2021/12/18 23:45:35 riastradh Exp $");

#include "user.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>

#include <nvif/cl0002.h>
#include <nvif/unpack.h>

static const struct nvkm_object_func nvkm_dmaobj_func;
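
/*
 * Look up a client's DMA object by handle.  nvkm_object_search() is told
 * to match against nvkm_dmaobj_func, so a handle that names any other
 * object class (or nothing at all) comes back as an ERR_PTR, which is
 * returned to the caller unchanged.
 */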
struct nvkm_dmaobj *
nvkm_dmaobj_search(struct nvkm_client *client, u64 handle)
{
	struct nvkm_object *object;

	object = nvkm_object_search(client, handle, &nvkm_dmaobj_func);
	if (IS_ERR(object))
		return (void *)object;

	return nvkm_dmaobj(object);
}

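/*
 * nvkm_object bind hook: dispatch to the class-specific bind
 * implementation supplied to nvkm_dmaobj_ctor().
 */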
static int
nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
		 int align, struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
	return dmaobj->func->bind(dmaobj, gpuobj, align, pgpuobj);
}

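/*
 * Destructor: nothing of our own to tear down; return the containing
 * nvkm_dmaobj so the object core frees the full structure.
 */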
static void *
nvkm_dmaobj_dtor(struct nvkm_object *base)
{
	return nvkm_dmaobj(base);
}

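/*
 * Shared vtable for all DMA object classes; its address also serves as
 * the class signature checked by nvkm_dmaobj_search().
 */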
static const struct nvkm_object_func
nvkm_dmaobj_func = {
	.dtor = nvkm_dmaobj_dtor,
	.bind = nvkm_dmaobj_bind,
};

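/*
 * Common constructor used by the per-chipset DMA object classes.  It
 * unpacks and validates the nv_dma_v0 ioctl arguments and fills in the
 * generic portion of *dmaobj; any class-specific data following the
 * header is left in *pdata/*psize for the caller to consume.
 */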
int
nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
		 const struct nvkm_oclass *oclass, void **pdata, u32 *psize,
		 struct nvkm_dmaobj *dmaobj)
{
	union {
		struct nv_dma_v0 v0;
	} *args = *pdata;
	struct nvkm_device *device = dma->engine.subdev.device;
	struct nvkm_client *client = oclass->client;
	struct nvkm_object *parent = oclass->parent;
	struct nvkm_instmem *instmem = device->imem;
	struct nvkm_fb *fb = device->fb;
	void *data = *pdata;
	u32 size = *psize;
	int ret = -ENOSYS;

	nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
	dmaobj->func = func;
	dmaobj->dma = dma;

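	/*
	 * Unpack the nv_dma_v0 header.  The final "true" argument permits
	 * trailing class-specific data, which is handed back to the caller
	 * through *pdata/*psize below.
	 */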
	nvif_ioctl(parent, "create dma size %d\n", *psize);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
		nvif_ioctl(parent, "create dma vers %d target %d access %d "
				   "start %016"PRIx64" limit %016"PRIx64"\n",
			   args->v0.version, args->v0.target, args->v0.access,
			   args->v0.start, args->v0.limit);
		dmaobj->target = args->v0.target;
		dmaobj->access = args->v0.access;
		dmaobj->start  = args->v0.start;
		dmaobj->limit  = args->v0.limit;
	} else
		return ret;

	*pdata = data;
	*psize = size;

	if (dmaobj->start > dmaobj->limit)
		return -EINVAL;

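	/*
	 * Translate the user-visible NV_DMA_V0_TARGET_* value into the
	 * internal NV_MEM_TARGET_* equivalent.  Unprivileged clients may
	 * always target VM; for them VRAM is refused on NV50 and later
	 * boards and must end below the instmem reservation on earlier
	 * ones, while the PCI and AGP targets require a privileged client.
	 */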
	switch (dmaobj->target) {
	case NV_DMA_V0_TARGET_VM:
		dmaobj->target = NV_MEM_TARGET_VM;
		break;
	case NV_DMA_V0_TARGET_VRAM:
		if (!client->super) {
			if (dmaobj->limit >= fb->ram->size - instmem->reserved)
				return -EACCES;
			if (device->card_type >= NV_50)
				return -EACCES;
		}
		dmaobj->target = NV_MEM_TARGET_VRAM;
		break;
	case NV_DMA_V0_TARGET_PCI:
		if (!client->super)
			return -EACCES;
		dmaobj->target = NV_MEM_TARGET_PCI;
		break;
	case NV_DMA_V0_TARGET_PCI_US:
	case NV_DMA_V0_TARGET_AGP:
		if (!client->super)
			return -EACCES;
		dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
		break;
	default:
		return -EINVAL;
	}

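	/* Translate the requested access mode the same way. */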
	switch (dmaobj->access) {
	case NV_DMA_V0_ACCESS_VM:
		dmaobj->access = NV_MEM_ACCESS_VM;
		break;
	case NV_DMA_V0_ACCESS_RD:
		dmaobj->access = NV_MEM_ACCESS_RO;
		break;
	case NV_DMA_V0_ACCESS_WR:
		dmaobj->access = NV_MEM_ACCESS_WO;
		break;
	case NV_DMA_V0_ACCESS_RDWR:
		dmaobj->access = NV_MEM_ACCESS_RW;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
159