1 /* $NetBSD: nouveau_nvkm_subdev_fault_base.c,v 1.2 2021/12/18 23:45:39 riastradh Exp $ */
2
3 /*
4 * Copyright 2018 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24 #include <sys/cdefs.h>
25 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_fault_base.c,v 1.2 2021/12/18 23:45:39 riastradh Exp $");
26
27 #include "priv.h"
28
29 #include <core/memory.h>
30 #include <core/notify.h>
31
32 static void
nvkm_fault_ntfy_fini(struct nvkm_event * event,int type,int index)33 nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
34 {
35 struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
36 fault->func->buffer.intr(fault->buffer[index], false);
37 }
38
39 static void
nvkm_fault_ntfy_init(struct nvkm_event * event,int type,int index)40 nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
41 {
42 struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
43 fault->func->buffer.intr(fault->buffer[index], true);
44 }
45
46 static int
nvkm_fault_ntfy_ctor(struct nvkm_object * object,void * argv,u32 argc,struct nvkm_notify * notify)47 nvkm_fault_ntfy_ctor(struct nvkm_object *object, void *argv, u32 argc,
48 struct nvkm_notify *notify)
49 {
50 struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
51 if (argc == 0) {
52 notify->size = 0;
53 notify->types = 1;
54 notify->index = buffer->id;
55 return 0;
56 }
57 return -ENOSYS;
58 }
59
/* Event ops for fault-buffer notifiers: init/fini toggle the buffer's
 * interrupt, ctor validates and binds a notifier to a buffer index. */
static const struct nvkm_event_func
nvkm_fault_ntfy = {
	.ctor = nvkm_fault_ntfy_ctor,
	.init = nvkm_fault_ntfy_init,
	.fini = nvkm_fault_ntfy_fini,
};
66
67 static void
nvkm_fault_intr(struct nvkm_subdev * subdev)68 nvkm_fault_intr(struct nvkm_subdev *subdev)
69 {
70 struct nvkm_fault *fault = nvkm_fault(subdev);
71 return fault->func->intr(fault);
72 }
73
74 static int
nvkm_fault_fini(struct nvkm_subdev * subdev,bool suspend)75 nvkm_fault_fini(struct nvkm_subdev *subdev, bool suspend)
76 {
77 struct nvkm_fault *fault = nvkm_fault(subdev);
78 if (fault->func->fini)
79 fault->func->fini(fault);
80 return 0;
81 }
82
83 static int
nvkm_fault_init(struct nvkm_subdev * subdev)84 nvkm_fault_init(struct nvkm_subdev *subdev)
85 {
86 struct nvkm_fault *fault = nvkm_fault(subdev);
87 if (fault->func->init)
88 fault->func->init(fault);
89 return 0;
90 }
91
/* Allocate and initialise fault buffer 'id': query its geometry from the
 * implementation, back it with instance memory, and pin it into BAR2.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EFAULT if the
 * pin yields an invalid address, or the nvkm_memory_new() error.
 *
 * NOTE(review): on failure after the kzalloc, 'buffer' has already been
 * stored in fault->buffer[id] but the caller only bumps buffer_nr on
 * success, and the dtor frees entries below buffer_nr — so a failing
 * buffer appears to leak here.  Matches upstream behaviour; verify
 * against the dtor before changing. */
static int
nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fault_buffer *buffer;
	int ret;

	if (!(buffer = kzalloc(sizeof(*buffer), GFP_KERNEL)))
		return -ENOMEM;
	buffer->fault = fault;
	buffer->id = id;
	/* Implementation fills in buffer->entries (and related geometry). */
	fault->func->buffer.info(buffer);
	fault->buffer[id] = buffer;

	nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);

	/* Backing store: entries * entry_size bytes, 4KiB-aligned, zeroed. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, buffer->entries *
			      fault->func->buffer.entry_size, 0x1000, true,
			      &buffer->mem);
	if (ret)
		return ret;

	/* Pin fault buffer in BAR2. */
	buffer->addr = fault->func->buffer.pin(buffer);
	if (buffer->addr == ~0ULL)
		return -EFAULT;

	return 0;
}
122
123 static int
nvkm_fault_oneinit(struct nvkm_subdev * subdev)124 nvkm_fault_oneinit(struct nvkm_subdev *subdev)
125 {
126 struct nvkm_fault *fault = nvkm_fault(subdev);
127 int ret, i;
128
129 for (i = 0; i < ARRAY_SIZE(fault->buffer); i++) {
130 if (i < fault->func->buffer.nr) {
131 ret = nvkm_fault_oneinit_buffer(fault, i);
132 if (ret)
133 return ret;
134 fault->buffer_nr = i + 1;
135 }
136 }
137
138 ret = nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
139 &fault->event);
140 if (ret)
141 return ret;
142
143 if (fault->func->oneinit)
144 ret = fault->func->oneinit(fault);
145 return ret;
146 }
147
148 static void *
nvkm_fault_dtor(struct nvkm_subdev * subdev)149 nvkm_fault_dtor(struct nvkm_subdev *subdev)
150 {
151 struct nvkm_fault *fault = nvkm_fault(subdev);
152 int i;
153
154 nvkm_notify_fini(&fault->nrpfb);
155 nvkm_event_fini(&fault->event);
156
157 for (i = 0; i < fault->buffer_nr; i++) {
158 if (fault->buffer[i]) {
159 nvkm_memory_unref(&fault->buffer[i]->mem);
160 kfree(fault->buffer[i]);
161 }
162 }
163
164 return fault;
165 }
166
/* Subdev ops for the MMU fault subdev, wired into nvkm_subdev_ctor()
 * by nvkm_fault_new_() below. */
static const struct nvkm_subdev_func
nvkm_fault = {
	.dtor = nvkm_fault_dtor,
	.oneinit = nvkm_fault_oneinit,
	.init = nvkm_fault_init,
	.fini = nvkm_fault_fini,
	.intr = nvkm_fault_intr,
};
175
176 int
nvkm_fault_new_(const struct nvkm_fault_func * func,struct nvkm_device * device,int index,struct nvkm_fault ** pfault)177 nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
178 int index, struct nvkm_fault **pfault)
179 {
180 struct nvkm_fault *fault;
181 if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL)))
182 return -ENOMEM;
183 nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev);
184 fault->func = func;
185 fault->user.ctor = nvkm_ufault_new;
186 fault->user.base = func->user.base;
187 return 0;
188 }
189