/*	$NetBSD: nouveau_nvkm_subdev_instmem_base.c,v 1.9 2022/04/09 19:59:08 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_instmem_base.c,v 1.9 2022/04/09 19:59:08 riastradh Exp $");

#include "priv.h"

#include <subdev/bar.h>

#ifdef __NetBSD__
#define	__iomem	__nvkm_memory_iomem
#endif

/******************************************************************************
 * instmem object base implementation
 *****************************************************************************/
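/* Restore an instance object's contents from its suspend buffer, then
 * free the buffer.  Uses memcpy_toio() when the backing memory can be
 * mapped, falling back to 32-bit word writes otherwise.
 */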
static void
nvkm_instobj_load(struct nvkm_instobj *iobj)
{
	struct nvkm_memory *memory = &iobj->memory;
	const u64 size = nvkm_memory_size(memory);
	void __iomem *map;
	int i;

	if (!(map = nvkm_kmap(memory))) {
		for (i = 0; i < size; i += 4)
			nvkm_wo32(memory, i, iobj->suspend[i / 4]);
	} else {
		memcpy_toio(map, iobj->suspend, size);
	}
	nvkm_done(memory);

	kvfree(iobj->suspend);
	iobj->suspend = NULL;
}

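/* Snapshot an instance object's contents into a kvmalloc()ed suspend
 * buffer so they survive while the device is powered down.  Returns
 * -ENOMEM if the buffer cannot be allocated.
 */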
static int
nvkm_instobj_save(struct nvkm_instobj *iobj)
{
	struct nvkm_memory *memory = &iobj->memory;
	const u64 size = nvkm_memory_size(memory);
	void __iomem *map;
	int i;

	iobj->suspend = kvmalloc(size, GFP_KERNEL);
	if (!iobj->suspend)
		return -ENOMEM;

	if (!(map = nvkm_kmap(memory))) {
		for (i = 0; i < size; i += 4)
			iobj->suspend[i / 4] = nvkm_ro32(memory, i);
	} else {
		memcpy_fromio(iobj->suspend, map, size);
	}
	nvkm_done(memory);
	return 0;
}

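/* Unlink an instance object from its parent instmem's tracking list. */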
void
nvkm_instobj_dtor(struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
	spin_lock(&imem->lock);
	list_del(&iobj->head);
	spin_unlock(&imem->lock);
}

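/* Initialise the common instance object state and register it on the
 * instmem's tracking list, so it can be saved and restored across
 * suspend/resume.
 */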
void
nvkm_instobj_ctor(const struct nvkm_memory_func *func,
		  struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
	nvkm_memory_ctor(func, &iobj->memory);
	iobj->suspend = NULL;
	spin_lock(&imem->lock);
	list_add_tail(&iobj->head, &imem->list);
	spin_unlock(&imem->lock);
}

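/* Allocate a new instance memory object via the backend's memory_new()
 * hook, zero-filling it when requested and the backend does not already
 * guarantee zeroed allocations.
 */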
int
nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
		 struct nvkm_memory **pmemory)
{
	struct nvkm_subdev *subdev = &imem->subdev;
	struct nvkm_memory *memory = NULL;
	u32 offset;
	int ret;

	ret = imem->func->memory_new(imem, size, align, zero, &memory);
	if (ret) {
		nvkm_error(subdev, "OOM: %08x %08x %d\n", size, align, ret);
		goto done;
	}

	nvkm_trace(subdev, "new %08x %08x %d: %010"PRIx64" %010"PRIx64"\n",
		   size, align, zero, nvkm_memory_addr(memory),
		   nvkm_memory_size(memory));

	if (!imem->func->zero && zero) {
		void __iomem *map = nvkm_kmap(memory);
		if (unlikely(!map)) {
			for (offset = 0; offset < size; offset += 4)
				nvkm_wo32(memory, offset, 0x00000000);
		} else {
			memset_io(map, 0x00, size);
		}
		nvkm_done(memory);
	}

done:
	if (ret)
		nvkm_memory_unref(&memory);
	*pmemory = memory;
	return ret;
}

/******************************************************************************
 * instmem subdev base implementation
 *****************************************************************************/

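/* Read a 32-bit value from instance memory through the backend hook. */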
u32
nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
{
	return imem->func->rd32(imem, addr);
}

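/* Write a 32-bit value to instance memory through the backend hook. */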
void
nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
{
	imem->func->wr32(imem, addr, data);
}

void
nvkm_instmem_boot(struct nvkm_instmem *imem)
{
	/* Separate bootstrapped objects from normal list, as we need
	 * to make sure they're accessed with the slowpath on suspend
	 * and resume.
	 */
	struct nvkm_instobj *iobj, *itmp;
	spin_lock(&imem->lock);
	list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
		list_move_tail(&iobj->head, &imem->boot);
	}
	spin_unlock(&imem->lock);
}

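/* Suspend hook: save the contents of every instance object.  Normal
 * objects are saved before BAR2 is torn down; bootstrapped objects
 * are saved afterwards, via the slowpath.
 */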
static int
nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	struct nvkm_instobj *iobj;

	if (suspend) {
		list_for_each_entry(iobj, &imem->list, head) {
			int ret = nvkm_instobj_save(iobj);
			if (ret)
				return ret;
		}

		nvkm_bar_bar2_fini(subdev->device);

		list_for_each_entry(iobj, &imem->boot, head) {
			int ret = nvkm_instobj_save(iobj);
			if (ret)
				return ret;
		}
	}

	if (imem->func->fini)
		imem->func->fini(imem);

	return 0;
}

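/* Resume hook: restore any saved instance objects.  Bootstrapped
 * objects are reloaded before BAR2 comes back up; the rest follow
 * once it is available again.
 */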
static int
nvkm_instmem_init(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	struct nvkm_instobj *iobj;

	list_for_each_entry(iobj, &imem->boot, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);
	}

	nvkm_bar_bar2_init(subdev->device);

	list_for_each_entry(iobj, &imem->list, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);
	}

	return 0;
}

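/* One-time init hook: defer to the backend's oneinit(), if any. */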
static int
nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	if (imem->func->oneinit)
		return imem->func->oneinit(imem);
	return 0;
}

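/* Destructor hook: destroy the lock, then let the backend free its
 * state via dtor() if it provides one; otherwise return the base
 * structure for the caller to free.
 */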
static void *
nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	spin_lock_destroy(&imem->lock);
	if (imem->func->dtor)
		return imem->func->dtor(imem);
	return imem;
}

static const struct nvkm_subdev_func
nvkm_instmem = {
	.dtor = nvkm_instmem_dtor,
	.oneinit = nvkm_instmem_oneinit,
	.init = nvkm_instmem_init,
	.fini = nvkm_instmem_fini,
};

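/* Initialise the common instmem subdev state: subdev base, backend
 * function table, lock, and the normal/boot object lists.
 */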
void
nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
		  struct nvkm_device *device, int index,
		  struct nvkm_instmem *imem)
{
	nvkm_subdev_ctor(&nvkm_instmem, device, index, &imem->subdev);
	imem->func = func;
	spin_lock_init(&imem->lock);
	INIT_LIST_HEAD(&imem->list);
	INIT_LIST_HEAD(&imem->boot);
}