/*	$NetBSD: nouveau_nvkm_subdev_fb_base.c,v 1.3 2021/12/18 23:45:39 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_fb_base.c,v 1.3 2021/12/18 23:45:39 riastradh Exp $");

#include "priv.h"
#include "ram.h"

#include <core/memory.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
#include <engine/gr.h>
#include <engine/mpeg.h>

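/*
 * nvkm_fb_tile_fini: release the software state for one tile region by
 * delegating to the chipset-specific tile.fini hook.
 */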
void
nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
	fb->func->tile.fini(fb, region, tile);
}

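/*
 * nvkm_fb_tile_init: fill in the software state for one tile region
 * (base address, size, pitch, chipset-specific flags) via the tile.init
 * hook.  Nothing is written to hardware until nvkm_fb_tile_prog runs.
 */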
45 void
nvkm_fb_tile_init(struct nvkm_fb * fb,int region,u32 addr,u32 size,u32 pitch,u32 flags,struct nvkm_fb_tile * tile)46 nvkm_fb_tile_init(struct nvkm_fb *fb, int region, u32 addr, u32 size,
47 		  u32 pitch, u32 flags, struct nvkm_fb_tile *tile)
48 {
49 	fb->func->tile.init(fb, region, addr, size, pitch, flags, tile);
50 }
51 
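/*
 * nvkm_fb_tile_prog: program one tile region into hardware, then notify
 * the engines that mirror per-region tiling state (GR, MPEG) so they
 * can update their own copies.
 */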
void
nvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
	struct nvkm_device *device = fb->subdev.device;
	if (fb->func->tile.prog) {
		fb->func->tile.prog(fb, region, tile);
		if (device->gr)
			nvkm_engine_tile(&device->gr->engine, region);
		if (device->mpeg)
			nvkm_engine_tile(device->mpeg, region);
	}
}

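/*
 * nvkm_fb_bios_memtype: translate the RAMCFG strap (register 0x101000,
 * bits 5:2) into an NVKM_RAM_TYPE_* constant by looking the strap up in
 * the VBIOS M0203 memory-type table, falling back to
 * NVKM_RAM_TYPE_UNKNOWN when no entry matches.  A chipset RAM backend
 * would typically consume this as, e.g. (illustrative):
 *
 *	ram->type = nvkm_fb_bios_memtype(device->bios);
 */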
int
nvkm_fb_bios_memtype(struct nvkm_bios *bios)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	struct nvkm_device *device = subdev->device;
	const u8 ramcfg = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
	struct nvbios_M0203E M0203E;
	u8 ver, hdr;

	if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
		switch (M0203E.type) {
		case M0203E_TYPE_DDR2  : return NVKM_RAM_TYPE_DDR2;
		case M0203E_TYPE_DDR3  : return NVKM_RAM_TYPE_DDR3;
		case M0203E_TYPE_GDDR3 : return NVKM_RAM_TYPE_GDDR3;
		case M0203E_TYPE_GDDR5 : return NVKM_RAM_TYPE_GDDR5;
		case M0203E_TYPE_GDDR5X: return NVKM_RAM_TYPE_GDDR5X;
		case M0203E_TYPE_GDDR6 : return NVKM_RAM_TYPE_GDDR6;
		case M0203E_TYPE_HBM2  : return NVKM_RAM_TYPE_HBM2;
		default:
			nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
			return NVKM_RAM_TYPE_UNKNOWN;
		}
	}

	nvkm_warn(subdev, "M0203E not matched!\n");
	return NVKM_RAM_TYPE_UNKNOWN;
}

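/*
 * nvkm_fb_intr: subdev interrupt entry point; forward to the
 * chipset-specific handler when one is provided.
 */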
static void
nvkm_fb_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	if (fb->func->intr)
		fb->func->intr(fb);
}

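/*
 * nvkm_fb_oneinit: one-time setup, run before the first nvkm_fb_init.
 * Detects VRAM (fb->ram), runs any chipset-specific one-time hook, and
 * seeds the compression tag allocator.
 */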
static int
nvkm_fb_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	u32 tags = 0;

	if (fb->func->ram_new) {
		int ret = fb->func->ram_new(fb, &fb->ram);
		if (ret) {
			nvkm_error(subdev, "vram setup failed, %d\n", ret);
			return ret;
		}
	}

	if (fb->func->oneinit) {
		int ret = fb->func->oneinit(fb);
		if (ret)
			return ret;
	}

	/* Initialise compression tag allocator.
	 *
	 * LTC oneinit() will override this on Fermi and newer.
	 */
	if (fb->func->tags) {
		tags = fb->func->tags(fb);
		nvkm_debug(subdev, "%d comptags\n", tags);
	}

	return nvkm_mm_init(&fb->tags, 0, 0, tags, 1);
}

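/*
 * nvkm_fb_init_scrub_vpr: the video protected region (VPR) can be left
 * locked across a reset; run the firmware scrubber binary to release it
 * and verify the region really was scrubbed afterwards.  Returning 0
 * when no scrubber binary is available is deliberate: init continues,
 * but VPR stays locked.
 */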
static int
nvkm_fb_init_scrub_vpr(struct nvkm_fb *fb)
{
	struct nvkm_subdev *subdev = &fb->subdev;
	int ret;

	nvkm_debug(subdev, "VPR locked, running scrubber binary\n");

	if (!fb->vpr_scrubber.size) {
		nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n");
		return 0;
	}

	ret = fb->func->vpr.scrub(fb);
	if (ret) {
		nvkm_error(subdev, "VPR scrubber binary failed\n");
		return ret;
	}

	if (fb->func->vpr.scrub_required(fb)) {
		nvkm_error(subdev, "VPR still locked after scrub!\n");
		return -EIO;
	}

	nvkm_debug(subdev, "VPR scrubber binary successful\n");
	return 0;
}

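/*
 * nvkm_fb_init: per-boot (and resume) initialisation.  Brings up VRAM,
 * reprograms every tile region, runs the optional chipset hooks (init,
 * init_remapper, init_page, init_unkn), and, if the chipset reports the
 * VPR as locked, runs the scrubber.
 */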
static int
nvkm_fb_init(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	int ret, i;

	if (fb->ram) {
		ret = nvkm_ram_init(fb->ram);
		if (ret)
			return ret;
	}

	for (i = 0; i < fb->tile.regions; i++)
		fb->func->tile.prog(fb, i, &fb->tile.region[i]);

	if (fb->func->init)
		fb->func->init(fb);

	if (fb->func->init_remapper)
		fb->func->init_remapper(fb);

	if (fb->func->init_page) {
		ret = fb->func->init_page(fb);
		if (WARN_ON(ret))
			return ret;
	}

	if (fb->func->init_unkn)
		fb->func->init_unkn(fb);

	if (fb->func->vpr.scrub_required &&
	    fb->func->vpr.scrub_required(fb)) {
		ret = nvkm_fb_init_scrub_vpr(fb);
		if (ret)
			return ret;
	}

	return 0;
}

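/*
 * nvkm_fb_dtor: tear down in roughly the reverse order of construction:
 * drop the MMU scratch buffers, release tile regions, destroy the
 * comptag allocator and RAM object, and free the VPR scrubber blob.  A
 * chipset dtor hook, when present, returns the pointer to be freed.
 */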
static void *
nvkm_fb_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	int i;

	nvkm_memory_unref(&fb->mmu_wr);
	nvkm_memory_unref(&fb->mmu_rd);

	for (i = 0; i < fb->tile.regions; i++)
		fb->func->tile.fini(fb, i, &fb->tile.region[i]);

	nvkm_mm_fini(&fb->tags);
	nvkm_ram_del(&fb->ram);

	nvkm_blob_dtor(&fb->vpr_scrubber);

	if (fb->func->dtor)
		return fb->func->dtor(fb);
	return fb;
}

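/*
 * Subdev method table binding the generic FB entry points above into
 * the NVKM subdev framework.
 */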
static const struct nvkm_subdev_func
nvkm_fb = {
	.dtor = nvkm_fb_dtor,
	.oneinit = nvkm_fb_oneinit,
	.init = nvkm_fb_init,
	.intr = nvkm_fb_intr,
};

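/*
 * nvkm_fb_ctor: initialise an embedded struct nvkm_fb.  The big-page
 * size defaults to the chipset value but may be overridden with the
 * "NvFbBigPage" config option.
 */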
void
nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
	     int index, struct nvkm_fb *fb)
{
	nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
	fb->func = func;
	fb->tile.regions = fb->func->tile.regions;
	fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage",
				fb->func->default_bigpage);
}

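/*
 * nvkm_fb_new_: allocate a bare struct nvkm_fb and construct it.
 * Chipset code wraps this with its own func table; a hypothetical
 * caller (nvXX_fb is a placeholder name) might look like:
 *
 *	int
 *	nvXX_fb_new(struct nvkm_device *device, int index,
 *		    struct nvkm_fb **pfb)
 *	{
 *		return nvkm_fb_new_(&nvXX_fb, device, index, pfb);
 *	}
 */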
int
nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
	     int index, struct nvkm_fb **pfb)
{
	if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_fb_ctor(func, device, index, *pfb);
	return 0;
}