/* $NetBSD: nouveau_nvkm_engine_gr_base.c,v 1.3 2021/12/18 23:45:36 riastradh Exp $ */

/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_gr_base.c,v 1.3 2021/12/18 23:45:36 riastradh Exp $");

#include "priv.h"

#include <engine/fifo.h>

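/*
 * Base implementation of the graphics (GR) engine.  The entry points
 * below are thin wrappers that dispatch through the chipset-specific
 * function table in gr->func; when a hook is not provided they fall
 * back to a harmless default (0, false, or -ENODEV for the TLB flush).
 */
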
u32
nvkm_gr_ctxsw_inst(struct nvkm_device *device)
{
	struct nvkm_gr *gr = device->gr;
	if (gr && gr->func->ctxsw.inst)
		return gr->func->ctxsw.inst(gr);
	return 0;
}

int
nvkm_gr_ctxsw_resume(struct nvkm_device *device)
{
	struct nvkm_gr *gr = device->gr;
	if (gr && gr->func->ctxsw.resume)
		return gr->func->ctxsw.resume(gr);
	return 0;
}

int
nvkm_gr_ctxsw_pause(struct nvkm_device *device)
{
	struct nvkm_gr *gr = device->gr;
	if (gr && gr->func->ctxsw.pause)
		return gr->func->ctxsw.pause(gr);
	return 0;
}

static bool
nvkm_gr_chsw_load(struct nvkm_engine *engine)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	if (gr->func->chsw_load)
		return gr->func->chsw_load(gr);
	return false;
}

static void
nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	if (gr->func->tile)
		gr->func->tile(gr, region, tile);
}

u64
nvkm_gr_units(struct nvkm_gr *gr)
{
	if (gr->func->units)
		return gr->func->units(gr);
	return 0;
}

int
nvkm_gr_tlb_flush(struct nvkm_gr *gr)
{
	if (gr->func->tlb_flush)
		return gr->func->tlb_flush(gr);
	return -ENODEV;
}

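/*
 * Enumerate the object classes (sclasses) exposed by the graphics
 * engine: either ask the chipset code via the object_get hook, or walk
 * the static sclass table until the requested index is reached.
 */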
static int
nvkm_gr_oclass_get(struct nvkm_oclass *oclass, int index)
{
	struct nvkm_gr *gr = nvkm_gr(oclass->engine);
	int c = 0;

	if (gr->func->object_get) {
		int ret = gr->func->object_get(gr, index, &oclass->base);
		if (oclass->base.oclass)
			return index;
		return ret;
	}

	while (gr->func->sclass[c].oclass) {
		if (c++ == index) {
			oclass->base = gr->func->sclass[index];
			return index;
		}
	}

	return c;
}

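/*
 * Construct the per-channel context object for a fifo channel, if the
 * chipset implementation provides a chan_new hook.
 */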
static int
nvkm_gr_cclass_new(struct nvkm_fifo_chan *chan,
		   const struct nvkm_oclass *oclass,
		   struct nvkm_object **pobject)
{
	struct nvkm_gr *gr = nvkm_gr(oclass->engine);
	if (gr->func->chan_new)
		return gr->func->chan_new(gr, chan, oclass, pobject);
	return 0;
}

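/*
 * Interrupt handler.  Unlike the other hooks, intr (and init below) is
 * mandatory and is called without a NULL check.
 */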
static void
nvkm_gr_intr(struct nvkm_engine *engine)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	gr->func->intr(gr);
}

static int
nvkm_gr_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	if (gr->func->oneinit)
		return gr->func->oneinit(gr);
	return 0;
}

static int
nvkm_gr_init(struct nvkm_engine *engine)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	return gr->func->init(gr);
}

static int
nvkm_gr_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	if (gr->func->fini)
		return gr->func->fini(gr, suspend);
	return 0;
}

static void *
nvkm_gr_dtor(struct nvkm_engine *engine)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	if (gr->func->dtor)
		return gr->func->dtor(gr);
	return gr;
}

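/*
 * Engine method table registered with the core in nvkm_gr_ctor() below.
 */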
static const struct nvkm_engine_func
nvkm_gr = {
	.dtor = nvkm_gr_dtor,
	.oneinit = nvkm_gr_oneinit,
	.init = nvkm_gr_init,
	.fini = nvkm_gr_fini,
	.intr = nvkm_gr_intr,
	.tile = nvkm_gr_tile,
	.chsw_load = nvkm_gr_chsw_load,
	.fifo.cclass = nvkm_gr_cclass_new,
	.fifo.sclass = nvkm_gr_oclass_get,
};

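/*
 * Base constructor: chipset implementations embed struct nvkm_gr,
 * install their function table here, and register the engine with the
 * core.  As a rough illustration only (the names and allocation are
 * hypothetical, not taken from this file), a chipset front-end might
 * do something like:
 *
 *	struct mygpu_gr *gr = kzalloc(sizeof(*gr), GFP_KERNEL);
 *	if (gr == NULL)
 *		return -ENOMEM;
 *	return nvkm_gr_ctor(&mygpu_gr_func, device, index, true, &gr->base);
 *
 * where mygpu_gr_func would be that chipset's struct nvkm_gr_func and
 * gr->base the embedded struct nvkm_gr.
 */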
int
nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device,
	     int index, bool enable, struct nvkm_gr *gr)
{
	gr->func = func;
	return nvkm_engine_ctor(&nvkm_gr, device, index, enable, &gr->engine);
}