/*	$NetBSD: nouveau_nvkm_subdev_fault_gp100.c,v 1.2 2021/12/18 23:45:39 riastradh Exp $	*/

/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_fault_gp100.c,v 1.2 2021/12/18 23:45:39 riastradh Exp $");

#include "priv.h"

#include <core/memory.h>
#include <subdev/mc.h>

#include <nvif/class.h>

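/* Mask or unmask the fault subdev's interrupt line at the MC. */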
void
gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
}

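/* Disable the fault buffer by clearing the low bit of 0x002a70. */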
void
gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000);
}

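/* Program the fault buffer base address, then set the low bit of 0x002a70 to enable it. */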
void
gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->addr));
	nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->addr));
	nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
}

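/* Return the address at which the fault buffer memory is mapped through BAR2. */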
u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *buffer)
{
	return nvkm_memory_bar2(buffer->mem);
}

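/* Read the buffer entry count and record the get/put pointer register offsets. */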
void
gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
	buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
	buffer->get = 0x002a7c;
	buffer->put = 0x002a80;
}

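/* Fault interrupt handler: notify listeners on the fault buffer event. */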
void
gp100_fault_intr(struct nvkm_fault *fault)
{
	nvkm_event_send(&fault->event, 1, 0, NULL, 0);
}

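/* GP100 fault subdev description: a single fault buffer with 32-byte entries. */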
static const struct nvkm_fault_func
gp100_fault = {
	.intr = gp100_fault_intr,
	.buffer.nr = 1,
	.buffer.entry_size = 32,
	.buffer.info = gp100_fault_buffer_info,
	.buffer.pin = gp100_fault_buffer_pin,
	.buffer.init = gp100_fault_buffer_init,
	.buffer.fini = gp100_fault_buffer_fini,
	.buffer.intr = gp100_fault_buffer_intr,
	.user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 },
};

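/* Create the fault subdev using the GP100 description above. */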
int
gp100_fault_new(struct nvkm_device *device, int index,
		struct nvkm_fault **pfault)
{
	return nvkm_fault_new_(&gp100_fault, device, index, pfault);
}