/*	$NetBSD: nouveau_nvkm_falcon_msgq.c,v 1.4 2021/12/19 11:34:45 riastradh Exp $	*/

/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_falcon_msgq.c,v 1.4 2021/12/19 11:34:45 riastradh Exp $");

#include "qmgr.h"

#include <linux/nbsd-namespace.h>

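/*
 * Lock the queue and cache the falcon's current tail pointer so that
 * subsequent pops read from a consistent position.
 */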
static void
nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
{
        mutex_lock(&msgq->mutex);
        msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
}

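/*
 * Unlock the queue.  If @commit is set, the consumed position is written
 * back to the falcon's tail register first.
 */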
static void
nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
{
        struct nvkm_falcon *falcon = msgq->qmgr->falcon;

        if (commit)
                nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);

        mutex_unlock(&msgq->mutex);
}

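/* The queue is empty when the falcon's head and tail registers match. */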
static bool
nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
{
        u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg);
        u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
        return head == tail;
}

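/*
 * Copy @size bytes of message data from the queue (falcon DMEM) into @data,
 * starting at the cached position.  If the buffer has looped (head behind
 * our position), restart reading from the queue's base offset.  Fails if
 * fewer than @size bytes are pending.
 */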
static int
nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
{
        struct nvkm_falcon *falcon = msgq->qmgr->falcon;
        u32 head, tail, available;

        head = nvkm_falcon_rd32(falcon, msgq->head_reg);
        /* has the buffer looped? */
        if (head < msgq->position)
                msgq->position = msgq->offset;

        tail = msgq->position;

        available = head - tail;
        if (size > available) {
                FLCNQ_ERR(msgq, "requested %d bytes, but only %d available",
                          size, available);
                return -EINVAL;
        }

        nvkm_falcon_read_dmem(falcon, tail, size, 0, data);
        msgq->position += ALIGN(size, QUEUE_ALIGNMENT);
        return 0;
}

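/*
 * Read a single message (header plus payload) into @hdr.  Returns 1 if a
 * message was read, 0 if the queue was empty, or a negative error code.
 * The tail pointer is only committed back to the falcon when no error
 * occurred.
 */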
static int
nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
{
        int ret = 0;

        nvkm_falcon_msgq_open(msgq);

        if (nvkm_falcon_msgq_empty(msgq))
                goto close;

        ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE);
        if (ret) {
                FLCNQ_ERR(msgq, "failed to read message header");
                goto close;
        }

        if (hdr->size > MSG_BUF_SIZE) {
                FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size);
                ret = -ENOSPC;
                goto close;
        }

        if (hdr->size > HDR_SIZE) {
                u32 read_size = hdr->size - HDR_SIZE;

                ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size);
                if (ret) {
                        FLCNQ_ERR(msgq, "failed to read message data");
                        goto close;
                }
        }

        ret = 1;
close:
        nvkm_falcon_msgq_close(msgq, (ret >= 0));
        return ret;
}

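/*
 * Route a received message to its sequence: invoke the sequence callback
 * (unless the sequence was cancelled), then either release the sequence for
 * asynchronous commands or complete it to wake the synchronous waiter.
 */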
static int
nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
{
        struct nvkm_falcon_qmgr_seq *seq;

        seq = &msgq->qmgr->seq.id[hdr->seq_id];
        if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
                FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id);
                return -EINVAL;
        }

        if (seq->state == SEQ_STATE_USED) {
                if (seq->callback)
                        seq->result = seq->callback(seq->priv, hdr);
        }

        if (seq->async) {
                nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
                return 0;
        }

        complete_all(&seq->done);
        return 0;
}

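/*
 * Drain the message queue, dispatching each pending message to its sequence.
 */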
void
nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
{
        /*
         * We are invoked from a worker thread, so normally we have plenty of
         * stack space to work with.
         */
        u8 msg_buffer[MSG_BUF_SIZE];
        struct nv_falcon_msg *hdr = (void *)msg_buffer;

        while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
                nvkm_falcon_msgq_exec(msgq, hdr);
}

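/*
 * Read the firmware's init message into @data.  The queue registers are
 * taken from the falcon description and the queue offset is seeded from the
 * current tail; the message is expected to be exactly @size bytes.
 */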
int
nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
                              void *data, u32 size)
{
        struct nvkm_falcon *falcon = msgq->qmgr->falcon;
        struct nv_falcon_msg *hdr = data;
        int ret;

        msgq->head_reg = falcon->func->msgq.head;
        msgq->tail_reg = falcon->func->msgq.tail;
        msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail);

        nvkm_falcon_msgq_open(msgq);
        ret = nvkm_falcon_msgq_pop(msgq, data, size);
        if (ret == 0 && hdr->size != size) {
                FLCN_ERR(falcon, "unexpected init message size %d vs %d",
                         hdr->size, size);
                ret = -EINVAL;
        }
        nvkm_falcon_msgq_close(msgq, ret == 0);
        return ret;
}

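/*
 * Record the head/tail register pair (selected by @index via the per-queue
 * register stride) and the DMEM offset of this message queue.
 */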
void
nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
                      u32 index, u32 offset, u32 size)
{
        const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;

        msgq->head_reg = func->msgq.head + index * func->msgq.stride;
        msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
        msgq->offset = offset;

        FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
                  index, msgq->offset, size);
}

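/* Destroy a message queue and clear the caller's pointer. */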
void
nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
{
        struct nvkm_falcon_msgq *msgq = *pmsgq;
        if (msgq) {
                mutex_destroy(&msgq->mutex);
                kfree(*pmsgq);
                *pmsgq = NULL;
        }
}

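/* Allocate a message queue, attach it to @qmgr and return it in @pmsgq. */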
int
nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
                     struct nvkm_falcon_msgq **pmsgq)
{
        struct nvkm_falcon_msgq *msgq = *pmsgq;

        if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
                return -ENOMEM;

        msgq->qmgr = qmgr;
        msgq->name = name;
        mutex_init(&msgq->mutex);
        return 0;
}