1 /* $NetBSD: nouveau_nvkm_falcon_qmgr.c,v 1.4 2021/12/19 11:34:45 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_falcon_qmgr.c,v 1.4 2021/12/19 11:34:45 riastradh Exp $");
27
28 #include "qmgr.h"
29
30 #include <linux/nbsd-namespace.h>
31
32 struct nvkm_falcon_qmgr_seq *
nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr * qmgr)33 nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
34 {
35 const struct nvkm_subdev *subdev = qmgr->falcon->owner;
36 struct nvkm_falcon_qmgr_seq *seq;
37 u32 index;
38
39 mutex_lock(&qmgr->seq.mutex);
40 index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
41 if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
42 nvkm_error(subdev, "no free sequence available\n");
43 mutex_unlock(&qmgr->seq.mutex);
44 return ERR_PTR(-EAGAIN);
45 }
46
47 set_bit(index, qmgr->seq.tbl);
48 mutex_unlock(&qmgr->seq.mutex);
49
50 seq = &qmgr->seq.id[index];
51 seq->state = SEQ_STATE_PENDING;
52 return seq;
53 }
54
/*
 * Return a sequence descriptor to the free pool.
 *
 * Resets the descriptor (state, callback, completion) before clearing
 * its bit in the allocation bitmap; clearing the bit last is what
 * republishes the slot to nvkm_falcon_qmgr_seq_acquire().
 */
void
nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
			     struct nvkm_falcon_qmgr_seq *seq)
{
	/* no need to acquire seq.mutex since clear_bit is atomic */
	seq->state = SEQ_STATE_FREE;
	seq->callback = NULL;
	/* Re-arm the completion so the next owner can wait on it. */
	reinit_completion(&seq->done);
	clear_bit(seq->id, qmgr->seq.tbl);
}
65
66 void
nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr ** pqmgr)67 nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr)
68 {
69 struct nvkm_falcon_qmgr *qmgr = *pqmgr;
70 if (qmgr) {
71 mutex_destroy(&qmgr->seq.mutex);
72 kfree(*pqmgr);
73 *pqmgr = NULL;
74 }
75 }
76
77 int
nvkm_falcon_qmgr_new(struct nvkm_falcon * falcon,struct nvkm_falcon_qmgr ** pqmgr)78 nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
79 struct nvkm_falcon_qmgr **pqmgr)
80 {
81 struct nvkm_falcon_qmgr *qmgr;
82 int i;
83
84 if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
85 return -ENOMEM;
86
87 qmgr->falcon = falcon;
88 mutex_init(&qmgr->seq.mutex);
89 for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
90 qmgr->seq.id[i].id = i;
91 init_completion(&qmgr->seq.id[i].done);
92 }
93
94 return 0;
95 }
96