/*	$NetBSD: nouveau_nvkm_core_notify.c,v 1.4 2021/12/18 23:45:34 riastradh Exp $	*/

/*
 * Copyright 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_core_notify.c,v 1.4 2021/12/18 23:45:34 riastradh Exp $");

#include <core/notify.h>
#include <core/event.h>

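/*
 * Notifier references are counted with notify->block, protected by
 * event->refs_lock: delivery is enabled only while block == 0.  On the
 * 0->1 transition the event is told, via nvkm_event_put(), to stop
 * generating (types, index) notifications for this notifier.
 */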
static inline void
nvkm_notify_put_locked(struct nvkm_notify *notify)
{
	if (notify->block++ == 0)
		nvkm_event_put(notify->event, notify->types, notify->index);
}

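/*
 * Disable delivery on behalf of the user.  Clearing NVKM_NOTIFY_USER
 * makes repeated puts harmless; flush_work() then waits out any
 * callback still pending on the workqueue.
 */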
void
nvkm_notify_put(struct nvkm_notify *notify)
{
	struct nvkm_event *event = notify->event;
	unsigned long flags;
	if (likely(event) &&
	    test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
		spin_lock_irqsave(&event->refs_lock, flags);
		nvkm_notify_put_locked(notify);
		spin_unlock_irqrestore(&event->refs_lock, flags);
		if (test_bit(NVKM_NOTIFY_WORK, &notify->flags))
			flush_work(&notify->work);
	}
}

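/*
 * Take a delivery reference with event->refs_lock held.  When block
 * drops back to zero, nvkm_event_get() re-enables generation of
 * (types, index) notifications for this notifier.
 */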
static inline void
nvkm_notify_get_locked(struct nvkm_notify *notify)
{
	if (--notify->block == 0)
		nvkm_event_get(notify->event, notify->types, notify->index);
}

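/*
 * Arm the notifier on behalf of the user.  NVKM_NOTIFY_USER records
 * that the user holds the delivery reference, so repeated gets are
 * harmless.
 */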
void
nvkm_notify_get(struct nvkm_notify *notify)
{
	struct nvkm_event *event = notify->event;
	unsigned long flags;
	if (likely(event) &&
	    !test_and_set_bit(NVKM_NOTIFY_USER, &notify->flags)) {
		spin_lock_irqsave(&event->refs_lock, flags);
		nvkm_notify_get_locked(notify);
		spin_unlock_irqrestore(&event->refs_lock, flags);
	}
}

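/*
 * Run the user's callback for an event that nvkm_notify_send() has
 * blocked.  The notifier is re-armed if the callback returns
 * NVKM_NOTIFY_KEEP, or if the user reference was already dropped and
 * the block taken by send must be balanced; returning NVKM_NOTIFY_DROP
 * with the user reference held leaves delivery blocked instead.
 */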
static inline void
nvkm_notify_func(struct nvkm_notify *notify)
{
	struct nvkm_event *event = notify->event;
	int ret = notify->func(notify);
	unsigned long flags;
	if ((ret == NVKM_NOTIFY_KEEP) ||
	    !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
		spin_lock_irqsave(&event->refs_lock, flags);
		nvkm_notify_get_locked(notify);
		spin_unlock_irqrestore(&event->refs_lock, flags);
	}
}

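/* Workqueue trampoline for notifiers initialised with work == true. */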
static void
nvkm_notify_work(struct work_struct *work)
{
	struct nvkm_notify *notify = container_of(work, typeof(*notify), work);
	nvkm_notify_func(notify);
}

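/*
 * Deliver an event; called with event->list_lock held.  A blocked
 * notifier is skipped.  Otherwise the notifier blocks itself first, so
 * at most one notification is in flight, and the callback either runs
 * directly in the caller's context or, for workqueue notifiers, runs
 * later from a copy of the payload.
 */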
void
nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
{
	struct nvkm_event *event = notify->event;
	unsigned long flags;

	assert_spin_locked(&event->list_lock);
	BUG_ON(size != notify->size);

	spin_lock_irqsave(&event->refs_lock, flags);
	if (notify->block) {
		spin_unlock_irqrestore(&event->refs_lock, flags);
		return;
	}
	nvkm_notify_put_locked(notify);
	spin_unlock_irqrestore(&event->refs_lock, flags);

	if (test_bit(NVKM_NOTIFY_WORK, &notify->flags)) {
		memcpy(__UNCONST(notify->data), data, size);
		schedule_work(&notify->work);
	} else {
		notify->data = data;
		nvkm_notify_func(notify);
		notify->data = NULL;
	}
}

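/*
 * Tear down a notifier: disable delivery (waiting out any pending
 * callback), unlink it from the event's notifier list, and free the
 * reply buffer used for workqueue delivery.
 */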
void
nvkm_notify_fini(struct nvkm_notify *notify)
{
	unsigned long flags;
	if (notify->event) {
		nvkm_notify_put(notify);
		spin_lock_irqsave(&notify->event->list_lock, flags);
		list_del(&notify->head);
		spin_unlock_irqrestore(&notify->event->list_lock, flags);
		kfree(__UNCONST(notify->data));
		notify->event = NULL;
	}
}

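/*
 * Construct a notifier on an event.  The event's ctor parses the
 * request in data/size and must leave notify->size equal to the
 * caller's expected reply size, or the init fails with -EINVAL.
 * Notifiers start blocked (block == 1) and are armed later with
 * nvkm_notify_get().  With work == true a reply buffer is allocated
 * and the callback is deferred to a workqueue.
 */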
int
nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event,
		 int (*func)(struct nvkm_notify *), bool work,
		 void *data, u32 size, u32 reply,
		 struct nvkm_notify *notify)
{
	unsigned long flags;
	int ret = -ENODEV;
	if ((notify->event = event), event->refs) {
		ret = event->func->ctor(object, data, size, notify);
		if (ret == 0 && (ret = -EINVAL, notify->size == reply)) {
			notify->flags = 0;
			notify->block = 1;
			notify->func = func;
			notify->data = NULL;
			if (ret = 0, work) {
				INIT_WORK(&notify->work, nvkm_notify_work);
				set_bit(NVKM_NOTIFY_WORK, &notify->flags);
				notify->data = kmalloc(reply, GFP_KERNEL);
				if (!notify->data)
					ret = -ENOMEM;
			}
		}
		if (ret == 0) {
			spin_lock_irqsave(&event->list_lock, flags);
			list_add_tail(&notify->head, &event->list);
			spin_unlock_irqrestore(&event->list_lock, flags);
		}
	}
	if (ret)
		notify->event = NULL;
	return ret;
}