/* $NetBSD: via_mm.c,v 1.5 2021/12/18 23:45:44 riastradh Exp $ */

/*
 * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: via_mm.c,v 1.5 2021/12/18 23:45:44 riastradh Exp $");

#include <linux/slab.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_irq.h>
#include <drm/via_drm.h>

#include "via_drv.h"

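/*
 * Both the VRAM and AGP managers work in 16-byte units: a request of
 * mem->size bytes is rounded up to (mem->size + 15) >> 4 units before
 * it is handed to drm_mm.
 */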
#define VIA_MM_ALIGN_SHIFT 4
#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)

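/*
 * Per-allocation bookkeeping: the drm_mm node backing the block plus a
 * link on the owning file's obj_list, so the block can be reclaimed
 * when that file goes away.
 */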
struct via_memblock {
	struct drm_mm_node mm_node;
	struct list_head owner_list;
};

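/*
 * Ioctl handler: set up the AGP allocator.  The offset and size come
 * from userspace in bytes; the drm_mm range itself is kept in 16-byte
 * units.
 */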
int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_agp_t *agp = data;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);

	dev_priv->agp_initialized = 1;
	dev_priv->agp_offset = agp->offset;
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
	return 0;
}

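/*
 * Ioctl handler: set up the VRAM (framebuffer) allocator, mirroring
 * via_agp_init() above.
 */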
int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_fb_t *fb = data;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);

	dev_priv->vram_initialized = 1;
	dev_priv->vram_offset = fb->offset;

	mutex_unlock(&dev->struct_mutex);
	DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);

	return 0;
}

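/*
 * Per-context teardown: release the futexes held by the context and,
 * if this was the last context on dev->ctxlist, uninstall the IRQ
 * handler and clean up the futex and map state as well.
 */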
int via_final_context(struct drm_device *dev, int context)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	via_release_futex(dev_priv, context);

	/* Linux specific until context tracking code gets ported to BSD */
	/* Last context, perform cleanup */
	if (list_is_singular(&dev->ctxlist)) {
		DRM_DEBUG("Last Context\n");
		drm_irq_uninstall(dev);
		via_cleanup_futex(dev_priv);
		via_do_cleanup_map(dev);
	}
	return 1;
}

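/*
 * Last-close hook: tear down whichever of the VRAM and AGP allocators
 * were initialized.
 */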
void via_lastclose(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->vram_initialized) {
		drm_mm_takedown(&dev_priv->vram_mm);
		dev_priv->vram_initialized = 0;
	}
	if (dev_priv->agp_initialized) {
		drm_mm_takedown(&dev_priv->agp_mm);
		dev_priv->agp_initialized = 0;
	}
	mutex_unlock(&dev->struct_mutex);
}

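/*
 * Ioctl handler: carve a block out of VRAM or AGP space.  The request
 * is rounded up to 16-byte units, inserted into the matching drm_mm,
 * published through object_idr (the handle is returned in mem->index)
 * and linked onto the caller's obj_list.  mem->offset is returned as
 * the manager's base offset plus the node's start, in bytes.
 */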
int via_mem_alloc(struct drm_device *dev, void *data,
		  struct drm_file *file)
{
	drm_via_mem_t *mem = data;
	int retval = 0, user_key;
	struct via_memblock *item;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	struct via_file_private *file_priv = file->driver_priv;
	unsigned long tmpSize;

	if (mem->type > VIA_MEM_AGP) {
		DRM_ERROR("Unknown memory type allocation\n");
		return -EINVAL;
	}
	idr_preload(GFP_KERNEL);
	mutex_lock(&dev->struct_mutex);
	if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
		      dev_priv->agp_initialized)) {
		DRM_ERROR
		    ("Attempt to allocate from uninitialized memory manager.\n");
		mutex_unlock(&dev->struct_mutex);
		idr_preload_end();
		return -EINVAL;
	}

	item = kzalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		retval = -ENOMEM;
		goto fail_alloc;
	}

	tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
	if (mem->type == VIA_MEM_AGP)
		retval = drm_mm_insert_node(&dev_priv->agp_mm,
					    &item->mm_node,
					    tmpSize);
	else
		retval = drm_mm_insert_node(&dev_priv->vram_mm,
					    &item->mm_node,
					    tmpSize);
	if (retval)
		goto fail_alloc;

	retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
	if (retval < 0)
		goto fail_idr;
	user_key = retval;

	list_add(&item->owner_list, &file_priv->obj_list);
	mutex_unlock(&dev->struct_mutex);
	idr_preload_end();

	mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
		       dev_priv->vram_offset : dev_priv->agp_offset) +
		      ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
	mem->index = user_key;

	return 0;

fail_idr:
	drm_mm_remove_node(&item->mm_node);
fail_alloc:
	kfree(item);
	mutex_unlock(&dev->struct_mutex);
	idr_preload_end();

	mem->offset = 0;
	mem->size = 0;
	mem->index = 0;
	DRM_DEBUG("Video memory allocation failed\n");

	return retval;
}

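/*
 * Ioctl handler: free a block previously handed out by via_mem_alloc().
 * The handle in mem->index is looked up in object_idr, and the block is
 * dropped from the idr, its owner list and the drm_mm before being
 * freed.
 */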
int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	drm_via_mem_t *mem = data;
	struct via_memblock *obj;

	mutex_lock(&dev->struct_mutex);
	obj = idr_find(&dev_priv->object_idr, mem->index);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	idr_remove(&dev_priv->object_idr, mem->index);
	list_del(&obj->owner_list);
	drm_mm_remove_node(&obj->mm_node);
	kfree(obj);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("free = 0x%lx\n", mem->index);

	return 0;
}

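/*
 * Reclaim every block still owned by a closing file: if the hardware
 * lock exists, take the idle lock, quiesce DMA and release the file's
 * remaining allocations.
 */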
void via_reclaim_buffers_locked(struct drm_device *dev,
				struct drm_file *file)
{
	struct via_file_private *file_priv = file->driver_priv;
	struct via_memblock *entry, *next;

	if (!(dev->master && file->master->lock.hw_lock))
		return;

	drm_legacy_idlelock_take(&file->master->lock);

	mutex_lock(&dev->struct_mutex);
	if (list_empty(&file_priv->obj_list)) {
		mutex_unlock(&dev->struct_mutex);
		drm_legacy_idlelock_release(&file->master->lock);
		return;
	}

	via_driver_dma_quiescent(dev);

	list_for_each_entry_safe(entry, next, &file_priv->obj_list,
				 owner_list) {
		list_del(&entry->owner_list);
		drm_mm_remove_node(&entry->mm_node);
		kfree(entry);
	}
	mutex_unlock(&dev->struct_mutex);

	drm_legacy_idlelock_release(&file->master->lock);
}