/* $NetBSD: qxl_image.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $ */

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qxl_image.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $");

#include <linux/gfp.h>
#include <linux/slab.h>

#include "qxl_drv.h"
#include "qxl_object.h"

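/*
 * Allocate one image data chunk: a driver-side qxl_drm_chunk wrapper and
 * a reserved BO of chunk_size bytes, appended to the image's chunk list.
 */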
static int
qxl_allocate_chunk(struct qxl_device *qdev,
		   struct qxl_release *release,
		   struct qxl_drm_image *image,
		   unsigned int chunk_size)
{
	struct qxl_drm_chunk *chunk;
	int ret;

	chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
	if (!chunk)
		return -ENOMEM;

	ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
	if (ret) {
		kfree(chunk);
		return ret;
	}

	list_add_tail(&chunk->head, &image->chunk_list);
	return 0;
}

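/*
 * Allocate the BOs backing a QXL image: one for the qxl_image descriptor
 * and one data chunk large enough for height lines of stride bytes.  On
 * success *image_ptr owns the allocations; on failure everything is
 * released and a negative errno is returned.
 */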
int
qxl_image_alloc_objects(struct qxl_device *qdev,
			struct qxl_release *release,
			struct qxl_drm_image **image_ptr,
			int height, int stride)
{
	struct qxl_drm_image *image;
	int ret;

	image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
	if (!image)
		return -ENOMEM;

	INIT_LIST_HEAD(&image->chunk_list);

	ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
	if (ret) {
		kfree(image);
		return ret;
	}

	ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
	if (ret) {
		qxl_bo_unref(&image->bo);
		kfree(image);
		return ret;
	}
	*image_ptr = image;
	return 0;
}

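/*
 * Release everything qxl_image_alloc_objects() created: every chunk BO and
 * wrapper on the list, the image BO, and the qxl_drm_image itself.
 */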
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
{
	struct qxl_drm_chunk *chunk, *tmp;

	list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
		qxl_bo_unref(&chunk->bo);
		kfree(chunk);
	}

	qxl_bo_unref(&dimage->bo);
	kfree(dimage);
}

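/*
 * Copy the bitmap into the chunk BO page by page and fill in the image
 * descriptor.  Only a single data chunk and bit depths of 1, 24, and 32
 * are supported; the hash parameter is currently unused.
 */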
static int
qxl_image_init_helper(struct qxl_device *qdev,
		      struct qxl_release *release,
		      struct qxl_drm_image *dimage,
		      const uint8_t *data,
		      int width, int height,
		      int depth, unsigned int hash,
		      int stride)
{
	struct qxl_drm_chunk *drv_chunk;
	struct qxl_image *image;
	struct qxl_data_chunk *chunk;
	int i;
	int chunk_stride;
	int linesize = width * depth / 8;
	struct qxl_bo *chunk_bo, *image_bo;
	void *ptr;
	/* Chunk */
	/* FIXME: Check integer overflow */
	/* TODO: variable number of chunks */

	drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);

	chunk_bo = drv_chunk->bo;
	chunk_stride = stride; /* TODO: should use linesize, but it renders
				  wrong (check the bitmaps are sent correctly
				  first) */

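	/* Initialize the chunk header: a single chunk with no neighbours. */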
	ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
	chunk = ptr;
	chunk->data_size = height * chunk_stride;
	chunk->prev_chunk = 0;
	chunk->next_chunk = 0;
	qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);

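	/*
	 * Copy the pixel data into the chunk BO one page at a time, since
	 * the BO is only ever mapped a page at a time.  When the source
	 * rows are contiguous (stride == linesize) the whole bitmap is
	 * streamed in page-sized pieces; otherwise it is copied row by row.
	 */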
	{
		void *k_data, *i_data;
		int remain;
		int page;
		int size;

		if (stride == linesize && chunk_stride == stride) {
			remain = linesize * height;
			page = 0;
			i_data = (void *)data;

			while (remain > 0) {
				ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);

				if (page == 0) {
					chunk = ptr;
					k_data = chunk->data;
					size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
				} else {
					k_data = ptr;
					size = PAGE_SIZE;
				}
				size = min(size, remain);

				memcpy(k_data, i_data, size);

				qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
				i_data += size;
				remain -= size;
				page++;
			}
		} else {
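			/*
			 * Source rows are not contiguous: copy line by
			 * line, splitting each line at page boundaries.
			 */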
			unsigned int page_base, page_offset, out_offset;

			for (i = 0 ; i < height ; ++i) {
				i_data = (void *)data + i * stride;
				remain = linesize;
				out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;

				while (remain > 0) {
					page_base = out_offset & PAGE_MASK;
					page_offset = offset_in_page(out_offset);
					size = min((int)(PAGE_SIZE - page_offset), remain);

					ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
					k_data = ptr + page_offset;
					memcpy(k_data, i_data, size);
					qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
					remain -= size;
					i_data += size;
					out_offset += size;
				}
			}
		}
	}
	qxl_bo_kunmap(chunk_bo);

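	/* Fill in the image descriptor in the image BO. */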
	image_bo = dimage->bo;
	ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
	image = ptr;

	image->descriptor.id = 0;
	image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;

	image->descriptor.flags = 0;
	image->descriptor.width = width;
	image->descriptor.height = height;

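	/* Map the drawing depth onto a SPICE bitmap format. */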
	switch (depth) {
	case 1:
		/* TODO: BE? check by arch? */
		image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
		break;
	case 24:
		image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
		break;
	case 32:
		image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
		break;
	default:
		DRM_ERROR("unsupported image bit depth\n");
		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
		return -EINVAL;
	}
	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
	image->u.bitmap.x = width;
	image->u.bitmap.y = height;
	image->u.bitmap.stride = chunk_stride;
	image->u.bitmap.palette = 0;
	image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);

	qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);

	return 0;
}

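/*
 * Upload the rectangle of pixels at (x, y) with the given width, height,
 * depth, and stride from data into the previously allocated dimage, and
 * fill in its QXL image descriptor.  The objects must have been allocated
 * with qxl_image_alloc_objects() beforehand.
 */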
int qxl_image_init(struct qxl_device *qdev,
		   struct qxl_release *release,
		   struct qxl_drm_image *dimage,
		   const uint8_t *data,
		   int x, int y, int width, int height,
		   int depth, int stride)
{
	data += y * stride + x * (depth / 8);
	return qxl_image_init_helper(qdev, release, dimage, data,
				     width, height, depth, 0, stride);
}
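
/*
 * Illustrative usage (a sketch, not code from this driver): a caller
 * pairs the allocation and upload like this, where "surface_base" and
 * the rectangle bounds are placeholder names:
 *
 *	struct qxl_drm_image *dimage;
 *	int ret;
 *
 *	ret = qxl_image_alloc_objects(qdev, release, &dimage, height, stride);
 *	if (ret)
 *		return ret;
 *	ret = qxl_image_init(qdev, release, dimage, surface_base,
 *			     left, top, width, height, depth, stride);
 *	if (ret)
 *		goto out_free;
 *	...
 * out_free:
 *	qxl_image_free_objects(qdev, dimage);
 */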