/*	$NetBSD: qxl_object.c,v 1.2 2018/08/27 04:58:35 riastradh Exp $	*/

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qxl_object.c,v 1.2 2018/08/27 04:58:35 riastradh Exp $");

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>
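
/*
 * TTM destroy callback for qxl buffer objects: evict any hardware
 * surface backing the BO, drop it from the device's GEM object list,
 * release the GEM base object, and free the wrapper structure.
 */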
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = container_of(tbo, struct qxl_bo, tbo);
	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

	qxl_surface_evict(qdev, bo, false);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

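/* Identify whether a TTM BO was created by this driver, via its destroy hook. */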
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}

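/*
 * Translate a qxl GEM domain (VRAM, SURFACE, CPU) into a TTM placement
 * list.  Pinned objects get TTM_PL_FLAG_NO_EVICT; an unrecognized
 * domain falls back to cacheable system memory.
 */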
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
	unsigned i;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	if (domain == QXL_GEM_DOMAIN_SURFACE)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
	if (!c)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

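/*
 * Allocate and initialize a qxl BO of at least `size' bytes (rounded up
 * to PAGE_SIZE) in the given domain.  On success the new BO is returned
 * in *bo_ptr with one reference held; on failure *bo_ptr is left NULL.
 *
 * A minimal usage sketch (assuming an initialized qdev; everything not
 * defined in this file is hypothetical):
 *
 *	struct qxl_bo *bo;
 *	int r;
 *
 *	r = qxl_bo_create(qdev, PAGE_SIZE, true /* kernel */,
 *			  false /* pinned */, QXL_GEM_DOMAIN_VRAM,
 *			  NULL, &bo);
 *	if (r)
 *		return r;
 */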
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, bool pinned, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

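	/*
	 * Note: ttm_bo_init() is expected to call the destroy callback
	 * (qxl_ttm_bo_destroy) itself on failure, which frees bo, so
	 * this error path must not kfree() it again.
	 */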
	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, NULL, size,
			NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

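/*
 * Map the whole BO into kernel virtual address space.  The mapping is
 * cached in bo->kptr, so repeated calls are cheap; qxl_bo_kunmap()
 * drops it.
 */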
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}

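/*
 * Map a single page of the BO with an atomic (non-sleeping) mapping
 * when it lives in VRAM or surface RAM; otherwise fall back to the
 * persistent kernel mapping.  Must be paired with
 * qxl_bo_kunmap_atomic_page().
 */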
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
		map = qdev->surface_mapping;
	else
		goto fallback;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

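/* Tear down the persistent kernel mapping established by qxl_bo_kmap(). */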
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

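/*
 * Undo qxl_bo_kmap_atomic_page(): unmap the atomic mapping and release
 * the io reservation, or, for the fallback path, drop the whole kernel
 * mapping.
 */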
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
		map = qdev->surface_mapping;
	else
		goto fallback;

	io_mapping_unmap_atomic(pmap);

	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);
	return;
fallback:
	qxl_bo_kunmap(bo);
}

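/* Drop a reference; the caller's pointer is cleared so it cannot dangle. */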
void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_unreference_unlocked(&(*bo)->gem_base);
	*bo = NULL;
}

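/* Take an additional reference and hand the BO back for convenience. */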
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_reference(&bo->gem_base);
	return bo;
}

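/*
 * Pin the BO into `domain' so it cannot be evicted, optionally
 * returning its GPU offset.  Pins nest: a matching number of
 * qxl_bo_unpin() calls is needed to make the BO evictable again.
 *
 * A hedged sketch of the expected pattern (error handling elided):
 *
 *	u64 gpu_addr;
 *
 *	r = qxl_bo_pin(bo, QXL_GEM_DOMAIN_VRAM, &gpu_addr);
 *	... use gpu_addr while pinned ...
 *	qxl_bo_unpin(bo);
 */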
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = qxl_bo_gpu_offset(bo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, domain, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = qxl_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(qdev->dev, "%p pin failed\n", bo);
	return r;
}

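/*
 * Decrement the pin count; once it reaches zero, clear
 * TTM_PL_FLAG_NO_EVICT from every placement and revalidate so TTM may
 * evict the BO again.
 */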
int qxl_bo_unpin(struct qxl_bo *bo)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

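/*
 * Teardown helper: forcibly release any GEM objects that userspace
 * leaked.  The error messages indicate a buggy or killed client, since
 * the list should be empty by the time this runs.
 */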
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

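/* BO subsystem setup/teardown is delegated entirely to the TTM layer. */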
int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

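/*
 * Lazily assign a surface id: surface BOs are created without one, and
 * the id plus the hardware surface are only allocated on first use.
 */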
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
		if (ret)
			return ret;
	}
	return 0;
}

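/* Evict every BO from surface memory (TTM_PL_PRIV0). */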
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
}

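/* Evict every BO from VRAM. */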
int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}