qxl_kms.c revision 1.1.1.1.32.1 1 /* $NetBSD: qxl_kms.c,v 1.1.1.1.32.1 2019/06/10 22:08:24 christos Exp $ */
2
3 /*
4 * Copyright 2013 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alon Levy
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: qxl_kms.c,v 1.1.1.1.32.1 2019/06/10 22:08:24 christos Exp $");
30
31 #include "qxl_drv.h"
32 #include "qxl_object.h"
33
34 #include <drm/drm_crtc_helper.h>
35 #include <linux/io-mapping.h>
36
37 int qxl_log_level;
38
39 static void qxl_dump_mode(struct qxl_device *qdev, void *p)
40 {
41 struct qxl_mode *m = p;
42 DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
43 m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
44 m->y_mili, m->orientation);
45 }
46
47 static bool qxl_check_device(struct qxl_device *qdev)
48 {
49 struct qxl_rom *rom = qdev->rom;
50 int mode_offset;
51 int i;
52
53 if (rom->magic != 0x4f525851) {
54 DRM_ERROR("bad rom signature %x\n", rom->magic);
55 return false;
56 }
57
58 DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
59 DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
60 rom->log_level);
61 DRM_INFO("Currently using mode #%d, list at 0x%x\n",
62 rom->mode, rom->modes_offset);
63 DRM_INFO("%d io pages at offset 0x%x\n",
64 rom->num_io_pages, rom->pages_offset);
65 DRM_INFO("%d byte draw area at offset 0x%x\n",
66 rom->surface0_area_size, rom->draw_area_offset);
67
68 qdev->vram_size = rom->surface0_area_size;
69 DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
70
71 mode_offset = rom->modes_offset / 4;
72 qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
73 DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
74 qdev->mode_info.num_modes);
75 qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
76 for (i = 0; i < qdev->mode_info.num_modes; i++)
77 qxl_dump_mode(qdev, qdev->mode_info.modes + i);
78 return true;
79 }
80
/*
 * Push one memory slot's physical range to the device: write the range
 * into the shared RAM header, then issue the memslot-add io command so
 * the device picks it up.  The two header stores must happen before
 * qxl_io_memslot_add() is called.
 */
static void setup_hw_slot(struct qxl_device *qdev, int slot_index,
			  struct qxl_memslot *slot)
{
	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
	qdev->ram_header->mem_slot.mem_end = slot->end_phys_addr;
	qxl_io_memslot_add(qdev, slot_index);
}
88
89 static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
90 unsigned long start_phys_addr, unsigned long end_phys_addr)
91 {
92 uint64_t high_bits;
93 struct qxl_memslot *slot;
94 uint8_t slot_index;
95
96 slot_index = qdev->rom->slots_start + slot_index_offset;
97 slot = &qdev->mem_slots[slot_index];
98 slot->start_phys_addr = start_phys_addr;
99 slot->end_phys_addr = end_phys_addr;
100
101 setup_hw_slot(qdev, slot_index, slot);
102
103 slot->generation = qdev->rom->slot_generation;
104 high_bits = slot_index << qdev->slot_gen_bits;
105 high_bits |= slot->generation;
106 high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
107 slot->high_bits = high_bits;
108 return slot_index;
109 }
110
111 void qxl_reinit_memslots(struct qxl_device *qdev)
112 {
113 setup_hw_slot(qdev, qdev->main_mem_slot, &qdev->mem_slots[qdev->main_mem_slot]);
114 setup_hw_slot(qdev, qdev->surfaces_mem_slot, &qdev->mem_slots[qdev->surfaces_mem_slot]);
115 }
116
117 static void qxl_gc_work(struct work_struct *work)
118 {
119 struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
120 qxl_garbage_collect(qdev);
121 }
122
123 static int qxl_device_init(struct qxl_device *qdev,
124 struct drm_device *ddev,
125 struct pci_dev *pdev,
126 unsigned long flags)
127 {
128 int r, sb;
129
130 qdev->dev = &pdev->dev;
131 qdev->ddev = ddev;
132 qdev->pdev = pdev;
133 qdev->flags = flags;
134
135 mutex_init(&qdev->gem.mutex);
136 mutex_init(&qdev->update_area_mutex);
137 mutex_init(&qdev->release_mutex);
138 mutex_init(&qdev->surf_evict_mutex);
139 INIT_LIST_HEAD(&qdev->gem.objects);
140
141 qdev->rom_base = pci_resource_start(pdev, 2);
142 qdev->rom_size = pci_resource_len(pdev, 2);
143 qdev->vram_base = pci_resource_start(pdev, 0);
144 qdev->io_base = pci_resource_start(pdev, 3);
145
146 qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
147
148 if (pci_resource_len(pdev, 4) > 0) {
149 /* 64bit surface bar present */
150 sb = 4;
151 qdev->surfaceram_base = pci_resource_start(pdev, sb);
152 qdev->surfaceram_size = pci_resource_len(pdev, sb);
153 qdev->surface_mapping =
154 io_mapping_create_wc(qdev->surfaceram_base,
155 qdev->surfaceram_size);
156 }
157 if (qdev->surface_mapping == NULL) {
158 /* 64bit surface bar not present (or mapping failed) */
159 sb = 1;
160 qdev->surfaceram_base = pci_resource_start(pdev, sb);
161 qdev->surfaceram_size = pci_resource_len(pdev, sb);
162 qdev->surface_mapping =
163 io_mapping_create_wc(qdev->surfaceram_base,
164 qdev->surfaceram_size);
165 }
166
167 DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
168 (unsigned long long)qdev->vram_base,
169 (unsigned long long)pci_resource_end(pdev, 0),
170 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
171 (int)pci_resource_len(pdev, 0) / 1024,
172 (unsigned long long)qdev->surfaceram_base,
173 (unsigned long long)pci_resource_end(pdev, sb),
174 (int)qdev->surfaceram_size / 1024 / 1024,
175 (int)qdev->surfaceram_size / 1024,
176 (sb == 4) ? "64bit" : "32bit");
177
178 qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
179 if (!qdev->rom) {
180 pr_err("Unable to ioremap ROM\n");
181 return -ENOMEM;
182 }
183
184 qxl_check_device(qdev);
185
186 r = qxl_bo_init(qdev);
187 if (r) {
188 DRM_ERROR("bo init failed %d\n", r);
189 return r;
190 }
191
192 qdev->ram_header = ioremap(qdev->vram_base +
193 qdev->rom->ram_header_offset,
194 sizeof(*qdev->ram_header));
195
196 qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
197 sizeof(struct qxl_command),
198 QXL_COMMAND_RING_SIZE,
199 qdev->io_base + QXL_IO_NOTIFY_CMD,
200 false,
201 &qdev->display_event);
202
203 qdev->cursor_ring = qxl_ring_create(
204 &(qdev->ram_header->cursor_ring_hdr),
205 sizeof(struct qxl_command),
206 QXL_CURSOR_RING_SIZE,
207 qdev->io_base + QXL_IO_NOTIFY_CMD,
208 false,
209 &qdev->cursor_event);
210
211 qdev->release_ring = qxl_ring_create(
212 &(qdev->ram_header->release_ring_hdr),
213 sizeof(uint64_t),
214 QXL_RELEASE_RING_SIZE, 0, true,
215 NULL);
216
217 /* TODO - slot initialization should happen on reset. where is our
218 * reset handler? */
219 qdev->n_mem_slots = qdev->rom->slots_end;
220 qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
221 qdev->slot_id_bits = qdev->rom->slot_id_bits;
222 qdev->va_slot_mask =
223 (~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
224
225 qdev->mem_slots =
226 kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
227 GFP_KERNEL);
228
229 idr_init(&qdev->release_idr);
230 spin_lock_init(&qdev->release_idr_lock);
231 spin_lock_init(&qdev->release_lock);
232
233 idr_init(&qdev->surf_id_idr);
234 spin_lock_init(&qdev->surf_id_idr_lock);
235
236 mutex_init(&qdev->async_io_mutex);
237
238 /* reset the device into a known state - no memslots, no primary
239 * created, no surfaces. */
240 qxl_io_reset(qdev);
241
242 /* must initialize irq before first async io - slot creation */
243 r = qxl_irq_init(qdev);
244 if (r)
245 return r;
246
247 /*
248 * Note that virtual is surface0. We rely on the single ioremap done
249 * before.
250 */
251 qdev->main_mem_slot = setup_slot(qdev, 0,
252 (unsigned long)qdev->vram_base,
253 (unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
254 qdev->surfaces_mem_slot = setup_slot(qdev, 1,
255 (unsigned long)qdev->surfaceram_base,
256 (unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
257 DRM_INFO("main mem slot %d [%lx,%x]\n",
258 qdev->main_mem_slot,
259 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
260 DRM_INFO("surface mem slot %d [%lx,%lx]\n",
261 qdev->surfaces_mem_slot,
262 (unsigned long)qdev->surfaceram_base,
263 (unsigned long)qdev->surfaceram_size);
264
265
266 qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
267 INIT_WORK(&qdev->gc_work, qxl_gc_work);
268
269 r = qxl_fb_init(qdev);
270 if (r)
271 return r;
272
273 return 0;
274 }
275
/*
 * Tear down everything qxl_device_init() set up, in reverse dependency
 * order: pending release BOs first, then the GC workqueue (flushed so no
 * work runs after the rings go away), then rings, BO/TTM state, the io
 * mappings, and finally the ROM/RAM-header ioremaps.
 */
static void qxl_device_fini(struct qxl_device *qdev)
{
	/* Drop any in-flight release BOs before stopping the GC machinery. */
	if (qdev->current_release_bo[0])
		qxl_bo_unref(&qdev->current_release_bo[0]);
	if (qdev->current_release_bo[1])
		qxl_bo_unref(&qdev->current_release_bo[1]);
	/* Flush before destroy so queued qxl_gc_work cannot touch freed
	 * rings below. */
	flush_workqueue(qdev->gc_queue);
	destroy_workqueue(qdev->gc_queue);
	qdev->gc_queue = NULL;

	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	qxl_bo_fini(qdev);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	/* mode_info.modes pointed into the ROM mapping just unmapped. */
	qdev->rom = NULL;
	qdev->mode_info.modes = NULL;
	qdev->mode_info.num_modes = 0;
	qxl_debugfs_remove_files(qdev);
}
299
300 int qxl_driver_unload(struct drm_device *dev)
301 {
302 struct qxl_device *qdev = dev->dev_private;
303
304 if (qdev == NULL)
305 return 0;
306
307 drm_vblank_cleanup(dev);
308
309 qxl_modeset_fini(qdev);
310 qxl_device_fini(qdev);
311
312 kfree(qdev);
313 dev->dev_private = NULL;
314 return 0;
315 }
316
317 int qxl_driver_load(struct drm_device *dev, unsigned long flags)
318 {
319 struct qxl_device *qdev;
320 int r;
321
322 /* require kms */
323 if (!drm_core_check_feature(dev, DRIVER_MODESET))
324 return -ENODEV;
325
326 qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
327 if (qdev == NULL)
328 return -ENOMEM;
329
330 dev->dev_private = qdev;
331
332 r = qxl_device_init(qdev, dev, dev->pdev, flags);
333 if (r)
334 goto out;
335
336 r = drm_vblank_init(dev, 1);
337 if (r)
338 goto unload;
339
340 r = qxl_modeset_init(qdev);
341 if (r)
342 goto unload;
343
344 drm_kms_helper_poll_init(qdev->ddev);
345
346 return 0;
347 unload:
348 qxl_driver_unload(dev);
349
350 out:
351 kfree(qdev);
352 return r;
353 }
354
355
356