drm_bufs.c revision 1.1.1.1.2.9
1 /**
2 * \file drm_bufs.c
3 * Generic buffer template
4 *
5 * \author Rickard E. (Rik) Faith <faith (at) valinux.com>
6 * \author Gareth Hughes <gareth (at) valinux.com>
7 */
8
9 /*
10 * Created: Thu Nov 23 03:10:50 2000 by gareth (at) valinux.com
11 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36 #include <linux/vmalloc.h>
37 #include <linux/slab.h>
38 #include <linux/sched.h>
39 #include <linux/log2.h>
40 #include <linux/export.h>
41 #include <linux/mm.h>
42 #include <asm/mtrr.h>
43 #include <asm/shmparam.h>
44 #include <drm/drmP.h>
45
46 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
47 struct drm_local_map *map)
48 {
49 struct drm_map_list *entry;
50 list_for_each_entry(entry, &dev->maplist, head) {
51 /*
52 * Because the kernel-userspace ABI is fixed at a 32-bit offset
53 * while PCI resources may live above that, we only compare the
54 * lower 32 bits of the map offset for maps of type
55 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
56 * It is assumed that if a driver has more than one resource
57 * of each type, the lower 32 bits are different.
58 */
59 if (!entry->map ||
60 map->type != entry->map->type ||
61 entry->master != dev->primary->master)
62 continue;
63 switch (map->type) {
64 case _DRM_SHM:
65 if (map->flags != _DRM_CONTAINS_LOCK)
66 break;
67 return entry;
68 case _DRM_REGISTERS:
69 case _DRM_FRAME_BUFFER:
70 if ((entry->map->offset & 0xffffffff) ==
71 (map->offset & 0xffffffff))
72 return entry;
73 default: /* Make gcc happy */
74 ;
75 }
76 if (entry->map->offset == map->offset)
77 return entry;
78 }
79
80 return NULL;
81 }
82
83 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
84 unsigned long user_token, int hashed_handle, int shm)
85 {
86 int use_hashed_handle, shift;
87 unsigned long add;
88
89 use_hashed_handle = (user_token &~ 0xffffffffUL) || hashed_handle;
90 if (!use_hashed_handle) {
91 int ret;
92 hash->key = user_token >> PAGE_SHIFT;
93 ret = drm_ht_insert_item(&dev->map_hash, hash);
94 if (ret != -EINVAL)
95 return ret;
96 }
97
98 shift = 0;
99 add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
100 if (shm && (SHMLBA > PAGE_SIZE)) {
101 int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
102
103 /* For shared memory, we have to preserve the SHMLBA
104 * bits of the eventual vma->vm_pgoff value during
105 * mmap(). Otherwise we run into cache aliasing problems
106 * on some platforms. On these platforms, the pgoff of
107 * a mmap() request is used to pick a suitable virtual
108 * address for the mmap() region such that it will not
109 * cause cache aliasing problems.
110 *
111 * Therefore, make sure the SHMLBA relevant bits of the
112 * hash value we use are equal to those in the original
113 * kernel virtual address.
114 */
115 shift = bits;
116 add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
117 }
118
119 return drm_ht_just_insert_please(&dev->map_hash, hash,
120 user_token, 32 - PAGE_SHIFT - 3,
121 shift, add);
122 }
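/*
 * A worked example of the _DRM_SHM case above (illustrative only; the
 * SHMLBA and PAGE_SIZE values are assumptions, not tied to a specific
 * platform): with SHMLBA = 0x4000 and PAGE_SIZE = 0x1000,
 *
 *	bits  = ilog2(0x4000 >> 12) + 1 = 3
 *	shift = 3
 *	add   = (DRM_MAP_HASH_OFFSET >> PAGE_SHIFT)
 *	      | ((user_token >> PAGE_SHIFT) & 0x7)
 *
 * so the low three page-offset bits of the kernel virtual address are
 * carried into the handle that userspace later passes to mmap(), which
 * is what keeps the kernel and user mappings from aliasing in the cache.
 */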
123
124 /**
125 * Core function to create a range of memory available for mapping by a
126 * non-root process.
127 *
128 * Adjusts the memory offset to its absolute value according to the mapping
129 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
130 * applicable, if supported by the kernel.
131 */
132 static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
133 unsigned int size, enum drm_map_type type,
134 enum drm_map_flags flags,
135 struct drm_map_list ** maplist)
136 {
137 struct drm_local_map *map;
138 struct drm_map_list *list;
139 drm_dma_handle_t *dmah;
140 unsigned long user_token;
141 int ret;
142
143 map = kmalloc(sizeof(*map), GFP_KERNEL);
144 if (!map)
145 return -ENOMEM;
146
147 map->offset = offset;
148 map->size = size;
149 map->flags = flags;
150 map->type = type;
151
152 /* Only allow shared memory to be removable since we only keep enough
153 * bookkeeping information about shared memory to allow for removal
154 * when processes fork.
155 */
156 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
157 kfree(map);
158 return -EINVAL;
159 }
160 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
161 (unsigned long long)map->offset, map->size, map->type);
162
163 /* Page-align _DRM_SHM maps. They are allocated here, so there is no security
164 * hole created by that, and it works around various broken drivers that use
165 * a non-aligned quantity to map the SAREA. --BenH
166 */
167 if (map->type == _DRM_SHM)
168 map->size = PAGE_ALIGN(map->size);
169
170 if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
171 kfree(map);
172 return -EINVAL;
173 }
174 map->mtrr = -1;
175 map->handle = NULL;
176
177 switch (map->type) {
178 case _DRM_REGISTERS:
179 case _DRM_FRAME_BUFFER:
180 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
181 if (map->offset + (map->size-1) < map->offset ||
182 map->offset < virt_to_phys(high_memory)) {
183 kfree(map);
184 return -EINVAL;
185 }
186 #endif
187 /* Some drivers preinitialize some maps, without the X Server
188 * needing to be aware of it. Therefore, we just return success
189 * when the server tries to create a duplicate map.
190 */
191 list = drm_find_matching_map(dev, map);
192 if (list != NULL) {
193 if (list->map->size != map->size) {
194 DRM_DEBUG("Matching maps of type %d with "
195 "mismatched sizes, (%ld vs %ld)\n",
196 map->type, map->size,
197 list->map->size);
198 list->map->size = map->size;
199 }
200
201 kfree(map);
202 *maplist = list;
203 return 0;
204 }
205
206 if (drm_core_has_MTRR(dev)) {
207 if (map->type == _DRM_FRAME_BUFFER ||
208 (map->flags & _DRM_WRITE_COMBINING)) {
209 map->mtrr = mtrr_add(map->offset, map->size,
210 MTRR_TYPE_WRCOMB, 1);
211 }
212 }
213 if (map->type == _DRM_REGISTERS) {
214 #ifdef __NetBSD__
215 map->handle = drm_ioremap(dev, map);
216 #else
217 map->handle = ioremap(map->offset, map->size);
218 #endif
219 if (!map->handle) {
220 kfree(map);
221 return -ENOMEM;
222 }
223 }
224
225 break;
226 case _DRM_SHM:
227 list = drm_find_matching_map(dev, map);
228 if (list != NULL) {
229 if(list->map->size != map->size) {
230 DRM_DEBUG("Matching maps of type %d with "
231 "mismatched sizes, (%ld vs %ld)\n",
232 map->type, map->size, list->map->size);
233 list->map->size = map->size;
234 }
235
236 kfree(map);
237 *maplist = list;
238 return 0;
239 }
240 map->handle = vmalloc_user(map->size);
241 DRM_DEBUG("%lu %d %p\n",
242 map->size, drm_order(map->size), map->handle);
243 if (!map->handle) {
244 kfree(map);
245 return -ENOMEM;
246 }
247 map->offset = (unsigned long)map->handle;
248 if (map->flags & _DRM_CONTAINS_LOCK) {
249 /* Prevent a 2nd X Server from creating a 2nd lock */
250 if (dev->primary->master->lock.hw_lock != NULL) {
251 vfree(map->handle);
252 kfree(map);
253 return -EBUSY;
254 }
255 dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
256 }
257 break;
258 case _DRM_AGP: {
259 struct drm_agp_mem *entry;
260 int valid = 0;
261
262 if (!drm_core_has_AGP(dev)) {
263 kfree(map);
264 return -EINVAL;
265 }
266 #ifdef __alpha__
267 map->offset += dev->hose->mem_space->start;
268 #endif
269 /* In some cases (i810 driver), user space may have already
270 * added the AGP base itself, because dev->agp->base previously
271 * only got set during AGP enable. So, only add the base
272 * address if the map's offset isn't already within the
273 * aperture.
274 */
275 #ifdef __NetBSD__
276 if (map->offset < dev->agp->base ||
277 map->offset > dev->agp->base +
278 dev->agp->agp_info.ai_aperture_size - 1) {
279 map->offset += dev->agp->base;
280 }
281 #else
282 if (map->offset < dev->agp->base ||
283 map->offset > dev->agp->base +
284 dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
285 map->offset += dev->agp->base;
286 }
287 #endif
288 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
289
290 /* This assumes the DRM is in total control of AGP space.
291 * That's not always the case, as AGP can be under the control
292 * of user space (e.g. the i810 driver). In that case this loop is
293 * skipped, so we double-check that dev->agp->memory is actually
294 * non-empty, as well as the offset being invalid, before EPERM'ing.
295 */
296 list_for_each_entry(entry, &dev->agp->memory, head) {
297 if ((map->offset >= entry->bound) &&
298 (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
299 valid = 1;
300 break;
301 }
302 }
303 if (!list_empty(&dev->agp->memory) && !valid) {
304 kfree(map);
305 return -EPERM;
306 }
307 DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
308 (unsigned long long)map->offset, map->size);
309
310 break;
311 }
312 case _DRM_GEM:
313 DRM_ERROR("tried to addmap GEM object\n");
314 break;
315 case _DRM_SCATTER_GATHER:
316 if (!dev->sg) {
317 kfree(map);
318 return -EINVAL;
319 }
320 map->offset += (unsigned long)dev->sg->virtual;
321 break;
322 case _DRM_CONSISTENT:
323 /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
324 * As we're limiting the address to 2^32-1 (or less),
325 * casting it down to 32 bits is no problem, but we
326 * need to point to a 64-bit variable first. */
327 dmah = drm_pci_alloc(dev, map->size, map->size);
328 if (!dmah) {
329 kfree(map);
330 return -ENOMEM;
331 }
332 map->handle = dmah->vaddr;
333 map->offset = (unsigned long)dmah->busaddr;
334 kfree(dmah);
335 break;
336 default:
337 kfree(map);
338 return -EINVAL;
339 }
340
341 list = kzalloc(sizeof(*list), GFP_KERNEL);
342 if (!list) {
343 if (map->type == _DRM_REGISTERS)
344 #ifdef __NetBSD__
345 drm_iounmap(dev, map);
346 #else
347 iounmap(map->handle);
348 #endif
349 kfree(map);
350 return -EINVAL;
351 }
352 list->map = map;
353
354 mutex_lock(&dev->struct_mutex);
355 list_add(&list->head, &dev->maplist);
356
357 /* Assign a 32-bit handle */
358 /* We do it here so that dev->struct_mutex protects the increment */
359 user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
360 map->offset;
361 ret = drm_map_handle(dev, &list->hash, user_token, 0,
362 (map->type == _DRM_SHM));
363 if (ret) {
364 if (map->type == _DRM_REGISTERS)
365 #ifdef __NetBSD__
366 drm_iounmap(dev, map);
367 #else
368 iounmap(map->handle);
369 #endif
370 kfree(map);
371 kfree(list);
372 mutex_unlock(&dev->struct_mutex);
373 return ret;
374 }
375
376 list->user_token = list->hash.key << PAGE_SHIFT;
377 mutex_unlock(&dev->struct_mutex);
378
379 if (!(map->flags & _DRM_DRIVER))
380 list->master = dev->primary->master;
381 *maplist = list;
382 return 0;
383 }
384
385 int drm_addmap(struct drm_device * dev, resource_size_t offset,
386 unsigned int size, enum drm_map_type type,
387 enum drm_map_flags flags, struct drm_local_map ** map_ptr)
388 {
389 struct drm_map_list *list;
390 int rc;
391
392 rc = drm_addmap_core(dev, offset, size, type, flags, &list);
393 if (!rc)
394 *map_ptr = list->map;
395 return rc;
396 }
397
398 EXPORT_SYMBOL(drm_addmap);
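/*
 * Minimal driver-side usage sketch (illustrative; the BAR index and the
 * _DRM_READ_ONLY flag are assumptions, not taken from a particular
 * driver):
 *
 *	struct drm_local_map *map;
 *	int ret;
 *
 *	ret = drm_addmap(dev, pci_resource_start(dev->pdev, 0),
 *			 pci_resource_len(dev->pdev, 0),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &map);
 *	if (ret)
 *		return ret;
 *
 * On success the register BAR has been ioremapped and map->handle can be
 * used for MMIO; drm_rmmap() tears the mapping down again.
 */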
399
400 /**
401 * Ioctl to specify a range of memory that is available for mapping by a
402 * non-root process.
403 *
404 * \param inode device inode.
405 * \param file_priv DRM file private.
406 * \param cmd command.
407 * \param arg pointer to a drm_map structure.
408 * \return zero on success or a negative value on error.
409 *
410 */
411 int drm_addmap_ioctl(struct drm_device *dev, void *data,
412 struct drm_file *file_priv)
413 {
414 struct drm_map *map = data;
415 struct drm_map_list *maplist;
416 int err;
417
418 #ifdef __NetBSD__
419 # if 0 /* XXX Old drm did this. */
420 if (!(dev->flags & (FREAD | FWRITE)))
421 return -EACCES;
422 # endif
423 if (!(DRM_SUSER() || map->type == _DRM_AGP || map->type == _DRM_SHM))
424 return -EACCES; /* XXX */
425 #else
426 if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
427 return -EPERM;
428 #endif
429
430 err = drm_addmap_core(dev, map->offset, map->size, map->type,
431 map->flags, &maplist);
432
433 if (err)
434 return err;
435
436 /* Avoid a warning on 64-bit; this cast isn't very nice, but the API is already set, so it's too late to change. */
437 map->handle = (void *)(unsigned long)maplist->user_token;
438 return 0;
439 }
440
441 /**
442 * Remove a map from the list and deallocate its resources if the mapping
443 * isn't in use.
444 *
445 * Searches for the map on drm_device::maplist, removes it from the list, checks
446 * whether it is still being used, and frees any associated resources (such as
447 * MTRRs) if it is not in use.
448 *
449 * \sa drm_addmap
450 */
451 int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
452 {
453 struct drm_map_list *r_list = NULL, *list_t;
454 drm_dma_handle_t dmah;
455 int found = 0;
456 struct drm_master *master;
457
458 /* Find the list entry for the map and remove it */
459 list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
460 if (r_list->map == map) {
461 master = r_list->master;
462 list_del(&r_list->head);
463 drm_ht_remove_key(&dev->map_hash,
464 r_list->user_token >> PAGE_SHIFT);
465 kfree(r_list);
466 found = 1;
467 break;
468 }
469 }
470
471 if (!found)
472 return -EINVAL;
473
474 switch (map->type) {
475 case _DRM_REGISTERS:
476 #ifdef __NetBSD__
477 drm_iounmap(dev, map);
478 #else
479 iounmap(map->handle);
480 #endif
481 /* FALLTHROUGH */
482 case _DRM_FRAME_BUFFER:
483 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
484 int retcode;
485 retcode = mtrr_del(map->mtrr, map->offset, map->size);
486 DRM_DEBUG("mtrr_del=%d\n", retcode);
487 }
488 break;
489 case _DRM_SHM:
490 vfree(map->handle);
491 if (master) {
492 if (dev->sigdata.lock == master->lock.hw_lock)
493 dev->sigdata.lock = NULL;
494 master->lock.hw_lock = NULL; /* SHM removed */
495 master->lock.file_priv = NULL;
496 #ifdef __NetBSD__
497 DRM_WAKEUP_ALL(&master->lock.lock_queue,
498 &drm_global_mutex);
499 #else
500 wake_up_interruptible_all(&master->lock.lock_queue);
501 #endif
502 }
503 break;
504 case _DRM_AGP:
505 case _DRM_SCATTER_GATHER:
506 break;
507 case _DRM_CONSISTENT:
508 dmah.vaddr = map->handle;
509 dmah.busaddr = map->offset;
510 dmah.size = map->size;
511 __drm_pci_free(dev, &dmah);
512 break;
513 case _DRM_GEM:
514 DRM_ERROR("tried to rmmap GEM object\n");
515 break;
516 }
517 kfree(map);
518
519 return 0;
520 }
521 EXPORT_SYMBOL(drm_rmmap_locked);
522
523 int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
524 {
525 int ret;
526
527 mutex_lock(&dev->struct_mutex);
528 ret = drm_rmmap_locked(dev, map);
529 mutex_unlock(&dev->struct_mutex);
530
531 return ret;
532 }
533 EXPORT_SYMBOL(drm_rmmap);
534
535 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
536 * the last close of the device, and this is necessary for cleanup when things
537 * exit uncleanly. Therefore, having userland manually remove mappings seems
538 * like a pointless exercise since they're going away anyway.
539 *
540 * One use case might be after addmap is allowed for normal users for SHM and
541 * gets used by drivers that the server doesn't need to care about. This seems
542 * unlikely.
543 *
544 * \param inode device inode.
545 * \param file_priv DRM file private.
546 * \param cmd command.
547 * \param arg pointer to a struct drm_map structure.
548 * \return zero on success or a negative value on error.
549 */
550 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
551 struct drm_file *file_priv)
552 {
553 struct drm_map *request = data;
554 struct drm_local_map *map = NULL;
555 struct drm_map_list *r_list;
556 int ret;
557
558 mutex_lock(&dev->struct_mutex);
559 list_for_each_entry(r_list, &dev->maplist, head) {
560 if (r_list->map &&
561 r_list->user_token == (unsigned long)request->handle &&
562 r_list->map->flags & _DRM_REMOVABLE) {
563 map = r_list->map;
564 break;
565 }
566 }
567
568 /* List has wrapped around to the head pointer, or it's empty and we didn't
569 * find anything.
570 */
571 if (list_empty(&dev->maplist) || !map) {
572 mutex_unlock(&dev->struct_mutex);
573 return -EINVAL;
574 }
575
576 /* Register and framebuffer maps are permanent */
577 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
578 mutex_unlock(&dev->struct_mutex);
579 return 0;
580 }
581
582 ret = drm_rmmap_locked(dev, map);
583
584 mutex_unlock(&dev->struct_mutex);
585
586 return ret;
587 }
588
589 /**
590 * Cleanup after an error on one of the addbufs() functions.
591 *
592 * \param dev DRM device.
593 * \param entry buffer entry where the error occurred.
594 *
595 * Frees any pages and buffers associated with the given entry.
596 */
597 static void drm_cleanup_buf_error(struct drm_device * dev,
598 struct drm_buf_entry * entry)
599 {
600 int i;
601
602 if (entry->seg_count) {
603 for (i = 0; i < entry->seg_count; i++) {
604 if (entry->seglist[i]) {
605 drm_pci_free(dev, entry->seglist[i]);
606 }
607 }
608 kfree(entry->seglist);
609
610 entry->seg_count = 0;
611 }
612
613 if (entry->buf_count) {
614 for (i = 0; i < entry->buf_count; i++) {
615 kfree(entry->buflist[i].dev_private);
616 }
617 kfree(entry->buflist);
618
619 entry->buf_count = 0;
620 }
621 }
622
623 #if __OS_HAS_AGP
624 /**
625 * Add AGP buffers for DMA transfers.
626 *
627 * \param dev struct drm_device to which the buffers are to be added.
628 * \param request pointer to a struct drm_buf_desc describing the request.
629 * \return zero on success or a negative number on failure.
630 *
631 * After some sanity checks, creates a drm_buf structure for each buffer and
632 * reallocates the buffer list for the same size order to accommodate the new
633 * buffers.
634 */
635 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
636 {
637 struct drm_device_dma *dma = dev->dma;
638 struct drm_buf_entry *entry;
639 struct drm_agp_mem *agp_entry;
640 struct drm_buf *buf;
641 unsigned long offset;
642 unsigned long agp_offset;
643 int count;
644 int order;
645 int size;
646 int alignment;
647 int page_order;
648 int total;
649 int byte_count;
650 int i, valid;
651 struct drm_buf **temp_buflist;
652
653 if (!dma)
654 return -EINVAL;
655
656 count = request->count;
657 order = drm_order(request->size);
658 size = 1 << order;
659
660 alignment = (request->flags & _DRM_PAGE_ALIGN)
661 ? PAGE_ALIGN(size) : size;
662 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
663 total = PAGE_SIZE << page_order;
664
665 byte_count = 0;
666 agp_offset = dev->agp->base + request->agp_start;
667
668 DRM_DEBUG("count: %d\n", count);
669 DRM_DEBUG("order: %d\n", order);
670 DRM_DEBUG("size: %d\n", size);
671 DRM_DEBUG("agp_offset: %lx\n", agp_offset);
672 DRM_DEBUG("alignment: %d\n", alignment);
673 DRM_DEBUG("page_order: %d\n", page_order);
674 DRM_DEBUG("total: %d\n", total);
675
676 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
677 return -EINVAL;
678
679 /* Make sure buffers are located in AGP memory that we own */
680 valid = 0;
681 list_for_each_entry(agp_entry, &dev->agp->memory, head) {
682 if ((agp_offset >= agp_entry->bound) &&
683 (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
684 valid = 1;
685 break;
686 }
687 }
688 if (!list_empty(&dev->agp->memory) && !valid) {
689 DRM_DEBUG("zone invalid\n");
690 return -EINVAL;
691 }
692 spin_lock(&dev->count_lock);
693 if (dev->buf_use) {
694 spin_unlock(&dev->count_lock);
695 return -EBUSY;
696 }
697 atomic_inc(&dev->buf_alloc);
698 spin_unlock(&dev->count_lock);
699
700 mutex_lock(&dev->struct_mutex);
701 entry = &dma->bufs[order];
702 if (entry->buf_count) {
703 mutex_unlock(&dev->struct_mutex);
704 atomic_dec(&dev->buf_alloc);
705 return -ENOMEM; /* May only call once for each order */
706 }
707
708 if (count < 0 || count > 4096) {
709 mutex_unlock(&dev->struct_mutex);
710 atomic_dec(&dev->buf_alloc);
711 return -EINVAL;
712 }
713
714 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
715 if (!entry->buflist) {
716 mutex_unlock(&dev->struct_mutex);
717 atomic_dec(&dev->buf_alloc);
718 return -ENOMEM;
719 }
720
721 entry->buf_size = size;
722 entry->page_order = page_order;
723
724 offset = 0;
725
726 while (entry->buf_count < count) {
727 buf = &entry->buflist[entry->buf_count];
728 buf->idx = dma->buf_count + entry->buf_count;
729 buf->total = alignment;
730 buf->order = order;
731 buf->used = 0;
732
733 buf->offset = (dma->byte_count + offset);
734 buf->bus_address = agp_offset + offset;
735 buf->address = (void *)(agp_offset + offset);
736 buf->next = NULL;
737 buf->waiting = 0;
738 buf->pending = 0;
739 buf->file_priv = NULL;
740
741 buf->dev_priv_size = dev->driver->dev_priv_size;
742 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
743 if (!buf->dev_private) {
744 /* Set count correctly so we free the proper amount. */
745 entry->buf_count = count;
746 drm_cleanup_buf_error(dev, entry);
747 mutex_unlock(&dev->struct_mutex);
748 atomic_dec(&dev->buf_alloc);
749 return -ENOMEM;
750 }
751
752 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
753
754 offset += alignment;
755 entry->buf_count++;
756 byte_count += PAGE_SIZE << page_order;
757 }
758
759 DRM_DEBUG("byte_count: %d\n", byte_count);
760
761 temp_buflist = krealloc(dma->buflist,
762 (dma->buf_count + entry->buf_count) *
763 sizeof(*dma->buflist), GFP_KERNEL);
764 if (!temp_buflist) {
765 /* Free the entry because it isn't valid */
766 drm_cleanup_buf_error(dev, entry);
767 mutex_unlock(&dev->struct_mutex);
768 atomic_dec(&dev->buf_alloc);
769 return -ENOMEM;
770 }
771 dma->buflist = temp_buflist;
772
773 for (i = 0; i < entry->buf_count; i++) {
774 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
775 }
776
777 dma->buf_count += entry->buf_count;
778 dma->seg_count += entry->seg_count;
779 dma->page_count += byte_count >> PAGE_SHIFT;
780 dma->byte_count += byte_count;
781
782 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
783 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
784
785 mutex_unlock(&dev->struct_mutex);
786
787 request->count = entry->buf_count;
788 request->size = size;
789
790 dma->flags = _DRM_DMA_USE_AGP;
791
792 atomic_dec(&dev->buf_alloc);
793 return 0;
794 }
795 EXPORT_SYMBOL(drm_addbufs_agp);
796 #endif /* __OS_HAS_AGP */
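/*
 * The size/alignment arithmetic shared by the addbufs variants, worked
 * through for one assumed request (PAGE_SIZE = 4096, request->size =
 * 65536, _DRM_PAGE_ALIGN set):
 *
 *	order      = drm_order(65536) = 16
 *	size       = 1 << 16 = 65536 bytes per buffer
 *	alignment  = PAGE_ALIGN(65536) = 65536
 *	page_order = 16 - 12 = 4
 *	total      = PAGE_SIZE << 4 = 65536 bytes per segment
 *
 * Each buffer then advances offset by alignment and byte_count by
 * PAGE_SIZE << page_order, so the buffers end up laid out back to back
 * starting at agp_offset.
 */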
797
798 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
799 {
800 struct drm_device_dma *dma = dev->dma;
801 int count;
802 int order;
803 int size;
804 int total;
805 int page_order;
806 struct drm_buf_entry *entry;
807 drm_dma_handle_t *dmah;
808 struct drm_buf *buf;
809 int alignment;
810 unsigned long offset;
811 int i;
812 int byte_count;
813 int page_count;
814 unsigned long *temp_pagelist;
815 struct drm_buf **temp_buflist;
816
817 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
818 return -EINVAL;
819
820 if (!dma)
821 return -EINVAL;
822
823 #ifdef __NetBSD__
824 if (!DRM_SUSER())
825 return -EACCES; /* XXX */
826 #else
827 if (!capable(CAP_SYS_ADMIN))
828 return -EPERM;
829 #endif
830
831 count = request->count;
832 order = drm_order(request->size);
833 size = 1 << order;
834
835 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
836 request->count, request->size, size, order);
837
838 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
839 return -EINVAL;
840
841 alignment = (request->flags & _DRM_PAGE_ALIGN)
842 ? PAGE_ALIGN(size) : size;
843 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
844 total = PAGE_SIZE << page_order;
845
846 spin_lock(&dev->count_lock);
847 if (dev->buf_use) {
848 spin_unlock(&dev->count_lock);
849 return -EBUSY;
850 }
851 atomic_inc(&dev->buf_alloc);
852 spin_unlock(&dev->count_lock);
853
854 mutex_lock(&dev->struct_mutex);
855 entry = &dma->bufs[order];
856 if (entry->buf_count) {
857 mutex_unlock(&dev->struct_mutex);
858 atomic_dec(&dev->buf_alloc);
859 return -ENOMEM; /* May only call once for each order */
860 }
861
862 if (count < 0 || count > 4096) {
863 mutex_unlock(&dev->struct_mutex);
864 atomic_dec(&dev->buf_alloc);
865 return -EINVAL;
866 }
867
868 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
869 if (!entry->buflist) {
870 mutex_unlock(&dev->struct_mutex);
871 atomic_dec(&dev->buf_alloc);
872 return -ENOMEM;
873 }
874
875 entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
876 if (!entry->seglist) {
877 kfree(entry->buflist);
878 mutex_unlock(&dev->struct_mutex);
879 atomic_dec(&dev->buf_alloc);
880 return -ENOMEM;
881 }
882
883 /* Keep the original pagelist until we know all the allocations
884 * have succeeded
885 */
886 temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
887 sizeof(*dma->pagelist), GFP_KERNEL);
888 if (!temp_pagelist) {
889 kfree(entry->buflist);
890 kfree(entry->seglist);
891 mutex_unlock(&dev->struct_mutex);
892 atomic_dec(&dev->buf_alloc);
893 return -ENOMEM;
894 }
895 memcpy(temp_pagelist,
896 dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
897 DRM_DEBUG("pagelist: %d entries\n",
898 dma->page_count + (count << page_order));
899
900 entry->buf_size = size;
901 entry->page_order = page_order;
902 byte_count = 0;
903 page_count = 0;
904
905 while (entry->buf_count < count) {
906
907 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
908
909 if (!dmah) {
910 /* Set count correctly so we free the proper amount. */
911 entry->buf_count = count;
912 entry->seg_count = count;
913 drm_cleanup_buf_error(dev, entry);
914 kfree(temp_pagelist);
915 mutex_unlock(&dev->struct_mutex);
916 atomic_dec(&dev->buf_alloc);
917 return -ENOMEM;
918 }
919 entry->seglist[entry->seg_count++] = dmah;
920 for (i = 0; i < (1 << page_order); i++) {
921 DRM_DEBUG("page %d @ 0x%08lx\n",
922 dma->page_count + page_count,
923 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
924 temp_pagelist[dma->page_count + page_count++]
925 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
926 }
927 for (offset = 0;
928 offset + size <= total && entry->buf_count < count;
929 offset += alignment, ++entry->buf_count) {
930 buf = &entry->buflist[entry->buf_count];
931 buf->idx = dma->buf_count + entry->buf_count;
932 buf->total = alignment;
933 buf->order = order;
934 buf->used = 0;
935 buf->offset = (dma->byte_count + byte_count + offset);
936 #ifdef __NetBSD__
937 buf->address = (void *)((char *)dmah->vaddr + offset);
938 #else
939 buf->address = (void *)(dmah->vaddr + offset);
940 #endif
941 buf->bus_address = dmah->busaddr + offset;
942 buf->next = NULL;
943 buf->waiting = 0;
944 buf->pending = 0;
945 buf->file_priv = NULL;
946
947 buf->dev_priv_size = dev->driver->dev_priv_size;
948 buf->dev_private = kzalloc(buf->dev_priv_size,
949 GFP_KERNEL);
950 if (!buf->dev_private) {
951 /* Set count correctly so we free the proper amount. */
952 entry->buf_count = count;
953 entry->seg_count = count;
954 drm_cleanup_buf_error(dev, entry);
955 kfree(temp_pagelist);
956 mutex_unlock(&dev->struct_mutex);
957 atomic_dec(&dev->buf_alloc);
958 return -ENOMEM;
959 }
960
961 DRM_DEBUG("buffer %d @ %p\n",
962 entry->buf_count, buf->address);
963 }
964 byte_count += PAGE_SIZE << page_order;
965 }
966
967 temp_buflist = krealloc(dma->buflist,
968 (dma->buf_count + entry->buf_count) *
969 sizeof(*dma->buflist), GFP_KERNEL);
970 if (!temp_buflist) {
971 /* Free the entry because it isn't valid */
972 drm_cleanup_buf_error(dev, entry);
973 kfree(temp_pagelist);
974 mutex_unlock(&dev->struct_mutex);
975 atomic_dec(&dev->buf_alloc);
976 return -ENOMEM;
977 }
978 dma->buflist = temp_buflist;
979
980 for (i = 0; i < entry->buf_count; i++) {
981 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
982 }
983
984 /* No allocations failed, so now we can replace the original pagelist
985 * with the new one.
986 */
987 if (dma->page_count) {
988 kfree(dma->pagelist);
989 }
990 dma->pagelist = temp_pagelist;
991
992 dma->buf_count += entry->buf_count;
993 dma->seg_count += entry->seg_count;
994 dma->page_count += entry->seg_count << page_order;
995 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
996
997 mutex_unlock(&dev->struct_mutex);
998
999 request->count = entry->buf_count;
1000 request->size = size;
1001
1002 if (request->flags & _DRM_PCI_BUFFER_RO)
1003 dma->flags = _DRM_DMA_USE_PCI_RO;
1004
1005 atomic_dec(&dev->buf_alloc);
1006 return 0;
1007
1008 }
1009 EXPORT_SYMBOL(drm_addbufs_pci);
1010
1011 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
1012 {
1013 struct drm_device_dma *dma = dev->dma;
1014 struct drm_buf_entry *entry;
1015 struct drm_buf *buf;
1016 unsigned long offset;
1017 unsigned long agp_offset;
1018 int count;
1019 int order;
1020 int size;
1021 int alignment;
1022 int page_order;
1023 int total;
1024 int byte_count;
1025 int i;
1026 struct drm_buf **temp_buflist;
1027
1028 if (!drm_core_check_feature(dev, DRIVER_SG))
1029 return -EINVAL;
1030
1031 if (!dma)
1032 return -EINVAL;
1033
1034 #ifdef __NetBSD__
1035 if (!DRM_SUSER())
1036 return -EACCES; /* XXX */
1037 #else
1038 if (!capable(CAP_SYS_ADMIN))
1039 return -EPERM;
1040 #endif
1041
1042 count = request->count;
1043 order = drm_order(request->size);
1044 size = 1 << order;
1045
1046 alignment = (request->flags & _DRM_PAGE_ALIGN)
1047 ? PAGE_ALIGN(size) : size;
1048 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1049 total = PAGE_SIZE << page_order;
1050
1051 byte_count = 0;
1052 agp_offset = request->agp_start;
1053
1054 DRM_DEBUG("count: %d\n", count);
1055 DRM_DEBUG("order: %d\n", order);
1056 DRM_DEBUG("size: %d\n", size);
1057 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1058 DRM_DEBUG("alignment: %d\n", alignment);
1059 DRM_DEBUG("page_order: %d\n", page_order);
1060 DRM_DEBUG("total: %d\n", total);
1061
1062 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1063 return -EINVAL;
1064
1065 spin_lock(&dev->count_lock);
1066 if (dev->buf_use) {
1067 spin_unlock(&dev->count_lock);
1068 return -EBUSY;
1069 }
1070 atomic_inc(&dev->buf_alloc);
1071 spin_unlock(&dev->count_lock);
1072
1073 mutex_lock(&dev->struct_mutex);
1074 entry = &dma->bufs[order];
1075 if (entry->buf_count) {
1076 mutex_unlock(&dev->struct_mutex);
1077 atomic_dec(&dev->buf_alloc);
1078 return -ENOMEM; /* May only call once for each order */
1079 }
1080
1081 if (count < 0 || count > 4096) {
1082 mutex_unlock(&dev->struct_mutex);
1083 atomic_dec(&dev->buf_alloc);
1084 return -EINVAL;
1085 }
1086
1087 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1088 GFP_KERNEL);
1089 if (!entry->buflist) {
1090 mutex_unlock(&dev->struct_mutex);
1091 atomic_dec(&dev->buf_alloc);
1092 return -ENOMEM;
1093 }
1094
1095 entry->buf_size = size;
1096 entry->page_order = page_order;
1097
1098 offset = 0;
1099
1100 while (entry->buf_count < count) {
1101 buf = &entry->buflist[entry->buf_count];
1102 buf->idx = dma->buf_count + entry->buf_count;
1103 buf->total = alignment;
1104 buf->order = order;
1105 buf->used = 0;
1106
1107 buf->offset = (dma->byte_count + offset);
1108 buf->bus_address = agp_offset + offset;
1109 buf->address = (void *)(agp_offset + offset
1110 + (unsigned long)dev->sg->virtual);
1111 buf->next = NULL;
1112 buf->waiting = 0;
1113 buf->pending = 0;
1114 buf->file_priv = NULL;
1115
1116 buf->dev_priv_size = dev->driver->dev_priv_size;
1117 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1118 if (!buf->dev_private) {
1119 /* Set count correctly so we free the proper amount. */
1120 entry->buf_count = count;
1121 drm_cleanup_buf_error(dev, entry);
1122 mutex_unlock(&dev->struct_mutex);
1123 atomic_dec(&dev->buf_alloc);
1124 return -ENOMEM;
1125 }
1126
1127 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1128
1129 offset += alignment;
1130 entry->buf_count++;
1131 byte_count += PAGE_SIZE << page_order;
1132 }
1133
1134 DRM_DEBUG("byte_count: %d\n", byte_count);
1135
1136 temp_buflist = krealloc(dma->buflist,
1137 (dma->buf_count + entry->buf_count) *
1138 sizeof(*dma->buflist), GFP_KERNEL);
1139 if (!temp_buflist) {
1140 /* Free the entry because it isn't valid */
1141 drm_cleanup_buf_error(dev, entry);
1142 mutex_unlock(&dev->struct_mutex);
1143 atomic_dec(&dev->buf_alloc);
1144 return -ENOMEM;
1145 }
1146 dma->buflist = temp_buflist;
1147
1148 for (i = 0; i < entry->buf_count; i++) {
1149 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1150 }
1151
1152 dma->buf_count += entry->buf_count;
1153 dma->seg_count += entry->seg_count;
1154 dma->page_count += byte_count >> PAGE_SHIFT;
1155 dma->byte_count += byte_count;
1156
1157 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1158 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1159
1160 mutex_unlock(&dev->struct_mutex);
1161
1162 request->count = entry->buf_count;
1163 request->size = size;
1164
1165 dma->flags = _DRM_DMA_USE_SG;
1166
1167 atomic_dec(&dev->buf_alloc);
1168 return 0;
1169 }
1170
1171 static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1172 {
1173 struct drm_device_dma *dma = dev->dma;
1174 struct drm_buf_entry *entry;
1175 struct drm_buf *buf;
1176 unsigned long offset;
1177 unsigned long agp_offset;
1178 int count;
1179 int order;
1180 int size;
1181 int alignment;
1182 int page_order;
1183 int total;
1184 int byte_count;
1185 int i;
1186 struct drm_buf **temp_buflist;
1187
1188 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1189 return -EINVAL;
1190
1191 if (!dma)
1192 return -EINVAL;
1193
1194 #ifdef __NetBSD__
1195 if (!DRM_SUSER())
1196 return -EACCES; /* XXX */
1197 #else
1198 if (!capable(CAP_SYS_ADMIN))
1199 return -EPERM;
1200 #endif
1201
1202 count = request->count;
1203 order = drm_order(request->size);
1204 size = 1 << order;
1205
1206 alignment = (request->flags & _DRM_PAGE_ALIGN)
1207 ? PAGE_ALIGN(size) : size;
1208 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1209 total = PAGE_SIZE << page_order;
1210
1211 byte_count = 0;
1212 agp_offset = request->agp_start;
1213
1214 DRM_DEBUG("count: %d\n", count);
1215 DRM_DEBUG("order: %d\n", order);
1216 DRM_DEBUG("size: %d\n", size);
1217 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1218 DRM_DEBUG("alignment: %d\n", alignment);
1219 DRM_DEBUG("page_order: %d\n", page_order);
1220 DRM_DEBUG("total: %d\n", total);
1221
1222 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1223 return -EINVAL;
1224
1225 spin_lock(&dev->count_lock);
1226 if (dev->buf_use) {
1227 spin_unlock(&dev->count_lock);
1228 return -EBUSY;
1229 }
1230 atomic_inc(&dev->buf_alloc);
1231 spin_unlock(&dev->count_lock);
1232
1233 mutex_lock(&dev->struct_mutex);
1234 entry = &dma->bufs[order];
1235 if (entry->buf_count) {
1236 mutex_unlock(&dev->struct_mutex);
1237 atomic_dec(&dev->buf_alloc);
1238 return -ENOMEM; /* May only call once for each order */
1239 }
1240
1241 if (count < 0 || count > 4096) {
1242 mutex_unlock(&dev->struct_mutex);
1243 atomic_dec(&dev->buf_alloc);
1244 return -EINVAL;
1245 }
1246
1247 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1248 GFP_KERNEL);
1249 if (!entry->buflist) {
1250 mutex_unlock(&dev->struct_mutex);
1251 atomic_dec(&dev->buf_alloc);
1252 return -ENOMEM;
1253 }
1254
1255 entry->buf_size = size;
1256 entry->page_order = page_order;
1257
1258 offset = 0;
1259
1260 while (entry->buf_count < count) {
1261 buf = &entry->buflist[entry->buf_count];
1262 buf->idx = dma->buf_count + entry->buf_count;
1263 buf->total = alignment;
1264 buf->order = order;
1265 buf->used = 0;
1266
1267 buf->offset = (dma->byte_count + offset);
1268 buf->bus_address = agp_offset + offset;
1269 buf->address = (void *)(agp_offset + offset);
1270 buf->next = NULL;
1271 buf->waiting = 0;
1272 buf->pending = 0;
1273 buf->file_priv = NULL;
1274
1275 buf->dev_priv_size = dev->driver->dev_priv_size;
1276 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1277 if (!buf->dev_private) {
1278 /* Set count correctly so we free the proper amount. */
1279 entry->buf_count = count;
1280 drm_cleanup_buf_error(dev, entry);
1281 mutex_unlock(&dev->struct_mutex);
1282 atomic_dec(&dev->buf_alloc);
1283 return -ENOMEM;
1284 }
1285
1286 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1287
1288 offset += alignment;
1289 entry->buf_count++;
1290 byte_count += PAGE_SIZE << page_order;
1291 }
1292
1293 DRM_DEBUG("byte_count: %d\n", byte_count);
1294
1295 temp_buflist = krealloc(dma->buflist,
1296 (dma->buf_count + entry->buf_count) *
1297 sizeof(*dma->buflist), GFP_KERNEL);
1298 if (!temp_buflist) {
1299 /* Free the entry because it isn't valid */
1300 drm_cleanup_buf_error(dev, entry);
1301 mutex_unlock(&dev->struct_mutex);
1302 atomic_dec(&dev->buf_alloc);
1303 return -ENOMEM;
1304 }
1305 dma->buflist = temp_buflist;
1306
1307 for (i = 0; i < entry->buf_count; i++) {
1308 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1309 }
1310
1311 dma->buf_count += entry->buf_count;
1312 dma->seg_count += entry->seg_count;
1313 dma->page_count += byte_count >> PAGE_SHIFT;
1314 dma->byte_count += byte_count;
1315
1316 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1317 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1318
1319 mutex_unlock(&dev->struct_mutex);
1320
1321 request->count = entry->buf_count;
1322 request->size = size;
1323
1324 dma->flags = _DRM_DMA_USE_FB;
1325
1326 atomic_dec(&dev->buf_alloc);
1327 return 0;
1328 }
1329
1330
1331 /**
1332 * Add buffers for DMA transfers (ioctl).
1333 *
1334 * \param inode device inode.
1335 * \param file_priv DRM file private.
1336 * \param cmd command.
1337 * \param arg pointer to a struct drm_buf_desc request.
1338 * \return zero on success or a negative number on failure.
1339 *
1340 * According to the memory type specified in drm_buf_desc::flags and the
1341 * build options, it dispatches the call either to addbufs_agp(),
1342 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1343 * PCI memory respectively.
1344 */
1345 int drm_addbufs(struct drm_device *dev, void *data,
1346 struct drm_file *file_priv)
1347 {
1348 struct drm_buf_desc *request = data;
1349 int ret;
1350
1351 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1352 return -EINVAL;
1353
1354 #if __OS_HAS_AGP
1355 if (request->flags & _DRM_AGP_BUFFER)
1356 ret = drm_addbufs_agp(dev, request);
1357 else
1358 #endif
1359 if (request->flags & _DRM_SG_BUFFER)
1360 ret = drm_addbufs_sg(dev, request);
1361 else if (request->flags & _DRM_FB_BUFFER)
1362 ret = drm_addbufs_fb(dev, request);
1363 else
1364 ret = drm_addbufs_pci(dev, request);
1365
1366 return ret;
1367 }
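/*
 * Illustrative request (the values are assumptions): to ask for 32 AGP
 * buffers of 64 KiB each starting at the beginning of the aperture, the
 * descriptor handed to this ioctl would look roughly like
 *
 *	struct drm_buf_desc desc = {
 *		.count     = 32,
 *		.size      = 65536,
 *		.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *		.agp_start = 0,
 *	};
 *
 * drm_addbufs() routes it to drm_addbufs_agp(), which rounds the size up
 * to a power-of-two order and writes the resulting count and size back
 * into the same structure.
 */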
1368
1369 /**
1370 * Get information about the buffer mappings.
1371 *
1372 * This was originally meant for debugging purposes, or for use by a sophisticated
1373 * client library to determine how best to use the available buffers (e.g.,
1374 * large buffers can be used for image transfer).
1375 *
1376 * \param inode device inode.
1377 * \param file_priv DRM file private.
1378 * \param cmd command.
1379 * \param arg pointer to a drm_buf_info structure.
1380 * \return zero on success or a negative number on failure.
1381 *
1382 * Increments drm_device::buf_use while holding the drm_device::count_lock
1383 * lock, preventing allocation of more buffers after this call. Information
1384 * about each requested buffer is then copied into user space.
1385 */
1386 int drm_infobufs(struct drm_device *dev, void *data,
1387 struct drm_file *file_priv)
1388 {
1389 struct drm_device_dma *dma = dev->dma;
1390 struct drm_buf_info *request = data;
1391 int i;
1392 int count;
1393
1394 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1395 return -EINVAL;
1396
1397 if (!dma)
1398 return -EINVAL;
1399
1400 spin_lock(&dev->count_lock);
1401 if (atomic_read(&dev->buf_alloc)) {
1402 spin_unlock(&dev->count_lock);
1403 return -EBUSY;
1404 }
1405 ++dev->buf_use; /* Can't allocate more after this call */
1406 spin_unlock(&dev->count_lock);
1407
1408 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1409 if (dma->bufs[i].buf_count)
1410 ++count;
1411 }
1412
1413 DRM_DEBUG("count = %d\n", count);
1414
1415 if (request->count >= count) {
1416 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1417 if (dma->bufs[i].buf_count) {
1418 struct drm_buf_desc __user *to =
1419 &request->list[count];
1420 struct drm_buf_entry *from = &dma->bufs[i];
1421 struct drm_freelist *list = &dma->bufs[i].freelist;
1422 if (copy_to_user(&to->count,
1423 &from->buf_count,
1424 sizeof(from->buf_count)) ||
1425 copy_to_user(&to->size,
1426 &from->buf_size,
1427 sizeof(from->buf_size)) ||
1428 copy_to_user(&to->low_mark,
1429 &list->low_mark,
1430 sizeof(list->low_mark)) ||
1431 copy_to_user(&to->high_mark,
1432 &list->high_mark,
1433 sizeof(list->high_mark)))
1434 return -EFAULT;
1435
1436 DRM_DEBUG("%d %d %d %d %d\n",
1437 i,
1438 dma->bufs[i].buf_count,
1439 dma->bufs[i].buf_size,
1440 dma->bufs[i].freelist.low_mark,
1441 dma->bufs[i].freelist.high_mark);
1442 ++count;
1443 }
1444 }
1445 }
1446 request->count = count;
1447
1448 return 0;
1449 }
1450
1451 /**
1452 * Specifies a low and high water mark for buffer allocation
1453 *
1454 * \param inode device inode.
1455 * \param file_priv DRM file private.
1456 * \param cmd command.
1457 * \param arg a pointer to a drm_buf_desc structure.
1458 * \return zero on success or a negative number on failure.
1459 *
1460 * Verifies that the size order falls within the admissible range and updates
1461 * the low and high water marks of the respective drm_device_dma::bufs entry.
1462 *
1463 * \note This ioctl is deprecated and mostly never used.
1464 */
1465 int drm_markbufs(struct drm_device *dev, void *data,
1466 struct drm_file *file_priv)
1467 {
1468 struct drm_device_dma *dma = dev->dma;
1469 struct drm_buf_desc *request = data;
1470 int order;
1471 struct drm_buf_entry *entry;
1472
1473 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1474 return -EINVAL;
1475
1476 if (!dma)
1477 return -EINVAL;
1478
1479 DRM_DEBUG("%d, %d, %d\n",
1480 request->size, request->low_mark, request->high_mark);
1481 order = drm_order(request->size);
1482 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1483 return -EINVAL;
1484 entry = &dma->bufs[order];
1485
1486 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1487 return -EINVAL;
1488 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1489 return -EINVAL;
1490
1491 entry->freelist.low_mark = request->low_mark;
1492 entry->freelist.high_mark = request->high_mark;
1493
1494 return 0;
1495 }
1496
1497 /**
1498 * Unreserve the buffers in list, previously reserved using drmDMA.
1499 *
1500 * \param inode device inode.
1501 * \param file_priv DRM file private.
1502 * \param cmd command.
1503 * \param arg pointer to a drm_buf_free structure.
1504 * \return zero on success or a negative number on failure.
1505 *
1506 * Calls free_buffer() for each used buffer.
1507 * This function is primarily used for debugging.
1508 */
1509 int drm_freebufs(struct drm_device *dev, void *data,
1510 struct drm_file *file_priv)
1511 {
1512 struct drm_device_dma *dma = dev->dma;
1513 struct drm_buf_free *request = data;
1514 int i;
1515 int idx;
1516 struct drm_buf *buf;
1517
1518 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1519 return -EINVAL;
1520
1521 if (!dma)
1522 return -EINVAL;
1523
1524 DRM_DEBUG("%d\n", request->count);
1525 for (i = 0; i < request->count; i++) {
1526 if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1527 return -EFAULT;
1528 if (idx < 0 || idx >= dma->buf_count) {
1529 DRM_ERROR("Index %d (of %d max)\n",
1530 idx, dma->buf_count - 1);
1531 return -EINVAL;
1532 }
1533 buf = dma->buflist[idx];
1534 if (buf->file_priv != file_priv) {
1535 DRM_ERROR("Process %d freeing buffer not owned\n",
1536 task_pid_nr(current));
1537 return -EINVAL;
1538 }
1539 drm_free_buffer(dev, buf);
1540 }
1541
1542 return 0;
1543 }
1544
1545 /**
1546 * Maps all of the DMA buffers into client-virtual space (ioctl).
1547 *
1548 * \param inode device inode.
1549 * \param file_priv DRM file private.
1550 * \param cmd command.
1551 * \param arg pointer to a drm_buf_map structure.
1552 * \return zero on success or a negative number on failure.
1553 *
1554 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1555 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1556 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1557 * drm_mmap_dma().
1558 */
1559 int drm_mapbufs(struct drm_device *dev, void *data,
1560 struct drm_file *file_priv)
1561 {
1562 struct drm_device_dma *dma = dev->dma;
1563 int retcode = 0;
1564 const int zero = 0;
1565 unsigned long virtual;
1566 unsigned long address;
1567 struct drm_buf_map *request = data;
1568 int i;
1569
1570 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1571 return -EINVAL;
1572
1573 if (!dma)
1574 return -EINVAL;
1575
1576 spin_lock(&dev->count_lock);
1577 if (atomic_read(&dev->buf_alloc)) {
1578 spin_unlock(&dev->count_lock);
1579 return -EBUSY;
1580 }
1581 dev->buf_use++; /* Can't allocate more after this call */
1582 spin_unlock(&dev->count_lock);
1583
1584 if (request->count >= dma->buf_count) {
1585 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1586 || (drm_core_check_feature(dev, DRIVER_SG)
1587 && (dma->flags & _DRM_DMA_USE_SG))
1588 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1589 && (dma->flags & _DRM_DMA_USE_FB))) {
1590 struct drm_local_map *map = dev->agp_buffer_map;
1591 unsigned long token = dev->agp_buffer_token;
1592
1593 if (!map) {
1594 retcode = -EINVAL;
1595 goto done;
1596 }
1597 virtual = vm_mmap(file_priv->filp, 0, map->size,
1598 PROT_READ | PROT_WRITE,
1599 MAP_SHARED,
1600 token);
1601 } else {
1602 virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
1603 PROT_READ | PROT_WRITE,
1604 MAP_SHARED, 0);
1605 }
1606 if (virtual > -1024UL) {
1607 /* Real error */
1608 retcode = (signed long)virtual;
1609 goto done;
1610 }
1611 request->virtual = (void __user *)virtual;
1612
1613 for (i = 0; i < dma->buf_count; i++) {
1614 if (copy_to_user(&request->list[i].idx,
1615 &dma->buflist[i]->idx,
1616 sizeof(request->list[0].idx))) {
1617 retcode = -EFAULT;
1618 goto done;
1619 }
1620 if (copy_to_user(&request->list[i].total,
1621 &dma->buflist[i]->total,
1622 sizeof(request->list[0].total))) {
1623 retcode = -EFAULT;
1624 goto done;
1625 }
1626 if (copy_to_user(&request->list[i].used,
1627 &zero, sizeof(zero))) {
1628 retcode = -EFAULT;
1629 goto done;
1630 }
1631 address = virtual + dma->buflist[i]->offset; /* *** */
1632 if (copy_to_user(&request->list[i].address,
1633 &address, sizeof(address))) {
1634 retcode = -EFAULT;
1635 goto done;
1636 }
1637 }
1638 }
1639 done:
1640 request->count = dma->buf_count;
1641 DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1642
1643 return retcode;
1644 }
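/*
 * Rough consumer-side sketch (field names from struct drm_buf_map and
 * struct drm_buf_pub; the loop itself is an assumption, not lifted from
 * a particular client). req.count must cover the total buffer count or
 * no mapping is attempted:
 *
 *	struct drm_buf_map req = { .count = total_bufs, .list = list };
 *
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &req) == 0)
 *		for (i = 0; i < req.count; i++)
 *			use_buffer(list[i].address, list[i].total);
 *
 * All buffers live inside the single mapping whose base comes back in
 * req.virtual; each list[i].address is that base plus the buffer's offset.
 */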
1645
1646 /**
1647 * Compute size order. Returns the exponent of the smallest power of two which
1648 * is greater than or equal to the given number.
1649 *
1650 * \param size size.
1651 * \return order.
1652 *
1653 * \todo Can be made faster.
1654 */
1655 int drm_order(unsigned long size)
1656 {
1657 int order;
1658 unsigned long tmp;
1659
1660 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1661
1662 if (size & (size - 1))
1663 ++order;
1664
1665 return order;
1666 }
1667 EXPORT_SYMBOL(drm_order);
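/*
 * A couple of checks against the definition above: drm_order(4096) == 12,
 * drm_order(4097) == 13 and drm_order(1) == 0, so requests are always
 * rounded up to the next power-of-two bucket used to index
 * drm_device_dma::bufs.
 */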
1668