1 /**
2 * \file drm_bufs.c
3 * Generic buffer template
4 *
5 * \author Rickard E. (Rik) Faith <faith (at) valinux.com>
6 * \author Gareth Hughes <gareth (at) valinux.com>
7 */
8
9 /*
10 * Created: Thu Nov 23 03:10:50 2000 by gareth (at) valinux.com
11 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36 #include <linux/vmalloc.h>
37 #include <linux/slab.h>
38 #include <linux/sched.h>
39 #include <linux/log2.h>
40 #include <linux/export.h>
41 #include <linux/mm.h>
42 #include <asm/mtrr.h>
43 #include <asm/shmparam.h>
44 #include <drm/drmP.h>
45
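/**
 * Find an existing map that matches the one being requested.
 *
 * \param dev DRM device.
 * \param map requested map to compare against.
 * \return the matching entry on drm_device::maplist, or NULL if none exists.
 *
 * Only entries of the same type that belong to the current primary master
 * are considered; the per-type comparison rules are in the body below.
 */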
46 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
47 struct drm_local_map *map)
48 {
49 struct drm_map_list *entry;
50 list_for_each_entry(entry, &dev->maplist, head) {
51 /*
52 * Because the kernel-userspace ABI is fixed at a 32-bit offset
53 * while PCI resources may live above that, we only compare the
54 * lower 32 bits of the map offset for maps of type
55 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
56 * It is assumed that if a driver has more than one resource
57 * of each type, the lower 32 bits are different.
58 */
59 if (!entry->map ||
60 map->type != entry->map->type ||
61 entry->master != dev->primary->master)
62 continue;
63 switch (map->type) {
64 case _DRM_SHM:
65 if (map->flags != _DRM_CONTAINS_LOCK)
66 break;
67 return entry;
68 case _DRM_REGISTERS:
69 case _DRM_FRAME_BUFFER:
70 if ((entry->map->offset & 0xffffffff) ==
71 (map->offset & 0xffffffff))
72 return entry;
73 default: /* Make gcc happy */
74 ;
75 }
76 if (entry->map->offset == map->offset)
77 return entry;
78 }
79
80 return NULL;
81 }
82
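/**
 * Compute a 32-bit user-space handle for a map.
 *
 * \param dev DRM device.
 * \param hash hash item that will receive the handle as its key.
 * \param user_token kernel address or bus offset identifying the map.
 * \param hashed_handle force the use of a hashed handle.
 * \param shm non-zero for _DRM_SHM maps.
 * \return zero on success or a negative errno on failure.
 *
 * If the token already fits in 32 bits it is used directly (shifted down by
 * PAGE_SHIFT); otherwise a hashed handle is generated.  For SHM maps the
 * SHMLBA-relevant low bits of the token are preserved to avoid cache
 * aliasing on platforms where SHMLBA is larger than PAGE_SIZE.
 */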
83 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
84 unsigned long user_token, int hashed_handle, int shm)
85 {
86 int use_hashed_handle, shift;
87 unsigned long add;
88
89 use_hashed_handle = (user_token &~ 0xffffffffUL) || hashed_handle;
90 if (!use_hashed_handle) {
91 int ret;
92 hash->key = user_token >> PAGE_SHIFT;
93 ret = drm_ht_insert_item(&dev->map_hash, hash);
94 if (ret != -EINVAL)
95 return ret;
96 }
97
98 shift = 0;
99 add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
100 if (shm && (SHMLBA > PAGE_SIZE)) {
101 int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
102
103 /* For shared memory, we have to preserve the SHMLBA
104 * bits of the eventual vma->vm_pgoff value during
105 * mmap(). Otherwise we run into cache aliasing problems
106 * on some platforms. On these platforms, the pgoff of
107 * a mmap() request is used to pick a suitable virtual
108 * address for the mmap() region such that it will not
109 * cause cache aliasing problems.
110 *
111 * Therefore, make sure the SHMLBA relevant bits of the
112 * hash value we use are equal to those in the original
113 * kernel virtual address.
114 */
115 shift = bits;
116 add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
117 }
118
119 return drm_ht_just_insert_please(&dev->map_hash, hash,
120 user_token, 32 - PAGE_SHIFT - 3,
121 shift, add);
122 }
123
124 /**
125 * Core function to create a range of memory available for mapping by a
126 * non-root process.
127 *
128 * Adjusts the memory offset to its absolute value according to the mapping
129 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
130 * applicable and if supported by the kernel.
131 */
132 static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
133 unsigned int size, enum drm_map_type type,
134 enum drm_map_flags flags,
135 struct drm_map_list ** maplist)
136 {
137 struct drm_local_map *map;
138 struct drm_map_list *list;
139 drm_dma_handle_t *dmah;
140 unsigned long user_token;
141 int ret;
142
143 map = kmalloc(sizeof(*map), GFP_KERNEL);
144 if (!map)
145 return -ENOMEM;
146
147 map->offset = offset;
148 map->size = size;
149 map->flags = flags;
150 map->type = type;
151
152 /* Only allow shared memory to be removable since we only keep enough
153 * bookkeeping information about shared memory to allow for removal
154 * when processes fork.
155 */
156 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
157 kfree(map);
158 return -EINVAL;
159 }
160 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
161 (unsigned long long)map->offset, map->size, map->type);
162
163 /* Page-align _DRM_SHM maps. They are allocated here, so doing this creates
164 * no security hole, and it works around various broken drivers that use
165 * a non-aligned quantity to map the SAREA. --BenH
166 */
167 if (map->type == _DRM_SHM)
168 map->size = PAGE_ALIGN(map->size);
169
170 if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
171 kfree(map);
172 return -EINVAL;
173 }
174 map->mtrr = -1;
175 map->handle = NULL;
176
177 switch (map->type) {
178 case _DRM_REGISTERS:
179 case _DRM_FRAME_BUFFER:
180 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
181 if (map->offset + (map->size-1) < map->offset ||
182 map->offset < virt_to_phys(high_memory)) {
183 kfree(map);
184 return -EINVAL;
185 }
186 #endif
187 /* Some drivers preinitialize some maps, without the X Server
188 * needing to be aware of it. Therefore, we just return success
189 * when the server tries to create a duplicate map.
190 */
191 list = drm_find_matching_map(dev, map);
192 if (list != NULL) {
193 if (list->map->size != map->size) {
194 DRM_DEBUG("Matching maps of type %d with "
195 "mismatched sizes, (%ld vs %ld)\n",
196 map->type, map->size,
197 list->map->size);
198 list->map->size = map->size;
199 }
200
201 kfree(map);
202 *maplist = list;
203 return 0;
204 }
205
206 if (drm_core_has_MTRR(dev)) {
207 if (map->type == _DRM_FRAME_BUFFER ||
208 (map->flags & _DRM_WRITE_COMBINING)) {
209 map->mtrr = mtrr_add(map->offset, map->size,
210 MTRR_TYPE_WRCOMB, 1);
211 }
212 }
213 if (map->type == _DRM_REGISTERS) {
214 #ifdef __NetBSD__
215 map->handle = drm_ioremap(dev, map);
216 #else
217 map->handle = ioremap(map->offset, map->size);
218 #endif
219 if (!map->handle) {
220 kfree(map);
221 return -ENOMEM;
222 }
223 }
224
225 break;
226 case _DRM_SHM:
227 list = drm_find_matching_map(dev, map);
228 if (list != NULL) {
229 if(list->map->size != map->size) {
230 DRM_DEBUG("Matching maps of type %d with "
231 "mismatched sizes, (%ld vs %ld)\n",
232 map->type, map->size, list->map->size);
233 list->map->size = map->size;
234 }
235
236 kfree(map);
237 *maplist = list;
238 return 0;
239 }
240 map->handle = vmalloc_user(map->size);
241 DRM_DEBUG("%lu %d %p\n",
242 map->size, drm_order(map->size), map->handle);
243 if (!map->handle) {
244 kfree(map);
245 return -ENOMEM;
246 }
247 map->offset = (unsigned long)map->handle;
248 if (map->flags & _DRM_CONTAINS_LOCK) {
249 /* Prevent a 2nd X Server from creating a 2nd lock */
250 if (dev->primary->master->lock.hw_lock != NULL) {
251 vfree(map->handle);
252 kfree(map);
253 return -EBUSY;
254 }
255 dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
256 }
257 break;
258 case _DRM_AGP: {
259 struct drm_agp_mem *entry;
260 int valid = 0;
261
262 if (!drm_core_has_AGP(dev)) {
263 kfree(map);
264 return -EINVAL;
265 }
266 #ifdef __alpha__
267 map->offset += dev->hose->mem_space->start;
268 #endif
269 /* In some cases (i810 driver), user space may have already
270 * added the AGP base itself, because dev->agp->base previously
271 * only got set during AGP enable. So, only add the base
272 * address if the map's offset isn't already within the
273 * aperture.
274 */
275 #ifdef __NetBSD__
276 if (map->offset < dev->agp->base ||
277 map->offset > dev->agp->base +
278 dev->agp->agp_info.ai_aperture_size - 1) {
279 map->offset += dev->agp->base;
280 }
281 #else
282 if (map->offset < dev->agp->base ||
283 map->offset > dev->agp->base +
284 dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
285 map->offset += dev->agp->base;
286 }
287 #endif
288 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
289
290 /* This assumes the DRM is in total control of AGP space.
291 * That is not always the case, as AGP can be under the control
292 * of user space (e.g. the i810 driver), in which case this loop
293 * will get skipped. So we double-check that dev->agp->memory is
294 * actually non-empty, as well as the map being invalid, before EPERM'ing.
295 */
296 list_for_each_entry(entry, &dev->agp->memory, head) {
297 if ((map->offset >= entry->bound) &&
298 (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
299 valid = 1;
300 break;
301 }
302 }
303 if (!list_empty(&dev->agp->memory) && !valid) {
304 kfree(map);
305 return -EPERM;
306 }
307 DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
308 (unsigned long long)map->offset, map->size);
309
310 break;
311 }
312 case _DRM_GEM:
313 DRM_ERROR("tried to addmap GEM object\n");
314 break;
315 case _DRM_SCATTER_GATHER:
316 if (!dev->sg) {
317 kfree(map);
318 return -EINVAL;
319 }
320 map->offset += (unsigned long)dev->sg->virtual;
321 break;
322 case _DRM_CONSISTENT:
323 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
324 * As we're limiting the address to 2^32-1 (or less),
325 * casting it down to 32 bits is no problem, but we
326 * need to point to a 64bit variable first. */
327 dmah = drm_pci_alloc(dev, map->size, map->size);
328 if (!dmah) {
329 kfree(map);
330 return -ENOMEM;
331 }
332 map->handle = dmah->vaddr;
333 map->offset = (unsigned long)dmah->busaddr;
334 kfree(dmah);
335 break;
336 default:
337 kfree(map);
338 return -EINVAL;
339 }
340
341 list = kzalloc(sizeof(*list), GFP_KERNEL);
342 if (!list) {
343 if (map->type == _DRM_REGISTERS)
344 #ifdef __NetBSD__
345 drm_iounmap(dev, map);
346 #else
347 iounmap(map->handle);
348 #endif
349 kfree(map);
350 return -EINVAL;
351 }
352 list->map = map;
353
354 mutex_lock(&dev->struct_mutex);
355 list_add(&list->head, &dev->maplist);
356
357 /* Assign a 32-bit handle */
358 /* We do it here so that dev->struct_mutex protects the increment */
359 user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
360 map->offset;
361 ret = drm_map_handle(dev, &list->hash, user_token, 0,
362 (map->type == _DRM_SHM));
363 if (ret) {
364 if (map->type == _DRM_REGISTERS)
365 #ifdef __NetBSD__
366 drm_iounmap(dev, map);
367 #else
368 iounmap(map->handle);
369 #endif
370 kfree(map);
371 kfree(list);
372 mutex_unlock(&dev->struct_mutex);
373 return ret;
374 }
375
376 list->user_token = list->hash.key << PAGE_SHIFT;
377 mutex_unlock(&dev->struct_mutex);
378
379 if (!(map->flags & _DRM_DRIVER))
380 list->master = dev->primary->master;
381 *maplist = list;
382 return 0;
383 }
384
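/**
 * Kernel-internal wrapper around drm_addmap_core() used by drivers; it hands
 * back the map itself rather than the drm_map_list entry.
 *
 * \param dev DRM device.
 * \param offset physical offset (or kernel address for _DRM_SHM) of the map.
 * \param size size of the map in bytes.
 * \param type type of the mapping (registers, framebuffer, SHM, ...).
 * \param flags map flags.
 * \param map_ptr filled with the newly created map on success.
 * \return zero on success or a negative errno on failure.
 *
 * A typical driver-side call might look like the following (an illustrative
 * sketch only; it assumes a PCI device, and the BAR index and flags depend
 * on the driver):
 *
 *	ret = drm_addmap(dev, pci_resource_start(dev->pdev, 0),
 *			 pci_resource_len(dev->pdev, 0),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &map);
 */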
385 int drm_addmap(struct drm_device * dev, resource_size_t offset,
386 unsigned int size, enum drm_map_type type,
387 enum drm_map_flags flags, struct drm_local_map ** map_ptr)
388 {
389 struct drm_map_list *list;
390 int rc;
391
392 rc = drm_addmap_core(dev, offset, size, type, flags, &list);
393 if (!rc)
394 *map_ptr = list->map;
395 return rc;
396 }
397
398 EXPORT_SYMBOL(drm_addmap);
399
400 /**
401 * Ioctl to specify a range of memory that is available for mapping by a
402 * non-root process.
403 *
404 * \param inode device inode.
405 * \param file_priv DRM file private.
406 * \param cmd command.
407 * \param arg pointer to a drm_map structure.
408 * \return zero on success or a negative value on error.
409 *
410 */
411 int drm_addmap_ioctl(struct drm_device *dev, void *data,
412 struct drm_file *file_priv)
413 {
414 struct drm_map *map = data;
415 struct drm_map_list *maplist;
416 int err;
417
418 if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
419 return -EPERM;
420
421 err = drm_addmap_core(dev, map->offset, map->size, map->type,
422 map->flags, &maplist);
423
424 if (err)
425 return err;
426
427 /* avoid a warning on 64-bit; this casting isn't very nice, but the API is already set, so it's too late to change */
428 map->handle = (void *)(unsigned long)maplist->user_token;
429 return 0;
430 }
431
432 /**
433 * Remove a map private from the list and deallocate resources if the mapping
434 * isn't in use.
435 *
436 * Searches for the map on drm_device::maplist, removes it from the list, sees
437 * if it's being used, and frees any associated resources (such as MTRRs) if
438 * it's not in use.
439 *
440 * \sa drm_addmap
441 */
442 int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
443 {
444 struct drm_map_list *r_list = NULL, *list_t;
445 drm_dma_handle_t dmah;
446 int found = 0;
447 struct drm_master *master;
448
449 /* Find the list entry for the map and remove it */
450 list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
451 if (r_list->map == map) {
452 master = r_list->master;
453 list_del(&r_list->head);
454 drm_ht_remove_key(&dev->map_hash,
455 r_list->user_token >> PAGE_SHIFT);
456 kfree(r_list);
457 found = 1;
458 break;
459 }
460 }
461
462 if (!found)
463 return -EINVAL;
464
465 switch (map->type) {
466 case _DRM_REGISTERS:
467 #ifdef __NetBSD__
468 drm_iounmap(dev, map);
469 #else
470 iounmap(map->handle);
471 #endif
472 /* FALLTHROUGH */
473 case _DRM_FRAME_BUFFER:
474 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
475 int retcode;
476 retcode = mtrr_del(map->mtrr, map->offset, map->size);
477 DRM_DEBUG("mtrr_del=%d\n", retcode);
478 }
479 break;
480 case _DRM_SHM:
481 vfree(map->handle);
482 if (master) {
483 if (dev->sigdata.lock == master->lock.hw_lock)
484 dev->sigdata.lock = NULL;
485 master->lock.hw_lock = NULL; /* SHM removed */
486 master->lock.file_priv = NULL;
487 wake_up_interruptible_all(&master->lock.lock_queue);
488 }
489 break;
490 case _DRM_AGP:
491 case _DRM_SCATTER_GATHER:
492 break;
493 case _DRM_CONSISTENT:
494 dmah.vaddr = map->handle;
495 dmah.busaddr = map->offset;
496 dmah.size = map->size;
497 __drm_pci_free(dev, &dmah);
498 break;
499 case _DRM_GEM:
500 DRM_ERROR("tried to rmmap GEM object\n");
501 break;
502 }
503 kfree(map);
504
505 return 0;
506 }
507 EXPORT_SYMBOL(drm_rmmap_locked);
508
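/**
 * Locking wrapper around drm_rmmap_locked(): takes drm_device::struct_mutex
 * for the duration of the removal.
 */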
509 int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
510 {
511 int ret;
512
513 mutex_lock(&dev->struct_mutex);
514 ret = drm_rmmap_locked(dev, map);
515 mutex_unlock(&dev->struct_mutex);
516
517 return ret;
518 }
519 EXPORT_SYMBOL(drm_rmmap);
520
521 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
522 * the last close of the device, and this is necessary for cleanup when things
523 * exit uncleanly. Therefore, having userland manually remove mappings seems
524 * like a pointless exercise since they're going away anyway.
525 *
526 * One use case might be after addmap is allowed for normal users for SHM and
527 * gets used by drivers that the server doesn't need to care about. This seems
528 * unlikely.
529 *
530 * \param inode device inode.
531 * \param file_priv DRM file private.
532 * \param cmd command.
533 * \param arg pointer to a struct drm_map structure.
534 * \return zero on success or a negative value on error.
535 */
536 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
537 struct drm_file *file_priv)
538 {
539 struct drm_map *request = data;
540 struct drm_local_map *map = NULL;
541 struct drm_map_list *r_list;
542 int ret;
543
544 mutex_lock(&dev->struct_mutex);
545 list_for_each_entry(r_list, &dev->maplist, head) {
546 if (r_list->map &&
547 r_list->user_token == (unsigned long)request->handle &&
548 r_list->map->flags & _DRM_REMOVABLE) {
549 map = r_list->map;
550 break;
551 }
552 }
553
554 /* List has wrapped around to the head pointer, or it's empty and we
555 * didn't find anything.
556 */
557 if (list_empty(&dev->maplist) || !map) {
558 mutex_unlock(&dev->struct_mutex);
559 return -EINVAL;
560 }
561
562 /* Register and framebuffer maps are permanent */
563 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
564 mutex_unlock(&dev->struct_mutex);
565 return 0;
566 }
567
568 ret = drm_rmmap_locked(dev, map);
569
570 mutex_unlock(&dev->struct_mutex);
571
572 return ret;
573 }
574
575 /**
576 * Cleanup after an error on one of the addbufs() functions.
577 *
578 * \param dev DRM device.
579 * \param entry buffer entry where the error occurred.
580 *
581 * Frees any pages and buffers associated with the given entry.
582 */
583 static void drm_cleanup_buf_error(struct drm_device * dev,
584 struct drm_buf_entry * entry)
585 {
586 int i;
587
588 if (entry->seg_count) {
589 for (i = 0; i < entry->seg_count; i++) {
590 if (entry->seglist[i]) {
591 drm_pci_free(dev, entry->seglist[i]);
592 }
593 }
594 kfree(entry->seglist);
595
596 entry->seg_count = 0;
597 }
598
599 if (entry->buf_count) {
600 for (i = 0; i < entry->buf_count; i++) {
601 kfree(entry->buflist[i].dev_private);
602 }
603 kfree(entry->buflist);
604
605 entry->buf_count = 0;
606 }
607 }
608
609 #if __OS_HAS_AGP
610 /**
611 * Add AGP buffers for DMA transfers.
612 *
613 * \param dev struct drm_device to which the buffers are to be added.
614 * \param request pointer to a struct drm_buf_desc describing the request.
615 * \return zero on success or a negative number on failure.
616 *
617 * After some sanity checks, creates a drm_buf structure for each buffer and
618 * reallocates the buffer list of the same size order to accommodate the new
619 * buffers.
620 */
621 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
622 {
623 struct drm_device_dma *dma = dev->dma;
624 struct drm_buf_entry *entry;
625 struct drm_agp_mem *agp_entry;
626 struct drm_buf *buf;
627 unsigned long offset;
628 unsigned long agp_offset;
629 int count;
630 int order;
631 int size;
632 int alignment;
633 int page_order;
634 int total;
635 int byte_count;
636 int i, valid;
637 struct drm_buf **temp_buflist;
638
639 if (!dma)
640 return -EINVAL;
641
642 count = request->count;
643 order = drm_order(request->size);
644 size = 1 << order;
645
646 alignment = (request->flags & _DRM_PAGE_ALIGN)
647 ? PAGE_ALIGN(size) : size;
648 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
649 total = PAGE_SIZE << page_order;
650
651 byte_count = 0;
652 agp_offset = dev->agp->base + request->agp_start;
653
654 DRM_DEBUG("count: %d\n", count);
655 DRM_DEBUG("order: %d\n", order);
656 DRM_DEBUG("size: %d\n", size);
657 DRM_DEBUG("agp_offset: %lx\n", agp_offset);
658 DRM_DEBUG("alignment: %d\n", alignment);
659 DRM_DEBUG("page_order: %d\n", page_order);
660 DRM_DEBUG("total: %d\n", total);
661
662 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
663 return -EINVAL;
664
665 /* Make sure buffers are located in AGP memory that we own */
666 valid = 0;
667 list_for_each_entry(agp_entry, &dev->agp->memory, head) {
668 if ((agp_offset >= agp_entry->bound) &&
669 (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
670 valid = 1;
671 break;
672 }
673 }
674 if (!list_empty(&dev->agp->memory) && !valid) {
675 DRM_DEBUG("zone invalid\n");
676 return -EINVAL;
677 }
678 spin_lock(&dev->count_lock);
679 if (dev->buf_use) {
680 spin_unlock(&dev->count_lock);
681 return -EBUSY;
682 }
683 atomic_inc(&dev->buf_alloc);
684 spin_unlock(&dev->count_lock);
685
686 mutex_lock(&dev->struct_mutex);
687 entry = &dma->bufs[order];
688 if (entry->buf_count) {
689 mutex_unlock(&dev->struct_mutex);
690 atomic_dec(&dev->buf_alloc);
691 return -ENOMEM; /* May only call once for each order */
692 }
693
694 if (count < 0 || count > 4096) {
695 mutex_unlock(&dev->struct_mutex);
696 atomic_dec(&dev->buf_alloc);
697 return -EINVAL;
698 }
699
700 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
701 if (!entry->buflist) {
702 mutex_unlock(&dev->struct_mutex);
703 atomic_dec(&dev->buf_alloc);
704 return -ENOMEM;
705 }
706
707 entry->buf_size = size;
708 entry->page_order = page_order;
709
710 offset = 0;
711
712 while (entry->buf_count < count) {
713 buf = &entry->buflist[entry->buf_count];
714 buf->idx = dma->buf_count + entry->buf_count;
715 buf->total = alignment;
716 buf->order = order;
717 buf->used = 0;
718
719 buf->offset = (dma->byte_count + offset);
720 buf->bus_address = agp_offset + offset;
721 buf->address = (void *)(agp_offset + offset);
722 buf->next = NULL;
723 buf->waiting = 0;
724 buf->pending = 0;
725 buf->file_priv = NULL;
726
727 buf->dev_priv_size = dev->driver->dev_priv_size;
728 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
729 if (!buf->dev_private) {
730 /* Set count correctly so we free the proper amount. */
731 entry->buf_count = count;
732 drm_cleanup_buf_error(dev, entry);
733 mutex_unlock(&dev->struct_mutex);
734 atomic_dec(&dev->buf_alloc);
735 return -ENOMEM;
736 }
737
738 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
739
740 offset += alignment;
741 entry->buf_count++;
742 byte_count += PAGE_SIZE << page_order;
743 }
744
745 DRM_DEBUG("byte_count: %d\n", byte_count);
746
747 temp_buflist = krealloc(dma->buflist,
748 (dma->buf_count + entry->buf_count) *
749 sizeof(*dma->buflist), GFP_KERNEL);
750 if (!temp_buflist) {
751 /* Free the entry because it isn't valid */
752 drm_cleanup_buf_error(dev, entry);
753 mutex_unlock(&dev->struct_mutex);
754 atomic_dec(&dev->buf_alloc);
755 return -ENOMEM;
756 }
757 dma->buflist = temp_buflist;
758
759 for (i = 0; i < entry->buf_count; i++) {
760 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
761 }
762
763 dma->buf_count += entry->buf_count;
764 dma->seg_count += entry->seg_count;
765 dma->page_count += byte_count >> PAGE_SHIFT;
766 dma->byte_count += byte_count;
767
768 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
769 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
770
771 mutex_unlock(&dev->struct_mutex);
772
773 request->count = entry->buf_count;
774 request->size = size;
775
776 dma->flags = _DRM_DMA_USE_AGP;
777
778 atomic_dec(&dev->buf_alloc);
779 return 0;
780 }
781 EXPORT_SYMBOL(drm_addbufs_agp);
782 #endif /* __OS_HAS_AGP */
783
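/**
 * Add consistent PCI memory buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Creates request->count buffers, each 1 << drm_order(request->size) bytes,
 * backed by consistent PCI memory allocated with drm_pci_alloc().  The
 * original pagelist is kept until every allocation has succeeded, so a
 * failure leaves the existing buffers intact.
 */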
784 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
785 {
786 struct drm_device_dma *dma = dev->dma;
787 int count;
788 int order;
789 int size;
790 int total;
791 int page_order;
792 struct drm_buf_entry *entry;
793 drm_dma_handle_t *dmah;
794 struct drm_buf *buf;
795 int alignment;
796 unsigned long offset;
797 int i;
798 int byte_count;
799 int page_count;
800 unsigned long *temp_pagelist;
801 struct drm_buf **temp_buflist;
802
803 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
804 return -EINVAL;
805
806 if (!dma)
807 return -EINVAL;
808
809 if (!capable(CAP_SYS_ADMIN))
810 return -EPERM;
811
812 count = request->count;
813 order = drm_order(request->size);
814 size = 1 << order;
815
816 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
817 request->count, request->size, size, order);
818
819 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
820 return -EINVAL;
821
822 alignment = (request->flags & _DRM_PAGE_ALIGN)
823 ? PAGE_ALIGN(size) : size;
824 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
825 total = PAGE_SIZE << page_order;
826
827 spin_lock(&dev->count_lock);
828 if (dev->buf_use) {
829 spin_unlock(&dev->count_lock);
830 return -EBUSY;
831 }
832 atomic_inc(&dev->buf_alloc);
833 spin_unlock(&dev->count_lock);
834
835 mutex_lock(&dev->struct_mutex);
836 entry = &dma->bufs[order];
837 if (entry->buf_count) {
838 mutex_unlock(&dev->struct_mutex);
839 atomic_dec(&dev->buf_alloc);
840 return -ENOMEM; /* May only call once for each order */
841 }
842
843 if (count < 0 || count > 4096) {
844 mutex_unlock(&dev->struct_mutex);
845 atomic_dec(&dev->buf_alloc);
846 return -EINVAL;
847 }
848
849 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
850 if (!entry->buflist) {
851 mutex_unlock(&dev->struct_mutex);
852 atomic_dec(&dev->buf_alloc);
853 return -ENOMEM;
854 }
855
856 entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
857 if (!entry->seglist) {
858 kfree(entry->buflist);
859 mutex_unlock(&dev->struct_mutex);
860 atomic_dec(&dev->buf_alloc);
861 return -ENOMEM;
862 }
863
864 /* Keep the original pagelist until we know all the allocations
865 * have succeeded
866 */
867 temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
868 sizeof(*dma->pagelist), GFP_KERNEL);
869 if (!temp_pagelist) {
870 kfree(entry->buflist);
871 kfree(entry->seglist);
872 mutex_unlock(&dev->struct_mutex);
873 atomic_dec(&dev->buf_alloc);
874 return -ENOMEM;
875 }
876 memcpy(temp_pagelist,
877 dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
878 DRM_DEBUG("pagelist: %d entries\n",
879 dma->page_count + (count << page_order));
880
881 entry->buf_size = size;
882 entry->page_order = page_order;
883 byte_count = 0;
884 page_count = 0;
885
886 while (entry->buf_count < count) {
887
888 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
889
890 if (!dmah) {
891 /* Set count correctly so we free the proper amount. */
892 entry->buf_count = count;
893 entry->seg_count = count;
894 drm_cleanup_buf_error(dev, entry);
895 kfree(temp_pagelist);
896 mutex_unlock(&dev->struct_mutex);
897 atomic_dec(&dev->buf_alloc);
898 return -ENOMEM;
899 }
900 entry->seglist[entry->seg_count++] = dmah;
901 for (i = 0; i < (1 << page_order); i++) {
902 DRM_DEBUG("page %d @ 0x%08lx\n",
903 dma->page_count + page_count,
904 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
905 temp_pagelist[dma->page_count + page_count++]
906 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
907 }
908 for (offset = 0;
909 offset + size <= total && entry->buf_count < count;
910 offset += alignment, ++entry->buf_count) {
911 buf = &entry->buflist[entry->buf_count];
912 buf->idx = dma->buf_count + entry->buf_count;
913 buf->total = alignment;
914 buf->order = order;
915 buf->used = 0;
916 buf->offset = (dma->byte_count + byte_count + offset);
917 #ifdef __NetBSD__
918 buf->address = (void *)((char *)dmah->vaddr + offset);
919 #else
920 buf->address = (void *)(dmah->vaddr + offset);
921 #endif
922 buf->bus_address = dmah->busaddr + offset;
923 buf->next = NULL;
924 buf->waiting = 0;
925 buf->pending = 0;
926 buf->file_priv = NULL;
927
928 buf->dev_priv_size = dev->driver->dev_priv_size;
929 buf->dev_private = kzalloc(buf->dev_priv_size,
930 GFP_KERNEL);
931 if (!buf->dev_private) {
932 /* Set count correctly so we free the proper amount. */
933 entry->buf_count = count;
934 entry->seg_count = count;
935 drm_cleanup_buf_error(dev, entry);
936 kfree(temp_pagelist);
937 mutex_unlock(&dev->struct_mutex);
938 atomic_dec(&dev->buf_alloc);
939 return -ENOMEM;
940 }
941
942 DRM_DEBUG("buffer %d @ %p\n",
943 entry->buf_count, buf->address);
944 }
945 byte_count += PAGE_SIZE << page_order;
946 }
947
948 temp_buflist = krealloc(dma->buflist,
949 (dma->buf_count + entry->buf_count) *
950 sizeof(*dma->buflist), GFP_KERNEL);
951 if (!temp_buflist) {
952 /* Free the entry because it isn't valid */
953 drm_cleanup_buf_error(dev, entry);
954 kfree(temp_pagelist);
955 mutex_unlock(&dev->struct_mutex);
956 atomic_dec(&dev->buf_alloc);
957 return -ENOMEM;
958 }
959 dma->buflist = temp_buflist;
960
961 for (i = 0; i < entry->buf_count; i++) {
962 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
963 }
964
965 /* No allocations failed, so now we can replace the original pagelist
966 * with the new one.
967 */
968 if (dma->page_count) {
969 kfree(dma->pagelist);
970 }
971 dma->pagelist = temp_pagelist;
972
973 dma->buf_count += entry->buf_count;
974 dma->seg_count += entry->seg_count;
975 dma->page_count += entry->seg_count << page_order;
976 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
977
978 mutex_unlock(&dev->struct_mutex);
979
980 request->count = entry->buf_count;
981 request->size = size;
982
983 if (request->flags & _DRM_PCI_BUFFER_RO)
984 dma->flags = _DRM_DMA_USE_PCI_RO;
985
986 atomic_dec(&dev->buf_alloc);
987 return 0;
988
989 }
990 EXPORT_SYMBOL(drm_addbufs_pci);
991
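/**
 * Add scatter-gather memory buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Works like drm_addbufs_agp(), but the buffers are carved out of the
 * scatter-gather area previously set up in dev->sg instead of AGP memory.
 */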
992 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
993 {
994 struct drm_device_dma *dma = dev->dma;
995 struct drm_buf_entry *entry;
996 struct drm_buf *buf;
997 unsigned long offset;
998 unsigned long agp_offset;
999 int count;
1000 int order;
1001 int size;
1002 int alignment;
1003 int page_order;
1004 int total;
1005 int byte_count;
1006 int i;
1007 struct drm_buf **temp_buflist;
1008
1009 if (!drm_core_check_feature(dev, DRIVER_SG))
1010 return -EINVAL;
1011
1012 if (!dma)
1013 return -EINVAL;
1014
1015 if (!capable(CAP_SYS_ADMIN))
1016 return -EPERM;
1017
1018 count = request->count;
1019 order = drm_order(request->size);
1020 size = 1 << order;
1021
1022 alignment = (request->flags & _DRM_PAGE_ALIGN)
1023 ? PAGE_ALIGN(size) : size;
1024 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1025 total = PAGE_SIZE << page_order;
1026
1027 byte_count = 0;
1028 agp_offset = request->agp_start;
1029
1030 DRM_DEBUG("count: %d\n", count);
1031 DRM_DEBUG("order: %d\n", order);
1032 DRM_DEBUG("size: %d\n", size);
1033 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1034 DRM_DEBUG("alignment: %d\n", alignment);
1035 DRM_DEBUG("page_order: %d\n", page_order);
1036 DRM_DEBUG("total: %d\n", total);
1037
1038 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1039 return -EINVAL;
1040
1041 spin_lock(&dev->count_lock);
1042 if (dev->buf_use) {
1043 spin_unlock(&dev->count_lock);
1044 return -EBUSY;
1045 }
1046 atomic_inc(&dev->buf_alloc);
1047 spin_unlock(&dev->count_lock);
1048
1049 mutex_lock(&dev->struct_mutex);
1050 entry = &dma->bufs[order];
1051 if (entry->buf_count) {
1052 mutex_unlock(&dev->struct_mutex);
1053 atomic_dec(&dev->buf_alloc);
1054 return -ENOMEM; /* May only call once for each order */
1055 }
1056
1057 if (count < 0 || count > 4096) {
1058 mutex_unlock(&dev->struct_mutex);
1059 atomic_dec(&dev->buf_alloc);
1060 return -EINVAL;
1061 }
1062
1063 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1064 GFP_KERNEL);
1065 if (!entry->buflist) {
1066 mutex_unlock(&dev->struct_mutex);
1067 atomic_dec(&dev->buf_alloc);
1068 return -ENOMEM;
1069 }
1070
1071 entry->buf_size = size;
1072 entry->page_order = page_order;
1073
1074 offset = 0;
1075
1076 while (entry->buf_count < count) {
1077 buf = &entry->buflist[entry->buf_count];
1078 buf->idx = dma->buf_count + entry->buf_count;
1079 buf->total = alignment;
1080 buf->order = order;
1081 buf->used = 0;
1082
1083 buf->offset = (dma->byte_count + offset);
1084 buf->bus_address = agp_offset + offset;
1085 buf->address = (void *)(agp_offset + offset
1086 + (unsigned long)dev->sg->virtual);
1087 buf->next = NULL;
1088 buf->waiting = 0;
1089 buf->pending = 0;
1090 buf->file_priv = NULL;
1091
1092 buf->dev_priv_size = dev->driver->dev_priv_size;
1093 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1094 if (!buf->dev_private) {
1095 /* Set count correctly so we free the proper amount. */
1096 entry->buf_count = count;
1097 drm_cleanup_buf_error(dev, entry);
1098 mutex_unlock(&dev->struct_mutex);
1099 atomic_dec(&dev->buf_alloc);
1100 return -ENOMEM;
1101 }
1102
1103 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1104
1105 offset += alignment;
1106 entry->buf_count++;
1107 byte_count += PAGE_SIZE << page_order;
1108 }
1109
1110 DRM_DEBUG("byte_count: %d\n", byte_count);
1111
1112 temp_buflist = krealloc(dma->buflist,
1113 (dma->buf_count + entry->buf_count) *
1114 sizeof(*dma->buflist), GFP_KERNEL);
1115 if (!temp_buflist) {
1116 /* Free the entry because it isn't valid */
1117 drm_cleanup_buf_error(dev, entry);
1118 mutex_unlock(&dev->struct_mutex);
1119 atomic_dec(&dev->buf_alloc);
1120 return -ENOMEM;
1121 }
1122 dma->buflist = temp_buflist;
1123
1124 for (i = 0; i < entry->buf_count; i++) {
1125 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1126 }
1127
1128 dma->buf_count += entry->buf_count;
1129 dma->seg_count += entry->seg_count;
1130 dma->page_count += byte_count >> PAGE_SHIFT;
1131 dma->byte_count += byte_count;
1132
1133 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1134 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1135
1136 mutex_unlock(&dev->struct_mutex);
1137
1138 request->count = entry->buf_count;
1139 request->size = size;
1140
1141 dma->flags = _DRM_DMA_USE_SG;
1142
1143 atomic_dec(&dev->buf_alloc);
1144 return 0;
1145 }
1146
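/**
 * Add buffers backed by video (framebuffer) memory for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Uses the same layout logic as drm_addbufs_agp(), but the offsets are
 * relative to request->agp_start within the framebuffer and dma->flags is
 * set to _DRM_DMA_USE_FB.
 */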
1147 static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1148 {
1149 struct drm_device_dma *dma = dev->dma;
1150 struct drm_buf_entry *entry;
1151 struct drm_buf *buf;
1152 unsigned long offset;
1153 unsigned long agp_offset;
1154 int count;
1155 int order;
1156 int size;
1157 int alignment;
1158 int page_order;
1159 int total;
1160 int byte_count;
1161 int i;
1162 struct drm_buf **temp_buflist;
1163
1164 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1165 return -EINVAL;
1166
1167 if (!dma)
1168 return -EINVAL;
1169
1170 if (!capable(CAP_SYS_ADMIN))
1171 return -EPERM;
1172
1173 count = request->count;
1174 order = drm_order(request->size);
1175 size = 1 << order;
1176
1177 alignment = (request->flags & _DRM_PAGE_ALIGN)
1178 ? PAGE_ALIGN(size) : size;
1179 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1180 total = PAGE_SIZE << page_order;
1181
1182 byte_count = 0;
1183 agp_offset = request->agp_start;
1184
1185 DRM_DEBUG("count: %d\n", count);
1186 DRM_DEBUG("order: %d\n", order);
1187 DRM_DEBUG("size: %d\n", size);
1188 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1189 DRM_DEBUG("alignment: %d\n", alignment);
1190 DRM_DEBUG("page_order: %d\n", page_order);
1191 DRM_DEBUG("total: %d\n", total);
1192
1193 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1194 return -EINVAL;
1195
1196 spin_lock(&dev->count_lock);
1197 if (dev->buf_use) {
1198 spin_unlock(&dev->count_lock);
1199 return -EBUSY;
1200 }
1201 atomic_inc(&dev->buf_alloc);
1202 spin_unlock(&dev->count_lock);
1203
1204 mutex_lock(&dev->struct_mutex);
1205 entry = &dma->bufs[order];
1206 if (entry->buf_count) {
1207 mutex_unlock(&dev->struct_mutex);
1208 atomic_dec(&dev->buf_alloc);
1209 return -ENOMEM; /* May only call once for each order */
1210 }
1211
1212 if (count < 0 || count > 4096) {
1213 mutex_unlock(&dev->struct_mutex);
1214 atomic_dec(&dev->buf_alloc);
1215 return -EINVAL;
1216 }
1217
1218 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1219 GFP_KERNEL);
1220 if (!entry->buflist) {
1221 mutex_unlock(&dev->struct_mutex);
1222 atomic_dec(&dev->buf_alloc);
1223 return -ENOMEM;
1224 }
1225
1226 entry->buf_size = size;
1227 entry->page_order = page_order;
1228
1229 offset = 0;
1230
1231 while (entry->buf_count < count) {
1232 buf = &entry->buflist[entry->buf_count];
1233 buf->idx = dma->buf_count + entry->buf_count;
1234 buf->total = alignment;
1235 buf->order = order;
1236 buf->used = 0;
1237
1238 buf->offset = (dma->byte_count + offset);
1239 buf->bus_address = agp_offset + offset;
1240 buf->address = (void *)(agp_offset + offset);
1241 buf->next = NULL;
1242 buf->waiting = 0;
1243 buf->pending = 0;
1244 buf->file_priv = NULL;
1245
1246 buf->dev_priv_size = dev->driver->dev_priv_size;
1247 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1248 if (!buf->dev_private) {
1249 /* Set count correctly so we free the proper amount. */
1250 entry->buf_count = count;
1251 drm_cleanup_buf_error(dev, entry);
1252 mutex_unlock(&dev->struct_mutex);
1253 atomic_dec(&dev->buf_alloc);
1254 return -ENOMEM;
1255 }
1256
1257 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1258
1259 offset += alignment;
1260 entry->buf_count++;
1261 byte_count += PAGE_SIZE << page_order;
1262 }
1263
1264 DRM_DEBUG("byte_count: %d\n", byte_count);
1265
1266 temp_buflist = krealloc(dma->buflist,
1267 (dma->buf_count + entry->buf_count) *
1268 sizeof(*dma->buflist), GFP_KERNEL);
1269 if (!temp_buflist) {
1270 /* Free the entry because it isn't valid */
1271 drm_cleanup_buf_error(dev, entry);
1272 mutex_unlock(&dev->struct_mutex);
1273 atomic_dec(&dev->buf_alloc);
1274 return -ENOMEM;
1275 }
1276 dma->buflist = temp_buflist;
1277
1278 for (i = 0; i < entry->buf_count; i++) {
1279 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1280 }
1281
1282 dma->buf_count += entry->buf_count;
1283 dma->seg_count += entry->seg_count;
1284 dma->page_count += byte_count >> PAGE_SHIFT;
1285 dma->byte_count += byte_count;
1286
1287 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1288 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1289
1290 mutex_unlock(&dev->struct_mutex);
1291
1292 request->count = entry->buf_count;
1293 request->size = size;
1294
1295 dma->flags = _DRM_DMA_USE_FB;
1296
1297 atomic_dec(&dev->buf_alloc);
1298 return 0;
1299 }
1300
1301
1302 /**
1303 * Add buffers for DMA transfers (ioctl).
1304 *
1305 * \param inode device inode.
1306 * \param file_priv DRM file private.
1307 * \param cmd command.
1308 * \param arg pointer to a struct drm_buf_desc request.
1309 * \return zero on success or a negative number on failure.
1310 *
1311 * According to the memory type specified in drm_buf_desc::flags and the
1312 * build options, it dispatches the call either to addbufs_agp(),
1313 * addbufs_sg(), addbufs_fb() or addbufs_pci() for AGP, scatter-gather,
1314 * framebuffer or consistent PCI memory respectively.
1315 */
1316 int drm_addbufs(struct drm_device *dev, void *data,
1317 struct drm_file *file_priv)
1318 {
1319 struct drm_buf_desc *request = data;
1320 int ret;
1321
1322 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1323 return -EINVAL;
1324
1325 #if __OS_HAS_AGP
1326 if (request->flags & _DRM_AGP_BUFFER)
1327 ret = drm_addbufs_agp(dev, request);
1328 else
1329 #endif
1330 if (request->flags & _DRM_SG_BUFFER)
1331 ret = drm_addbufs_sg(dev, request);
1332 else if (request->flags & _DRM_FB_BUFFER)
1333 ret = drm_addbufs_fb(dev, request);
1334 else
1335 ret = drm_addbufs_pci(dev, request);
1336
1337 return ret;
1338 }
1339
1340 /**
1341 * Get information about the buffer mappings.
1342 *
1343 * This was originally meant for debugging purposes, or for use by a
1344 * sophisticated client library to determine how best to use the available
1345 * buffers (e.g., large buffers can be used for image transfer).
1346 *
1347 * \param inode device inode.
1348 * \param file_priv DRM file private.
1349 * \param cmd command.
1350 * \param arg pointer to a drm_buf_info structure.
1351 * \return zero on success or a negative number on failure.
1352 *
1353 * Increments drm_device::buf_use while holding the drm_device::count_lock
1354 * lock, preventing allocation of more buffers after this call. Information
1355 * about each requested buffer is then copied into user space.
1356 */
1357 int drm_infobufs(struct drm_device *dev, void *data,
1358 struct drm_file *file_priv)
1359 {
1360 struct drm_device_dma *dma = dev->dma;
1361 struct drm_buf_info *request = data;
1362 int i;
1363 int count;
1364
1365 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1366 return -EINVAL;
1367
1368 if (!dma)
1369 return -EINVAL;
1370
1371 spin_lock(&dev->count_lock);
1372 if (atomic_read(&dev->buf_alloc)) {
1373 spin_unlock(&dev->count_lock);
1374 return -EBUSY;
1375 }
1376 ++dev->buf_use; /* Can't allocate more after this call */
1377 spin_unlock(&dev->count_lock);
1378
1379 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1380 if (dma->bufs[i].buf_count)
1381 ++count;
1382 }
1383
1384 DRM_DEBUG("count = %d\n", count);
1385
1386 if (request->count >= count) {
1387 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1388 if (dma->bufs[i].buf_count) {
1389 struct drm_buf_desc __user *to =
1390 &request->list[count];
1391 struct drm_buf_entry *from = &dma->bufs[i];
1392 struct drm_freelist *list = &dma->bufs[i].freelist;
1393 if (copy_to_user(&to->count,
1394 &from->buf_count,
1395 sizeof(from->buf_count)) ||
1396 copy_to_user(&to->size,
1397 &from->buf_size,
1398 sizeof(from->buf_size)) ||
1399 copy_to_user(&to->low_mark,
1400 &list->low_mark,
1401 sizeof(list->low_mark)) ||
1402 copy_to_user(&to->high_mark,
1403 &list->high_mark,
1404 sizeof(list->high_mark)))
1405 return -EFAULT;
1406
1407 DRM_DEBUG("%d %d %d %d %d\n",
1408 i,
1409 dma->bufs[i].buf_count,
1410 dma->bufs[i].buf_size,
1411 dma->bufs[i].freelist.low_mark,
1412 dma->bufs[i].freelist.high_mark);
1413 ++count;
1414 }
1415 }
1416 }
1417 request->count = count;
1418
1419 return 0;
1420 }
1421
1422 /**
1423 * Specifies a low and high water mark for buffer allocation
1424 *
1425 * \param inode device inode.
1426 * \param file_priv DRM file private.
1427 * \param cmd command.
1428 * \param arg a pointer to a drm_buf_desc structure.
1429 * \return zero on success or a negative number on failure.
1430 *
1431 * Verifies that the size order is bounded between the admissible orders and
1432 * updates the respective drm_device_dma::bufs entry low and high water mark.
1433 *
1434 * \note This ioctl is deprecated and mostly never used.
1435 */
1436 int drm_markbufs(struct drm_device *dev, void *data,
1437 struct drm_file *file_priv)
1438 {
1439 struct drm_device_dma *dma = dev->dma;
1440 struct drm_buf_desc *request = data;
1441 int order;
1442 struct drm_buf_entry *entry;
1443
1444 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1445 return -EINVAL;
1446
1447 if (!dma)
1448 return -EINVAL;
1449
1450 DRM_DEBUG("%d, %d, %d\n",
1451 request->size, request->low_mark, request->high_mark);
1452 order = drm_order(request->size);
1453 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1454 return -EINVAL;
1455 entry = &dma->bufs[order];
1456
1457 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1458 return -EINVAL;
1459 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1460 return -EINVAL;
1461
1462 entry->freelist.low_mark = request->low_mark;
1463 entry->freelist.high_mark = request->high_mark;
1464
1465 return 0;
1466 }
1467
1468 /**
1469 * Unreserve the buffers in the list, previously reserved using drmDMA.
1470 *
1471 * \param inode device inode.
1472 * \param file_priv DRM file private.
1473 * \param cmd command.
1474 * \param arg pointer to a drm_buf_free structure.
1475 * \return zero on success or a negative number on failure.
1476 *
1477 * Calls free_buffer() for each used buffer.
1478 * This function is primarily used for debugging.
1479 */
1480 int drm_freebufs(struct drm_device *dev, void *data,
1481 struct drm_file *file_priv)
1482 {
1483 struct drm_device_dma *dma = dev->dma;
1484 struct drm_buf_free *request = data;
1485 int i;
1486 int idx;
1487 struct drm_buf *buf;
1488
1489 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1490 return -EINVAL;
1491
1492 if (!dma)
1493 return -EINVAL;
1494
1495 DRM_DEBUG("%d\n", request->count);
1496 for (i = 0; i < request->count; i++) {
1497 if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1498 return -EFAULT;
1499 if (idx < 0 || idx >= dma->buf_count) {
1500 DRM_ERROR("Index %d (of %d max)\n",
1501 idx, dma->buf_count - 1);
1502 return -EINVAL;
1503 }
1504 buf = dma->buflist[idx];
1505 if (buf->file_priv != file_priv) {
1506 DRM_ERROR("Process %d freeing buffer not owned\n",
1507 task_pid_nr(current));
1508 return -EINVAL;
1509 }
1510 drm_free_buffer(dev, buf);
1511 }
1512
1513 return 0;
1514 }
1515
1516 /**
1517 * Maps all of the DMA buffers into client-virtual space (ioctl).
1518 *
1519 * \param inode device inode.
1520 * \param file_priv DRM file private.
1521 * \param cmd command.
1522 * \param arg pointer to a drm_buf_map structure.
1523 * \return zero on success or a negative number on failure.
1524 *
1525 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1526 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1527 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1528 * drm_mmap_dma().
1529 */
1530 int drm_mapbufs(struct drm_device *dev, void *data,
1531 struct drm_file *file_priv)
1532 {
1533 struct drm_device_dma *dma = dev->dma;
1534 int retcode = 0;
1535 const int zero = 0;
1536 unsigned long virtual;
1537 unsigned long address;
1538 struct drm_buf_map *request = data;
1539 int i;
1540
1541 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1542 return -EINVAL;
1543
1544 if (!dma)
1545 return -EINVAL;
1546
1547 spin_lock(&dev->count_lock);
1548 if (atomic_read(&dev->buf_alloc)) {
1549 spin_unlock(&dev->count_lock);
1550 return -EBUSY;
1551 }
1552 dev->buf_use++; /* Can't allocate more after this call */
1553 spin_unlock(&dev->count_lock);
1554
1555 if (request->count >= dma->buf_count) {
1556 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1557 || (drm_core_check_feature(dev, DRIVER_SG)
1558 && (dma->flags & _DRM_DMA_USE_SG))
1559 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1560 && (dma->flags & _DRM_DMA_USE_FB))) {
1561 struct drm_local_map *map = dev->agp_buffer_map;
1562 unsigned long token = dev->agp_buffer_token;
1563
1564 if (!map) {
1565 retcode = -EINVAL;
1566 goto done;
1567 }
1568 virtual = vm_mmap(file_priv->filp, 0, map->size,
1569 PROT_READ | PROT_WRITE,
1570 MAP_SHARED,
1571 token);
1572 } else {
1573 virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
1574 PROT_READ | PROT_WRITE,
1575 MAP_SHARED, 0);
1576 }
1577 if (virtual > -1024UL) {
1578 /* Real error */
1579 retcode = (signed long)virtual;
1580 goto done;
1581 }
1582 request->virtual = (void __user *)virtual;
1583
1584 for (i = 0; i < dma->buf_count; i++) {
1585 if (copy_to_user(&request->list[i].idx,
1586 &dma->buflist[i]->idx,
1587 sizeof(request->list[0].idx))) {
1588 retcode = -EFAULT;
1589 goto done;
1590 }
1591 if (copy_to_user(&request->list[i].total,
1592 &dma->buflist[i]->total,
1593 sizeof(request->list[0].total))) {
1594 retcode = -EFAULT;
1595 goto done;
1596 }
1597 if (copy_to_user(&request->list[i].used,
1598 &zero, sizeof(zero))) {
1599 retcode = -EFAULT;
1600 goto done;
1601 }
1602 address = virtual + dma->buflist[i]->offset; /* *** */
1603 if (copy_to_user(&request->list[i].address,
1604 &address, sizeof(address))) {
1605 retcode = -EFAULT;
1606 goto done;
1607 }
1608 }
1609 }
1610 done:
1611 request->count = dma->buf_count;
1612 DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1613
1614 return retcode;
1615 }
1616
1617 /**
1618 * Compute size order. Returns the exponent of the smallest power of two which
1619 * is greater than or equal to the given number.
1620 *
1621 * \param size size.
1622 * \return order.
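 * For example, drm_order(4096) == 12 and drm_order(4097) == 13.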
1623 *
1624 * \todo Can be made faster.
1625 */
1626 int drm_order(unsigned long size)
1627 {
1628 int order;
1629 unsigned long tmp;
1630
1631 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1632
1633 if (size & (size - 1))
1634 ++order;
1635
1636 return order;
1637 }
1638 EXPORT_SYMBOL(drm_order);
1639