1 /**
2 * \file drm_bufs.c
3 * Generic buffer template
4 *
5 * \author Rickard E. (Rik) Faith <faith (at) valinux.com>
6 * \author Gareth Hughes <gareth (at) valinux.com>
7 */
8
9 /*
10 * Created: Thu Nov 23 03:10:50 2000 by gareth (at) valinux.com
11 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36 #include <linux/vmalloc.h>
37 #include <linux/slab.h>
38 #include <linux/sched.h>
39 #include <linux/log2.h>
40 #include <linux/export.h>
41 #include <linux/mm.h>
42 #include <asm/mtrr.h>
43 #include <asm/shmparam.h>
44 #include <drm/drmP.h>
45
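/**
 * Scan the map list for an existing map compatible with the given one.
 *
 * \param dev DRM device.
 * \param map local map to look up.
 * \return the matching drm_map_list entry, or NULL if no compatible map
 * exists on drm_device::maplist.
 *
 * Used by drm_addmap_core() so that duplicate addmap requests for
 * registers, framebuffers and SHM areas return the existing map instead
 * of creating a second one.
 */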
46 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
47 struct drm_local_map *map)
48 {
49 struct drm_map_list *entry;
50 list_for_each_entry(entry, &dev->maplist, head) {
51 /*
52 * Because the kernel-userspace ABI is fixed at a 32-bit offset
53 * while PCI resources may live above that, we only compare the
54 * lower 32 bits of the map offset for maps of type
55 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
56 * It is assumed that if a driver has more than one resource
57 * of each type, the lower 32 bits are different.
58 */
59 if (!entry->map ||
60 map->type != entry->map->type ||
61 entry->master != dev->primary->master)
62 continue;
63 switch (map->type) {
64 case _DRM_SHM:
65 if (map->flags != _DRM_CONTAINS_LOCK)
66 break;
67 return entry;
68 case _DRM_REGISTERS:
69 case _DRM_FRAME_BUFFER:
70 if ((entry->map->offset & 0xffffffff) ==
71 (map->offset & 0xffffffff))
72 return entry;
73 default: /* Make gcc happy */
74 ;
75 }
76 if (entry->map->offset == map->offset)
77 return entry;
78 }
79
80 return NULL;
81 }
82
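/**
 * Compute the user-space handle (hash key) for a map.
 *
 * \param dev DRM device.
 * \param hash hash item to fill in and insert into drm_device::map_hash.
 * \param user_token kernel address or bus offset identifying the map.
 * \param hashed_handle force the use of a hashed handle.
 * \param shm non-zero for _DRM_SHM maps, which must preserve the
 * SHMLBA-relevant low bits of the page offset to avoid cache aliasing.
 * \return zero on success or a negative errno on failure.
 *
 * Tokens that fit in 32 bits are used directly as the handle; otherwise a
 * free handle is picked from the hash table.
 */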
83 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
84 unsigned long user_token, int hashed_handle, int shm)
85 {
86 int use_hashed_handle, shift;
87 unsigned long add;
88
89 use_hashed_handle = (user_token &~ 0xffffffffUL) || hashed_handle;
90 if (!use_hashed_handle) {
91 int ret;
92 hash->key = user_token >> PAGE_SHIFT;
93 ret = drm_ht_insert_item(&dev->map_hash, hash);
94 if (ret != -EINVAL)
95 return ret;
96 }
97
98 shift = 0;
99 add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
100 if (shm && (SHMLBA > PAGE_SIZE)) {
101 int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
102
103 /* For shared memory, we have to preserve the SHMLBA
104 * bits of the eventual vma->vm_pgoff value during
105 * mmap(). Otherwise we run into cache aliasing problems
106 * on some platforms. On these platforms, the pgoff of
107 * a mmap() request is used to pick a suitable virtual
108 * address for the mmap() region such that it will not
109 * cause cache aliasing problems.
110 *
111 * Therefore, make sure the SHMLBA relevant bits of the
112 * hash value we use are equal to those in the original
113 * kernel virtual address.
114 */
115 shift = bits;
116 add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
117 }
118
119 return drm_ht_just_insert_please(&dev->map_hash, hash,
120 user_token, 32 - PAGE_SHIFT - 3,
121 shift, add);
122 }
123
124 /**
125 * Core function to create a range of memory available for mapping by a
126 * non-root process.
127 *
128 * Adjusts the memory offset to its absolute value according to the mapping
129 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
130 * applicable and if supported by the kernel.
131 */
132 static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
133 unsigned int size, enum drm_map_type type,
134 enum drm_map_flags flags,
135 struct drm_map_list ** maplist)
136 {
137 struct drm_local_map *map;
138 struct drm_map_list *list;
139 drm_dma_handle_t *dmah;
140 unsigned long user_token;
141 int ret;
142
143 map = kmalloc(sizeof(*map), GFP_KERNEL);
144 if (!map)
145 return -ENOMEM;
146
147 map->offset = offset;
148 map->size = size;
149 map->flags = flags;
150 map->type = type;
151
152 /* Only allow shared memory to be removable since we only keep enough
153 * bookkeeping information about shared memory to allow for removal
154 * when processes fork.
155 */
156 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
157 kfree(map);
158 return -EINVAL;
159 }
160 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
161 (unsigned long long)map->offset, map->size, map->type);
162
163 /* Page-align _DRM_SHM maps. They are allocated here, so doing this does not
164 * open a security hole, and it works around various broken drivers that use
165 * a non-aligned quantity to map the SAREA. --BenH
166 */
167 if (map->type == _DRM_SHM)
168 map->size = PAGE_ALIGN(map->size);
169
170 if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
171 kfree(map);
172 return -EINVAL;
173 }
174 map->mtrr = -1;
175 map->handle = NULL;
176
177 switch (map->type) {
178 case _DRM_REGISTERS:
179 case _DRM_FRAME_BUFFER:
180 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
181 if (map->offset + (map->size-1) < map->offset ||
182 map->offset < virt_to_phys(high_memory)) {
183 kfree(map);
184 return -EINVAL;
185 }
186 #endif
187 /* Some drivers preinitialize some maps, without the X Server
188 * needing to be aware of it. Therefore, we just return success
189 * when the server tries to create a duplicate map.
190 */
191 list = drm_find_matching_map(dev, map);
192 if (list != NULL) {
193 if (list->map->size != map->size) {
194 DRM_DEBUG("Matching maps of type %d with "
195 "mismatched sizes, (%ld vs %ld)\n",
196 map->type, map->size,
197 list->map->size);
198 list->map->size = map->size;
199 }
200
201 kfree(map);
202 *maplist = list;
203 return 0;
204 }
205
206 if (drm_core_has_MTRR(dev)) {
207 if (map->type == _DRM_FRAME_BUFFER ||
208 (map->flags & _DRM_WRITE_COMBINING)) {
209 map->mtrr = mtrr_add(map->offset, map->size,
210 MTRR_TYPE_WRCOMB, 1);
211 }
212 }
213 if (map->type == _DRM_REGISTERS) {
214 #ifdef __NetBSD__
215 map->handle = drm_ioremap(dev, map);
216 #else
217 map->handle = ioremap(map->offset, map->size);
218 #endif
219 if (!map->handle) {
220 kfree(map);
221 return -ENOMEM;
222 }
223 }
224
225 break;
226 case _DRM_SHM:
227 list = drm_find_matching_map(dev, map);
228 if (list != NULL) {
229 if(list->map->size != map->size) {
230 DRM_DEBUG("Matching maps of type %d with "
231 "mismatched sizes, (%ld vs %ld)\n",
232 map->type, map->size, list->map->size);
233 list->map->size = map->size;
234 }
235
236 kfree(map);
237 *maplist = list;
238 return 0;
239 }
240 map->handle = vmalloc_user(map->size);
241 DRM_DEBUG("%lu %d %p\n",
242 map->size, drm_order(map->size), map->handle);
243 if (!map->handle) {
244 kfree(map);
245 return -ENOMEM;
246 }
247 map->offset = (unsigned long)map->handle;
248 if (map->flags & _DRM_CONTAINS_LOCK) {
249 /* Prevent a 2nd X Server from creating a 2nd lock */
250 if (dev->primary->master->lock.hw_lock != NULL) {
251 vfree(map->handle);
252 kfree(map);
253 return -EBUSY;
254 }
255 dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
256 }
257 break;
258 case _DRM_AGP: {
259 struct drm_agp_mem *entry;
260 int valid = 0;
261
262 if (!drm_core_has_AGP(dev)) {
263 kfree(map);
264 return -EINVAL;
265 }
266 #ifdef __alpha__
267 map->offset += dev->hose->mem_space->start;
268 #endif
269 /* In some cases (i810 driver), user space may have already
270 * added the AGP base itself, because dev->agp->base previously
271 * only got set during AGP enable. So, only add the base
272 * address if the map's offset isn't already within the
273 * aperture.
274 */
275 #ifdef __NetBSD__
276 if (map->offset < dev->agp->base ||
277 map->offset > dev->agp->base +
278 dev->agp->agp_info.ai_aperture_size - 1) {
279 map->offset += dev->agp->base;
280 }
281 #else
282 if (map->offset < dev->agp->base ||
283 map->offset > dev->agp->base +
284 dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
285 map->offset += dev->agp->base;
286 }
287 #endif
288 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
289
290 /* This assumes the DRM is in total control of AGP space.
291 * That is not always the case, as AGP can be under the control
292 * of user space (e.g. the i810 driver). In that case this loop is
293 * skipped, and we double-check that dev->agp->memory is actually
294 * non-empty and that the map is invalid before returning -EPERM.
295 */
296 list_for_each_entry(entry, &dev->agp->memory, head) {
297 if ((map->offset >= entry->bound) &&
298 (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
299 valid = 1;
300 break;
301 }
302 }
303 if (!list_empty(&dev->agp->memory) && !valid) {
304 kfree(map);
305 return -EPERM;
306 }
307 DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
308 (unsigned long long)map->offset, map->size);
309
310 break;
311 }
312 case _DRM_GEM:
313 DRM_ERROR("tried to addmap GEM object\n");
314 break;
315 case _DRM_SCATTER_GATHER:
316 if (!dev->sg) {
317 kfree(map);
318 return -EINVAL;
319 }
320 map->offset += (unsigned long)dev->sg->virtual;
321 break;
322 case _DRM_CONSISTENT:
323 /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
324 * As we're limiting the address to 2^32-1 (or less),
325 * casting it down to 32 bits is no problem, but we
326 * need to point to a 64-bit variable first. */
327 dmah = drm_pci_alloc(dev, map->size, map->size);
328 if (!dmah) {
329 kfree(map);
330 return -ENOMEM;
331 }
332 map->handle = dmah->vaddr;
333 map->offset = (unsigned long)dmah->busaddr;
334 kfree(dmah);
335 break;
336 default:
337 kfree(map);
338 return -EINVAL;
339 }
340
341 list = kzalloc(sizeof(*list), GFP_KERNEL);
342 if (!list) {
343 if (map->type == _DRM_REGISTERS)
344 #ifdef __NetBSD__
345 drm_iounmap(dev, map);
346 #else
347 iounmap(map->handle);
348 #endif
349 kfree(map);
350 return -EINVAL;
351 }
352 list->map = map;
353
354 mutex_lock(&dev->struct_mutex);
355 list_add(&list->head, &dev->maplist);
356
357 /* Assign a 32-bit handle */
358 /* We do it here so that dev->struct_mutex protects the increment */
359 user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
360 map->offset;
361 ret = drm_map_handle(dev, &list->hash, user_token, 0,
362 (map->type == _DRM_SHM));
363 if (ret) {
364 if (map->type == _DRM_REGISTERS)
365 #ifdef __NetBSD__
366 drm_iounmap(dev, map);
367 #else
368 iounmap(map->handle);
369 #endif
370 kfree(map);
371 kfree(list);
372 mutex_unlock(&dev->struct_mutex);
373 return ret;
374 }
375
376 list->user_token = list->hash.key << PAGE_SHIFT;
377 mutex_unlock(&dev->struct_mutex);
378
379 if (!(map->flags & _DRM_DRIVER))
380 list->master = dev->primary->master;
381 *maplist = list;
382 return 0;
383 }
384
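/**
 * Create a mapping and return the drm_local_map describing it.
 *
 * \param dev DRM device.
 * \param offset physical offset (or kernel address for _DRM_SHM).
 * \param size size of the mapping.
 * \param type mapping type.
 * \param flags mapping flags.
 * \param map_ptr filled with a pointer to the new (or matching existing) map.
 * \return zero on success or a negative errno on failure.
 *
 * Thin wrapper around drm_addmap_core() for in-kernel callers.
 */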
385 int drm_addmap(struct drm_device * dev, resource_size_t offset,
386 unsigned int size, enum drm_map_type type,
387 enum drm_map_flags flags, struct drm_local_map ** map_ptr)
388 {
389 struct drm_map_list *list;
390 int rc;
391
392 rc = drm_addmap_core(dev, offset, size, type, flags, &list);
393 if (!rc)
394 *map_ptr = list->map;
395 return rc;
396 }
397
398 EXPORT_SYMBOL(drm_addmap);
399
400 /**
401 * Ioctl to specify a range of memory that is available for mapping by a
402 * non-root process.
403 *
404 * \param inode device inode.
405 * \param file_priv DRM file private.
406 * \param cmd command.
407 * \param arg pointer to a drm_map structure.
408 * \return zero on success or a negative value on error.
409 *
410 */
411 int drm_addmap_ioctl(struct drm_device *dev, void *data,
412 struct drm_file *file_priv)
413 {
414 struct drm_map *map = data;
415 struct drm_map_list *maplist;
416 int err;
417
418 if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
419 return -EPERM;
420
421 err = drm_addmap_core(dev, map->offset, map->size, map->type,
422 map->flags, &maplist);
423
424 if (err)
425 return err;
426
427 /* Avoid a warning on 64-bit; this cast isn't very nice, but the API is already set, so it's too late to change it. */
428 map->handle = (void *)(unsigned long)maplist->user_token;
429 return 0;
430 }
431
432 /**
433 * Remove a map from the map list and deallocate its resources if the
434 * mapping isn't in use.
435 *
436 * Searches for the map on drm_device::maplist, removes it from the list,
437 * checks whether it is still in use, and frees any associated resources
438 * (such as MTRRs) if it is not.
439 *
440 * \sa drm_addmap
441 */
442 int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
443 {
444 struct drm_map_list *r_list = NULL, *list_t;
445 drm_dma_handle_t dmah;
446 int found = 0;
447 struct drm_master *master;
448
449 /* Find the list entry for the map and remove it */
450 list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
451 if (r_list->map == map) {
452 master = r_list->master;
453 list_del(&r_list->head);
454 drm_ht_remove_key(&dev->map_hash,
455 r_list->user_token >> PAGE_SHIFT);
456 kfree(r_list);
457 found = 1;
458 break;
459 }
460 }
461
462 if (!found)
463 return -EINVAL;
464
465 switch (map->type) {
466 case _DRM_REGISTERS:
467 #ifdef __NetBSD__
468 drm_iounmap(dev, map);
469 #else
470 iounmap(map->handle);
471 #endif
472 /* FALLTHROUGH */
473 case _DRM_FRAME_BUFFER:
474 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
475 int retcode;
476 retcode = mtrr_del(map->mtrr, map->offset, map->size);
477 DRM_DEBUG("mtrr_del=%d\n", retcode);
478 }
479 break;
480 case _DRM_SHM:
481 vfree(map->handle);
482 if (master) {
483 if (dev->sigdata.lock == master->lock.hw_lock)
484 dev->sigdata.lock = NULL;
485 master->lock.hw_lock = NULL; /* SHM removed */
486 master->lock.file_priv = NULL;
487 #ifdef __NetBSD__
488 DRM_WAKEUP_ALL(&master->lock.lock_queue,
489 &drm_global_mutex);
490 #else
491 wake_up_interruptible_all(&master->lock.lock_queue);
492 #endif
493 }
494 break;
495 case _DRM_AGP:
496 case _DRM_SCATTER_GATHER:
497 break;
498 case _DRM_CONSISTENT:
499 dmah.vaddr = map->handle;
500 dmah.busaddr = map->offset;
501 dmah.size = map->size;
502 __drm_pci_free(dev, &dmah);
503 break;
504 case _DRM_GEM:
505 DRM_ERROR("tried to rmmap GEM object\n");
506 break;
507 }
508 kfree(map);
509
510 return 0;
511 }
512 EXPORT_SYMBOL(drm_rmmap_locked);
513
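/**
 * Remove a map, taking drm_device::struct_mutex around drm_rmmap_locked().
 *
 * \sa drm_rmmap_locked
 */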
514 int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
515 {
516 int ret;
517
518 mutex_lock(&dev->struct_mutex);
519 ret = drm_rmmap_locked(dev, map);
520 mutex_unlock(&dev->struct_mutex);
521
522 return ret;
523 }
524 EXPORT_SYMBOL(drm_rmmap);
525
526 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
527 * the last close of the device, and this is necessary for cleanup when things
528 * exit uncleanly. Therefore, having userland manually remove mappings seems
529 * like a pointless exercise since they're going away anyway.
530 *
531 * One use case might be after addmap is allowed for normal users for SHM and
532 * gets used by drivers that the server doesn't need to care about. This seems
533 * unlikely.
534 *
535 * \param inode device inode.
536 * \param file_priv DRM file private.
537 * \param cmd command.
538 * \param arg pointer to a struct drm_map structure.
539 * \return zero on success or a negative value on error.
540 */
541 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
542 struct drm_file *file_priv)
543 {
544 struct drm_map *request = data;
545 struct drm_local_map *map = NULL;
546 struct drm_map_list *r_list;
547 int ret;
548
549 mutex_lock(&dev->struct_mutex);
550 list_for_each_entry(r_list, &dev->maplist, head) {
551 if (r_list->map &&
552 r_list->user_token == (unsigned long)request->handle &&
553 r_list->map->flags & _DRM_REMOVABLE) {
554 map = r_list->map;
555 break;
556 }
557 }
558
559 /* The list has wrapped around to the head pointer, or it's empty and we
560 * didn't find anything.
561 */
562 if (list_empty(&dev->maplist) || !map) {
563 mutex_unlock(&dev->struct_mutex);
564 return -EINVAL;
565 }
566
567 /* Register and framebuffer maps are permanent */
568 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
569 mutex_unlock(&dev->struct_mutex);
570 return 0;
571 }
572
573 ret = drm_rmmap_locked(dev, map);
574
575 mutex_unlock(&dev->struct_mutex);
576
577 return ret;
578 }
579
580 /**
581 * Cleanup after an error on one of the addbufs() functions.
582 *
583 * \param dev DRM device.
584 * \param entry buffer entry where the error occurred.
585 *
586 * Frees any pages and buffers associated with the given entry.
587 */
588 static void drm_cleanup_buf_error(struct drm_device * dev,
589 struct drm_buf_entry * entry)
590 {
591 int i;
592
593 if (entry->seg_count) {
594 for (i = 0; i < entry->seg_count; i++) {
595 if (entry->seglist[i]) {
596 drm_pci_free(dev, entry->seglist[i]);
597 }
598 }
599 kfree(entry->seglist);
600
601 entry->seg_count = 0;
602 }
603
604 if (entry->buf_count) {
605 for (i = 0; i < entry->buf_count; i++) {
606 kfree(entry->buflist[i].dev_private);
607 }
608 kfree(entry->buflist);
609
610 entry->buf_count = 0;
611 }
612 }
613
614 #if __OS_HAS_AGP
615 /**
616 * Add AGP buffers for DMA transfers.
617 *
618 * \param dev struct drm_device to which the buffers are to be added.
619 * \param request pointer to a struct drm_buf_desc describing the request.
620 * \return zero on success or a negative number on failure.
621 *
622 * After some sanity checks, creates a drm_buf structure for each buffer and
623 * reallocates the device's buffer list (dma->buflist) to accommodate the
624 * new buffers.
625 */
626 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
627 {
628 struct drm_device_dma *dma = dev->dma;
629 struct drm_buf_entry *entry;
630 struct drm_agp_mem *agp_entry;
631 struct drm_buf *buf;
632 unsigned long offset;
633 unsigned long agp_offset;
634 int count;
635 int order;
636 int size;
637 int alignment;
638 int page_order;
639 int total;
640 int byte_count;
641 int i, valid;
642 struct drm_buf **temp_buflist;
643
644 if (!dma)
645 return -EINVAL;
646
647 count = request->count;
648 order = drm_order(request->size);
649 size = 1 << order;
650
651 alignment = (request->flags & _DRM_PAGE_ALIGN)
652 ? PAGE_ALIGN(size) : size;
653 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
654 total = PAGE_SIZE << page_order;
655
656 byte_count = 0;
657 agp_offset = dev->agp->base + request->agp_start;
658
659 DRM_DEBUG("count: %d\n", count);
660 DRM_DEBUG("order: %d\n", order);
661 DRM_DEBUG("size: %d\n", size);
662 DRM_DEBUG("agp_offset: %lx\n", agp_offset);
663 DRM_DEBUG("alignment: %d\n", alignment);
664 DRM_DEBUG("page_order: %d\n", page_order);
665 DRM_DEBUG("total: %d\n", total);
666
667 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
668 return -EINVAL;
669
670 /* Make sure buffers are located in AGP memory that we own */
671 valid = 0;
672 list_for_each_entry(agp_entry, &dev->agp->memory, head) {
673 if ((agp_offset >= agp_entry->bound) &&
674 (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
675 valid = 1;
676 break;
677 }
678 }
679 if (!list_empty(&dev->agp->memory) && !valid) {
680 DRM_DEBUG("zone invalid\n");
681 return -EINVAL;
682 }
683 spin_lock(&dev->count_lock);
684 if (dev->buf_use) {
685 spin_unlock(&dev->count_lock);
686 return -EBUSY;
687 }
688 atomic_inc(&dev->buf_alloc);
689 spin_unlock(&dev->count_lock);
690
691 mutex_lock(&dev->struct_mutex);
692 entry = &dma->bufs[order];
693 if (entry->buf_count) {
694 mutex_unlock(&dev->struct_mutex);
695 atomic_dec(&dev->buf_alloc);
696 return -ENOMEM; /* May only call once for each order */
697 }
698
699 if (count < 0 || count > 4096) {
700 mutex_unlock(&dev->struct_mutex);
701 atomic_dec(&dev->buf_alloc);
702 return -EINVAL;
703 }
704
705 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
706 if (!entry->buflist) {
707 mutex_unlock(&dev->struct_mutex);
708 atomic_dec(&dev->buf_alloc);
709 return -ENOMEM;
710 }
711
712 entry->buf_size = size;
713 entry->page_order = page_order;
714
715 offset = 0;
716
717 while (entry->buf_count < count) {
718 buf = &entry->buflist[entry->buf_count];
719 buf->idx = dma->buf_count + entry->buf_count;
720 buf->total = alignment;
721 buf->order = order;
722 buf->used = 0;
723
724 buf->offset = (dma->byte_count + offset);
725 buf->bus_address = agp_offset + offset;
726 buf->address = (void *)(agp_offset + offset);
727 buf->next = NULL;
728 buf->waiting = 0;
729 buf->pending = 0;
730 buf->file_priv = NULL;
731
732 buf->dev_priv_size = dev->driver->dev_priv_size;
733 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
734 if (!buf->dev_private) {
735 /* Set count correctly so we free the proper amount. */
736 entry->buf_count = count;
737 drm_cleanup_buf_error(dev, entry);
738 mutex_unlock(&dev->struct_mutex);
739 atomic_dec(&dev->buf_alloc);
740 return -ENOMEM;
741 }
742
743 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
744
745 offset += alignment;
746 entry->buf_count++;
747 byte_count += PAGE_SIZE << page_order;
748 }
749
750 DRM_DEBUG("byte_count: %d\n", byte_count);
751
752 temp_buflist = krealloc(dma->buflist,
753 (dma->buf_count + entry->buf_count) *
754 sizeof(*dma->buflist), GFP_KERNEL);
755 if (!temp_buflist) {
756 /* Free the entry because it isn't valid */
757 drm_cleanup_buf_error(dev, entry);
758 mutex_unlock(&dev->struct_mutex);
759 atomic_dec(&dev->buf_alloc);
760 return -ENOMEM;
761 }
762 dma->buflist = temp_buflist;
763
764 for (i = 0; i < entry->buf_count; i++) {
765 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
766 }
767
768 dma->buf_count += entry->buf_count;
769 dma->seg_count += entry->seg_count;
770 dma->page_count += byte_count >> PAGE_SHIFT;
771 dma->byte_count += byte_count;
772
773 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
774 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
775
776 mutex_unlock(&dev->struct_mutex);
777
778 request->count = entry->buf_count;
779 request->size = size;
780
781 dma->flags = _DRM_DMA_USE_AGP;
782
783 atomic_dec(&dev->buf_alloc);
784 return 0;
785 }
786 EXPORT_SYMBOL(drm_addbufs_agp);
787 #endif /* __OS_HAS_AGP */
788
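/**
 * Add consistent PCI memory buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Allocates the pages with drm_pci_alloc(), carves them into buffers of
 * the requested size order, and appends them to the device's buffer and
 * page lists.
 */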
789 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
790 {
791 struct drm_device_dma *dma = dev->dma;
792 int count;
793 int order;
794 int size;
795 int total;
796 int page_order;
797 struct drm_buf_entry *entry;
798 drm_dma_handle_t *dmah;
799 struct drm_buf *buf;
800 int alignment;
801 unsigned long offset;
802 int i;
803 int byte_count;
804 int page_count;
805 unsigned long *temp_pagelist;
806 struct drm_buf **temp_buflist;
807
808 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
809 return -EINVAL;
810
811 if (!dma)
812 return -EINVAL;
813
814 if (!capable(CAP_SYS_ADMIN))
815 return -EPERM;
816
817 count = request->count;
818 order = drm_order(request->size);
819 size = 1 << order;
820
821 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
822 request->count, request->size, size, order);
823
824 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
825 return -EINVAL;
826
827 alignment = (request->flags & _DRM_PAGE_ALIGN)
828 ? PAGE_ALIGN(size) : size;
829 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
830 total = PAGE_SIZE << page_order;
831
832 spin_lock(&dev->count_lock);
833 if (dev->buf_use) {
834 spin_unlock(&dev->count_lock);
835 return -EBUSY;
836 }
837 atomic_inc(&dev->buf_alloc);
838 spin_unlock(&dev->count_lock);
839
840 mutex_lock(&dev->struct_mutex);
841 entry = &dma->bufs[order];
842 if (entry->buf_count) {
843 mutex_unlock(&dev->struct_mutex);
844 atomic_dec(&dev->buf_alloc);
845 return -ENOMEM; /* May only call once for each order */
846 }
847
848 if (count < 0 || count > 4096) {
849 mutex_unlock(&dev->struct_mutex);
850 atomic_dec(&dev->buf_alloc);
851 return -EINVAL;
852 }
853
854 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
855 if (!entry->buflist) {
856 mutex_unlock(&dev->struct_mutex);
857 atomic_dec(&dev->buf_alloc);
858 return -ENOMEM;
859 }
860
861 entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
862 if (!entry->seglist) {
863 kfree(entry->buflist);
864 mutex_unlock(&dev->struct_mutex);
865 atomic_dec(&dev->buf_alloc);
866 return -ENOMEM;
867 }
868
869 /* Keep the original pagelist until we know all the allocations
870 * have succeeded
871 */
872 temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
873 sizeof(*dma->pagelist), GFP_KERNEL);
874 if (!temp_pagelist) {
875 kfree(entry->buflist);
876 kfree(entry->seglist);
877 mutex_unlock(&dev->struct_mutex);
878 atomic_dec(&dev->buf_alloc);
879 return -ENOMEM;
880 }
881 memcpy(temp_pagelist,
882 dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
883 DRM_DEBUG("pagelist: %d entries\n",
884 dma->page_count + (count << page_order));
885
886 entry->buf_size = size;
887 entry->page_order = page_order;
888 byte_count = 0;
889 page_count = 0;
890
891 while (entry->buf_count < count) {
892
893 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
894
895 if (!dmah) {
896 /* Set count correctly so we free the proper amount. */
897 entry->buf_count = count;
898 entry->seg_count = count;
899 drm_cleanup_buf_error(dev, entry);
900 kfree(temp_pagelist);
901 mutex_unlock(&dev->struct_mutex);
902 atomic_dec(&dev->buf_alloc);
903 return -ENOMEM;
904 }
905 entry->seglist[entry->seg_count++] = dmah;
906 for (i = 0; i < (1 << page_order); i++) {
907 DRM_DEBUG("page %d @ 0x%08lx\n",
908 dma->page_count + page_count,
909 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
910 temp_pagelist[dma->page_count + page_count++]
911 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
912 }
913 for (offset = 0;
914 offset + size <= total && entry->buf_count < count;
915 offset += alignment, ++entry->buf_count) {
916 buf = &entry->buflist[entry->buf_count];
917 buf->idx = dma->buf_count + entry->buf_count;
918 buf->total = alignment;
919 buf->order = order;
920 buf->used = 0;
921 buf->offset = (dma->byte_count + byte_count + offset);
922 #ifdef __NetBSD__
923 buf->address = (void *)((char *)dmah->vaddr + offset);
924 #else
925 buf->address = (void *)(dmah->vaddr + offset);
926 #endif
927 buf->bus_address = dmah->busaddr + offset;
928 buf->next = NULL;
929 buf->waiting = 0;
930 buf->pending = 0;
931 buf->file_priv = NULL;
932
933 buf->dev_priv_size = dev->driver->dev_priv_size;
934 buf->dev_private = kzalloc(buf->dev_priv_size,
935 GFP_KERNEL);
936 if (!buf->dev_private) {
937 /* Set count correctly so we free the proper amount. */
938 entry->buf_count = count;
939 entry->seg_count = count;
940 drm_cleanup_buf_error(dev, entry);
941 kfree(temp_pagelist);
942 mutex_unlock(&dev->struct_mutex);
943 atomic_dec(&dev->buf_alloc);
944 return -ENOMEM;
945 }
946
947 DRM_DEBUG("buffer %d @ %p\n",
948 entry->buf_count, buf->address);
949 }
950 byte_count += PAGE_SIZE << page_order;
951 }
952
953 temp_buflist = krealloc(dma->buflist,
954 (dma->buf_count + entry->buf_count) *
955 sizeof(*dma->buflist), GFP_KERNEL);
956 if (!temp_buflist) {
957 /* Free the entry because it isn't valid */
958 drm_cleanup_buf_error(dev, entry);
959 kfree(temp_pagelist);
960 mutex_unlock(&dev->struct_mutex);
961 atomic_dec(&dev->buf_alloc);
962 return -ENOMEM;
963 }
964 dma->buflist = temp_buflist;
965
966 for (i = 0; i < entry->buf_count; i++) {
967 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
968 }
969
970 /* No allocations failed, so now we can replace the original pagelist
971 * with the new one.
972 */
973 if (dma->page_count) {
974 kfree(dma->pagelist);
975 }
976 dma->pagelist = temp_pagelist;
977
978 dma->buf_count += entry->buf_count;
979 dma->seg_count += entry->seg_count;
980 dma->page_count += entry->seg_count << page_order;
981 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
982
983 mutex_unlock(&dev->struct_mutex);
984
985 request->count = entry->buf_count;
986 request->size = size;
987
988 if (request->flags & _DRM_PCI_BUFFER_RO)
989 dma->flags = _DRM_DMA_USE_PCI_RO;
990
991 atomic_dec(&dev->buf_alloc);
992 return 0;
993
994 }
995 EXPORT_SYMBOL(drm_addbufs_pci);
996
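/**
 * Add scatter-gather buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * The buffers are carved out of the previously allocated scatter-gather
 * area (dev->sg), starting at request->agp_start.
 */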
997 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
998 {
999 struct drm_device_dma *dma = dev->dma;
1000 struct drm_buf_entry *entry;
1001 struct drm_buf *buf;
1002 unsigned long offset;
1003 unsigned long agp_offset;
1004 int count;
1005 int order;
1006 int size;
1007 int alignment;
1008 int page_order;
1009 int total;
1010 int byte_count;
1011 int i;
1012 struct drm_buf **temp_buflist;
1013
1014 if (!drm_core_check_feature(dev, DRIVER_SG))
1015 return -EINVAL;
1016
1017 if (!dma)
1018 return -EINVAL;
1019
1020 if (!capable(CAP_SYS_ADMIN))
1021 return -EPERM;
1022
1023 count = request->count;
1024 order = drm_order(request->size);
1025 size = 1 << order;
1026
1027 alignment = (request->flags & _DRM_PAGE_ALIGN)
1028 ? PAGE_ALIGN(size) : size;
1029 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1030 total = PAGE_SIZE << page_order;
1031
1032 byte_count = 0;
1033 agp_offset = request->agp_start;
1034
1035 DRM_DEBUG("count: %d\n", count);
1036 DRM_DEBUG("order: %d\n", order);
1037 DRM_DEBUG("size: %d\n", size);
1038 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1039 DRM_DEBUG("alignment: %d\n", alignment);
1040 DRM_DEBUG("page_order: %d\n", page_order);
1041 DRM_DEBUG("total: %d\n", total);
1042
1043 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1044 return -EINVAL;
1045
1046 spin_lock(&dev->count_lock);
1047 if (dev->buf_use) {
1048 spin_unlock(&dev->count_lock);
1049 return -EBUSY;
1050 }
1051 atomic_inc(&dev->buf_alloc);
1052 spin_unlock(&dev->count_lock);
1053
1054 mutex_lock(&dev->struct_mutex);
1055 entry = &dma->bufs[order];
1056 if (entry->buf_count) {
1057 mutex_unlock(&dev->struct_mutex);
1058 atomic_dec(&dev->buf_alloc);
1059 return -ENOMEM; /* May only call once for each order */
1060 }
1061
1062 if (count < 0 || count > 4096) {
1063 mutex_unlock(&dev->struct_mutex);
1064 atomic_dec(&dev->buf_alloc);
1065 return -EINVAL;
1066 }
1067
1068 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1069 GFP_KERNEL);
1070 if (!entry->buflist) {
1071 mutex_unlock(&dev->struct_mutex);
1072 atomic_dec(&dev->buf_alloc);
1073 return -ENOMEM;
1074 }
1075
1076 entry->buf_size = size;
1077 entry->page_order = page_order;
1078
1079 offset = 0;
1080
1081 while (entry->buf_count < count) {
1082 buf = &entry->buflist[entry->buf_count];
1083 buf->idx = dma->buf_count + entry->buf_count;
1084 buf->total = alignment;
1085 buf->order = order;
1086 buf->used = 0;
1087
1088 buf->offset = (dma->byte_count + offset);
1089 buf->bus_address = agp_offset + offset;
1090 buf->address = (void *)(agp_offset + offset
1091 + (unsigned long)dev->sg->virtual);
1092 buf->next = NULL;
1093 buf->waiting = 0;
1094 buf->pending = 0;
1095 buf->file_priv = NULL;
1096
1097 buf->dev_priv_size = dev->driver->dev_priv_size;
1098 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1099 if (!buf->dev_private) {
1100 /* Set count correctly so we free the proper amount. */
1101 entry->buf_count = count;
1102 drm_cleanup_buf_error(dev, entry);
1103 mutex_unlock(&dev->struct_mutex);
1104 atomic_dec(&dev->buf_alloc);
1105 return -ENOMEM;
1106 }
1107
1108 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1109
1110 offset += alignment;
1111 entry->buf_count++;
1112 byte_count += PAGE_SIZE << page_order;
1113 }
1114
1115 DRM_DEBUG("byte_count: %d\n", byte_count);
1116
1117 temp_buflist = krealloc(dma->buflist,
1118 (dma->buf_count + entry->buf_count) *
1119 sizeof(*dma->buflist), GFP_KERNEL);
1120 if (!temp_buflist) {
1121 /* Free the entry because it isn't valid */
1122 drm_cleanup_buf_error(dev, entry);
1123 mutex_unlock(&dev->struct_mutex);
1124 atomic_dec(&dev->buf_alloc);
1125 return -ENOMEM;
1126 }
1127 dma->buflist = temp_buflist;
1128
1129 for (i = 0; i < entry->buf_count; i++) {
1130 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1131 }
1132
1133 dma->buf_count += entry->buf_count;
1134 dma->seg_count += entry->seg_count;
1135 dma->page_count += byte_count >> PAGE_SHIFT;
1136 dma->byte_count += byte_count;
1137
1138 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1139 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1140
1141 mutex_unlock(&dev->struct_mutex);
1142
1143 request->count = entry->buf_count;
1144 request->size = size;
1145
1146 dma->flags = _DRM_DMA_USE_SG;
1147
1148 atomic_dec(&dev->buf_alloc);
1149 return 0;
1150 }
1151
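/**
 * Add buffers located in video (framebuffer) memory for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Mirrors drm_addbufs_agp(), but the buffers start at request->agp_start
 * in framebuffer memory rather than in AGP memory.
 */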
1152 static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1153 {
1154 struct drm_device_dma *dma = dev->dma;
1155 struct drm_buf_entry *entry;
1156 struct drm_buf *buf;
1157 unsigned long offset;
1158 unsigned long agp_offset;
1159 int count;
1160 int order;
1161 int size;
1162 int alignment;
1163 int page_order;
1164 int total;
1165 int byte_count;
1166 int i;
1167 struct drm_buf **temp_buflist;
1168
1169 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1170 return -EINVAL;
1171
1172 if (!dma)
1173 return -EINVAL;
1174
1175 if (!capable(CAP_SYS_ADMIN))
1176 return -EPERM;
1177
1178 count = request->count;
1179 order = drm_order(request->size);
1180 size = 1 << order;
1181
1182 alignment = (request->flags & _DRM_PAGE_ALIGN)
1183 ? PAGE_ALIGN(size) : size;
1184 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1185 total = PAGE_SIZE << page_order;
1186
1187 byte_count = 0;
1188 agp_offset = request->agp_start;
1189
1190 DRM_DEBUG("count: %d\n", count);
1191 DRM_DEBUG("order: %d\n", order);
1192 DRM_DEBUG("size: %d\n", size);
1193 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1194 DRM_DEBUG("alignment: %d\n", alignment);
1195 DRM_DEBUG("page_order: %d\n", page_order);
1196 DRM_DEBUG("total: %d\n", total);
1197
1198 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1199 return -EINVAL;
1200
1201 spin_lock(&dev->count_lock);
1202 if (dev->buf_use) {
1203 spin_unlock(&dev->count_lock);
1204 return -EBUSY;
1205 }
1206 atomic_inc(&dev->buf_alloc);
1207 spin_unlock(&dev->count_lock);
1208
1209 mutex_lock(&dev->struct_mutex);
1210 entry = &dma->bufs[order];
1211 if (entry->buf_count) {
1212 mutex_unlock(&dev->struct_mutex);
1213 atomic_dec(&dev->buf_alloc);
1214 return -ENOMEM; /* May only call once for each order */
1215 }
1216
1217 if (count < 0 || count > 4096) {
1218 mutex_unlock(&dev->struct_mutex);
1219 atomic_dec(&dev->buf_alloc);
1220 return -EINVAL;
1221 }
1222
1223 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1224 GFP_KERNEL);
1225 if (!entry->buflist) {
1226 mutex_unlock(&dev->struct_mutex);
1227 atomic_dec(&dev->buf_alloc);
1228 return -ENOMEM;
1229 }
1230
1231 entry->buf_size = size;
1232 entry->page_order = page_order;
1233
1234 offset = 0;
1235
1236 while (entry->buf_count < count) {
1237 buf = &entry->buflist[entry->buf_count];
1238 buf->idx = dma->buf_count + entry->buf_count;
1239 buf->total = alignment;
1240 buf->order = order;
1241 buf->used = 0;
1242
1243 buf->offset = (dma->byte_count + offset);
1244 buf->bus_address = agp_offset + offset;
1245 buf->address = (void *)(agp_offset + offset);
1246 buf->next = NULL;
1247 buf->waiting = 0;
1248 buf->pending = 0;
1249 buf->file_priv = NULL;
1250
1251 buf->dev_priv_size = dev->driver->dev_priv_size;
1252 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1253 if (!buf->dev_private) {
1254 /* Set count correctly so we free the proper amount. */
1255 entry->buf_count = count;
1256 drm_cleanup_buf_error(dev, entry);
1257 mutex_unlock(&dev->struct_mutex);
1258 atomic_dec(&dev->buf_alloc);
1259 return -ENOMEM;
1260 }
1261
1262 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1263
1264 offset += alignment;
1265 entry->buf_count++;
1266 byte_count += PAGE_SIZE << page_order;
1267 }
1268
1269 DRM_DEBUG("byte_count: %d\n", byte_count);
1270
1271 temp_buflist = krealloc(dma->buflist,
1272 (dma->buf_count + entry->buf_count) *
1273 sizeof(*dma->buflist), GFP_KERNEL);
1274 if (!temp_buflist) {
1275 /* Free the entry because it isn't valid */
1276 drm_cleanup_buf_error(dev, entry);
1277 mutex_unlock(&dev->struct_mutex);
1278 atomic_dec(&dev->buf_alloc);
1279 return -ENOMEM;
1280 }
1281 dma->buflist = temp_buflist;
1282
1283 for (i = 0; i < entry->buf_count; i++) {
1284 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1285 }
1286
1287 dma->buf_count += entry->buf_count;
1288 dma->seg_count += entry->seg_count;
1289 dma->page_count += byte_count >> PAGE_SHIFT;
1290 dma->byte_count += byte_count;
1291
1292 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1293 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1294
1295 mutex_unlock(&dev->struct_mutex);
1296
1297 request->count = entry->buf_count;
1298 request->size = size;
1299
1300 dma->flags = _DRM_DMA_USE_FB;
1301
1302 atomic_dec(&dev->buf_alloc);
1303 return 0;
1304 }
1305
1306
1307 /**
1308 * Add buffers for DMA transfers (ioctl).
1309 *
1310 * \param inode device inode.
1311 * \param file_priv DRM file private.
1312 * \param cmd command.
1313 * \param arg pointer to a struct drm_buf_desc request.
1314 * \return zero on success or a negative number on failure.
1315 *
1316 * Depending on the memory type specified in drm_buf_desc::flags and the
1317 * build options, it dispatches the call to drm_addbufs_agp(),
1318 * drm_addbufs_sg(), drm_addbufs_fb() or drm_addbufs_pci() for AGP,
1319 * scatter-gather, framebuffer or consistent PCI memory respectively.
1320 */
1321 int drm_addbufs(struct drm_device *dev, void *data,
1322 struct drm_file *file_priv)
1323 {
1324 struct drm_buf_desc *request = data;
1325 int ret;
1326
1327 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1328 return -EINVAL;
1329
1330 #if __OS_HAS_AGP
1331 if (request->flags & _DRM_AGP_BUFFER)
1332 ret = drm_addbufs_agp(dev, request);
1333 else
1334 #endif
1335 if (request->flags & _DRM_SG_BUFFER)
1336 ret = drm_addbufs_sg(dev, request);
1337 else if (request->flags & _DRM_FB_BUFFER)
1338 ret = drm_addbufs_fb(dev, request);
1339 else
1340 ret = drm_addbufs_pci(dev, request);
1341
1342 return ret;
1343 }
1344
1345 /**
1346 * Get information about the buffer mappings.
1347 *
1348 * This was originally meant for debugging purposes, or for use by a
1349 * sophisticated client library to determine how best to use the available
1350 * buffers (e.g., large buffers can be used for image transfer).
1351 *
1352 * \param inode device inode.
1353 * \param file_priv DRM file private.
1354 * \param cmd command.
1355 * \param arg pointer to a drm_buf_info structure.
1356 * \return zero on success or a negative number on failure.
1357 *
1358 * Increments drm_device::buf_use while holding the drm_device::count_lock
1359 * lock, preventing allocation of more buffers after this call. Information
1360 * about each requested buffer is then copied into user space.
1361 */
1362 int drm_infobufs(struct drm_device *dev, void *data,
1363 struct drm_file *file_priv)
1364 {
1365 struct drm_device_dma *dma = dev->dma;
1366 struct drm_buf_info *request = data;
1367 int i;
1368 int count;
1369
1370 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1371 return -EINVAL;
1372
1373 if (!dma)
1374 return -EINVAL;
1375
1376 spin_lock(&dev->count_lock);
1377 if (atomic_read(&dev->buf_alloc)) {
1378 spin_unlock(&dev->count_lock);
1379 return -EBUSY;
1380 }
1381 ++dev->buf_use; /* Can't allocate more after this call */
1382 spin_unlock(&dev->count_lock);
1383
1384 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1385 if (dma->bufs[i].buf_count)
1386 ++count;
1387 }
1388
1389 DRM_DEBUG("count = %d\n", count);
1390
1391 if (request->count >= count) {
1392 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1393 if (dma->bufs[i].buf_count) {
1394 struct drm_buf_desc __user *to =
1395 &request->list[count];
1396 struct drm_buf_entry *from = &dma->bufs[i];
1397 struct drm_freelist *list = &dma->bufs[i].freelist;
1398 if (copy_to_user(&to->count,
1399 &from->buf_count,
1400 sizeof(from->buf_count)) ||
1401 copy_to_user(&to->size,
1402 &from->buf_size,
1403 sizeof(from->buf_size)) ||
1404 copy_to_user(&to->low_mark,
1405 &list->low_mark,
1406 sizeof(list->low_mark)) ||
1407 copy_to_user(&to->high_mark,
1408 &list->high_mark,
1409 sizeof(list->high_mark)))
1410 return -EFAULT;
1411
1412 DRM_DEBUG("%d %d %d %d %d\n",
1413 i,
1414 dma->bufs[i].buf_count,
1415 dma->bufs[i].buf_size,
1416 dma->bufs[i].freelist.low_mark,
1417 dma->bufs[i].freelist.high_mark);
1418 ++count;
1419 }
1420 }
1421 }
1422 request->count = count;
1423
1424 return 0;
1425 }
1426
1427 /**
1428 * Specifies a low and high water mark for buffer allocation
1429 *
1430 * \param inode device inode.
1431 * \param file_priv DRM file private.
1432 * \param cmd command.
1433 * \param arg a pointer to a drm_buf_desc structure.
1434 * \return zero on success or a negative number on failure.
1435 *
1436 * Verifies that the size order falls within the admissible range and updates
1437 * the low and high water marks of the respective drm_device_dma::bufs entry.
1438 *
1439 * \note This ioctl is deprecated and rarely, if ever, used.
1440 */
1441 int drm_markbufs(struct drm_device *dev, void *data,
1442 struct drm_file *file_priv)
1443 {
1444 struct drm_device_dma *dma = dev->dma;
1445 struct drm_buf_desc *request = data;
1446 int order;
1447 struct drm_buf_entry *entry;
1448
1449 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1450 return -EINVAL;
1451
1452 if (!dma)
1453 return -EINVAL;
1454
1455 DRM_DEBUG("%d, %d, %d\n",
1456 request->size, request->low_mark, request->high_mark);
1457 order = drm_order(request->size);
1458 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1459 return -EINVAL;
1460 entry = &dma->bufs[order];
1461
1462 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1463 return -EINVAL;
1464 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1465 return -EINVAL;
1466
1467 entry->freelist.low_mark = request->low_mark;
1468 entry->freelist.high_mark = request->high_mark;
1469
1470 return 0;
1471 }
1472
1473 /**
1474 * Unreserve the buffers in list, previously reserved using drmDMA.
1475 *
1476 * \param inode device inode.
1477 * \param file_priv DRM file private.
1478 * \param cmd command.
1479 * \param arg pointer to a drm_buf_free structure.
1480 * \return zero on success or a negative number on failure.
1481 *
1482 * Calls free_buffer() for each used buffer.
1483 * This function is primarily used for debugging.
1484 */
1485 int drm_freebufs(struct drm_device *dev, void *data,
1486 struct drm_file *file_priv)
1487 {
1488 struct drm_device_dma *dma = dev->dma;
1489 struct drm_buf_free *request = data;
1490 int i;
1491 int idx;
1492 struct drm_buf *buf;
1493
1494 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1495 return -EINVAL;
1496
1497 if (!dma)
1498 return -EINVAL;
1499
1500 DRM_DEBUG("%d\n", request->count);
1501 for (i = 0; i < request->count; i++) {
1502 if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1503 return -EFAULT;
1504 if (idx < 0 || idx >= dma->buf_count) {
1505 DRM_ERROR("Index %d (of %d max)\n",
1506 idx, dma->buf_count - 1);
1507 return -EINVAL;
1508 }
1509 buf = dma->buflist[idx];
1510 if (buf->file_priv != file_priv) {
1511 DRM_ERROR("Process %d freeing buffer not owned\n",
1512 task_pid_nr(current));
1513 return -EINVAL;
1514 }
1515 drm_free_buffer(dev, buf);
1516 }
1517
1518 return 0;
1519 }
1520
1521 /**
1522 * Maps all of the DMA buffers into client-virtual space (ioctl).
1523 *
1524 * \param inode device inode.
1525 * \param file_priv DRM file private.
1526 * \param cmd command.
1527 * \param arg pointer to a drm_buf_map structure.
1528 * \return zero on success or a negative number on failure.
1529 *
1530 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1531 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1532 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1533 * drm_mmap_dma().
1534 */
1535 int drm_mapbufs(struct drm_device *dev, void *data,
1536 struct drm_file *file_priv)
1537 {
1538 struct drm_device_dma *dma = dev->dma;
1539 int retcode = 0;
1540 const int zero = 0;
1541 unsigned long virtual;
1542 unsigned long address;
1543 struct drm_buf_map *request = data;
1544 int i;
1545
1546 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1547 return -EINVAL;
1548
1549 if (!dma)
1550 return -EINVAL;
1551
1552 spin_lock(&dev->count_lock);
1553 if (atomic_read(&dev->buf_alloc)) {
1554 spin_unlock(&dev->count_lock);
1555 return -EBUSY;
1556 }
1557 dev->buf_use++; /* Can't allocate more after this call */
1558 spin_unlock(&dev->count_lock);
1559
1560 if (request->count >= dma->buf_count) {
1561 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1562 || (drm_core_check_feature(dev, DRIVER_SG)
1563 && (dma->flags & _DRM_DMA_USE_SG))
1564 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1565 && (dma->flags & _DRM_DMA_USE_FB))) {
1566 struct drm_local_map *map = dev->agp_buffer_map;
1567 unsigned long token = dev->agp_buffer_token;
1568
1569 if (!map) {
1570 retcode = -EINVAL;
1571 goto done;
1572 }
1573 virtual = vm_mmap(file_priv->filp, 0, map->size,
1574 PROT_READ | PROT_WRITE,
1575 MAP_SHARED,
1576 token);
1577 } else {
1578 virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
1579 PROT_READ | PROT_WRITE,
1580 MAP_SHARED, 0);
1581 }
1582 if (virtual > -1024UL) {
1583 /* Real error */
1584 retcode = (signed long)virtual;
1585 goto done;
1586 }
1587 request->virtual = (void __user *)virtual;
1588
1589 for (i = 0; i < dma->buf_count; i++) {
1590 if (copy_to_user(&request->list[i].idx,
1591 &dma->buflist[i]->idx,
1592 sizeof(request->list[0].idx))) {
1593 retcode = -EFAULT;
1594 goto done;
1595 }
1596 if (copy_to_user(&request->list[i].total,
1597 &dma->buflist[i]->total,
1598 sizeof(request->list[0].total))) {
1599 retcode = -EFAULT;
1600 goto done;
1601 }
1602 if (copy_to_user(&request->list[i].used,
1603 &zero, sizeof(zero))) {
1604 retcode = -EFAULT;
1605 goto done;
1606 }
1607 address = virtual + dma->buflist[i]->offset; /* *** */
1608 if (copy_to_user(&request->list[i].address,
1609 &address, sizeof(address))) {
1610 retcode = -EFAULT;
1611 goto done;
1612 }
1613 }
1614 }
1615 done:
1616 request->count = dma->buf_count;
1617 DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1618
1619 return retcode;
1620 }
1621
1622 /**
1623 * Compute size order. Returns the exponent of the smallest power of two
1624 * which is greater than or equal to the given number.
1625 *
1626 * \param size size.
1627 * \return order.
1628 *
1629 * \todo Can be made faster.
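 *
 * For example, drm_order(4096) returns 12 and drm_order(4097) returns 13,
 * since 2^13 = 8192 is the smallest power of two >= 4097.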
1630 */
1631 int drm_order(unsigned long size)
1632 {
1633 int order;
1634 unsigned long tmp;
1635
1636 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1637
1638 if (size & (size - 1))
1639 ++order;
1640
1641 return order;
1642 }
1643 EXPORT_SYMBOL(drm_order);
1644