1 /**
2 * \file drm_bufs.c
3 * Generic buffer template
4 *
5 * \author Rickard E. (Rik) Faith <faith (at) valinux.com>
6 * \author Gareth Hughes <gareth (at) valinux.com>
7 */
8
9 /*
10 * Created: Thu Nov 23 03:10:50 2000 by gareth (at) valinux.com
11 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36 #include <linux/vmalloc.h>
37 #include <linux/slab.h>
38 #include <linux/sched.h>
39 #include <linux/log2.h>
40 #include <linux/export.h>
41 #include <linux/mm.h>
42 #include <asm/mtrr.h>
43 #include <asm/shmparam.h>
44 #include <drm/drmP.h>
45
46 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
47 struct drm_local_map *map)
48 {
49 struct drm_map_list *entry;
50 list_for_each_entry(entry, &dev->maplist, head) {
51 /*
52 * Because the kernel-userspace ABI is fixed at a 32-bit offset
53 * while PCI resources may live above that, we only compare the
54 * lower 32 bits of the map offset for maps of type
55 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
56 * It is assumed that if a driver has more than one resource
57 * of each type, the lower 32 bits are different.
58 */
59 if (!entry->map ||
60 map->type != entry->map->type ||
61 entry->master != dev->primary->master)
62 continue;
63 switch (map->type) {
64 case _DRM_SHM:
65 if (map->flags != _DRM_CONTAINS_LOCK)
66 break;
67 return entry;
68 case _DRM_REGISTERS:
69 case _DRM_FRAME_BUFFER:
70 if ((entry->map->offset & 0xffffffff) ==
71 (map->offset & 0xffffffff))
72 return entry;
73 default: /* Make gcc happy */
74 ;
75 }
76 if (entry->map->offset == map->offset)
77 return entry;
78 }
79
80 return NULL;
81 }
82
83 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
84 unsigned long user_token, int hashed_handle, int shm)
85 {
86 int use_hashed_handle, shift;
87 unsigned long add;
88
89 use_hashed_handle = (user_token &~ 0xffffffffUL) || hashed_handle;
90 if (!use_hashed_handle) {
91 int ret;
92 hash->key = user_token >> PAGE_SHIFT;
93 ret = drm_ht_insert_item(&dev->map_hash, hash);
94 if (ret != -EINVAL)
95 return ret;
96 }
97
98 shift = 0;
99 add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
100 if (shm && (SHMLBA > PAGE_SIZE)) {
101 int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
102
103 /* For shared memory, we have to preserve the SHMLBA
104 * bits of the eventual vma->vm_pgoff value during
105 * mmap(). Otherwise we run into cache aliasing problems
106 * on some platforms. On these platforms, the pgoff of
107 * a mmap() request is used to pick a suitable virtual
108 * address for the mmap() region such that it will not
109 * cause cache aliasing problems.
110 *
111 * Therefore, make sure the SHMLBA relevant bits of the
112 * hash value we use are equal to those in the original
113 * kernel virtual address.
114 */
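		/*
		 * Worked example (illustrative, assuming PAGE_SIZE == 4 KiB and
		 * SHMLBA == 16 KiB): SHMLBA >> PAGE_SHIFT == 4, so bits ==
		 * ilog2(4) + 1 == 3, and the three low bits of
		 * (user_token >> PAGE_SHIFT) are folded into 'add'.  The handle
		 * that drm_ht_just_insert_please() hands back is thus intended
		 * to share those low pgoff bits with the original kernel
		 * virtual address, as the cache-aliasing note above requires.
		 */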
115 shift = bits;
116 add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
117 }
118
119 return drm_ht_just_insert_please(&dev->map_hash, hash,
120 user_token, 32 - PAGE_SHIFT - 3,
121 shift, add);
122 }
123
124 /**
125 * Core function to create a range of memory available for mapping by a
126 * non-root process.
127 *
128 * Adjusts the memory offset to its absolute value according to the mapping
129 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
130 * applicable and if supported by the kernel.
131 */
132 static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
133 unsigned int size, enum drm_map_type type,
134 enum drm_map_flags flags,
135 struct drm_map_list ** maplist)
136 {
137 struct drm_local_map *map;
138 struct drm_map_list *list;
139 drm_dma_handle_t *dmah;
140 unsigned long user_token;
141 int ret;
142
143 map = kmalloc(sizeof(*map), GFP_KERNEL);
144 if (!map)
145 return -ENOMEM;
146
147 map->offset = offset;
148 map->size = size;
149 map->flags = flags;
150 map->type = type;
151
152 /* Only allow shared memory to be removable since we only keep enough
153 * bookkeeping information about shared memory to allow for removal
154 * when processes fork.
155 */
156 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
157 kfree(map);
158 return -EINVAL;
159 }
160 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
161 (unsigned long long)map->offset, map->size, map->type);
162
163 /* Page-align _DRM_SHM maps. They are allocated here, so there is no security
164 * hole created by that, and it works around various broken drivers that use
165 * a non-aligned quantity to map the SAREA. --BenH
166 */
167 if (map->type == _DRM_SHM)
168 map->size = PAGE_ALIGN(map->size);
169
170 if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
171 kfree(map);
172 return -EINVAL;
173 }
174 map->mtrr = -1;
175 map->handle = NULL;
176
177 switch (map->type) {
178 case _DRM_REGISTERS:
179 case _DRM_FRAME_BUFFER:
180 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
181 if (map->offset + (map->size-1) < map->offset ||
182 map->offset < virt_to_phys(high_memory)) {
183 kfree(map);
184 return -EINVAL;
185 }
186 #endif
187 /* Some drivers preinitialize some maps, without the X Server
188 * needing to be aware of it. Therefore, we just return success
189 * when the server tries to create a duplicate map.
190 */
191 list = drm_find_matching_map(dev, map);
192 if (list != NULL) {
193 if (list->map->size != map->size) {
194 DRM_DEBUG("Matching maps of type %d with "
195 "mismatched sizes, (%ld vs %ld)\n",
196 map->type, map->size,
197 list->map->size);
198 list->map->size = map->size;
199 }
200
201 kfree(map);
202 *maplist = list;
203 return 0;
204 }
205
206 if (drm_core_has_MTRR(dev)) {
207 if (map->type == _DRM_FRAME_BUFFER ||
208 (map->flags & _DRM_WRITE_COMBINING)) {
209 map->mtrr = mtrr_add(map->offset, map->size,
210 MTRR_TYPE_WRCOMB, 1);
211 }
212 }
213 if (map->type == _DRM_REGISTERS) {
214 map->handle = ioremap(map->offset, map->size);
215 if (!map->handle) {
216 kfree(map);
217 return -ENOMEM;
218 }
219 }
220
221 break;
222 case _DRM_SHM:
223 list = drm_find_matching_map(dev, map);
224 if (list != NULL) {
225 if(list->map->size != map->size) {
226 DRM_DEBUG("Matching maps of type %d with "
227 "mismatched sizes, (%ld vs %ld)\n",
228 map->type, map->size, list->map->size);
229 list->map->size = map->size;
230 }
231
232 kfree(map);
233 *maplist = list;
234 return 0;
235 }
236 map->handle = vmalloc_user(map->size);
237 DRM_DEBUG("%lu %d %p\n",
238 map->size, drm_order(map->size), map->handle);
239 if (!map->handle) {
240 kfree(map);
241 return -ENOMEM;
242 }
243 map->offset = (unsigned long)map->handle;
244 if (map->flags & _DRM_CONTAINS_LOCK) {
245 /* Prevent a 2nd X Server from creating a 2nd lock */
246 if (dev->primary->master->lock.hw_lock != NULL) {
247 vfree(map->handle);
248 kfree(map);
249 return -EBUSY;
250 }
251 dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
252 }
253 break;
254 case _DRM_AGP: {
255 struct drm_agp_mem *entry;
256 int valid = 0;
257
258 if (!drm_core_has_AGP(dev)) {
259 kfree(map);
260 return -EINVAL;
261 }
262 #ifdef __alpha__
263 map->offset += dev->hose->mem_space->start;
264 #endif
265 /* In some cases (i810 driver), user space may have already
266 * added the AGP base itself, because dev->agp->base previously
267 * only got set during AGP enable. So, only add the base
268 * address if the map's offset isn't already within the
269 * aperture.
270 */
271 #ifdef __NetBSD__
272 if (map->offset < dev->agp->base ||
273 map->offset > dev->agp->base +
274 dev->agp->agp_info.ai_aperture_size - 1) {
275 map->offset += dev->agp->base;
276 }
277 #else
278 if (map->offset < dev->agp->base ||
279 map->offset > dev->agp->base +
280 dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
281 map->offset += dev->agp->base;
282 }
283 #endif
284 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
285
286 /* This assumes the DRM is in total control of AGP space.
287 * That is not always the case, as AGP can be under the control
288 * of user space (e.g. the i810 driver), in which case this loop
289 * is skipped.  So we only return -EPERM when dev->agp->memory
290 * is actually populated and the offset still falls outside it.
291 */
292 list_for_each_entry(entry, &dev->agp->memory, head) {
293 if ((map->offset >= entry->bound) &&
294 (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
295 valid = 1;
296 break;
297 }
298 }
299 if (!list_empty(&dev->agp->memory) && !valid) {
300 kfree(map);
301 return -EPERM;
302 }
303 DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
304 (unsigned long long)map->offset, map->size);
305
306 break;
307 }
308 case _DRM_GEM:
309 DRM_ERROR("tried to addmap GEM object\n");
310 break;
311 case _DRM_SCATTER_GATHER:
312 if (!dev->sg) {
313 kfree(map);
314 return -EINVAL;
315 }
316 map->offset += (unsigned long)dev->sg->virtual;
317 break;
318 case _DRM_CONSISTENT:
319 /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
320 * As we're limiting the address to 2^32-1 (or less),
321 * casting it down to 32 bits is no problem, but we
322 * need to point to a 64-bit variable first. */
323 dmah = drm_pci_alloc(dev, map->size, map->size);
324 if (!dmah) {
325 kfree(map);
326 return -ENOMEM;
327 }
328 map->handle = dmah->vaddr;
329 map->offset = (unsigned long)dmah->busaddr;
330 kfree(dmah);
331 break;
332 default:
333 kfree(map);
334 return -EINVAL;
335 }
336
337 list = kzalloc(sizeof(*list), GFP_KERNEL);
338 if (!list) {
339 if (map->type == _DRM_REGISTERS)
340 iounmap(map->handle);
341 kfree(map);
342 return -ENOMEM;
343 }
344 list->map = map;
345
346 mutex_lock(&dev->struct_mutex);
347 list_add(&list->head, &dev->maplist);
348
349 /* Assign a 32-bit handle */
350 /* We do it here so that dev->struct_mutex protects the increment */
351 user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
352 map->offset;
353 ret = drm_map_handle(dev, &list->hash, user_token, 0,
354 (map->type == _DRM_SHM));
355 if (ret) {
356 if (map->type == _DRM_REGISTERS)
357 iounmap(map->handle);
358 kfree(map);
359 kfree(list);
360 mutex_unlock(&dev->struct_mutex);
361 return ret;
362 }
363
364 list->user_token = list->hash.key << PAGE_SHIFT;
365 mutex_unlock(&dev->struct_mutex);
366
367 if (!(map->flags & _DRM_DRIVER))
368 list->master = dev->primary->master;
369 *maplist = list;
370 return 0;
371 }
372
373 int drm_addmap(struct drm_device * dev, resource_size_t offset,
374 unsigned int size, enum drm_map_type type,
375 enum drm_map_flags flags, struct drm_local_map ** map_ptr)
376 {
377 struct drm_map_list *list;
378 int rc;
379
380 rc = drm_addmap_core(dev, offset, size, type, flags, &list);
381 if (!rc)
382 *map_ptr = list->map;
383 return rc;
384 }
385
386 EXPORT_SYMBOL(drm_addmap);
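
/*
 * Illustrative sketch of typical in-driver usage (the BAR index and the
 * _DRM_READ_ONLY flag are assumptions, not taken from any particular
 * driver): map the register aperture at load time, and tear it down
 * again with drm_rmmap() on unload.
 *
 *	struct drm_local_map *regs;
 *	int ret;
 *
 *	ret = drm_addmap(dev, pci_resource_start(dev->pdev, 0),
 *			 pci_resource_len(dev->pdev, 0),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	if (ret)
 *		return ret;
 *	// regs->handle now holds the ioremap()ed kernel mapping
 */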
387
388 /**
389 * Ioctl to specify a range of memory that is available for mapping by a
390 * non-root process.
391 *
392 * \param inode device inode.
393 * \param file_priv DRM file private.
394 * \param cmd command.
395 * \param arg pointer to a drm_map structure.
396 * \return zero on success or a negative value on error.
397 *
398 */
399 int drm_addmap_ioctl(struct drm_device *dev, void *data,
400 struct drm_file *file_priv)
401 {
402 struct drm_map *map = data;
403 struct drm_map_list *maplist;
404 int err;
405
406 if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
407 return -EPERM;
408
409 err = drm_addmap_core(dev, map->offset, map->size, map->type,
410 map->flags, &maplist);
411
412 if (err)
413 return err;
414
415 /* Avoid a warning on 64-bit: this casting isn't very nice, but the API is already set, so it is too late to change it. */
416 map->handle = (void *)(unsigned long)maplist->user_token;
417 return 0;
418 }
419
420 /**
421 * Remove a map from the list and deallocate its resources if the mapping
422 * isn't in use.
423 *
424 * Searches for the map on drm_device::maplist, removes it from the list,
425 * checks whether it is still in use, and frees any associated resources
426 * (such as MTRRs) if it is not.
427 *
428 * \sa drm_addmap
429 */
430 int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
431 {
432 struct drm_map_list *r_list = NULL, *list_t;
433 drm_dma_handle_t dmah;
434 int found = 0;
435 struct drm_master *master;
436
437 /* Find the list entry for the map and remove it */
438 list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
439 if (r_list->map == map) {
440 master = r_list->master;
441 list_del(&r_list->head);
442 drm_ht_remove_key(&dev->map_hash,
443 r_list->user_token >> PAGE_SHIFT);
444 kfree(r_list);
445 found = 1;
446 break;
447 }
448 }
449
450 if (!found)
451 return -EINVAL;
452
453 switch (map->type) {
454 case _DRM_REGISTERS:
455 iounmap(map->handle);
456 /* FALLTHROUGH */
457 case _DRM_FRAME_BUFFER:
458 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
459 int retcode;
460 retcode = mtrr_del(map->mtrr, map->offset, map->size);
461 DRM_DEBUG("mtrr_del=%d\n", retcode);
462 }
463 break;
464 case _DRM_SHM:
465 vfree(map->handle);
466 if (master) {
467 if (dev->sigdata.lock == master->lock.hw_lock)
468 dev->sigdata.lock = NULL;
469 master->lock.hw_lock = NULL; /* SHM removed */
470 master->lock.file_priv = NULL;
471 wake_up_interruptible_all(&master->lock.lock_queue);
472 }
473 break;
474 case _DRM_AGP:
475 case _DRM_SCATTER_GATHER:
476 break;
477 case _DRM_CONSISTENT:
478 dmah.vaddr = map->handle;
479 dmah.busaddr = map->offset;
480 dmah.size = map->size;
481 __drm_pci_free(dev, &dmah);
482 break;
483 case _DRM_GEM:
484 DRM_ERROR("tried to rmmap GEM object\n");
485 break;
486 }
487 kfree(map);
488
489 return 0;
490 }
491 EXPORT_SYMBOL(drm_rmmap_locked);
492
493 int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
494 {
495 int ret;
496
497 mutex_lock(&dev->struct_mutex);
498 ret = drm_rmmap_locked(dev, map);
499 mutex_unlock(&dev->struct_mutex);
500
501 return ret;
502 }
503 EXPORT_SYMBOL(drm_rmmap);
504
505 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
506 * the last close of the device, and this is necessary for cleanup when things
507 * exit uncleanly. Therefore, having userland manually remove mappings seems
508 * like a pointless exercise since they're going away anyway.
509 *
510 * One use case might be after addmap is allowed for normal users for SHM and
511 * gets used by drivers that the server doesn't need to care about. This seems
512 * unlikely.
513 *
514 * \param inode device inode.
515 * \param file_priv DRM file private.
516 * \param cmd command.
517 * \param arg pointer to a struct drm_map structure.
518 * \return zero on success or a negative value on error.
519 */
520 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
521 struct drm_file *file_priv)
522 {
523 struct drm_map *request = data;
524 struct drm_local_map *map = NULL;
525 struct drm_map_list *r_list;
526 int ret;
527
528 mutex_lock(&dev->struct_mutex);
529 list_for_each_entry(r_list, &dev->maplist, head) {
530 if (r_list->map &&
531 r_list->user_token == (unsigned long)request->handle &&
532 r_list->map->flags & _DRM_REMOVABLE) {
533 map = r_list->map;
534 break;
535 }
536 }
537
538 /* List has wrapped around to the head pointer, or it's empty and we
539 * didn't find anything.
540 */
541 if (list_empty(&dev->maplist) || !map) {
542 mutex_unlock(&dev->struct_mutex);
543 return -EINVAL;
544 }
545
546 /* Register and framebuffer maps are permanent */
547 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
548 mutex_unlock(&dev->struct_mutex);
549 return 0;
550 }
551
552 ret = drm_rmmap_locked(dev, map);
553
554 mutex_unlock(&dev->struct_mutex);
555
556 return ret;
557 }
558
559 /**
560 * Cleanup after an error on one of the addbufs() functions.
561 *
562 * \param dev DRM device.
563 * \param entry buffer entry where the error occurred.
564 *
565 * Frees any pages and buffers associated with the given entry.
566 */
567 static void drm_cleanup_buf_error(struct drm_device * dev,
568 struct drm_buf_entry * entry)
569 {
570 int i;
571
572 if (entry->seg_count) {
573 for (i = 0; i < entry->seg_count; i++) {
574 if (entry->seglist[i]) {
575 drm_pci_free(dev, entry->seglist[i]);
576 }
577 }
578 kfree(entry->seglist);
579
580 entry->seg_count = 0;
581 }
582
583 if (entry->buf_count) {
584 for (i = 0; i < entry->buf_count; i++) {
585 kfree(entry->buflist[i].dev_private);
586 }
587 kfree(entry->buflist);
588
589 entry->buf_count = 0;
590 }
591 }
592
593 #if __OS_HAS_AGP
594 /**
595 * Add AGP buffers for DMA transfers.
596 *
597 * \param dev struct drm_device to which the buffers are to be added.
598 * \param request pointer to a struct drm_buf_desc describing the request.
599 * \return zero on success or a negative number on failure.
600 *
601 * After some sanity checks, creates a drm_buf structure for each buffer and
602 * reallocates the buffer list of the same size order to accommodate the new
603 * buffers.
604 */
605 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
606 {
607 struct drm_device_dma *dma = dev->dma;
608 struct drm_buf_entry *entry;
609 struct drm_agp_mem *agp_entry;
610 struct drm_buf *buf;
611 unsigned long offset;
612 unsigned long agp_offset;
613 int count;
614 int order;
615 int size;
616 int alignment;
617 int page_order;
618 int total;
619 int byte_count;
620 int i, valid;
621 struct drm_buf **temp_buflist;
622
623 if (!dma)
624 return -EINVAL;
625
626 count = request->count;
627 order = drm_order(request->size);
628 size = 1 << order;
629
630 alignment = (request->flags & _DRM_PAGE_ALIGN)
631 ? PAGE_ALIGN(size) : size;
632 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
633 total = PAGE_SIZE << page_order;
634
635 byte_count = 0;
636 agp_offset = dev->agp->base + request->agp_start;
637
638 DRM_DEBUG("count: %d\n", count);
639 DRM_DEBUG("order: %d\n", order);
640 DRM_DEBUG("size: %d\n", size);
641 DRM_DEBUG("agp_offset: %lx\n", agp_offset);
642 DRM_DEBUG("alignment: %d\n", alignment);
643 DRM_DEBUG("page_order: %d\n", page_order);
644 DRM_DEBUG("total: %d\n", total);
645
646 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
647 return -EINVAL;
648
649 /* Make sure buffers are located in AGP memory that we own */
650 valid = 0;
651 list_for_each_entry(agp_entry, &dev->agp->memory, head) {
652 if ((agp_offset >= agp_entry->bound) &&
653 (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
654 valid = 1;
655 break;
656 }
657 }
658 if (!list_empty(&dev->agp->memory) && !valid) {
659 DRM_DEBUG("zone invalid\n");
660 return -EINVAL;
661 }
662 spin_lock(&dev->count_lock);
663 if (dev->buf_use) {
664 spin_unlock(&dev->count_lock);
665 return -EBUSY;
666 }
667 atomic_inc(&dev->buf_alloc);
668 spin_unlock(&dev->count_lock);
669
670 mutex_lock(&dev->struct_mutex);
671 entry = &dma->bufs[order];
672 if (entry->buf_count) {
673 mutex_unlock(&dev->struct_mutex);
674 atomic_dec(&dev->buf_alloc);
675 return -ENOMEM; /* May only call once for each order */
676 }
677
678 if (count < 0 || count > 4096) {
679 mutex_unlock(&dev->struct_mutex);
680 atomic_dec(&dev->buf_alloc);
681 return -EINVAL;
682 }
683
684 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
685 if (!entry->buflist) {
686 mutex_unlock(&dev->struct_mutex);
687 atomic_dec(&dev->buf_alloc);
688 return -ENOMEM;
689 }
690
691 entry->buf_size = size;
692 entry->page_order = page_order;
693
694 offset = 0;
695
696 while (entry->buf_count < count) {
697 buf = &entry->buflist[entry->buf_count];
698 buf->idx = dma->buf_count + entry->buf_count;
699 buf->total = alignment;
700 buf->order = order;
701 buf->used = 0;
702
703 buf->offset = (dma->byte_count + offset);
704 buf->bus_address = agp_offset + offset;
705 buf->address = (void *)(agp_offset + offset);
706 buf->next = NULL;
707 buf->waiting = 0;
708 buf->pending = 0;
709 buf->file_priv = NULL;
710
711 buf->dev_priv_size = dev->driver->dev_priv_size;
712 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
713 if (!buf->dev_private) {
714 /* Set count correctly so we free the proper amount. */
715 entry->buf_count = count;
716 drm_cleanup_buf_error(dev, entry);
717 mutex_unlock(&dev->struct_mutex);
718 atomic_dec(&dev->buf_alloc);
719 return -ENOMEM;
720 }
721
722 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
723
724 offset += alignment;
725 entry->buf_count++;
726 byte_count += PAGE_SIZE << page_order;
727 }
728
729 DRM_DEBUG("byte_count: %d\n", byte_count);
730
731 temp_buflist = krealloc(dma->buflist,
732 (dma->buf_count + entry->buf_count) *
733 sizeof(*dma->buflist), GFP_KERNEL);
734 if (!temp_buflist) {
735 /* Free the entry because it isn't valid */
736 drm_cleanup_buf_error(dev, entry);
737 mutex_unlock(&dev->struct_mutex);
738 atomic_dec(&dev->buf_alloc);
739 return -ENOMEM;
740 }
741 dma->buflist = temp_buflist;
742
743 for (i = 0; i < entry->buf_count; i++) {
744 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
745 }
746
747 dma->buf_count += entry->buf_count;
748 dma->seg_count += entry->seg_count;
749 dma->page_count += byte_count >> PAGE_SHIFT;
750 dma->byte_count += byte_count;
751
752 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
753 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
754
755 mutex_unlock(&dev->struct_mutex);
756
757 request->count = entry->buf_count;
758 request->size = size;
759
760 dma->flags = _DRM_DMA_USE_AGP;
761
762 atomic_dec(&dev->buf_alloc);
763 return 0;
764 }
765 EXPORT_SYMBOL(drm_addbufs_agp);
766 #endif /* __OS_HAS_AGP */
767
768 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
769 {
770 struct drm_device_dma *dma = dev->dma;
771 int count;
772 int order;
773 int size;
774 int total;
775 int page_order;
776 struct drm_buf_entry *entry;
777 drm_dma_handle_t *dmah;
778 struct drm_buf *buf;
779 int alignment;
780 unsigned long offset;
781 int i;
782 int byte_count;
783 int page_count;
784 unsigned long *temp_pagelist;
785 struct drm_buf **temp_buflist;
786
787 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
788 return -EINVAL;
789
790 if (!dma)
791 return -EINVAL;
792
793 if (!capable(CAP_SYS_ADMIN))
794 return -EPERM;
795
796 count = request->count;
797 order = drm_order(request->size);
798 size = 1 << order;
799
800 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
801 request->count, request->size, size, order);
802
803 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
804 return -EINVAL;
805
806 alignment = (request->flags & _DRM_PAGE_ALIGN)
807 ? PAGE_ALIGN(size) : size;
808 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
809 total = PAGE_SIZE << page_order;
810
811 spin_lock(&dev->count_lock);
812 if (dev->buf_use) {
813 spin_unlock(&dev->count_lock);
814 return -EBUSY;
815 }
816 atomic_inc(&dev->buf_alloc);
817 spin_unlock(&dev->count_lock);
818
819 mutex_lock(&dev->struct_mutex);
820 entry = &dma->bufs[order];
821 if (entry->buf_count) {
822 mutex_unlock(&dev->struct_mutex);
823 atomic_dec(&dev->buf_alloc);
824 return -ENOMEM; /* May only call once for each order */
825 }
826
827 if (count < 0 || count > 4096) {
828 mutex_unlock(&dev->struct_mutex);
829 atomic_dec(&dev->buf_alloc);
830 return -EINVAL;
831 }
832
833 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
834 if (!entry->buflist) {
835 mutex_unlock(&dev->struct_mutex);
836 atomic_dec(&dev->buf_alloc);
837 return -ENOMEM;
838 }
839
840 entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
841 if (!entry->seglist) {
842 kfree(entry->buflist);
843 mutex_unlock(&dev->struct_mutex);
844 atomic_dec(&dev->buf_alloc);
845 return -ENOMEM;
846 }
847
848 /* Keep the original pagelist until we know all the allocations
849 * have succeeded
850 */
851 temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
852 sizeof(*dma->pagelist), GFP_KERNEL);
853 if (!temp_pagelist) {
854 kfree(entry->buflist);
855 kfree(entry->seglist);
856 mutex_unlock(&dev->struct_mutex);
857 atomic_dec(&dev->buf_alloc);
858 return -ENOMEM;
859 }
860 memcpy(temp_pagelist,
861 dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
862 DRM_DEBUG("pagelist: %d entries\n",
863 dma->page_count + (count << page_order));
864
865 entry->buf_size = size;
866 entry->page_order = page_order;
867 byte_count = 0;
868 page_count = 0;
869
870 while (entry->buf_count < count) {
871
872 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
873
874 if (!dmah) {
875 /* Set count correctly so we free the proper amount. */
876 entry->buf_count = count;
877 entry->seg_count = count;
878 drm_cleanup_buf_error(dev, entry);
879 kfree(temp_pagelist);
880 mutex_unlock(&dev->struct_mutex);
881 atomic_dec(&dev->buf_alloc);
882 return -ENOMEM;
883 }
884 entry->seglist[entry->seg_count++] = dmah;
885 for (i = 0; i < (1 << page_order); i++) {
886 DRM_DEBUG("page %d @ 0x%08lx\n",
887 dma->page_count + page_count,
888 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
889 temp_pagelist[dma->page_count + page_count++]
890 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
891 }
892 for (offset = 0;
893 offset + size <= total && entry->buf_count < count;
894 offset += alignment, ++entry->buf_count) {
895 buf = &entry->buflist[entry->buf_count];
896 buf->idx = dma->buf_count + entry->buf_count;
897 buf->total = alignment;
898 buf->order = order;
899 buf->used = 0;
900 buf->offset = (dma->byte_count + byte_count + offset);
901 buf->address = (void *)(dmah->vaddr + offset);
902 buf->bus_address = dmah->busaddr + offset;
903 buf->next = NULL;
904 buf->waiting = 0;
905 buf->pending = 0;
906 buf->file_priv = NULL;
907
908 buf->dev_priv_size = dev->driver->dev_priv_size;
909 buf->dev_private = kzalloc(buf->dev_priv_size,
910 GFP_KERNEL);
911 if (!buf->dev_private) {
912 /* Set count correctly so we free the proper amount. */
913 entry->buf_count = count;
914 entry->seg_count = count;
915 drm_cleanup_buf_error(dev, entry);
916 kfree(temp_pagelist);
917 mutex_unlock(&dev->struct_mutex);
918 atomic_dec(&dev->buf_alloc);
919 return -ENOMEM;
920 }
921
922 DRM_DEBUG("buffer %d @ %p\n",
923 entry->buf_count, buf->address);
924 }
925 byte_count += PAGE_SIZE << page_order;
926 }
927
928 temp_buflist = krealloc(dma->buflist,
929 (dma->buf_count + entry->buf_count) *
930 sizeof(*dma->buflist), GFP_KERNEL);
931 if (!temp_buflist) {
932 /* Free the entry because it isn't valid */
933 drm_cleanup_buf_error(dev, entry);
934 kfree(temp_pagelist);
935 mutex_unlock(&dev->struct_mutex);
936 atomic_dec(&dev->buf_alloc);
937 return -ENOMEM;
938 }
939 dma->buflist = temp_buflist;
940
941 for (i = 0; i < entry->buf_count; i++) {
942 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
943 }
944
945 /* No allocations failed, so now we can replace the original pagelist
946 * with the new one.
947 */
948 if (dma->page_count) {
949 kfree(dma->pagelist);
950 }
951 dma->pagelist = temp_pagelist;
952
953 dma->buf_count += entry->buf_count;
954 dma->seg_count += entry->seg_count;
955 dma->page_count += entry->seg_count << page_order;
956 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
957
958 mutex_unlock(&dev->struct_mutex);
959
960 request->count = entry->buf_count;
961 request->size = size;
962
963 if (request->flags & _DRM_PCI_BUFFER_RO)
964 dma->flags = _DRM_DMA_USE_PCI_RO;
965
966 atomic_dec(&dev->buf_alloc);
967 return 0;
968
969 }
970 EXPORT_SYMBOL(drm_addbufs_pci);
971
972 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
973 {
974 struct drm_device_dma *dma = dev->dma;
975 struct drm_buf_entry *entry;
976 struct drm_buf *buf;
977 unsigned long offset;
978 unsigned long agp_offset;
979 int count;
980 int order;
981 int size;
982 int alignment;
983 int page_order;
984 int total;
985 int byte_count;
986 int i;
987 struct drm_buf **temp_buflist;
988
989 if (!drm_core_check_feature(dev, DRIVER_SG))
990 return -EINVAL;
991
992 if (!dma)
993 return -EINVAL;
994
995 if (!capable(CAP_SYS_ADMIN))
996 return -EPERM;
997
998 count = request->count;
999 order = drm_order(request->size);
1000 size = 1 << order;
1001
1002 alignment = (request->flags & _DRM_PAGE_ALIGN)
1003 ? PAGE_ALIGN(size) : size;
1004 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1005 total = PAGE_SIZE << page_order;
1006
1007 byte_count = 0;
1008 agp_offset = request->agp_start;
1009
1010 DRM_DEBUG("count: %d\n", count);
1011 DRM_DEBUG("order: %d\n", order);
1012 DRM_DEBUG("size: %d\n", size);
1013 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1014 DRM_DEBUG("alignment: %d\n", alignment);
1015 DRM_DEBUG("page_order: %d\n", page_order);
1016 DRM_DEBUG("total: %d\n", total);
1017
1018 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1019 return -EINVAL;
1020
1021 spin_lock(&dev->count_lock);
1022 if (dev->buf_use) {
1023 spin_unlock(&dev->count_lock);
1024 return -EBUSY;
1025 }
1026 atomic_inc(&dev->buf_alloc);
1027 spin_unlock(&dev->count_lock);
1028
1029 mutex_lock(&dev->struct_mutex);
1030 entry = &dma->bufs[order];
1031 if (entry->buf_count) {
1032 mutex_unlock(&dev->struct_mutex);
1033 atomic_dec(&dev->buf_alloc);
1034 return -ENOMEM; /* May only call once for each order */
1035 }
1036
1037 if (count < 0 || count > 4096) {
1038 mutex_unlock(&dev->struct_mutex);
1039 atomic_dec(&dev->buf_alloc);
1040 return -EINVAL;
1041 }
1042
1043 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1044 GFP_KERNEL);
1045 if (!entry->buflist) {
1046 mutex_unlock(&dev->struct_mutex);
1047 atomic_dec(&dev->buf_alloc);
1048 return -ENOMEM;
1049 }
1050
1051 entry->buf_size = size;
1052 entry->page_order = page_order;
1053
1054 offset = 0;
1055
1056 while (entry->buf_count < count) {
1057 buf = &entry->buflist[entry->buf_count];
1058 buf->idx = dma->buf_count + entry->buf_count;
1059 buf->total = alignment;
1060 buf->order = order;
1061 buf->used = 0;
1062
1063 buf->offset = (dma->byte_count + offset);
1064 buf->bus_address = agp_offset + offset;
1065 buf->address = (void *)(agp_offset + offset
1066 + (unsigned long)dev->sg->virtual);
1067 buf->next = NULL;
1068 buf->waiting = 0;
1069 buf->pending = 0;
1070 buf->file_priv = NULL;
1071
1072 buf->dev_priv_size = dev->driver->dev_priv_size;
1073 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1074 if (!buf->dev_private) {
1075 /* Set count correctly so we free the proper amount. */
1076 entry->buf_count = count;
1077 drm_cleanup_buf_error(dev, entry);
1078 mutex_unlock(&dev->struct_mutex);
1079 atomic_dec(&dev->buf_alloc);
1080 return -ENOMEM;
1081 }
1082
1083 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1084
1085 offset += alignment;
1086 entry->buf_count++;
1087 byte_count += PAGE_SIZE << page_order;
1088 }
1089
1090 DRM_DEBUG("byte_count: %d\n", byte_count);
1091
1092 temp_buflist = krealloc(dma->buflist,
1093 (dma->buf_count + entry->buf_count) *
1094 sizeof(*dma->buflist), GFP_KERNEL);
1095 if (!temp_buflist) {
1096 /* Free the entry because it isn't valid */
1097 drm_cleanup_buf_error(dev, entry);
1098 mutex_unlock(&dev->struct_mutex);
1099 atomic_dec(&dev->buf_alloc);
1100 return -ENOMEM;
1101 }
1102 dma->buflist = temp_buflist;
1103
1104 for (i = 0; i < entry->buf_count; i++) {
1105 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1106 }
1107
1108 dma->buf_count += entry->buf_count;
1109 dma->seg_count += entry->seg_count;
1110 dma->page_count += byte_count >> PAGE_SHIFT;
1111 dma->byte_count += byte_count;
1112
1113 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1114 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1115
1116 mutex_unlock(&dev->struct_mutex);
1117
1118 request->count = entry->buf_count;
1119 request->size = size;
1120
1121 dma->flags = _DRM_DMA_USE_SG;
1122
1123 atomic_dec(&dev->buf_alloc);
1124 return 0;
1125 }
1126
1127 static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1128 {
1129 struct drm_device_dma *dma = dev->dma;
1130 struct drm_buf_entry *entry;
1131 struct drm_buf *buf;
1132 unsigned long offset;
1133 unsigned long agp_offset;
1134 int count;
1135 int order;
1136 int size;
1137 int alignment;
1138 int page_order;
1139 int total;
1140 int byte_count;
1141 int i;
1142 struct drm_buf **temp_buflist;
1143
1144 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1145 return -EINVAL;
1146
1147 if (!dma)
1148 return -EINVAL;
1149
1150 if (!capable(CAP_SYS_ADMIN))
1151 return -EPERM;
1152
1153 count = request->count;
1154 order = drm_order(request->size);
1155 size = 1 << order;
1156
1157 alignment = (request->flags & _DRM_PAGE_ALIGN)
1158 ? PAGE_ALIGN(size) : size;
1159 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1160 total = PAGE_SIZE << page_order;
1161
1162 byte_count = 0;
1163 agp_offset = request->agp_start;
1164
1165 DRM_DEBUG("count: %d\n", count);
1166 DRM_DEBUG("order: %d\n", order);
1167 DRM_DEBUG("size: %d\n", size);
1168 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1169 DRM_DEBUG("alignment: %d\n", alignment);
1170 DRM_DEBUG("page_order: %d\n", page_order);
1171 DRM_DEBUG("total: %d\n", total);
1172
1173 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1174 return -EINVAL;
1175
1176 spin_lock(&dev->count_lock);
1177 if (dev->buf_use) {
1178 spin_unlock(&dev->count_lock);
1179 return -EBUSY;
1180 }
1181 atomic_inc(&dev->buf_alloc);
1182 spin_unlock(&dev->count_lock);
1183
1184 mutex_lock(&dev->struct_mutex);
1185 entry = &dma->bufs[order];
1186 if (entry->buf_count) {
1187 mutex_unlock(&dev->struct_mutex);
1188 atomic_dec(&dev->buf_alloc);
1189 return -ENOMEM; /* May only call once for each order */
1190 }
1191
1192 if (count < 0 || count > 4096) {
1193 mutex_unlock(&dev->struct_mutex);
1194 atomic_dec(&dev->buf_alloc);
1195 return -EINVAL;
1196 }
1197
1198 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1199 GFP_KERNEL);
1200 if (!entry->buflist) {
1201 mutex_unlock(&dev->struct_mutex);
1202 atomic_dec(&dev->buf_alloc);
1203 return -ENOMEM;
1204 }
1205
1206 entry->buf_size = size;
1207 entry->page_order = page_order;
1208
1209 offset = 0;
1210
1211 while (entry->buf_count < count) {
1212 buf = &entry->buflist[entry->buf_count];
1213 buf->idx = dma->buf_count + entry->buf_count;
1214 buf->total = alignment;
1215 buf->order = order;
1216 buf->used = 0;
1217
1218 buf->offset = (dma->byte_count + offset);
1219 buf->bus_address = agp_offset + offset;
1220 buf->address = (void *)(agp_offset + offset);
1221 buf->next = NULL;
1222 buf->waiting = 0;
1223 buf->pending = 0;
1224 buf->file_priv = NULL;
1225
1226 buf->dev_priv_size = dev->driver->dev_priv_size;
1227 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1228 if (!buf->dev_private) {
1229 /* Set count correctly so we free the proper amount. */
1230 entry->buf_count = count;
1231 drm_cleanup_buf_error(dev, entry);
1232 mutex_unlock(&dev->struct_mutex);
1233 atomic_dec(&dev->buf_alloc);
1234 return -ENOMEM;
1235 }
1236
1237 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1238
1239 offset += alignment;
1240 entry->buf_count++;
1241 byte_count += PAGE_SIZE << page_order;
1242 }
1243
1244 DRM_DEBUG("byte_count: %d\n", byte_count);
1245
1246 temp_buflist = krealloc(dma->buflist,
1247 (dma->buf_count + entry->buf_count) *
1248 sizeof(*dma->buflist), GFP_KERNEL);
1249 if (!temp_buflist) {
1250 /* Free the entry because it isn't valid */
1251 drm_cleanup_buf_error(dev, entry);
1252 mutex_unlock(&dev->struct_mutex);
1253 atomic_dec(&dev->buf_alloc);
1254 return -ENOMEM;
1255 }
1256 dma->buflist = temp_buflist;
1257
1258 for (i = 0; i < entry->buf_count; i++) {
1259 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1260 }
1261
1262 dma->buf_count += entry->buf_count;
1263 dma->seg_count += entry->seg_count;
1264 dma->page_count += byte_count >> PAGE_SHIFT;
1265 dma->byte_count += byte_count;
1266
1267 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1268 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1269
1270 mutex_unlock(&dev->struct_mutex);
1271
1272 request->count = entry->buf_count;
1273 request->size = size;
1274
1275 dma->flags = _DRM_DMA_USE_FB;
1276
1277 atomic_dec(&dev->buf_alloc);
1278 return 0;
1279 }
1280
1281
1282 /**
1283 * Add buffers for DMA transfers (ioctl).
1284 *
1285 * \param inode device inode.
1286 * \param file_priv DRM file private.
1287 * \param cmd command.
1288 * \param arg pointer to a struct drm_buf_desc request.
1289 * \return zero on success or a negative number on failure.
1290 *
1291 * According to the memory type specified in drm_buf_desc::flags and the
1292 * build options, it dispatches the call either to addbufs_agp(),
1293 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1294 * PCI memory respectively.
1295 */
1296 int drm_addbufs(struct drm_device *dev, void *data,
1297 struct drm_file *file_priv)
1298 {
1299 struct drm_buf_desc *request = data;
1300 int ret;
1301
1302 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1303 return -EINVAL;
1304
1305 #if __OS_HAS_AGP
1306 if (request->flags & _DRM_AGP_BUFFER)
1307 ret = drm_addbufs_agp(dev, request);
1308 else
1309 #endif
1310 if (request->flags & _DRM_SG_BUFFER)
1311 ret = drm_addbufs_sg(dev, request);
1312 else if (request->flags & _DRM_FB_BUFFER)
1313 ret = drm_addbufs_fb(dev, request);
1314 else
1315 ret = drm_addbufs_pci(dev, request);
1316
1317 return ret;
1318 }
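
/*
 * Illustrative sketch (the values are assumptions, not from any particular
 * driver) of how user space reaches this entry point: the DRI client fills
 * in a struct drm_buf_desc and issues DRM_IOCTL_ADD_BUFS.
 *
 *	struct drm_buf_desc desc = {
 *		.count = 32,
 *		.size  = 65536,
 *		.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *		.agp_start = 0,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc))
 *		return -errno;
 *	// desc.count and desc.size now report what was actually allocated
 */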
1319
1320 /**
1321 * Get information about the buffer mappings.
1322 *
1323 * This was originally meant for debugging purposes, or for use by a
1324 * sophisticated client library to determine how best to use the available
1325 * buffers (e.g., large buffers can be used for image transfer).
1326 *
1327 * \param inode device inode.
1328 * \param file_priv DRM file private.
1329 * \param cmd command.
1330 * \param arg pointer to a drm_buf_info structure.
1331 * \return zero on success or a negative number on failure.
1332 *
1333 * Increments drm_device::buf_use while holding the drm_device::count_lock
1334 * lock, preventing allocation of more buffers after this call. Information
1335 * about each requested buffer is then copied into user space.
1336 */
1337 int drm_infobufs(struct drm_device *dev, void *data,
1338 struct drm_file *file_priv)
1339 {
1340 struct drm_device_dma *dma = dev->dma;
1341 struct drm_buf_info *request = data;
1342 int i;
1343 int count;
1344
1345 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1346 return -EINVAL;
1347
1348 if (!dma)
1349 return -EINVAL;
1350
1351 spin_lock(&dev->count_lock);
1352 if (atomic_read(&dev->buf_alloc)) {
1353 spin_unlock(&dev->count_lock);
1354 return -EBUSY;
1355 }
1356 ++dev->buf_use; /* Can't allocate more after this call */
1357 spin_unlock(&dev->count_lock);
1358
1359 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1360 if (dma->bufs[i].buf_count)
1361 ++count;
1362 }
1363
1364 DRM_DEBUG("count = %d\n", count);
1365
1366 if (request->count >= count) {
1367 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1368 if (dma->bufs[i].buf_count) {
1369 struct drm_buf_desc __user *to =
1370 &request->list[count];
1371 struct drm_buf_entry *from = &dma->bufs[i];
1372 struct drm_freelist *list = &dma->bufs[i].freelist;
1373 if (copy_to_user(&to->count,
1374 &from->buf_count,
1375 sizeof(from->buf_count)) ||
1376 copy_to_user(&to->size,
1377 &from->buf_size,
1378 sizeof(from->buf_size)) ||
1379 copy_to_user(&to->low_mark,
1380 &list->low_mark,
1381 sizeof(list->low_mark)) ||
1382 copy_to_user(&to->high_mark,
1383 &list->high_mark,
1384 sizeof(list->high_mark)))
1385 return -EFAULT;
1386
1387 DRM_DEBUG("%d %d %d %d %d\n",
1388 i,
1389 dma->bufs[i].buf_count,
1390 dma->bufs[i].buf_size,
1391 dma->bufs[i].freelist.low_mark,
1392 dma->bufs[i].freelist.high_mark);
1393 ++count;
1394 }
1395 }
1396 }
1397 request->count = count;
1398
1399 return 0;
1400 }
1401
1402 /**
1403 * Specifies a low and high water mark for buffer allocation.
1404 *
1405 * \param inode device inode.
1406 * \param file_priv DRM file private.
1407 * \param cmd command.
1408 * \param arg a pointer to a drm_buf_desc structure.
1409 * \return zero on success or a negative number on failure.
1410 *
1411 * Verifies that the size order is within the admissible range and updates
1412 * the low and high water marks of the respective drm_device_dma::bufs entry.
1413 *
1414 * \note This ioctl is deprecated and mostly never used.
1415 */
1416 int drm_markbufs(struct drm_device *dev, void *data,
1417 struct drm_file *file_priv)
1418 {
1419 struct drm_device_dma *dma = dev->dma;
1420 struct drm_buf_desc *request = data;
1421 int order;
1422 struct drm_buf_entry *entry;
1423
1424 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1425 return -EINVAL;
1426
1427 if (!dma)
1428 return -EINVAL;
1429
1430 DRM_DEBUG("%d, %d, %d\n",
1431 request->size, request->low_mark, request->high_mark);
1432 order = drm_order(request->size);
1433 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1434 return -EINVAL;
1435 entry = &dma->bufs[order];
1436
1437 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1438 return -EINVAL;
1439 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1440 return -EINVAL;
1441
1442 entry->freelist.low_mark = request->low_mark;
1443 entry->freelist.high_mark = request->high_mark;
1444
1445 return 0;
1446 }
1447
1448 /**
1449 * Unreserve the buffers in the list, previously reserved using drmDMA.
1450 *
1451 * \param inode device inode.
1452 * \param file_priv DRM file private.
1453 * \param cmd command.
1454 * \param arg pointer to a drm_buf_free structure.
1455 * \return zero on success or a negative number on failure.
1456 *
1457 * Calls free_buffer() for each used buffer.
1458 * This function is primarily used for debugging.
1459 */
1460 int drm_freebufs(struct drm_device *dev, void *data,
1461 struct drm_file *file_priv)
1462 {
1463 struct drm_device_dma *dma = dev->dma;
1464 struct drm_buf_free *request = data;
1465 int i;
1466 int idx;
1467 struct drm_buf *buf;
1468
1469 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1470 return -EINVAL;
1471
1472 if (!dma)
1473 return -EINVAL;
1474
1475 DRM_DEBUG("%d\n", request->count);
1476 for (i = 0; i < request->count; i++) {
1477 if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1478 return -EFAULT;
1479 if (idx < 0 || idx >= dma->buf_count) {
1480 DRM_ERROR("Index %d (of %d max)\n",
1481 idx, dma->buf_count - 1);
1482 return -EINVAL;
1483 }
1484 buf = dma->buflist[idx];
1485 if (buf->file_priv != file_priv) {
1486 DRM_ERROR("Process %d freeing buffer not owned\n",
1487 task_pid_nr(current));
1488 return -EINVAL;
1489 }
1490 drm_free_buffer(dev, buf);
1491 }
1492
1493 return 0;
1494 }
1495
1496 /**
1497 * Maps all of the DMA buffers into client-virtual space (ioctl).
1498 *
1499 * \param inode device inode.
1500 * \param file_priv DRM file private.
1501 * \param cmd command.
1502 * \param arg pointer to a drm_buf_map structure.
1503 * \return zero on success or a negative number on failure.
1504 *
1505 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1506 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1507 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1508 * drm_mmap_dma().
1509 */
1510 int drm_mapbufs(struct drm_device *dev, void *data,
1511 struct drm_file *file_priv)
1512 {
1513 struct drm_device_dma *dma = dev->dma;
1514 int retcode = 0;
1515 const int zero = 0;
1516 unsigned long virtual;
1517 unsigned long address;
1518 struct drm_buf_map *request = data;
1519 int i;
1520
1521 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1522 return -EINVAL;
1523
1524 if (!dma)
1525 return -EINVAL;
1526
1527 spin_lock(&dev->count_lock);
1528 if (atomic_read(&dev->buf_alloc)) {
1529 spin_unlock(&dev->count_lock);
1530 return -EBUSY;
1531 }
1532 dev->buf_use++; /* Can't allocate more after this call */
1533 spin_unlock(&dev->count_lock);
1534
1535 if (request->count >= dma->buf_count) {
1536 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1537 || (drm_core_check_feature(dev, DRIVER_SG)
1538 && (dma->flags & _DRM_DMA_USE_SG))
1539 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1540 && (dma->flags & _DRM_DMA_USE_FB))) {
1541 struct drm_local_map *map = dev->agp_buffer_map;
1542 unsigned long token = dev->agp_buffer_token;
1543
1544 if (!map) {
1545 retcode = -EINVAL;
1546 goto done;
1547 }
1548 virtual = vm_mmap(file_priv->filp, 0, map->size,
1549 PROT_READ | PROT_WRITE,
1550 MAP_SHARED,
1551 token);
1552 } else {
1553 virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
1554 PROT_READ | PROT_WRITE,
1555 MAP_SHARED, 0);
1556 }
1557 if (virtual > -1024UL) {
1558 /* Real error */
1559 retcode = (signed long)virtual;
1560 goto done;
1561 }
1562 request->virtual = (void __user *)virtual;
1563
1564 for (i = 0; i < dma->buf_count; i++) {
1565 if (copy_to_user(&request->list[i].idx,
1566 &dma->buflist[i]->idx,
1567 sizeof(request->list[0].idx))) {
1568 retcode = -EFAULT;
1569 goto done;
1570 }
1571 if (copy_to_user(&request->list[i].total,
1572 &dma->buflist[i]->total,
1573 sizeof(request->list[0].total))) {
1574 retcode = -EFAULT;
1575 goto done;
1576 }
1577 if (copy_to_user(&request->list[i].used,
1578 &zero, sizeof(zero))) {
1579 retcode = -EFAULT;
1580 goto done;
1581 }
1582 address = virtual + dma->buflist[i]->offset; /* *** */
1583 if (copy_to_user(&request->list[i].address,
1584 &address, sizeof(address))) {
1585 retcode = -EFAULT;
1586 goto done;
1587 }
1588 }
1589 }
1590 done:
1591 request->count = dma->buf_count;
1592 DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1593
1594 return retcode;
1595 }
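
/*
 * Illustrative sketch of the user-space side (the array size and error
 * handling are assumptions): the client passes a drm_buf_pub array large
 * enough for every buffer, and on success reads back the mmap'd base
 * address plus the per-buffer entries.
 *
 *	struct drm_buf_pub list[256];
 *	struct drm_buf_map bm = { .count = 256, .list = list };
 *
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm))
 *		return -errno;
 *	// bm.virtual is the base of the mapping; list[i].address points
 *	// at buffer list[i].idx within it
 */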
1596
1597 /**
1598 * Compute size order. Returns the exponent of the smallest power of two
1599 * which is greater than or equal to the given number.
1600 *
1601 * \param size size.
1602 * \return order.
1603 *
1604 * \todo Can be made faster.
1605 */
1606 int drm_order(unsigned long size)
1607 {
1608 int order;
1609 unsigned long tmp;
1610
1611 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1612
1613 if (size & (size - 1))
1614 ++order;
1615
1616 return order;
1617 }
1618 EXPORT_SYMBOL(drm_order);
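
/*
 * Worked examples (following the definition above): drm_order(1) == 0,
 * drm_order(4096) == 12, and drm_order(4100) == 13, since 2^13 == 8192 is
 * the smallest power of two >= 4100.
 */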
1619