/* drm_bufs.c, revision 1.1.1.1.4.3 */
      1 /**
      2  * \file drm_bufs.c
      3  * Generic buffer template
      4  *
      5  * \author Rickard E. (Rik) Faith <faith (at) valinux.com>
      6  * \author Gareth Hughes <gareth (at) valinux.com>
      7  */
      8 
      9 /*
     10  * Created: Thu Nov 23 03:10:50 2000 by gareth (at) valinux.com
     11  *
     12  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
     13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
     14  * All Rights Reserved.
     15  *
     16  * Permission is hereby granted, free of charge, to any person obtaining a
     17  * copy of this software and associated documentation files (the "Software"),
     18  * to deal in the Software without restriction, including without limitation
     19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     20  * and/or sell copies of the Software, and to permit persons to whom the
     21  * Software is furnished to do so, subject to the following conditions:
     22  *
     23  * The above copyright notice and this permission notice (including the next
     24  * paragraph) shall be included in all copies or substantial portions of the
     25  * Software.
     26  *
     27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
     31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     33  * OTHER DEALINGS IN THE SOFTWARE.
     34  */
     35 
     36 #include <linux/vmalloc.h>
     37 #include <linux/slab.h>
     38 #include <linux/sched.h>
     39 #include <linux/log2.h>
     40 #include <linux/export.h>
     41 #include <linux/mm.h>
     42 #include <asm/bug.h>
     43 #include <asm/shmparam.h>
     44 #include <drm/drmP.h>
     45 
     46 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
     47 						  struct drm_local_map *map)
     48 {
     49 	struct drm_map_list *entry;
     50 	list_for_each_entry(entry, &dev->maplist, head) {
     51 		/*
     52 		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
     53 		 * while PCI resources may live above that, we only compare the
     54 		 * lower 32 bits of the map offset for maps of type
      55 		 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
      56 		 * It is assumed that if a driver has more than one resource
     57 		 * of each type, the lower 32 bits are different.
     58 		 */
     59 		if (!entry->map ||
     60 		    map->type != entry->map->type ||
     61 		    entry->master != dev->primary->master)
     62 			continue;
     63 		switch (map->type) {
     64 		case _DRM_SHM:
     65 			if (map->flags != _DRM_CONTAINS_LOCK)
     66 				break;
     67 			return entry;
     68 		case _DRM_REGISTERS:
     69 		case _DRM_FRAME_BUFFER:
     70 			if ((entry->map->offset & 0xffffffff) ==
     71 			    (map->offset & 0xffffffff))
     72 				return entry;
     73 		default: /* Make gcc happy */
     74 			;
     75 		}
     76 		if (entry->map->offset == map->offset)
     77 			return entry;
     78 	}
     79 
     80 	return NULL;
     81 }
     82 
     83 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
     84 			  unsigned long user_token, int hashed_handle, int shm)
     85 {
     86 	int use_hashed_handle, shift;
     87 	unsigned long add;
     88 
     89 	use_hashed_handle = (user_token &~ 0xffffffffUL) || hashed_handle;
     90 	if (!use_hashed_handle) {
     91 		int ret;
     92 		hash->key = user_token >> PAGE_SHIFT;
     93 		ret = drm_ht_insert_item(&dev->map_hash, hash);
     94 		if (ret != -EINVAL)
     95 			return ret;
     96 	}
     97 
     98 	shift = 0;
     99 	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
    100 	if (shm && (SHMLBA > PAGE_SIZE)) {
    101 		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
    102 
    103 		/* For shared memory, we have to preserve the SHMLBA
    104 		 * bits of the eventual vma->vm_pgoff value during
    105 		 * mmap().  Otherwise we run into cache aliasing problems
    106 		 * on some platforms.  On these platforms, the pgoff of
    107 		 * a mmap() request is used to pick a suitable virtual
    108 		 * address for the mmap() region such that it will not
    109 		 * cause cache aliasing problems.
    110 		 *
    111 		 * Therefore, make sure the SHMLBA relevant bits of the
    112 		 * hash value we use are equal to those in the original
    113 		 * kernel virtual address.
    114 		 */
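		/*
		 * Illustrative example (not from the original source): with
		 * 4 KB pages and a 16 KB SHMLBA, as on some ARM systems,
		 * SHMLBA >> PAGE_SHIFT == 4, so bits == ilog2(4) + 1 == 3,
		 * and the low three bits of the kernel address's page offset
		 * are folded into `add' below, keeping the chosen handle in
		 * the same cache colour as the kernel mapping.
		 */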
    115 		shift = bits;
    116 		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
    117 	}
    118 
    119 	return drm_ht_just_insert_please(&dev->map_hash, hash,
    120 					 user_token, 32 - PAGE_SHIFT - 3,
    121 					 shift, add);
    122 }
    123 
    124 /**
    125  * Core function to create a range of memory available for mapping by a
    126  * non-root process.
    127  *
    128  * Adjusts the memory offset to its absolute value according to the mapping
     129  * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
     130  * applicable and supported by the kernel.
    131  */
    132 static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
    133 			   unsigned int size, enum drm_map_type type,
    134 			   enum drm_map_flags flags,
    135 			   struct drm_map_list ** maplist)
    136 {
    137 	struct drm_local_map *map;
    138 	struct drm_map_list *list;
    139 	drm_dma_handle_t *dmah;
    140 	unsigned long user_token;
    141 	int ret;
    142 
    143 	map = kmalloc(sizeof(*map), GFP_KERNEL);
    144 	if (!map)
    145 		return -ENOMEM;
    146 
    147 	map->offset = offset;
    148 	map->size = size;
    149 	map->flags = flags;
    150 	map->type = type;
    151 
    152 	/* Only allow shared memory to be removable since we only keep enough
     153 	 * bookkeeping information about shared memory to allow for removal
    154 	 * when processes fork.
    155 	 */
    156 	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
    157 		kfree(map);
    158 		return -EINVAL;
    159 	}
    160 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
    161 		  (unsigned long long)map->offset, map->size, map->type);
    162 
     163 	/* Page-align _DRM_SHM maps.  They are allocated here, so rounding the
     164 	 * size up opens no security hole, and it works around various broken
     165 	 * drivers that use a non-page-aligned size to map the SAREA. --BenH
     166 	 */
    167 	if (map->type == _DRM_SHM)
    168 		map->size = PAGE_ALIGN(map->size);
    169 
    170 	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
    171 		kfree(map);
    172 		return -EINVAL;
    173 	}
    174 	map->mtrr = -1;
    175 	map->handle = NULL;
    176 
    177 	switch (map->type) {
    178 	case _DRM_REGISTERS:
    179 	case _DRM_FRAME_BUFFER:
    180 #ifndef __NetBSD__		/* XXX No idea what this is for...  */
    181 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
    182 		if (map->offset + (map->size-1) < map->offset ||
    183 		    map->offset < virt_to_phys(high_memory)) {
    184 			kfree(map);
    185 			return -EINVAL;
    186 		}
    187 #endif
    188 #endif
    189 		/* Some drivers preinitialize some maps, without the X Server
    190 		 * needing to be aware of it.  Therefore, we just return success
    191 		 * when the server tries to create a duplicate map.
    192 		 */
    193 		list = drm_find_matching_map(dev, map);
    194 		if (list != NULL) {
    195 			if (list->map->size != map->size) {
    196 				DRM_DEBUG("Matching maps of type %d with "
    197 					  "mismatched sizes, (%ld vs %ld)\n",
    198 					  map->type, map->size,
    199 					  list->map->size);
    200 				list->map->size = map->size;
    201 			}
    202 
    203 			kfree(map);
    204 			*maplist = list;
    205 			return 0;
    206 		}
    207 
    208 		if (drm_core_has_MTRR(dev)) {
    209 			if (map->type == _DRM_FRAME_BUFFER ||
    210 			    (map->flags & _DRM_WRITE_COMBINING)) {
    211 				map->mtrr = mtrr_add(map->offset, map->size,
    212 						     MTRR_TYPE_WRCOMB, 1);
    213 			}
    214 		}
    215 		if (map->type == _DRM_REGISTERS) {
    216 #ifdef __NetBSD__
    217 			map->handle = drm_ioremap(dev, map);
    218 #else
    219 			map->handle = ioremap(map->offset, map->size);
    220 #endif
    221 			if (!map->handle) {
    222 				kfree(map);
    223 				return -ENOMEM;
    224 			}
    225 		}
    226 
    227 		break;
    228 	case _DRM_SHM:
    229 		list = drm_find_matching_map(dev, map);
    230 		if (list != NULL) {
     231 			if (list->map->size != map->size) {
    232 				DRM_DEBUG("Matching maps of type %d with "
    233 					  "mismatched sizes, (%ld vs %ld)\n",
    234 					  map->type, map->size, list->map->size);
    235 				list->map->size = map->size;
    236 			}
    237 
    238 			kfree(map);
    239 			*maplist = list;
    240 			return 0;
    241 		}
    242 		map->handle = vmalloc_user(map->size);
    243 		DRM_DEBUG("%lu %d %p\n",
    244 			  map->size, drm_order(map->size), map->handle);
    245 		if (!map->handle) {
    246 			kfree(map);
    247 			return -ENOMEM;
    248 		}
    249 		map->offset = (unsigned long)map->handle;
    250 		if (map->flags & _DRM_CONTAINS_LOCK) {
    251 			/* Prevent a 2nd X Server from creating a 2nd lock */
    252 			spin_lock(&dev->primary->master->lock.spinlock);
    253 			if (dev->primary->master->lock.hw_lock != NULL) {
    254 				vfree(map->handle);
    255 				kfree(map);
    256 				spin_unlock(&dev->primary->master->lock.spinlock);
    257 				return -EBUSY;
    258 			}
    259 			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
    260 			spin_unlock(&dev->primary->master->lock.spinlock);
    261 		}
    262 		break;
    263 	case _DRM_AGP: {
    264 		struct drm_agp_mem *entry;
    265 		int valid = 0;
    266 
    267 		if (!drm_core_has_AGP(dev)) {
    268 			kfree(map);
    269 			return -EINVAL;
    270 		}
    271 #ifdef __alpha__
    272 		map->offset += dev->hose->mem_space->start;
    273 #endif
    274 		/* In some cases (i810 driver), user space may have already
    275 		 * added the AGP base itself, because dev->agp->base previously
    276 		 * only got set during AGP enable.  So, only add the base
    277 		 * address if the map's offset isn't already within the
    278 		 * aperture.
    279 		 */
    280 #ifdef __NetBSD__
    281 		if (map->offset < dev->agp->base ||
    282 		    map->offset > dev->agp->base +
    283 		    dev->agp->agp_info.ai_aperture_size - 1) {
    284 			map->offset += dev->agp->base;
    285 		}
    286 #else
    287 		if (map->offset < dev->agp->base ||
    288 		    map->offset > dev->agp->base +
    289 		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
    290 			map->offset += dev->agp->base;
    291 		}
    292 #endif
    293 		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
    294 
     295 		/* This assumes the DRM is in total control of AGP space.
     296 		 * That is not always the case, since AGP can be under the
     297 		 * control of user space (e.g. the i810 driver), in which case
     298 		 * the loop below matches nothing; so we only return -EPERM
     299 		 * when dev->agp->memory is non-empty and the offset is invalid.
     300 		 */
    301 		list_for_each_entry(entry, &dev->agp->memory, head) {
    302 			if ((map->offset >= entry->bound) &&
    303 			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
    304 				valid = 1;
    305 				break;
    306 			}
    307 		}
    308 		if (!list_empty(&dev->agp->memory) && !valid) {
    309 			kfree(map);
    310 			return -EPERM;
    311 		}
    312 		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
    313 			  (unsigned long long)map->offset, map->size);
    314 
    315 		break;
    316 	}
    317 	case _DRM_GEM:
    318 		DRM_ERROR("tried to addmap GEM object\n");
    319 		break;
    320 	case _DRM_SCATTER_GATHER:
    321 		if (!dev->sg) {
    322 			kfree(map);
    323 			return -EINVAL;
    324 		}
    325 		map->offset += (unsigned long)dev->sg->virtual;
    326 		break;
    327 	case _DRM_CONSISTENT:
     328 		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
     329 		 * As we're limiting the address to 2^32-1 (or less),
     330 		 * casting it down to 32 bits is no problem, but we
     331 		 * need to point to a 64-bit variable first. */
    332 		dmah = drm_pci_alloc(dev, map->size, map->size);
    333 		if (!dmah) {
    334 			kfree(map);
    335 			return -ENOMEM;
    336 		}
    337 		map->handle = dmah->vaddr;
    338 		map->offset = (unsigned long)dmah->busaddr;
    339 #ifdef __NetBSD__
    340 		map->lm_data.dmah = dmah;
    341 #else
    342 		kfree(dmah);
    343 #endif
    344 		break;
    345 	default:
    346 		kfree(map);
    347 		return -EINVAL;
    348 	}
    349 
    350 	list = kzalloc(sizeof(*list), GFP_KERNEL);
    351 	if (!list) {
    352 		if (map->type == _DRM_REGISTERS)
    353 #ifdef __NetBSD__
    354 			drm_iounmap(dev, map);
    355 #else
    356 			iounmap(map->handle);
    357 #endif
    358 		kfree(map);
    359 		return -EINVAL;
    360 	}
    361 	list->map = map;
    362 
    363 	mutex_lock(&dev->struct_mutex);
    364 	list_add(&list->head, &dev->maplist);
    365 
    366 	/* Assign a 32-bit handle */
    367 	/* We do it here so that dev->struct_mutex protects the increment */
    368 	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
    369 		map->offset;
    370 	ret = drm_map_handle(dev, &list->hash, user_token, 0,
    371 			     (map->type == _DRM_SHM));
    372 	if (ret) {
    373 		if (map->type == _DRM_REGISTERS)
    374 #ifdef __NetBSD__		/* XXX What about other map types...?  */
    375 			drm_iounmap(dev, map);
    376 #else
    377 			iounmap(map->handle);
    378 #endif
    379 		kfree(map);
    380 		kfree(list);
    381 		mutex_unlock(&dev->struct_mutex);
    382 		return ret;
    383 	}
    384 
    385 	list->user_token = list->hash.key << PAGE_SHIFT;
    386 	mutex_unlock(&dev->struct_mutex);
    387 
    388 	if (!(map->flags & _DRM_DRIVER))
    389 		list->master = dev->primary->master;
    390 	*maplist = list;
    391 	return 0;
    392 	}
    393 
    394 int drm_addmap(struct drm_device * dev, resource_size_t offset,
    395 	       unsigned int size, enum drm_map_type type,
    396 	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
    397 {
    398 	struct drm_map_list *list;
    399 	int rc;
    400 
    401 	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
    402 	if (!rc)
    403 		*map_ptr = list->map;
    404 	return rc;
    405 }
    406 
    407 EXPORT_SYMBOL(drm_addmap);
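
/*
 * Illustrative sketch (not part of the original file): a driver would
 * typically call drm_addmap() above from its load hook to expose, say, its
 * register BAR.  The resource index and error handling are hypothetical:
 *
 *	struct drm_local_map *regs;
 *	int err;
 *
 *	err = drm_addmap(dev, pci_resource_start(dev->pdev, 0),
 *			 pci_resource_len(dev->pdev, 0),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	if (err)
 *		return err;
 */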
    408 
    409 /**
    410  * Ioctl to specify a range of memory that is available for mapping by a
    411  * non-root process.
    412  *
    413  * \param inode device inode.
    414  * \param file_priv DRM file private.
    415  * \param cmd command.
    416  * \param arg pointer to a drm_map structure.
    417  * \return zero on success or a negative value on error.
    418  *
    419  */
    420 int drm_addmap_ioctl(struct drm_device *dev, void *data,
    421 		     struct drm_file *file_priv)
    422 {
    423 	struct drm_map *map = data;
    424 	struct drm_map_list *maplist;
    425 	int err;
    426 
    427 #ifdef __NetBSD__
    428 #  if 0				/* XXX Old drm did this.  */
    429 	if (!(dev->flags & (FREAD | FWRITE)))
    430 		return -EACCES;
    431 #  endif
    432 	if (!(DRM_SUSER() || map->type == _DRM_AGP || map->type == _DRM_SHM))
    433 		return -EACCES;	/* XXX */
    434 #else
    435 	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
    436 		return -EPERM;
    437 #endif
    438 
    439 	err = drm_addmap_core(dev, map->offset, map->size, map->type,
    440 			      map->flags, &maplist);
    441 
    442 	if (err)
    443 		return err;
    444 
     445 	/* Avoid a warning on 64-bit: this cast isn't very nice, but the API was fixed long ago, so it's too late to change it. */
    446 	map->handle = (void *)(unsigned long)maplist->user_token;
    447 	return 0;
    448 }
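
/*
 * Illustrative sketch (not part of the original file): the X server (or
 * another sufficiently privileged client) reaches drm_addmap_ioctl() above
 * through DRM_IOCTL_ADD_MAP, for instance to create the SAREA:
 *
 *	struct drm_map req = {
 *		.size  = SAREA_MAX,
 *		.type  = _DRM_SHM,
 *		.flags = _DRM_CONTAINS_LOCK,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &req) == 0)
 *		sarea = mmap(NULL, req.size, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED, fd, (unsigned long)req.handle);
 *
 * The handle written back by the ioctl is the user token assigned by
 * drm_map_handle(), and it is the offset that mmap(2) expects.
 */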
    449 
    450 /**
     451  * Remove a map private from the list and deallocate resources if the mapping
     452  * isn't in use.
     453  *
     454  * Searches for the map on drm_device::maplist, removes it from the list, checks
     455  * whether it is still being used, and frees any associated resources (such as
     456  * MTRRs) if it is not.
    457  *
    458  * \sa drm_addmap
    459  */
    460 int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
    461 {
    462 	struct drm_map_list *r_list = NULL, *list_t;
    463 #ifndef __NetBSD__
    464 	drm_dma_handle_t dmah;
    465 #endif
    466 	int found = 0;
    467 	struct drm_master *master;
    468 
    469 	/* Find the list entry for the map and remove it */
    470 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
    471 		if (r_list->map == map) {
    472 			master = r_list->master;
    473 			list_del(&r_list->head);
    474 			drm_ht_remove_key(&dev->map_hash,
    475 					  r_list->user_token >> PAGE_SHIFT);
    476 			kfree(r_list);
    477 			found = 1;
    478 			break;
    479 		}
    480 	}
    481 
    482 	if (!found)
    483 		return -EINVAL;
    484 
    485 	switch (map->type) {
    486 	case _DRM_REGISTERS:
    487 #ifdef __NetBSD__
    488 		drm_iounmap(dev, map);
    489 #else
    490 		iounmap(map->handle);
    491 #endif
    492 		/* FALLTHROUGH */
    493 	case _DRM_FRAME_BUFFER:
    494 		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
    495 			int retcode;
    496 			retcode = mtrr_del(map->mtrr, map->offset, map->size);
    497 			DRM_DEBUG("mtrr_del=%d\n", retcode);
    498 		}
    499 		break;
    500 	case _DRM_SHM:
    501 		if (master && (map->flags & _DRM_CONTAINS_LOCK)) {
    502 			spin_lock(&master->lock.spinlock);
    503 			/*
    504 			 * If we successfully removed this mapping,
    505 			 * then the mapping must have been there in the
    506 			 * first place, and we must have had a
    507 			 * heavyweight lock, so we assert here instead
    508 			 * of just checking and failing.
    509 			 *
    510 			 * XXX What about the _DRM_CONTAINS_LOCK flag?
    511 			 * Where is that supposed to be set?  Is it
    512 			 * equivalent to having a master set?
    513 			 *
    514 			 * XXX There is copypasta of this in
    515 			 * drm_fops.c.
    516 			 */
    517 			BUG_ON(master->lock.hw_lock == NULL);
    518 			if (dev->sigdata.lock == master->lock.hw_lock)
    519 				dev->sigdata.lock = NULL;
    520 			master->lock.hw_lock = NULL;   /* SHM removed */
    521 			master->lock.file_priv = NULL;
    522 #ifdef __NetBSD__
    523 			DRM_SPIN_WAKEUP_ALL(&master->lock.lock_queue,
    524 			    &master->lock.spinlock);
    525 #else
    526 			wake_up_interruptible_all(&master->lock.lock_queue);
    527 #endif
    528 			spin_unlock(&master->lock.spinlock);
    529 		}
    530 		vfree(map->handle);
    531 		break;
    532 	case _DRM_AGP:
    533 	case _DRM_SCATTER_GATHER:
    534 		break;
    535 	case _DRM_CONSISTENT:
    536 #ifdef __NetBSD__
    537 		drm_pci_free(dev, map->lm_data.dmah);
    538 #else
    539 		dmah.vaddr = map->handle;
    540 		dmah.busaddr = map->offset;
    541 		dmah.size = map->size;
    542 		__drm_pci_free(dev, &dmah);
    543 #endif
    544 		break;
    545 	case _DRM_GEM:
    546 		DRM_ERROR("tried to rmmap GEM object\n");
    547 		break;
    548 	}
    549 	kfree(map);
    550 
    551 	return 0;
    552 }
    553 EXPORT_SYMBOL(drm_rmmap_locked);
    554 
    555 int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
    556 {
    557 	int ret;
    558 
    559 	mutex_lock(&dev->struct_mutex);
    560 	ret = drm_rmmap_locked(dev, map);
    561 	mutex_unlock(&dev->struct_mutex);
    562 
    563 	return ret;
    564 }
    565 EXPORT_SYMBOL(drm_rmmap);
    566 
    567 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
    568  * the last close of the device, and this is necessary for cleanup when things
    569  * exit uncleanly.  Therefore, having userland manually remove mappings seems
    570  * like a pointless exercise since they're going away anyway.
    571  *
    572  * One use case might be after addmap is allowed for normal users for SHM and
    573  * gets used by drivers that the server doesn't need to care about.  This seems
    574  * unlikely.
    575  *
    576  * \param inode device inode.
    577  * \param file_priv DRM file private.
    578  * \param cmd command.
    579  * \param arg pointer to a struct drm_map structure.
    580  * \return zero on success or a negative value on error.
    581  */
    582 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
    583 		    struct drm_file *file_priv)
    584 {
    585 	struct drm_map *request = data;
    586 	struct drm_local_map *map = NULL;
    587 	struct drm_map_list *r_list;
    588 	int ret;
    589 
    590 	mutex_lock(&dev->struct_mutex);
    591 	list_for_each_entry(r_list, &dev->maplist, head) {
    592 		if (r_list->map &&
    593 		    r_list->user_token == (unsigned long)request->handle &&
    594 		    r_list->map->flags & _DRM_REMOVABLE) {
    595 			map = r_list->map;
    596 			break;
    597 		}
    598 	}
    599 
     600 	/* Either the list is empty, or we walked all the way around to the
     601 	 * head pointer without finding anything.
     602 	 */
    603 	if (list_empty(&dev->maplist) || !map) {
    604 		mutex_unlock(&dev->struct_mutex);
    605 		return -EINVAL;
    606 	}
    607 
    608 	/* Register and framebuffer maps are permanent */
    609 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
    610 		mutex_unlock(&dev->struct_mutex);
    611 		return 0;
    612 	}
    613 
    614 	ret = drm_rmmap_locked(dev, map);
    615 
    616 	mutex_unlock(&dev->struct_mutex);
    617 
    618 	return ret;
    619 }
    620 
    621 /**
    622  * Cleanup after an error on one of the addbufs() functions.
    623  *
    624  * \param dev DRM device.
    625  * \param entry buffer entry where the error occurred.
    626  *
    627  * Frees any pages and buffers associated with the given entry.
    628  */
    629 static void drm_cleanup_buf_error(struct drm_device * dev,
    630 				  struct drm_buf_entry * entry)
    631 {
    632 	int i;
    633 
    634 	if (entry->seg_count) {
    635 		for (i = 0; i < entry->seg_count; i++) {
    636 			if (entry->seglist[i]) {
    637 				drm_pci_free(dev, entry->seglist[i]);
    638 			}
    639 		}
    640 		kfree(entry->seglist);
    641 
    642 		entry->seg_count = 0;
    643 	}
    644 
    645 	if (entry->buf_count) {
    646 		for (i = 0; i < entry->buf_count; i++) {
    647 			kfree(entry->buflist[i].dev_private);
    648 		}
    649 		kfree(entry->buflist);
    650 
    651 		entry->buf_count = 0;
    652 	}
    653 }
    654 
    655 #if __OS_HAS_AGP
    656 /**
    657  * Add AGP buffers for DMA transfers.
    658  *
    659  * \param dev struct drm_device to which the buffers are to be added.
    660  * \param request pointer to a struct drm_buf_desc describing the request.
    661  * \return zero on success or a negative number on failure.
    662  *
     663  * After some sanity checks, creates a drm_buf structure for each buffer and
     664  * reallocates the device's buffer list to accommodate the new buffers of the
     665  * requested size order.
    666  */
    667 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
    668 {
    669 	struct drm_device_dma *dma = dev->dma;
    670 	struct drm_buf_entry *entry;
    671 	struct drm_agp_mem *agp_entry;
    672 	struct drm_buf *buf;
    673 	unsigned long offset;
    674 	unsigned long agp_offset;
    675 	int count;
    676 	int order;
    677 	int size;
    678 	int alignment;
    679 	int page_order;
    680 	int total;
    681 	int byte_count;
    682 	int i, valid;
    683 	struct drm_buf **temp_buflist;
    684 
    685 	if (!dma)
    686 		return -EINVAL;
    687 
    688 	count = request->count;
    689 	order = drm_order(request->size);
    690 	size = 1 << order;
    691 
    692 	alignment = (request->flags & _DRM_PAGE_ALIGN)
    693 	    ? PAGE_ALIGN(size) : size;
    694 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
    695 	total = PAGE_SIZE << page_order;
    696 
    697 	byte_count = 0;
    698 	agp_offset = dev->agp->base + request->agp_start;
    699 
    700 	DRM_DEBUG("count:      %d\n", count);
    701 	DRM_DEBUG("order:      %d\n", order);
    702 	DRM_DEBUG("size:       %d\n", size);
    703 	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
    704 	DRM_DEBUG("alignment:  %d\n", alignment);
    705 	DRM_DEBUG("page_order: %d\n", page_order);
    706 	DRM_DEBUG("total:      %d\n", total);
    707 
    708 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
    709 		return -EINVAL;
    710 
    711 	/* Make sure buffers are located in AGP memory that we own */
    712 	valid = 0;
    713 	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
    714 		if ((agp_offset >= agp_entry->bound) &&
    715 		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
    716 			valid = 1;
    717 			break;
    718 		}
    719 	}
    720 	if (!list_empty(&dev->agp->memory) && !valid) {
    721 		DRM_DEBUG("zone invalid\n");
    722 		return -EINVAL;
    723 	}
    724 	spin_lock(&dev->count_lock);
    725 	if (dev->buf_use) {
    726 		spin_unlock(&dev->count_lock);
    727 		return -EBUSY;
    728 	}
    729 	atomic_inc(&dev->buf_alloc);
    730 	spin_unlock(&dev->count_lock);
    731 
    732 	mutex_lock(&dev->struct_mutex);
    733 	entry = &dma->bufs[order];
    734 	if (entry->buf_count) {
    735 		mutex_unlock(&dev->struct_mutex);
    736 		atomic_dec(&dev->buf_alloc);
    737 		return -ENOMEM;	/* May only call once for each order */
    738 	}
    739 
    740 	if (count < 0 || count > 4096) {
    741 		mutex_unlock(&dev->struct_mutex);
    742 		atomic_dec(&dev->buf_alloc);
    743 		return -EINVAL;
    744 	}
    745 
    746 	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
    747 	if (!entry->buflist) {
    748 		mutex_unlock(&dev->struct_mutex);
    749 		atomic_dec(&dev->buf_alloc);
    750 		return -ENOMEM;
    751 	}
    752 
    753 	entry->buf_size = size;
    754 	entry->page_order = page_order;
    755 
    756 	offset = 0;
    757 
    758 	while (entry->buf_count < count) {
    759 		buf = &entry->buflist[entry->buf_count];
    760 		buf->idx = dma->buf_count + entry->buf_count;
    761 		buf->total = alignment;
    762 		buf->order = order;
    763 		buf->used = 0;
    764 
    765 		buf->offset = (dma->byte_count + offset);
    766 		buf->bus_address = agp_offset + offset;
    767 		buf->address = (void *)(agp_offset + offset);
    768 		buf->next = NULL;
    769 		buf->waiting = 0;
    770 		buf->pending = 0;
    771 		buf->file_priv = NULL;
    772 
    773 		buf->dev_priv_size = dev->driver->dev_priv_size;
    774 		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
    775 		if (!buf->dev_private) {
    776 			/* Set count correctly so we free the proper amount. */
    777 			entry->buf_count = count;
    778 			drm_cleanup_buf_error(dev, entry);
    779 			mutex_unlock(&dev->struct_mutex);
    780 			atomic_dec(&dev->buf_alloc);
    781 			return -ENOMEM;
    782 		}
    783 
    784 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
    785 
    786 		offset += alignment;
    787 		entry->buf_count++;
    788 		byte_count += PAGE_SIZE << page_order;
    789 	}
    790 
    791 	DRM_DEBUG("byte_count: %d\n", byte_count);
    792 
    793 	temp_buflist = krealloc(dma->buflist,
    794 				(dma->buf_count + entry->buf_count) *
    795 				sizeof(*dma->buflist), GFP_KERNEL);
    796 	if (!temp_buflist) {
    797 		/* Free the entry because it isn't valid */
    798 		drm_cleanup_buf_error(dev, entry);
    799 		mutex_unlock(&dev->struct_mutex);
    800 		atomic_dec(&dev->buf_alloc);
    801 		return -ENOMEM;
    802 	}
    803 	dma->buflist = temp_buflist;
    804 
    805 	for (i = 0; i < entry->buf_count; i++) {
    806 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
    807 	}
    808 
    809 	dma->buf_count += entry->buf_count;
    810 	dma->seg_count += entry->seg_count;
    811 	dma->page_count += byte_count >> PAGE_SHIFT;
    812 	dma->byte_count += byte_count;
    813 
    814 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
    815 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
    816 
    817 	mutex_unlock(&dev->struct_mutex);
    818 
    819 	request->count = entry->buf_count;
    820 	request->size = size;
    821 
    822 	dma->flags = _DRM_DMA_USE_AGP;
    823 
    824 	atomic_dec(&dev->buf_alloc);
    825 	return 0;
    826 }
    827 EXPORT_SYMBOL(drm_addbufs_agp);
    828 #endif				/* __OS_HAS_AGP */
    829 
    830 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
    831 {
    832 	struct drm_device_dma *dma = dev->dma;
    833 	int count;
    834 	int order;
    835 	int size;
    836 	int total;
    837 	int page_order;
    838 	struct drm_buf_entry *entry;
    839 	drm_dma_handle_t *dmah;
    840 	struct drm_buf *buf;
    841 	int alignment;
    842 	unsigned long offset;
    843 	int i;
    844 	int byte_count;
    845 	int page_count;
    846 	unsigned long *temp_pagelist;
    847 	struct drm_buf **temp_buflist;
    848 
    849 	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
    850 		return -EINVAL;
    851 
    852 	if (!dma)
    853 		return -EINVAL;
    854 
    855 #ifdef __NetBSD__
    856 	if (!DRM_SUSER())
    857 		return -EACCES;	/* XXX */
    858 #else
    859 	if (!capable(CAP_SYS_ADMIN))
    860 		return -EPERM;
    861 #endif
    862 
    863 	count = request->count;
    864 	order = drm_order(request->size);
    865 	size = 1 << order;
    866 
    867 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
    868 		  request->count, request->size, size, order);
    869 
    870 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
    871 		return -EINVAL;
    872 
    873 	alignment = (request->flags & _DRM_PAGE_ALIGN)
    874 	    ? PAGE_ALIGN(size) : size;
    875 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
    876 	total = PAGE_SIZE << page_order;
    877 
    878 	spin_lock(&dev->count_lock);
    879 	if (dev->buf_use) {
    880 		spin_unlock(&dev->count_lock);
    881 		return -EBUSY;
    882 	}
    883 	atomic_inc(&dev->buf_alloc);
    884 	spin_unlock(&dev->count_lock);
    885 
    886 	mutex_lock(&dev->struct_mutex);
    887 	entry = &dma->bufs[order];
    888 	if (entry->buf_count) {
    889 		mutex_unlock(&dev->struct_mutex);
    890 		atomic_dec(&dev->buf_alloc);
    891 		return -ENOMEM;	/* May only call once for each order */
    892 	}
    893 
    894 	if (count < 0 || count > 4096) {
    895 		mutex_unlock(&dev->struct_mutex);
    896 		atomic_dec(&dev->buf_alloc);
    897 		return -EINVAL;
    898 	}
    899 
    900 	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
    901 	if (!entry->buflist) {
    902 		mutex_unlock(&dev->struct_mutex);
    903 		atomic_dec(&dev->buf_alloc);
    904 		return -ENOMEM;
    905 	}
    906 
    907 	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
    908 	if (!entry->seglist) {
    909 		kfree(entry->buflist);
    910 		mutex_unlock(&dev->struct_mutex);
    911 		atomic_dec(&dev->buf_alloc);
    912 		return -ENOMEM;
    913 	}
    914 
    915 	/* Keep the original pagelist until we know all the allocations
    916 	 * have succeeded
    917 	 */
    918 	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
    919 			       sizeof(*dma->pagelist), GFP_KERNEL);
    920 	if (!temp_pagelist) {
    921 		kfree(entry->buflist);
    922 		kfree(entry->seglist);
    923 		mutex_unlock(&dev->struct_mutex);
    924 		atomic_dec(&dev->buf_alloc);
    925 		return -ENOMEM;
    926 	}
    927 	memcpy(temp_pagelist,
    928 	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
    929 	DRM_DEBUG("pagelist: %d entries\n",
    930 		  dma->page_count + (count << page_order));
    931 
    932 	entry->buf_size = size;
    933 	entry->page_order = page_order;
    934 	byte_count = 0;
    935 	page_count = 0;
    936 
    937 	while (entry->buf_count < count) {
    938 
    939 		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
    940 
    941 		if (!dmah) {
    942 			/* Set count correctly so we free the proper amount. */
    943 			entry->buf_count = count;
    944 			entry->seg_count = count;
    945 			drm_cleanup_buf_error(dev, entry);
    946 			kfree(temp_pagelist);
    947 			mutex_unlock(&dev->struct_mutex);
    948 			atomic_dec(&dev->buf_alloc);
    949 			return -ENOMEM;
    950 		}
    951 		entry->seglist[entry->seg_count++] = dmah;
    952 		for (i = 0; i < (1 << page_order); i++) {
    953 			DRM_DEBUG("page %d @ 0x%08lx\n",
    954 				  dma->page_count + page_count,
    955 				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
    956 			temp_pagelist[dma->page_count + page_count++]
    957 				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
    958 		}
    959 		for (offset = 0;
    960 		     offset + size <= total && entry->buf_count < count;
    961 		     offset += alignment, ++entry->buf_count) {
    962 			buf = &entry->buflist[entry->buf_count];
    963 			buf->idx = dma->buf_count + entry->buf_count;
    964 			buf->total = alignment;
    965 			buf->order = order;
    966 			buf->used = 0;
    967 			buf->offset = (dma->byte_count + byte_count + offset);
    968 			buf->address = (void *)((char *)dmah->vaddr + offset);
    969 			buf->bus_address = dmah->busaddr + offset;
    970 			buf->next = NULL;
    971 			buf->waiting = 0;
    972 			buf->pending = 0;
    973 			buf->file_priv = NULL;
    974 
    975 			buf->dev_priv_size = dev->driver->dev_priv_size;
    976 			buf->dev_private = kzalloc(buf->dev_priv_size,
    977 						GFP_KERNEL);
    978 			if (!buf->dev_private) {
    979 				/* Set count correctly so we free the proper amount. */
    980 				entry->buf_count = count;
    981 				entry->seg_count = count;
    982 				drm_cleanup_buf_error(dev, entry);
    983 				kfree(temp_pagelist);
    984 				mutex_unlock(&dev->struct_mutex);
    985 				atomic_dec(&dev->buf_alloc);
    986 				return -ENOMEM;
    987 			}
    988 
    989 			DRM_DEBUG("buffer %d @ %p\n",
    990 				  entry->buf_count, buf->address);
    991 		}
    992 		byte_count += PAGE_SIZE << page_order;
    993 	}
    994 
    995 	temp_buflist = krealloc(dma->buflist,
    996 				(dma->buf_count + entry->buf_count) *
    997 				sizeof(*dma->buflist), GFP_KERNEL);
    998 	if (!temp_buflist) {
    999 		/* Free the entry because it isn't valid */
   1000 		drm_cleanup_buf_error(dev, entry);
   1001 		kfree(temp_pagelist);
   1002 		mutex_unlock(&dev->struct_mutex);
   1003 		atomic_dec(&dev->buf_alloc);
   1004 		return -ENOMEM;
   1005 	}
   1006 	dma->buflist = temp_buflist;
   1007 
   1008 	for (i = 0; i < entry->buf_count; i++) {
   1009 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
   1010 	}
   1011 
   1012 	/* No allocations failed, so now we can replace the original pagelist
   1013 	 * with the new one.
   1014 	 */
   1015 	if (dma->page_count) {
   1016 		kfree(dma->pagelist);
   1017 	}
   1018 	dma->pagelist = temp_pagelist;
   1019 
   1020 	dma->buf_count += entry->buf_count;
   1021 	dma->seg_count += entry->seg_count;
   1022 	dma->page_count += entry->seg_count << page_order;
   1023 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
   1024 
   1025 	mutex_unlock(&dev->struct_mutex);
   1026 
   1027 	request->count = entry->buf_count;
   1028 	request->size = size;
   1029 
   1030 	if (request->flags & _DRM_PCI_BUFFER_RO)
   1031 		dma->flags = _DRM_DMA_USE_PCI_RO;
   1032 
   1033 	atomic_dec(&dev->buf_alloc);
   1034 	return 0;
   1035 
   1036 }
   1037 EXPORT_SYMBOL(drm_addbufs_pci);
   1038 
   1039 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
   1040 {
   1041 	struct drm_device_dma *dma = dev->dma;
   1042 	struct drm_buf_entry *entry;
   1043 	struct drm_buf *buf;
   1044 	unsigned long offset;
   1045 	unsigned long agp_offset;
   1046 	int count;
   1047 	int order;
   1048 	int size;
   1049 	int alignment;
   1050 	int page_order;
   1051 	int total;
   1052 	int byte_count;
   1053 	int i;
   1054 	struct drm_buf **temp_buflist;
   1055 
   1056 	if (!drm_core_check_feature(dev, DRIVER_SG))
   1057 		return -EINVAL;
   1058 
   1059 	if (!dma)
   1060 		return -EINVAL;
   1061 
   1062 #ifdef __NetBSD__
   1063 	if (!DRM_SUSER())
   1064 		return -EACCES;	/* XXX */
   1065 #else
   1066 	if (!capable(CAP_SYS_ADMIN))
   1067 		return -EPERM;
   1068 #endif
   1069 
   1070 	count = request->count;
   1071 	order = drm_order(request->size);
   1072 	size = 1 << order;
   1073 
   1074 	alignment = (request->flags & _DRM_PAGE_ALIGN)
   1075 	    ? PAGE_ALIGN(size) : size;
   1076 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
   1077 	total = PAGE_SIZE << page_order;
   1078 
   1079 	byte_count = 0;
   1080 	agp_offset = request->agp_start;
   1081 
   1082 	DRM_DEBUG("count:      %d\n", count);
   1083 	DRM_DEBUG("order:      %d\n", order);
   1084 	DRM_DEBUG("size:       %d\n", size);
   1085 	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
   1086 	DRM_DEBUG("alignment:  %d\n", alignment);
   1087 	DRM_DEBUG("page_order: %d\n", page_order);
   1088 	DRM_DEBUG("total:      %d\n", total);
   1089 
   1090 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
   1091 		return -EINVAL;
   1092 
   1093 	spin_lock(&dev->count_lock);
   1094 	if (dev->buf_use) {
   1095 		spin_unlock(&dev->count_lock);
   1096 		return -EBUSY;
   1097 	}
   1098 	atomic_inc(&dev->buf_alloc);
   1099 	spin_unlock(&dev->count_lock);
   1100 
   1101 	mutex_lock(&dev->struct_mutex);
   1102 	entry = &dma->bufs[order];
   1103 	if (entry->buf_count) {
   1104 		mutex_unlock(&dev->struct_mutex);
   1105 		atomic_dec(&dev->buf_alloc);
   1106 		return -ENOMEM;	/* May only call once for each order */
   1107 	}
   1108 
   1109 	if (count < 0 || count > 4096) {
   1110 		mutex_unlock(&dev->struct_mutex);
   1111 		atomic_dec(&dev->buf_alloc);
   1112 		return -EINVAL;
   1113 	}
   1114 
   1115 	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
   1116 				GFP_KERNEL);
   1117 	if (!entry->buflist) {
   1118 		mutex_unlock(&dev->struct_mutex);
   1119 		atomic_dec(&dev->buf_alloc);
   1120 		return -ENOMEM;
   1121 	}
   1122 
   1123 	entry->buf_size = size;
   1124 	entry->page_order = page_order;
   1125 
   1126 	offset = 0;
   1127 
   1128 	while (entry->buf_count < count) {
   1129 		buf = &entry->buflist[entry->buf_count];
   1130 		buf->idx = dma->buf_count + entry->buf_count;
   1131 		buf->total = alignment;
   1132 		buf->order = order;
   1133 		buf->used = 0;
   1134 
   1135 		buf->offset = (dma->byte_count + offset);
   1136 		buf->bus_address = agp_offset + offset;
   1137 		buf->address = (void *)(agp_offset + offset
   1138 					+ (unsigned long)dev->sg->virtual);
   1139 		buf->next = NULL;
   1140 		buf->waiting = 0;
   1141 		buf->pending = 0;
   1142 		buf->file_priv = NULL;
   1143 
   1144 		buf->dev_priv_size = dev->driver->dev_priv_size;
   1145 		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
   1146 		if (!buf->dev_private) {
   1147 			/* Set count correctly so we free the proper amount. */
   1148 			entry->buf_count = count;
   1149 			drm_cleanup_buf_error(dev, entry);
   1150 			mutex_unlock(&dev->struct_mutex);
   1151 			atomic_dec(&dev->buf_alloc);
   1152 			return -ENOMEM;
   1153 		}
   1154 
   1155 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
   1156 
   1157 		offset += alignment;
   1158 		entry->buf_count++;
   1159 		byte_count += PAGE_SIZE << page_order;
   1160 	}
   1161 
   1162 	DRM_DEBUG("byte_count: %d\n", byte_count);
   1163 
   1164 	temp_buflist = krealloc(dma->buflist,
   1165 				(dma->buf_count + entry->buf_count) *
   1166 				sizeof(*dma->buflist), GFP_KERNEL);
   1167 	if (!temp_buflist) {
   1168 		/* Free the entry because it isn't valid */
   1169 		drm_cleanup_buf_error(dev, entry);
   1170 		mutex_unlock(&dev->struct_mutex);
   1171 		atomic_dec(&dev->buf_alloc);
   1172 		return -ENOMEM;
   1173 	}
   1174 	dma->buflist = temp_buflist;
   1175 
   1176 	for (i = 0; i < entry->buf_count; i++) {
   1177 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
   1178 	}
   1179 
   1180 	dma->buf_count += entry->buf_count;
   1181 	dma->seg_count += entry->seg_count;
   1182 	dma->page_count += byte_count >> PAGE_SHIFT;
   1183 	dma->byte_count += byte_count;
   1184 
   1185 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
   1186 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
   1187 
   1188 	mutex_unlock(&dev->struct_mutex);
   1189 
   1190 	request->count = entry->buf_count;
   1191 	request->size = size;
   1192 
   1193 	dma->flags = _DRM_DMA_USE_SG;
   1194 
   1195 	atomic_dec(&dev->buf_alloc);
   1196 	return 0;
   1197 }
   1198 
   1199 static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
   1200 {
   1201 	struct drm_device_dma *dma = dev->dma;
   1202 	struct drm_buf_entry *entry;
   1203 	struct drm_buf *buf;
   1204 	unsigned long offset;
   1205 	unsigned long agp_offset;
   1206 	int count;
   1207 	int order;
   1208 	int size;
   1209 	int alignment;
   1210 	int page_order;
   1211 	int total;
   1212 	int byte_count;
   1213 	int i;
   1214 	struct drm_buf **temp_buflist;
   1215 
   1216 	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
   1217 		return -EINVAL;
   1218 
   1219 	if (!dma)
   1220 		return -EINVAL;
   1221 
   1222 #ifdef __NetBSD__
   1223 	if (!DRM_SUSER())
   1224 		return -EACCES;	/* XXX */
   1225 #else
   1226 	if (!capable(CAP_SYS_ADMIN))
   1227 		return -EPERM;
   1228 #endif
   1229 
   1230 	count = request->count;
   1231 	order = drm_order(request->size);
   1232 	size = 1 << order;
   1233 
   1234 	alignment = (request->flags & _DRM_PAGE_ALIGN)
   1235 	    ? PAGE_ALIGN(size) : size;
   1236 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
   1237 	total = PAGE_SIZE << page_order;
   1238 
   1239 	byte_count = 0;
   1240 	agp_offset = request->agp_start;
   1241 
   1242 	DRM_DEBUG("count:      %d\n", count);
   1243 	DRM_DEBUG("order:      %d\n", order);
   1244 	DRM_DEBUG("size:       %d\n", size);
   1245 	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
   1246 	DRM_DEBUG("alignment:  %d\n", alignment);
   1247 	DRM_DEBUG("page_order: %d\n", page_order);
   1248 	DRM_DEBUG("total:      %d\n", total);
   1249 
   1250 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
   1251 		return -EINVAL;
   1252 
   1253 	spin_lock(&dev->count_lock);
   1254 	if (dev->buf_use) {
   1255 		spin_unlock(&dev->count_lock);
   1256 		return -EBUSY;
   1257 	}
   1258 	atomic_inc(&dev->buf_alloc);
   1259 	spin_unlock(&dev->count_lock);
   1260 
   1261 	mutex_lock(&dev->struct_mutex);
   1262 	entry = &dma->bufs[order];
   1263 	if (entry->buf_count) {
   1264 		mutex_unlock(&dev->struct_mutex);
   1265 		atomic_dec(&dev->buf_alloc);
   1266 		return -ENOMEM;	/* May only call once for each order */
   1267 	}
   1268 
   1269 	if (count < 0 || count > 4096) {
   1270 		mutex_unlock(&dev->struct_mutex);
   1271 		atomic_dec(&dev->buf_alloc);
   1272 		return -EINVAL;
   1273 	}
   1274 
   1275 	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
   1276 				GFP_KERNEL);
   1277 	if (!entry->buflist) {
   1278 		mutex_unlock(&dev->struct_mutex);
   1279 		atomic_dec(&dev->buf_alloc);
   1280 		return -ENOMEM;
   1281 	}
   1282 
   1283 	entry->buf_size = size;
   1284 	entry->page_order = page_order;
   1285 
   1286 	offset = 0;
   1287 
   1288 	while (entry->buf_count < count) {
   1289 		buf = &entry->buflist[entry->buf_count];
   1290 		buf->idx = dma->buf_count + entry->buf_count;
   1291 		buf->total = alignment;
   1292 		buf->order = order;
   1293 		buf->used = 0;
   1294 
   1295 		buf->offset = (dma->byte_count + offset);
   1296 		buf->bus_address = agp_offset + offset;
   1297 		buf->address = (void *)(agp_offset + offset);
   1298 		buf->next = NULL;
   1299 		buf->waiting = 0;
   1300 		buf->pending = 0;
   1301 		buf->file_priv = NULL;
   1302 
   1303 		buf->dev_priv_size = dev->driver->dev_priv_size;
   1304 		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
   1305 		if (!buf->dev_private) {
   1306 			/* Set count correctly so we free the proper amount. */
   1307 			entry->buf_count = count;
   1308 			drm_cleanup_buf_error(dev, entry);
   1309 			mutex_unlock(&dev->struct_mutex);
   1310 			atomic_dec(&dev->buf_alloc);
   1311 			return -ENOMEM;
   1312 		}
   1313 
   1314 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
   1315 
   1316 		offset += alignment;
   1317 		entry->buf_count++;
   1318 		byte_count += PAGE_SIZE << page_order;
   1319 	}
   1320 
   1321 	DRM_DEBUG("byte_count: %d\n", byte_count);
   1322 
   1323 	temp_buflist = krealloc(dma->buflist,
   1324 				(dma->buf_count + entry->buf_count) *
   1325 				sizeof(*dma->buflist), GFP_KERNEL);
   1326 	if (!temp_buflist) {
   1327 		/* Free the entry because it isn't valid */
   1328 		drm_cleanup_buf_error(dev, entry);
   1329 		mutex_unlock(&dev->struct_mutex);
   1330 		atomic_dec(&dev->buf_alloc);
   1331 		return -ENOMEM;
   1332 	}
   1333 	dma->buflist = temp_buflist;
   1334 
   1335 	for (i = 0; i < entry->buf_count; i++) {
   1336 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
   1337 	}
   1338 
   1339 	dma->buf_count += entry->buf_count;
   1340 	dma->seg_count += entry->seg_count;
   1341 	dma->page_count += byte_count >> PAGE_SHIFT;
   1342 	dma->byte_count += byte_count;
   1343 
   1344 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
   1345 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
   1346 
   1347 	mutex_unlock(&dev->struct_mutex);
   1348 
   1349 	request->count = entry->buf_count;
   1350 	request->size = size;
   1351 
   1352 	dma->flags = _DRM_DMA_USE_FB;
   1353 
   1354 	atomic_dec(&dev->buf_alloc);
   1355 	return 0;
   1356 }
   1357 
   1358 
   1359 /**
   1360  * Add buffers for DMA transfers (ioctl).
   1361  *
   1362  * \param inode device inode.
   1363  * \param file_priv DRM file private.
   1364  * \param cmd command.
   1365  * \param arg pointer to a struct drm_buf_desc request.
   1366  * \return zero on success or a negative number on failure.
   1367  *
    1368  * Depending on the memory type specified in drm_buf_desc::flags and the
    1369  * build options, it dispatches the call to drm_addbufs_agp(),
    1370  * drm_addbufs_sg(), drm_addbufs_fb() or drm_addbufs_pci() for AGP,
    1371  * scatter-gather, framebuffer or consistent PCI memory respectively.
   1372  */
   1373 int drm_addbufs(struct drm_device *dev, void *data,
   1374 		struct drm_file *file_priv)
   1375 {
   1376 	struct drm_buf_desc *request = data;
   1377 	int ret;
   1378 
   1379 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
   1380 		return -EINVAL;
   1381 
   1382 #if __OS_HAS_AGP
   1383 	if (request->flags & _DRM_AGP_BUFFER)
   1384 		ret = drm_addbufs_agp(dev, request);
   1385 	else
   1386 #endif
   1387 	if (request->flags & _DRM_SG_BUFFER)
   1388 		ret = drm_addbufs_sg(dev, request);
   1389 	else if (request->flags & _DRM_FB_BUFFER)
   1390 		ret = drm_addbufs_fb(dev, request);
   1391 	else
   1392 		ret = drm_addbufs_pci(dev, request);
   1393 
   1394 	return ret;
   1395 }
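
/*
 * Illustrative sketch (not part of the original file): user space reaches
 * the dispatch above through the DRM_IOCTL_ADD_BUFS ioctl.  For example,
 * requesting 32 page-aligned PCI DMA buffers of 16 KB each (the numbers are
 * arbitrary):
 *
 *	struct drm_buf_desc desc = {
 *		.count = 32,
 *		.size  = 16384,
 *		.flags = _DRM_PAGE_ALIGN,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *
 * With no AGP/SG/FB flag set, drm_addbufs_pci() handles the request (and
 * requires CAP_SYS_ADMIN); on return desc.count and desc.size report what
 * was actually allocated.
 */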
   1396 
   1397 /**
   1398  * Get information about the buffer mappings.
   1399  *
    1400  * This was originally meant for debugging purposes, or for use by a
    1401  * sophisticated client library to determine how best to use the available
    1402  * buffers (e.g., large buffers can be used for image transfer).
   1403  *
   1404  * \param inode device inode.
   1405  * \param file_priv DRM file private.
   1406  * \param cmd command.
   1407  * \param arg pointer to a drm_buf_info structure.
   1408  * \return zero on success or a negative number on failure.
   1409  *
   1410  * Increments drm_device::buf_use while holding the drm_device::count_lock
    1411  * lock, preventing allocation of more buffers after this call. Information
   1412  * about each requested buffer is then copied into user space.
   1413  */
   1414 int drm_infobufs(struct drm_device *dev, void *data,
   1415 		 struct drm_file *file_priv)
   1416 {
   1417 	struct drm_device_dma *dma = dev->dma;
   1418 	struct drm_buf_info *request = data;
   1419 	int i;
   1420 	int count;
   1421 
   1422 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
   1423 		return -EINVAL;
   1424 
   1425 	if (!dma)
   1426 		return -EINVAL;
   1427 
   1428 	spin_lock(&dev->count_lock);
   1429 	if (atomic_read(&dev->buf_alloc)) {
   1430 		spin_unlock(&dev->count_lock);
   1431 		return -EBUSY;
   1432 	}
   1433 	++dev->buf_use;		/* Can't allocate more after this call */
   1434 	spin_unlock(&dev->count_lock);
   1435 
   1436 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
   1437 		if (dma->bufs[i].buf_count)
   1438 			++count;
   1439 	}
   1440 
   1441 	DRM_DEBUG("count = %d\n", count);
   1442 
   1443 	if (request->count >= count) {
   1444 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
   1445 			if (dma->bufs[i].buf_count) {
   1446 				struct drm_buf_desc __user *to =
   1447 				    &request->list[count];
   1448 				struct drm_buf_entry *from = &dma->bufs[i];
   1449 				struct drm_freelist *list = &dma->bufs[i].freelist;
   1450 				if (copy_to_user(&to->count,
   1451 						 &from->buf_count,
   1452 						 sizeof(from->buf_count)) ||
   1453 				    copy_to_user(&to->size,
   1454 						 &from->buf_size,
   1455 						 sizeof(from->buf_size)) ||
   1456 				    copy_to_user(&to->low_mark,
   1457 						 &list->low_mark,
   1458 						 sizeof(list->low_mark)) ||
   1459 				    copy_to_user(&to->high_mark,
   1460 						 &list->high_mark,
   1461 						 sizeof(list->high_mark)))
   1462 					return -EFAULT;
   1463 
   1464 				DRM_DEBUG("%d %d %d %d %d\n",
   1465 					  i,
   1466 					  dma->bufs[i].buf_count,
   1467 					  dma->bufs[i].buf_size,
   1468 					  dma->bufs[i].freelist.low_mark,
   1469 					  dma->bufs[i].freelist.high_mark);
   1470 				++count;
   1471 			}
   1472 		}
   1473 	}
   1474 	request->count = count;
   1475 
   1476 	return 0;
   1477 }
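
/*
 * Illustrative sketch (not part of the original file): user space typically
 * calls DRM_IOCTL_INFO_BUFS twice, first with count == 0 to learn how many
 * size pools exist, then again with a sufficiently large list:
 *
 *	struct drm_buf_info info = { .count = 0, .list = NULL };
 *
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *	info.list = calloc(info.count, sizeof(*info.list));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 */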
   1478 
   1479 /**
   1480  * Specifies a low and high water mark for buffer allocation
   1481  *
   1482  * \param inode device inode.
   1483  * \param file_priv DRM file private.
   1484  * \param cmd command.
   1485  * \param arg a pointer to a drm_buf_desc structure.
   1486  * \return zero on success or a negative number on failure.
   1487  *
   1488  * Verifies that the size order is bounded between the admissible orders and
   1489  * updates the respective drm_device_dma::bufs entry low and high water mark.
   1490  *
   1491  * \note This ioctl is deprecated and mostly never used.
   1492  */
   1493 int drm_markbufs(struct drm_device *dev, void *data,
   1494 		 struct drm_file *file_priv)
   1495 {
   1496 	struct drm_device_dma *dma = dev->dma;
   1497 	struct drm_buf_desc *request = data;
   1498 	int order;
   1499 	struct drm_buf_entry *entry;
   1500 
   1501 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
   1502 		return -EINVAL;
   1503 
   1504 	if (!dma)
   1505 		return -EINVAL;
   1506 
   1507 	DRM_DEBUG("%d, %d, %d\n",
   1508 		  request->size, request->low_mark, request->high_mark);
   1509 	order = drm_order(request->size);
   1510 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
   1511 		return -EINVAL;
   1512 	entry = &dma->bufs[order];
   1513 
   1514 	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
   1515 		return -EINVAL;
   1516 	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
   1517 		return -EINVAL;
   1518 
   1519 	entry->freelist.low_mark = request->low_mark;
   1520 	entry->freelist.high_mark = request->high_mark;
   1521 
   1522 	return 0;
   1523 }
   1524 
   1525 /**
   1526  * Unreserve the buffers in list, previously reserved using drmDMA.
   1527  *
   1528  * \param inode device inode.
   1529  * \param file_priv DRM file private.
   1530  * \param cmd command.
   1531  * \param arg pointer to a drm_buf_free structure.
   1532  * \return zero on success or a negative number on failure.
   1533  *
   1534  * Calls free_buffer() for each used buffer.
   1535  * This function is primarily used for debugging.
   1536  */
   1537 int drm_freebufs(struct drm_device *dev, void *data,
   1538 		 struct drm_file *file_priv)
   1539 {
   1540 	struct drm_device_dma *dma = dev->dma;
   1541 	struct drm_buf_free *request = data;
   1542 	int i;
   1543 	int idx;
   1544 	struct drm_buf *buf;
   1545 
   1546 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
   1547 		return -EINVAL;
   1548 
   1549 	if (!dma)
   1550 		return -EINVAL;
   1551 
   1552 	DRM_DEBUG("%d\n", request->count);
   1553 	for (i = 0; i < request->count; i++) {
   1554 		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
   1555 			return -EFAULT;
   1556 		if (idx < 0 || idx >= dma->buf_count) {
   1557 			DRM_ERROR("Index %d (of %d max)\n",
   1558 				  idx, dma->buf_count - 1);
   1559 			return -EINVAL;
   1560 		}
   1561 		buf = dma->buflist[idx];
   1562 		if (buf->file_priv != file_priv) {
   1563 			DRM_ERROR("Process %d freeing buffer not owned\n",
   1564 				  task_pid_nr(current));
   1565 			return -EINVAL;
   1566 		}
   1567 		drm_free_buffer(dev, buf);
   1568 	}
   1569 
   1570 	return 0;
   1571 }
   1572 
   1573 /**
   1574  * Maps all of the DMA buffers into client-virtual space (ioctl).
   1575  *
   1576  * \param inode device inode.
   1577  * \param file_priv DRM file private.
   1578  * \param cmd command.
   1579  * \param arg pointer to a drm_buf_map structure.
   1580  * \return zero on success or a negative number on failure.
   1581  *
   1582  * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
   1583  * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
    1584  * offset equal to 0, which drm_mmap() interprets as a request for PCI buffers
    1585  * and hands off to drm_mmap_dma().
   1586  */
   1587 int drm_mapbufs(struct drm_device *dev, void *data,
   1588 	        struct drm_file *file_priv)
   1589 {
   1590 	struct drm_device_dma *dma = dev->dma;
   1591 	int retcode = 0;
   1592 	const int zero = 0;
   1593 	unsigned long virtual;
   1594 	unsigned long address;
   1595 	struct drm_buf_map *request = data;
   1596 	int i;
   1597 
   1598 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
   1599 		return -EINVAL;
   1600 
   1601 	if (!dma)
   1602 		return -EINVAL;
   1603 
   1604 	spin_lock(&dev->count_lock);
   1605 	if (atomic_read(&dev->buf_alloc)) {
   1606 		spin_unlock(&dev->count_lock);
   1607 		return -EBUSY;
   1608 	}
   1609 	dev->buf_use++;		/* Can't allocate more after this call */
   1610 	spin_unlock(&dev->count_lock);
   1611 
   1612 	if (request->count >= dma->buf_count) {
   1613 		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
   1614 		    || (drm_core_check_feature(dev, DRIVER_SG)
   1615 			&& (dma->flags & _DRM_DMA_USE_SG))
   1616 		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
   1617 			&& (dma->flags & _DRM_DMA_USE_FB))) {
   1618 			struct drm_local_map *map = dev->agp_buffer_map;
   1619 			unsigned long token = dev->agp_buffer_token;
   1620 
   1621 			if (!map) {
   1622 				retcode = -EINVAL;
   1623 				goto done;
   1624 			}
   1625 			virtual = vm_mmap(file_priv->filp, 0, map->size,
   1626 					  PROT_READ | PROT_WRITE,
   1627 					  MAP_SHARED,
   1628 					  token);
   1629 		} else {
   1630 			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
   1631 					  PROT_READ | PROT_WRITE,
   1632 					  MAP_SHARED, 0);
   1633 		}
   1634 		if (virtual > -1024UL) {
   1635 			/* Real error */
   1636 			retcode = (signed long)virtual;
   1637 			goto done;
   1638 		}
   1639 		request->virtual = (void __user *)virtual;
   1640 
   1641 		for (i = 0; i < dma->buf_count; i++) {
   1642 			if (copy_to_user(&request->list[i].idx,
   1643 					 &dma->buflist[i]->idx,
   1644 					 sizeof(request->list[0].idx))) {
   1645 				retcode = -EFAULT;
   1646 				goto done;
   1647 			}
   1648 			if (copy_to_user(&request->list[i].total,
   1649 					 &dma->buflist[i]->total,
   1650 					 sizeof(request->list[0].total))) {
   1651 				retcode = -EFAULT;
   1652 				goto done;
   1653 			}
   1654 			if (copy_to_user(&request->list[i].used,
   1655 					 &zero, sizeof(zero))) {
   1656 				retcode = -EFAULT;
   1657 				goto done;
   1658 			}
   1659 			address = virtual + dma->buflist[i]->offset;	/* *** */
   1660 			if (copy_to_user(&request->list[i].address,
   1661 					 &address, sizeof(address))) {
   1662 				retcode = -EFAULT;
   1663 				goto done;
   1664 			}
   1665 		}
   1666 	}
   1667       done:
   1668 	request->count = dma->buf_count;
   1669 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
   1670 
   1671 	return retcode;
   1672 }
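
/*
 * Illustrative sketch (not part of the original file): the matching user
 * space call passes an array big enough for every buffer and reads back the
 * per-buffer addresses (the array size and the use() helper below are
 * hypothetical):
 *
 *	struct drm_buf_pub list[256];
 *	struct drm_buf_map bm = { .count = 256, .list = list };
 *	int i;
 *
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) == 0 && bm.count <= 256) {
 *		for (i = 0; i < bm.count; i++)
 *			use(list[i].address);
 *	}
 */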
   1673 
   1674 /**
    1675  * Compute size order.  Returns the exponent of the smallest power of two
    1676  * which is greater than or equal to the given number.
   1677  *
   1678  * \param size size.
   1679  * \return order.
   1680  *
   1681  * \todo Can be made faster.
   1682  */
   1683 int drm_order(unsigned long size)
   1684 {
   1685 	int order;
   1686 	unsigned long tmp;
   1687 
   1688 	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
   1689 
   1690 	if (size & (size - 1))
   1691 		++order;
   1692 
   1693 	return order;
   1694 }
   1695 EXPORT_SYMBOL(drm_order);
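
/*
 * Worked examples (not from the original source): drm_order(4096) == 12,
 * since 2^12 == 4096; drm_order(5000) == 13, since 4096 < 5000 <= 8192; and
 * drm_order(1) == 0.
 */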
   1696