/*	$NetBSD: ttm_bo_util.c,v 1.28 2021/12/19 11:34:29 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_util.c,v 1.28 2021/12/19 11:34:29 riastradh Exp $");

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

#ifdef __NetBSD__		/* PMAP_* caching flags for ttm_io_prot */
#include <uvm/uvm_pmap.h>
#include <linux/nbsd-namespace.h>
#endif

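/*
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Release the memory node backing @bo's current placement.
 */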
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

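/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context supplying the interruptible and no_wait_gpu flags.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Move @bo by unbinding its ttm_tt from the old placement (after waiting
 * for the GPU to finish with it) and binding it to @new_mem.  On success
 * the old memory node is freed and @bo->mem is updated in place.
 */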
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

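/*
 * ttm_mem_io_lock/ttm_mem_io_unlock
 *
 * Serialize io_mem_reserve/io_mem_free for a memory type manager.  When
 * the manager advertises io_reserve_fastpath, no serialization is needed
 * and both calls are no-ops.
 */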
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

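/*
 * Evict the least recently used buffer object from @man's io-reserve LRU,
 * unmapping its virtual mapping so its io space can be reused.  Returns
 * -EAGAIN if there is nothing to evict.
 */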
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

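/*
 * ttm_mem_io_reserve/ttm_mem_io_free
 *
 * Reserve or release the io space backing @mem through the driver's
 * io_mem_reserve/io_mem_free hooks, keeping a reservation count so that
 * nested reservations are cheap.  If the driver returns -EAGAIN,
 * ttm_mem_io_reserve evicts an existing io mapping and retries.
 */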
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

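/*
 * ttm_mem_reg_ioremap/ttm_mem_reg_iounmap
 *
 * Map an entire io memory region into kernel virtual address space, or
 * tear such a mapping down again.  On NetBSD this goes through
 * bus_space_map/bus_space_unmap rather than Linux's ioremap family.
 */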
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
#ifdef __NetBSD__
		const bus_addr_t bus_addr = (mem->bus.base + mem->bus.offset);
		int flags = BUS_SPACE_MAP_LINEAR;

		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bdev->memt, bus_addr, mem->bus.size,
		    flags, &mem->bus.memh);
		if (ret) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return ret;
		}
		addr = bus_space_vaddr(bdev->memt, mem->bus.memh);
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
#endif
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
#ifdef __NetBSD__
		bus_space_unmap(bdev->memt, mem->bus.memh, mem->bus.size);
#else
		iounmap(virtual);
#endif
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

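/*
 * NetBSD has no ioread32/iowrite32, so provide minimal stand-ins for
 * ttm_copy_io_page below: plain volatile accesses with an instruction
 * barrier.  As the XXX comments note, byte order and stronger io
 * barriers are not addressed here.
 */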
#ifdef __NetBSD__
#  define	ioread32	fake_ioread32
#  define	iowrite32	fake_iowrite32

static inline uint32_t
ioread32(const volatile uint32_t *p)
{
	uint32_t v;

	v = *p;
	__insn_barrier();	/* XXX ttm io barrier */

	return v;		/* XXX ttm byte order */
}

static inline void
iowrite32(uint32_t v, volatile uint32_t *p)
{

	__insn_barrier();	/* XXX ttm io barrier */
	*p = v;			/* XXX ttm byte order */
}
#endif

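/*
 * Copy one page from @src to @dst, both io mappings, using 32-bit loads
 * and stores.  @page is the page index into both mappings.
 */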
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef __NetBSD__
#  undef	ioread32
#  undef	iowrite32
#endif

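/*
 * On x86, kmap_atomic_prot can map a single page with an arbitrary page
 * protection, so use it directly; elsewhere, fall back to vmap/vunmap of
 * a one-page region.
 */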
#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr, 1)
#endif

/**
 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 * specified page protection.
 *
 * @page: The page to map.
 * @prot: The page protection.
 *
 * This function maps a TTM page using the kmap_atomic api if available,
 * otherwise falls back to vmap. The user must make sure that the
 * specified page does not have an aliased mapping with a different caching
 * policy unless the architecture explicitly allows it. Also mapping and
 * unmapping using this api must be correctly nested. Unmapping should
 * occur in the reverse order of mapping.
 */
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		return kmap_atomic(page);
	else
		return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);

/**
 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 * ttm_kmap_atomic_prot.
 *
 * @addr: The virtual address from the map.
 * @prot: The page protection.
 */
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		kunmap_atomic(addr);
	else
		__ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);

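/*
 * ttm_copy_io_ttm_page/ttm_copy_ttm_io_page
 *
 * Copy a single page between an io mapping and a ttm_tt page, mapping
 * the ttm page with ttm_kmap_atomic_prot() under the caching-dependent
 * protection @prot, which callers derive with ttm_io_prot().
 */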
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = ttm_kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(dst, prot);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = ttm_kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(src, prot);

	return 0;
}

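/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context supplying the interruptible and no_wait_gpu flags.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move path: wait for the GPU, map the old and new placements,
 * and copy the buffer contents page by page, copying backwards when the
 * regions may overlap within the same memory type.
 */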
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

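/*
 * Destroy callback for the ghost objects created by
 * ttm_buffer_object_transfer(): drop the reference on the original bo
 * and free the transfer object.
 */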
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	dma_resv_fini(&fbo->base.base._resv);
	if (ttm_bo_uses_embedded_gem_object(bo)) {
		/*
		 * Initialization is unconditional, but we don't go
		 * through drm_gem_object_release, and destruction in
		 * ttm_bo_release is conditional, so do this
		 * conditionally with the reverse sense.
		 *
		 * Yes, this is a kludge.
		 */
		drm_vma_node_destroy(&fbo->base.base.vma_node);
	}
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;
	fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_bo_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	INIT_LIST_HEAD(&fbo->base.swap);
	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
	fbo->base.moving = NULL;
#ifdef __NetBSD__
	drm_vma_node_init(&fbo->base.base.vma_node);
	uvm_obj_init(&fbo->base.uvmobj, bo->bdev->driver->ttm_uvm_ops, true, 1);
	rw_obj_hold(bo->uvmobj.vmobjlock);
	uvm_obj_setlock(&fbo->base.uvmobj, bo->uvmobj.vmobjlock);
#else
	drm_vma_node_reset(&fbo->base.base.vma_node);
#endif

	kref_init(&fbo->base.list_kref);
	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.acc_size = 0;
	if (bo->base.resv == &bo->base._resv)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	*new_obj = &fbo->base;
	return 0;
}

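/**
 * ttm_io_prot
 *
 * @caching_flags: The TTM caching flags of the mapping.
 * @tmp: The base page protection to modify.
 *
 * Return the page protection to use for a mapping with the given caching
 * flags: unmodified for cached maps, write-combining or uncached
 * otherwise (via the PMAP_* flags on NetBSD).
 */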
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#ifdef __NetBSD__
	tmp &= ~PMAP_CACHE_MASK;
	if (caching_flags & TTM_PL_FLAG_WC)
		return (tmp | PMAP_WRITE_COMBINE);
	else
		return (tmp | PMAP_NOCACHE);
#else
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
#endif
}
EXPORT_SYMBOL(ttm_io_prot);

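/*
 * Map @size bytes of @bo's io region, starting at @offset, recording the
 * mapping type and virtual address in @map.  Premapped regions are
 * reused directly.
 */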
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
#ifdef __NetBSD__
	    {
		bus_addr_t addr;
		int flags = BUS_SPACE_MAP_LINEAR;
		int ret;

		addr = (bo->mem.bus.base + bo->mem.bus.offset + offset);
		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bo->bdev->memt, addr, size, flags,
		    &map->u.io.memh);
		if (ret)
			return ret;
		map->u.io.size = size;
		map->virtual = bus_space_vaddr(bo->bdev->memt, map->u.io.memh);
	    }
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
					       size);
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

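/*
 * Kernel-map @num_pages pages of @bo's ttm_tt starting at @start_page.
 * A single cached page is mapped with kmap(); anything else goes through
 * vmap() with a protection derived from the placement.
 */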
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
#ifdef __NetBSD__
		map->u.kmapped.page = ttm->pages[start_page];
		map->virtual = kmap(map->u.kmapped.page);
#else
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
#endif
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
#ifdef __NetBSD__
		map->u.vmapped.vsize = (vsize_t)num_pages << PAGE_SHIFT;
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

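/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: Pointer to a struct ttm_bo_kmap_obj recording the mapping.
 *
 * Map part of a buffer object into kernel address space, whether it
 * currently lives in system pages or in io memory; undo with
 * ttm_bo_kunmap().  Illustrative sketch of a hypothetical caller (not
 * taken from any driver; error handling abbreviated):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret == 0) {
 *		void *vaddr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		if (!is_iomem)
 *			memset(vaddr, 0, bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 */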
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
#ifdef __NetBSD__
		bus_space_unmap(bo->bdev->memt, map->u.io.memh,
		    map->u.io.size);
#else
		iounmap(map->virtual);
#endif
		break;
	case ttm_bo_map_vmap:
#ifdef __NetBSD__
		vunmap(map->virtual, map->u.vmapped.vsize >> PAGE_SHIFT);
#else
		vunmap(map->virtual);
#endif
		break;
	case ttm_bo_map_kmap:
#ifdef __NetBSD__
		kunmap(map->u.kmapped.page);
#else
		kunmap(map->page);
#endif
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
#ifndef __NetBSD__
	map->page = NULL;
#endif
}
EXPORT_SYMBOL(ttm_bo_kunmap);

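/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when the accelerated move is done.
 * @evict: Whether this is an evict move.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Finish an accelerated move: attach @fence to the reservation object.
 * For evictions, wait for idle and free the old node; otherwise hang the
 * old placement on a ghost object so it is released only once the GPU
 * copy has completed.
 */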
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

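/**
 * ttm_bo_pipeline_move
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when the move is done.
 * @evict: Whether this is an evict move.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Like ttm_bo_move_accel_cleanup(), but evictions from fixed memory are
 * also pipelined: the eviction fence is remembered on the source manager
 * instead of being waited for, and blocking is only the last resort.
 */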
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	dma_resv_add_excl_fence(bo->base.resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		dma_resv_unlock(&ghost_obj->base._resv);
		ttm_bo_put(ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation.
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

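/*
 * Gut @bo: transfer its current placement and fences to a ghost object
 * and reset @bo to an empty system-memory placement with no ttm_tt,
 * without blocking unless fence copying fails.
 */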
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);

	return 0;
}
    984