/* amdgpu_ih.c -- NetBSD revision 1.5 (source-browser header converted to a comment) */
      1 /*	$NetBSD: amdgpu_ih.c,v 1.5 2021/12/18 23:44:58 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2014 Advanced Micro Devices, Inc.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22  * OTHER DEALINGS IN THE SOFTWARE.
     23  *
     24  */
     25 
     26 #include <sys/cdefs.h>
     27 __KERNEL_RCSID(0, "$NetBSD: amdgpu_ih.c,v 1.5 2021/12/18 23:44:58 riastradh Exp $");
     28 
     29 #include <linux/dma-mapping.h>
     30 
     31 #include "amdgpu.h"
     32 #include "amdgpu_ih.h"
     33 
     34 /**
     35  * amdgpu_ih_ring_init - initialize the IH state
     36  *
     37  * @adev: amdgpu_device pointer
     38  * @ih: ih ring to initialize
     39  * @ring_size: ring size to allocate
     40  * @use_bus_addr: true when we can use dma_alloc_coherent
     41  *
     42  * Initializes the IH state and allocates a buffer
     43  * for the IH ring buffer.
     44  * Returns 0 for success, errors for failure.
     45  */
     46 int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
     47 			unsigned ring_size, bool use_bus_addr)
     48 {
     49 	u32 rb_bufsz;
     50 	int r;
     51 
     52 	/* Align ring size */
     53 	rb_bufsz = order_base_2(ring_size / 4);
     54 	ring_size = (1 << rb_bufsz) * 4;
     55 	ih->ring_size = ring_size;
     56 	ih->ptr_mask = ih->ring_size - 1;
     57 	ih->rptr = 0;
     58 	ih->use_bus_addr = use_bus_addr;
     59 
     60 	if (use_bus_addr) {
     61 		dma_addr_t dma_addr;
     62 
     63 		if (ih->ring)
     64 			return 0;
     65 
     66 		/* add 8 bytes for the rptr/wptr shadows and
     67 		 * add them to the end of the ring allocation.
     68 		 */
     69 #ifdef __NetBSD__ /* XXX post-merge audit */
     70 		const bus_size_t size = adev->irq.ih.ring_size + 8;
     71 		int rseg __diagused;
     72 		void *kva;
     73 		r = -bus_dmamem_alloc(adev->ddev->dmat, size,
     74 		    PAGE_SIZE, 0, &adev->irq.ih.ring_seg, 1, &rseg,
     75 		    BUS_DMA_WAITOK);
     76 		if (r) {
     77 fail0:			KASSERT(r);
     78 			return r;
     79 		}
     80 		KASSERT(rseg == 0);
     81 		r = -bus_dmamap_create(adev->ddev->dmat, size, 1,
     82 		    PAGE_SIZE, 0, BUS_DMA_WAITOK,
     83 		    &adev->irq.ih.ring_map);
     84 		if (r) {
     85 fail1:			bus_dmamem_free(adev->ddev->dmat,
     86 			    &adev->irq.ih.ring_seg, 1);
     87 			goto fail0;
     88 		}
     89 		r = -bus_dmamem_map(adev->ddev->dmat,
     90 		    &adev->irq.ih.ring_seg, 1, size, &kva,
     91 		    BUS_DMA_WAITOK);
     92 		if (r) {
     93 fail2:			bus_dmamap_destroy(adev->ddev->dmat,
     94 			    adev->irq.ih.ring_map);
     95 			adev->irq.ih.ring_map = NULL;
     96 			goto fail1;
     97 		}
     98 		r = -bus_dmamap_load(adev->ddev->dmat,
     99 		    adev->irq.ih.ring_map, kva, size, NULL,
    100 		    BUS_DMA_WAITOK);
    101 		if (r) {
    102 fail3: __unused		bus_dmamem_unmap(adev->ddev->dmat, kva, size);
    103 			goto fail2;
    104 		}
    105 		adev->irq.ih.ring = kva;
    106 		adev->irq.ih.rb_dma_addr =
    107 		    adev->irq.ih.ring_map->dm_segs[0].ds_addr;
    108 #else
    109 		ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8,
    110 					      &dma_addr, GFP_KERNEL);
    111 		if (ih->ring == NULL)
    112 			return -ENOMEM;
    113 #endif
    114 
    115 		ih->gpu_addr = dma_addr;
    116 		ih->wptr_addr = dma_addr + ih->ring_size;
    117 		ih->wptr_cpu = &ih->ring[ih->ring_size / 4];
    118 		ih->rptr_addr = dma_addr + ih->ring_size + 4;
    119 		ih->rptr_cpu = &ih->ring[(ih->ring_size / 4) + 1];
    120 	} else {
    121 		unsigned wptr_offs, rptr_offs;
    122 
    123 		r = amdgpu_device_wb_get(adev, &wptr_offs);
    124 		if (r)
    125 			return r;
    126 
    127 		r = amdgpu_device_wb_get(adev, &rptr_offs);
    128 		if (r) {
    129 			amdgpu_device_wb_free(adev, wptr_offs);
    130 			return r;
    131 		}
    132 
    133 		r = amdgpu_bo_create_kernel(adev, ih->ring_size, PAGE_SIZE,
    134 					    AMDGPU_GEM_DOMAIN_GTT,
    135 					    &ih->ring_obj, &ih->gpu_addr,
    136 					    (void **)&ih->ring);
    137 		if (r) {
    138 			amdgpu_device_wb_free(adev, rptr_offs);
    139 			amdgpu_device_wb_free(adev, wptr_offs);
    140 			return r;
    141 		}
    142 
    143 		ih->wptr_addr = adev->wb.gpu_addr + wptr_offs * 4;
    144 		ih->wptr_cpu = &adev->wb.wb[wptr_offs];
    145 		ih->rptr_addr = adev->wb.gpu_addr + rptr_offs * 4;
    146 		ih->rptr_cpu = &adev->wb.wb[rptr_offs];
    147 	}
    148 	return 0;
    149 }
    150 
    151 /**
    152  * amdgpu_ih_ring_fini - tear down the IH state
    153  *
    154  * @adev: amdgpu_device pointer
    155  * @ih: ih ring to tear down
    156  *
    157  * Tears down the IH state and frees buffer
    158  * used for the IH ring buffer.
    159  */
    160 void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
    161 {
    162 	if (ih->use_bus_addr) {
    163 		if (!ih->ring)
    164 			return;
    165 
    166 		/* add 8 bytes for the rptr/wptr shadows and
    167 		 * add them to the end of the ring allocation.
    168 		 */
    169 #ifdef __NetBSD__ /* XXX post-merge audit */
    170 		const bus_size_t size = adev->irq.ih.ring_size + 8;
    171 		void *kva = __UNVOLATILE(adev->irq.ih.ring);
    172 		bus_dmamap_unload(adev->ddev->dmat,
    173 		    adev->irq.ih.ring_map);
    174 		bus_dmamem_unmap(adev->ddev->dmat, kva, size);
    175 		bus_dmamap_destroy(adev->ddev->dmat,
    176 		    adev->irq.ih.ring_map);
    177 		bus_dmamem_free(adev->ddev->dmat,
    178 		    &adev->irq.ih.ring_seg, 1);
    179 #else
    180 		dma_free_coherent(adev->dev, ih->ring_size + 8,
    181 				  (void *)ih->ring, ih->gpu_addr);
    182 #endif
    183 		ih->ring = NULL;
    184 	} else {
    185 		amdgpu_bo_free_kernel(&ih->ring_obj, &ih->gpu_addr,
    186 				      (void **)&ih->ring);
    187 		amdgpu_device_wb_free(adev, (ih->wptr_addr - ih->gpu_addr) / 4);
    188 		amdgpu_device_wb_free(adev, (ih->rptr_addr - ih->gpu_addr) / 4);
    189 	}
    190 }
    191 
    192 /**
    193  * amdgpu_ih_process - interrupt handler
    194  *
    195  * @adev: amdgpu_device pointer
    196  * @ih: ih ring to process
    197  *
    198  * Interrupt hander (VI), walk the IH ring.
    199  * Returns irq process return code.
    200  */
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
	/* Budget of IVs handled per invocation so an interrupt storm
	 * cannot monopolize the handler. */
	unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
	u32 wptr;

	if (!ih->enabled || adev->shutdown)
		return IRQ_NONE;

	wptr = amdgpu_ih_get_wptr(adev, ih);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&ih->lock, 1))
		return IRQ_NONE;

	DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* Dispatch entries until the read pointer catches up with the
	 * write pointer or the per-call budget is exhausted;
	 * amdgpu_irq_dispatch() advances ih->rptr, and the mask keeps
	 * it within the ring. */
	while (ih->rptr != wptr && --count) {
		amdgpu_irq_dispatch(adev, ih);
		ih->rptr &= ih->ptr_mask;
	}

	amdgpu_ih_set_rptr(adev, ih);
	atomic_set(&ih->lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = amdgpu_ih_get_wptr(adev, ih);
	if (wptr != ih->rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
    236 
    237