/*	$NetBSD: via_dmablit.c,v 1.11 2021/12/19 12:30:23 riastradh Exp $	*/

/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */


/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a no-op on x86?
 * FIXME: What happens if this is called while a pending blit still holds
 * the same DMA mappings?
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: via_dmablit.c,v 1.11 2021/12/19 12:30:23 riastradh Exp $");

#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm_device.h>
#include <drm/via_drm.h>

#include "via_dmablit.h"
#include "via_drv.h"

#define VIA_PGDN(x)	(((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	(((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)

typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;
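
/*
 * Each descriptor is 16 bytes (four little-endian 32-bit words) and is
 * consumed directly by the blit engine: 'mem_addr' and 'dev_addr' are the
 * bus addresses of the system-memory and framebuffer chunks, 'size' is the
 * chunk length, and 'next' is the bus address of the next descriptor in the
 * chain, or VIA_DMA_DPR_EC to end it.  This reading of the layout is
 * inferred from how via_map_blit_for_device() and via_fire_dmablit() below
 * use the fields.
 */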

/*
 * Unmap a DMA mapping.
 */

static void
via_unmap_blit_from_device(struct drm_device *dev, struct pci_dev *pdev,
    drm_via_sg_info_t *vsg)
{
#ifdef __NetBSD__
	bus_dmamap_unload(dev->dmat, vsg->dmamap);
#else
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
	    descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	/*
	 * Walk the chain backwards, undoing both the per-descriptor and the
	 * per-page mappings made in via_map_blit_for_device().
	 */
	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
			    descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
#endif
}

/*
 * If mode == 0, count how many descriptors are needed.
 * If mode == 1, map the DMA pages for the device, and build and map the
 * descriptor chain as well.
 * Descriptors are run in reverse order by the hardware because we are not
 * allowed to update the 'next' field without syncing calls when the
 * descriptor is already mapped.
 */

static void
via_map_blit_for_device(struct pci_dev *pdev,
    const drm_via_dmablit_t *xfer,
    drm_via_sg_info_t *vsg,
    int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
#ifndef __NetBSD__
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
#endif
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			/*
			 * Each descriptor covers at most the remainder of
			 * the current page of the line.
			 */
			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
#ifdef __NetBSD__
				const vaddr_t cur_va = (vaddr_t)cur_mem;
				const bus_dma_segment_t *const seg =
				    &vsg->dmamap->dm_segs[atop(cur_va)];
				desc_ptr->mem_addr =
				    seg->ds_addr + trunc_page(cur_va);
#else
				desc_ptr->mem_addr =
				    dma_map_page(&pdev->dev,
					vsg->pages[VIA_PFN(cur_mem) -
					    VIA_PFN(first_addr)],
					VIA_PGOFF(cur_mem), remaining_len,
					vsg->direction);
#endif
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
#ifdef __NetBSD__
				/*
				 * XXX This offset is in descriptors, not
				 * bytes -- shouldn't it be scaled by
				 * sizeof(*desc_ptr)?
				 */
				next = vsg->desc_dmamap
				    ->dm_segs[cur_descriptor_page].ds_addr
				    + num_descriptors_this_page;
#else
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
				    DMA_TO_DEVICE);
#endif
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}
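
/*
 * A sketch of the resulting chain, assuming mode == 1: descriptor i is
 * written with 'next' holding the bus address of descriptor i-1, the first
 * descriptor written gets next == VIA_DMA_DPR_EC (end of chain), and
 * chain_start is left holding the bus address of the last descriptor
 * written.  The engine is started at chain_start and follows the 'next'
 * links, so it consumes the chunks in the reverse of the order they were
 * generated -- which is what the comment above via_map_blit_for_device()
 * means by "run in reverse order".
 */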

/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only been partially built as long as the status enum is
 * consistent with the actual status of the used resources.
 */

static void
via_free_sg_info(struct drm_device *dev, struct pci_dev *pdev,
    drm_via_sg_info_t *vsg)
{
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(dev, pdev, vsg);
		/* fall through */
	case dr_via_desc_pages_alloc:
#ifdef __NetBSD__
		__USE(i);
		bus_dmamap_unload(dev->dmat, vsg->desc_dmamap);
		bus_dmamap_destroy(dev->dmat, vsg->desc_dmamap);
		bus_dmamem_unmap(dev->dmat, vsg->desc_kva,
		    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT);
		bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
		kfree(vsg->desc_segs);
#else
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
#endif
		kfree(vsg->desc_pages);
		/* fall through */
	case dr_via_pages_locked:
#ifdef __NetBSD__
		/* XXX uvm_vsunlock? */
		bus_dmamap_unload(dev->dmat, vsg->dmamap);
#else
		unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
		    (vsg->direction == DMA_FROM_DEVICE));
#endif
		/* fall through */
	case dr_via_pages_alloc:
#ifdef __NetBSD__
		bus_dmamap_destroy(dev->dmat, vsg->dmamap);
#else
		vfree(vsg->pages);
#endif
		/* fall through */
	default:
		vsg->state = dr_via_sg_init;
	}
	vsg->free_on_sequence = 0;
}

/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
	    VIA_DMA_CSR_DE);
	via_write(dev_priv, VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	wmb();
	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
}
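
/*
 * Ordering note for via_fire_dmablit(): the wmb() keeps the descriptor
 * pointer (DPR) and mode-register writes ahead of the transfer-start (TS)
 * write, and the trailing via_read() of the CSR is presumably there to
 * flush the posted write so the engine has seen the start bit by the time
 * we return.
 */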

/*
 * Obtain a page pointer array and lock all pages into system memory.
 * A segmentation violation will occur here if the calling user does not
 * have access to the submitted address.
 */

static int
via_lock_all_dma_pages(struct drm_device *dev, drm_via_sg_info_t *vsg,
    drm_via_dmablit_t *xfer)
{
	int ret;
#ifdef __NetBSD__
	const bus_size_t nbytes = roundup2(xfer->num_lines * xfer->mem_stride,
	    PAGE_SIZE);
	const bus_size_t npages = nbytes >> PAGE_SHIFT;
	struct iovec iov = {
		.iov_base	= xfer->mem_addr,
		.iov_len	= nbytes,
	};
	struct uio uio = {
		.uio_iov	= &iov,
		.uio_iovcnt	= 1,
		.uio_offset	= 0,
		.uio_resid	= nbytes,
		.uio_rw		= xfer->to_fb ? UIO_WRITE : UIO_READ,
		.uio_vmspace	= curproc->p_vmspace,
	};

	/*
	 * XXX Lock out anyone else from doing this?  Add a
	 * dr_via_pages_loading state?  Just rely on the giant lock?
	 */
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_create(dev->dmat, nbytes, npages, nbytes, PAGE_SIZE,
	    BUS_DMA_WAITOK, &vsg->dmamap);
	if (ret) {
		DRM_ERROR("bus_dmamap_create failed: %d\n", ret);
		return ret;
	}
	/* XXX uvm_vslock? */
	ret = -bus_dmamap_load_uio(dev->dmat, vsg->dmamap, &uio,
	    BUS_DMA_WAITOK | (xfer->to_fb? BUS_DMA_WRITE : BUS_DMA_READ));
	if (ret) {
		DRM_ERROR("bus_dmamap_load failed: %d\n", ret);
		bus_dmamap_destroy(dev->dmat, vsg->dmamap);
		return ret;
	}
	vsg->num_pages = npages;
#else
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
	    first_pfn + 1;

	vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
	if (NULL == vsg->pages)
		return -ENOMEM;
	ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
	    vsg->num_pages,
	    vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
	    vsg->pages);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
	}
#endif
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}
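
/*
 * Example of the page-count arithmetic in the Linux branch above, assuming
 * 4 KiB pages: for mem_addr == 0x10800 and num_lines * mem_stride == 0x2000,
 * the transfer touches bytes 0x10800..0x127ff, i.e. PFNs 0x10 through 0x12,
 * so num_pages == 0x12 - 0x10 + 1 == 3 even though only two pages' worth of
 * data moves.  The NetBSD branch instead rounds the byte count up to a page
 * multiple and lets bus_dmamap_load_uio() deal with the starting offset.
 */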

/*
 * Allocate DMA capable memory for the blit descriptor chain, and an array
 * that keeps track of the pages we allocate. We don't want to use kmalloc
 * for the descriptor chain because it may be quite large for some blits,
 * and pages don't need to be contiguous.
 */

static int
via_alloc_desc_pages(struct drm_device *dev, drm_via_sg_info_t *vsg)
{
	int i;
#ifdef __NetBSD__
	int ret;
#endif

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
	    vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

#ifdef __NetBSD__
	vsg->desc_segs = kcalloc(vsg->num_desc_pages, sizeof(*vsg->desc_segs),
	    GFP_KERNEL);
	if (vsg->desc_segs == NULL) {
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	/* XXX errno NetBSD->Linux */
	/* XXX Shouldn't nsegs be num_desc_pages rather than num_pages? */
	ret = -bus_dmamem_alloc(dev->dmat,
	    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT,
	    PAGE_SIZE, 0, vsg->desc_segs, vsg->num_pages, &vsg->num_desc_segs,
	    BUS_DMA_WAITOK);
	if (ret) {
		kfree(vsg->desc_segs);
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	/* XXX No nice way to scatter/gather map bus_dmamem. */
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_map(dev->dmat, vsg->desc_segs, vsg->num_desc_segs,
	    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT, &vsg->desc_kva,
	    BUS_DMA_WAITOK);
	if (ret) {
		bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
		kfree(vsg->desc_segs);
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_create(dev->dmat,
	    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT,
	    vsg->num_desc_pages, PAGE_SIZE, 0, BUS_DMA_WAITOK,
	    &vsg->desc_dmamap);
	if (ret) {
		bus_dmamem_unmap(dev->dmat, vsg->desc_kva,
		    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT);
		bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
		kfree(vsg->desc_segs);
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	ret = -bus_dmamap_load(dev->dmat, vsg->desc_dmamap, vsg->desc_kva,
	    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT, NULL,
	    BUS_DMA_WAITOK);
	if (ret) {
		bus_dmamap_destroy(dev->dmat, vsg->desc_dmamap);
		bus_dmamem_unmap(dev->dmat, vsg->desc_kva,
		    (bus_size_t)vsg->num_desc_pages << PAGE_SHIFT);
		bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
		kfree(vsg->desc_segs);
		kfree(vsg->desc_pages);
		return -ENOMEM;
	}
	for (i = 0; i < vsg->num_desc_pages; i++)
		vsg->desc_pages[i] = (void *)
		    ((char *)vsg->desc_kva + (i * PAGE_SIZE));
	vsg->state = dr_via_desc_pages_alloc;
#else
	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
		    (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
#endif
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
	    vsg->num_desc);
	return 0;
}
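
/*
 * Sizing example: sizeof(drm_via_descriptor_t) == 16, so with 4 KiB pages
 * descriptors_per_page == 256.  A blit needing, say, 1000 descriptors
 * therefore allocates ceil(1000/256) == 4 descriptor pages.
 */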

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}


/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits, is done in a separate
 * workqueue task. Basically the task of the interrupt handler is to submit a new blit
 * to the engine, while the workqueue task takes care of processing associated with the
 * old blit.
 */

void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
	    engine, from_irq, (unsigned long) blitq);

	if (from_irq)
		spin_lock(&blitq->blit_lock);
	else
		spin_lock_irqsave(&blitq->blit_lock, irqsave);

	done_transfer = blitq->is_active &&
	    ((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
#ifdef __NetBSD__
		DRM_SPIN_WAKEUP_ALL(&blitq->blit_queue[cur],
		    &blitq->blit_lock);
#else
		wake_up(blitq->blit_queue + cur);
#endif

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer))
				del_timer(&blitq->poll_timer);
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq)
		spin_unlock(&blitq->blit_lock);
	else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}


/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
#ifdef __NetBSD__
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, drm_waitqueue_t **queue)
#else
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
#endif
{
#ifndef __NetBSD__	/* On NetBSD, callers already hold blit_lock. */
	unsigned long irqsave;
#endif
	uint32_t slot;
	int active;

#ifndef __NetBSD__
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
#endif

	/*
	 * Allow for handle wraparounds.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
	    ((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS)
			slot -= VIA_NUM_BLIT_SLOTS;
		*queue = blitq->blit_queue + slot;
	}

#ifndef __NetBSD__
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
#endif

	return active;
}
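
/*
 * Worked example of the wraparound test above (handles are uint32_t):
 * with done_blit_handle == 5 and cur_blit_handle == 7, handle == 6 gives
 * (5 - 6) == 0xffffffff > (1 << 23) and (7 - 6) == 1 <= (1 << 23), so the
 * blit is still pending; handle == 4 fails the first test since (5 - 4) == 1,
 * i.e. that blit has already been retired.  Unsigned subtraction keeps both
 * comparisons valid across handle wraparound.
 */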

/*
 * Sync. Wait for at least three seconds for the blit to be performed.
 */

static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
#ifdef __NetBSD__
	drm_waitqueue_t *queue;
#else
	wait_queue_head_t *queue;
#endif
	int ret = 0;

#ifdef __NetBSD__
	spin_lock(&blitq->blit_lock);
	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_SPIN_WAIT_ON(ret, queue, &blitq->blit_lock, 3*HZ,
		    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	spin_unlock(&blitq->blit_lock);
#else
	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		VIA_WAIT_ON(ret, *queue, 3 * HZ,
		    !via_dmablit_active(blitq, engine, handle, NULL));
	}
#endif
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
	    handle, engine, ret);

	return ret;
}


/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, it will shorten the latency somewhat.
 */

static void
via_dmablit_timer(struct timer_list *t)
{
	drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
	struct drm_device *dev = blitq->dev;
	int engine = (int)
	    (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
	    (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);

	}
}

/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */

static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;


	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
	    (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

#ifdef __NetBSD__
		DRM_SPIN_WAKEUP_ONE(&blitq->busy_queue, &blitq->blit_lock);
#endif

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

#ifndef __NetBSD__
		wake_up(&blitq->busy_queue);
#endif

#ifdef __NetBSD__
		/* Transfer completed. Sync it. */
		bus_dmamap_sync(dev->dmat, cur_sg->dmamap, 0,
		    cur_sg->num_pages << PAGE_SHIFT,
		    (cur_sg->direction == DMA_FROM_DEVICE
			? BUS_DMASYNC_POSTREAD
			: BUS_DMASYNC_POSTWRITE));
#endif
		via_free_sg_info(dev, dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
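
/*
 * Note that blit_lock is dropped around via_free_sg_info() above: the
 * teardown path (bus_dma unload/destroy, unpinning, vfree) may sleep, so it
 * must not run under a spin lock.  The slot itself has already been recycled
 * under the lock by bumping num_free and waking a busy_queue waiter.
 */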

/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */

void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
#ifdef __NetBSD__
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j, "viablt");
		DRM_INIT_WAITQUEUE(&blitq->busy_queue, "viabusy");
#else
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
			init_waitqueue_head(blitq->blit_queue + j);
		init_waitqueue_head(&blitq->busy_queue);
#endif
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
	}
}

/*
 * Build all info and do all mappings required for a blit.
 */

static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * Below check is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
		    "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

	if ((xfer->mem_stride == xfer->line_length) &&
	    (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that causes
	 * a DoS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses
	 * start on 16 byte boundaries. This seems a bit restrictive, however.
	 * VIA has been contacted about this. Meanwhile, impose the following
	 * restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	    ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	    ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(dev, vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev, dev->pdev, vsg);
		return ret;
	}

	/* First pass: count descriptors.  Second pass: build and map them. */
	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(dev, vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev, dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}
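
/*
 * The size cap above, 2048*2048*4 bytes, is one full 2048x2048 frame at
 * 32 bits per pixel, i.e. 16 MiB; with 4 KiB pages that bounds each queued
 * blit at roughly 4096 pinned pages.
 */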

/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
#ifdef __NetBSD__
		DRM_SPIN_WAIT_ON(ret, &blitq->busy_queue, &blitq->blit_lock,
		    HZ,
		    blitq->num_free > 0);
		/* Map -EINTR to -EAGAIN. */
		if (ret == -EINTR)
			ret = -EAGAIN;
		/* Bail on failure. */
		if (ret) {
			spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
			return ret;
		}
#else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
		if (ret)
			return (-EINTR == ret) ? -EAGAIN : ret;

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
#endif
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

/*
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
#ifdef __NetBSD__
	DRM_SPIN_WAKEUP_ONE(&blitq->busy_queue, &blitq->blit_lock);
#endif
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
#ifndef __NetBSD__
	wake_up(&blitq->busy_queue);
#endif
}
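
/*
 * The two branches in via_dmablit_grab_slot() differ because, on NetBSD,
 * DRM_SPIN_WAIT_ON() sleeps with the spin lock held as an interlock,
 * whereas the Linux VIA_WAIT_ON() path must drop and retake blit_lock
 * around the wait.
 */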

/*
 * Grab a free slot. Build blit info and queue a blit.
 */

static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return -EINVAL;
	}

	/* Engine 0 handles transfers to the framebuffer, engine 1 from it. */
	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
		return ret;
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return -ENOMEM;
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
#ifdef __NetBSD__
	/* Prepare to begin a DMA transfer. */
	bus_dmamap_sync(dev->dmat, vsg->dmamap, 0,
	    vsg->num_pages << PAGE_SHIFT,
	    (vsg->direction == DMA_FROM_DEVICE
		? BUS_DMASYNC_PREREAD
		: BUS_DMASYNC_PREWRITE));
#endif
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}

/*
 * Sync on a previously submitted blit. Note that the X server uses signals
 * extensively, so there is a high probability that this IOCTL will be
 * interrupted by a signal. In that case it returns with -EAGAIN for the
 * signal to be delivered. The caller should then reissue the IOCTL. This is
 * similar to what is being done for drmGetLock().
 */

int
via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

	if (-EINTR == err)
		err = -EAGAIN;

	return err;
}

/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be
 * interrupted by a signal while waiting for a free slot in the blit queue.
 * In that case it returns with -EAGAIN and should be reissued. See the above
 * IOCTL code.
 */

int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_dmablit_t *xfer = data;
	int err;

	err = via_dmablit(dev, xfer);

	return err;
}