/*	$NetBSD: agp.c,v 1.88 2022/05/22 11:27:35 andvar Exp $	*/

/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: src/sys/pci/agp.c,v 1.12 2001/05/19 01:28:07 alfred Exp $
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: agp.c,v 1.88 2022/05/22 11:27:35 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/agpio.h>
#include <sys/proc.h>
#include <sys/mutex.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>
#include <dev/pci/agpreg.h>
#include <dev/pci/pcidevs.h>

#include <sys/bus.h>

MALLOC_DEFINE(M_AGP, "AGP", "AGP memory");

/* Helper functions for implementing chipset mini drivers. */
/* XXXfvdl get rid of this one. */

extern struct cfdriver agp_cd;

static int agp_info_user(struct agp_softc *, agp_info *);
static int agp_setup_user(struct agp_softc *, agp_setup *);
static int agp_allocate_user(struct agp_softc *, agp_allocate *);
static int agp_deallocate_user(struct agp_softc *, int);
static int agp_bind_user(struct agp_softc *, agp_bind *);
static int agp_unbind_user(struct agp_softc *, agp_unbind *);
static int agp_generic_enable_v2(struct agp_softc *,
    const struct pci_attach_args *, int, u_int32_t);
static int agp_generic_enable_v3(struct agp_softc *,
    const struct pci_attach_args *, int, u_int32_t);
static int agpdev_match(const struct pci_attach_args *);
static bool agp_resume(device_t, const pmf_qual_t *);

#include "agp_ali.h"
#include "agp_amd.h"
#include "agp_i810.h"
#include "agp_intel.h"
#include "agp_sis.h"
#include "agp_via.h"
#include "agp_amd64.h"

const struct agp_product {
        uint32_t        ap_vendor;
        uint32_t        ap_product;
        int             (*ap_match)(const struct pci_attach_args *);
        int             (*ap_attach)(device_t, device_t, void *);
} agp_products[] = {
#if NAGP_AMD64 > 0
        { PCI_VENDOR_ALI, PCI_PRODUCT_ALI_M1689,
          agp_amd64_match, agp_amd64_attach },
#endif

#if NAGP_ALI > 0
        { PCI_VENDOR_ALI, -1,
          NULL, agp_ali_attach },
#endif

#if NAGP_AMD64 > 0
        { PCI_VENDOR_AMD, PCI_PRODUCT_AMD_AGP8151_DEV,
          agp_amd64_match, agp_amd64_attach },
#endif

#if NAGP_AMD > 0
        { PCI_VENDOR_AMD, -1,
          agp_amd_match, agp_amd_attach },
#endif

#if NAGP_I810 > 0
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_MCH,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_DC100_MCH,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810E_MCH,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82815_FULL_HUB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82840_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830MP_IO_1,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82845G_DRAM,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82855GM_MCH,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82865_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82915G_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82915GM_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945P_MCH,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945GM_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945GME_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82965Q_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82965PM_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82965G_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q35_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G33_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q33_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G35_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82946GZ_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82GM45_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82IGD_E_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q45_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G45_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G41_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_E7221_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82965GME_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82B43_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_IRONLAKE_D_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_IRONLAKE_M_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_IRONLAKE_MA_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_IRONLAKE_MC2_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PINEVIEW_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PINEVIEW_M_HB,
          NULL, agp_i810_attach },
#endif

#if NAGP_INTEL > 0
        { PCI_VENDOR_INTEL, -1,
          NULL, agp_intel_attach },
#endif

#if NAGP_AMD64 > 0
        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_PCHB,
          agp_amd64_match, agp_amd64_attach },
        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_PCHB,
          agp_amd64_match, agp_amd64_attach },
#endif

#if NAGP_AMD64 > 0
        { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_755,
          agp_amd64_match, agp_amd64_attach },
        { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_760,
          agp_amd64_match, agp_amd64_attach },
#endif

#if NAGP_SIS > 0
        { PCI_VENDOR_SIS, -1,
          NULL, agp_sis_attach },
#endif

#if NAGP_AMD64 > 0
        { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_K8M800_0,
          agp_amd64_match, agp_amd64_attach },
        { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_K8T890_0,
          agp_amd64_match, agp_amd64_attach },
        { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_K8HTB_0,
          agp_amd64_match, agp_amd64_attach },
        { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_K8HTB,
          agp_amd64_match, agp_amd64_attach },
#endif

#if NAGP_VIA > 0
        { PCI_VENDOR_VIATECH, -1,
          NULL, agp_via_attach },
#endif

        { 0, 0,
          NULL, NULL },
};

static const struct agp_product *
agp_lookup(const struct pci_attach_args *pa)
{
        const struct agp_product *ap;

        /* First find the vendor. */
        for (ap = agp_products; ap->ap_attach != NULL; ap++) {
                if (PCI_VENDOR(pa->pa_id) == ap->ap_vendor)
                        break;
        }

        if (ap->ap_attach == NULL)
                return (NULL);

        /* Now find the product within the vendor's domain. */
        for (; ap->ap_attach != NULL; ap++) {
                if (PCI_VENDOR(pa->pa_id) != ap->ap_vendor) {
                        /* Ran out of this vendor's section of the table. */
                        return (NULL);
                }
                if (ap->ap_product == PCI_PRODUCT(pa->pa_id)) {
                        /* Exact match. */
                        break;
                }
                if (ap->ap_product == (uint32_t) -1) {
                        /* Wildcard match. */
                        break;
                }
        }

        if (ap->ap_attach == NULL)
                return (NULL);

        /* Now let the product-specific driver filter the match. */
        if (ap->ap_match != NULL && (*ap->ap_match)(pa) == 0)
                return (NULL);

        return (ap);
}

static int
agpmatch(device_t parent, cfdata_t match, void *aux)
{
        struct agpbus_attach_args *apa = aux;
        struct pci_attach_args *pa = &apa->apa_pci_args;

        if (agp_lookup(pa) == NULL)
                return (0);

        return (1);
}

static const u_int agp_max[][2] = {
        {0,     0},
        {32,    4},
        {64,    28},
        {128,   96},
        {256,   204},
        {512,   440},
        {1024,  942},
        {2048,  1920},
        {4096,  3932}
};
#define agp_max_size    (sizeof(agp_max) / sizeof(agp_max[0]))
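
/*
 * Editor's note (illustrative, not part of the original source): the
 * table above maps installed RAM in MB to the maximum amount of system
 * memory, also in MB, that the driver will hand out for AGP use.  For
 * example, on a machine with 512 MB of RAM, agpattach() below computes
 * memsize = 512, the search stops at the {512, 440} row, and
 * sc->as_maxmem becomes 440 << 20, i.e. 440 MB.
 */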

static void
agpattach(device_t parent, device_t self, void *aux)
{
        struct agpbus_attach_args *apa = aux;
        struct pci_attach_args *pa = &apa->apa_pci_args;
        struct agp_softc *sc = device_private(self);
        const struct agp_product *ap;
        int ret;
        u_int memsize, i;

        ap = agp_lookup(pa);
        KASSERT(ap != NULL);

        aprint_naive(": AGP controller\n");

        sc->as_dev = self;
        sc->as_dmat = pa->pa_dmat;
        sc->as_pc = pa->pa_pc;
        sc->as_tag = pa->pa_tag;
        sc->as_id = pa->pa_id;

        /*
         * Work out an upper bound for agp memory allocation. This
         * uses a heuristic table from the Linux driver.
         */
        memsize = physmem >> (20 - PAGE_SHIFT); /* memsize is in MB */
        for (i = 0; i < agp_max_size; i++) {
                if (memsize <= agp_max[i][0])
                        break;
        }
        if (i == agp_max_size)
                i = agp_max_size - 1;
        sc->as_maxmem = agp_max[i][1] << 20U;

        /*
         * The mutex is used to prevent re-entry to
         * agp_generic_bind_memory() since that function can sleep.
         */
        mutex_init(&sc->as_mtx, MUTEX_DEFAULT, IPL_NONE);

        TAILQ_INIT(&sc->as_memory);

        ret = (*ap->ap_attach)(parent, self, pa);
        if (ret == 0)
                aprint_normal(": aperture at 0x%lx, size 0x%lx\n",
                    (unsigned long)sc->as_apaddr,
                    (unsigned long)AGP_GET_APERTURE(sc));
        else
                sc->as_chipc = NULL;

        if (!pmf_device_register(self, NULL, agp_resume))
                aprint_error_dev(self, "couldn't establish power handler\n");
}

CFATTACH_DECL_NEW(agp, sizeof(struct agp_softc),
    agpmatch, agpattach, NULL, NULL);

int
agp_map_aperture(struct pci_attach_args *pa, struct agp_softc *sc, int reg)
{
        /*
         * Find the aperture. Don't map it (yet), this would
         * eat KVA.
         */
        if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
            PCI_MAPREG_TYPE_MEM, &sc->as_apaddr, &sc->as_apsize,
            &sc->as_apflags) != 0)
                return ENXIO;

        sc->as_apt = pa->pa_memt;

        return 0;
}

struct agp_gatt *
agp_alloc_gatt(struct agp_softc *sc)
{
        u_int32_t apsize = AGP_GET_APERTURE(sc);
        u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
        struct agp_gatt *gatt;
        void *virtual;
        int dummyseg;

        gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_WAITOK);
        gatt->ag_entries = entries;

        if (agp_alloc_dmamem(sc->as_dmat, entries * sizeof(u_int32_t),
            0, &gatt->ag_dmamap, &virtual, &gatt->ag_physical,
            &gatt->ag_dmaseg, 1, &dummyseg) != 0) {
                free(gatt, M_AGP);
                return NULL;
        }
        gatt->ag_virtual = (uint32_t *)virtual;

        gatt->ag_size = entries * sizeof(u_int32_t);
        memset(gatt->ag_virtual, 0, gatt->ag_size);
        agp_flush_cache();

        return gatt;
}

void
agp_free_gatt(struct agp_softc *sc, struct agp_gatt *gatt)
{
        agp_free_dmamem(sc->as_dmat, gatt->ag_size, gatt->ag_dmamap,
            (void *)gatt->ag_virtual, &gatt->ag_dmaseg, 1);
        free(gatt, M_AGP);
}

int
agp_generic_detach(struct agp_softc *sc)
{
        mutex_destroy(&sc->as_mtx);
        agp_flush_cache();
        return 0;
}

static int
agpdev_match(const struct pci_attach_args *pa)
{
        if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
            PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA)
                if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP,
                    NULL, NULL))
                        return 1;

        return 0;
}

int
agp_generic_enable(struct agp_softc *sc, u_int32_t mode)
{
        struct pci_attach_args pa;
        pcireg_t tstatus, mstatus;
        int capoff;

        if (pci_find_device(&pa, agpdev_match) == 0 ||
            pci_get_capability(pa.pa_pc, pa.pa_tag, PCI_CAP_AGP,
            &capoff, NULL) == 0) {
                aprint_error_dev(sc->as_dev, "can't find display\n");
                return ENXIO;
        }

        tstatus = pci_conf_read(sc->as_pc, sc->as_tag,
            sc->as_capoff + PCI_AGP_STATUS);
        mstatus = pci_conf_read(pa.pa_pc, pa.pa_tag,
            capoff + PCI_AGP_STATUS);

        if (AGP_MODE_GET_MODE_3(mode) &&
            AGP_MODE_GET_MODE_3(tstatus) &&
            AGP_MODE_GET_MODE_3(mstatus))
                return agp_generic_enable_v3(sc, &pa, capoff, mode);
        else
                return agp_generic_enable_v2(sc, &pa, capoff, mode);
}

static int
agp_generic_enable_v2(struct agp_softc *sc, const struct pci_attach_args *pa,
    int capoff, u_int32_t mode)
{
        pcireg_t tstatus, mstatus;
        pcireg_t command;
        int rq, sba, fw, rate;

        tstatus = pci_conf_read(sc->as_pc, sc->as_tag,
            sc->as_capoff + PCI_AGP_STATUS);
        mstatus = pci_conf_read(pa->pa_pc, pa->pa_tag,
            capoff + PCI_AGP_STATUS);

        /* Set RQ to the min of mode, tstatus and mstatus */
        rq = AGP_MODE_GET_RQ(mode);
        if (AGP_MODE_GET_RQ(tstatus) < rq)
                rq = AGP_MODE_GET_RQ(tstatus);
        if (AGP_MODE_GET_RQ(mstatus) < rq)
                rq = AGP_MODE_GET_RQ(mstatus);

        /* Set SBA if all three can deal with SBA */
        sba = (AGP_MODE_GET_SBA(tstatus)
            & AGP_MODE_GET_SBA(mstatus)
            & AGP_MODE_GET_SBA(mode));

        /* Similar for FW */
        fw = (AGP_MODE_GET_FW(tstatus)
            & AGP_MODE_GET_FW(mstatus)
            & AGP_MODE_GET_FW(mode));

        /* Figure out the max rate */
        rate = (AGP_MODE_GET_RATE(tstatus)
            & AGP_MODE_GET_RATE(mstatus)
            & AGP_MODE_GET_RATE(mode));
        if (rate & AGP_MODE_V2_RATE_4x)
                rate = AGP_MODE_V2_RATE_4x;
        else if (rate & AGP_MODE_V2_RATE_2x)
                rate = AGP_MODE_V2_RATE_2x;
        else
                rate = AGP_MODE_V2_RATE_1x;

        /* Construct the new mode word and tell the hardware */
        command = AGP_MODE_SET_RQ(0, rq);
        command = AGP_MODE_SET_SBA(command, sba);
        command = AGP_MODE_SET_FW(command, fw);
        command = AGP_MODE_SET_RATE(command, rate);
        command = AGP_MODE_SET_AGP(command, 1);
        pci_conf_write(sc->as_pc, sc->as_tag,
            sc->as_capoff + PCI_AGP_COMMAND, command);
        pci_conf_write(pa->pa_pc, pa->pa_tag, capoff + PCI_AGP_COMMAND,
            command);

        return 0;
}
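
/*
 * Editor's note (illustrative, not part of the original source): the v2
 * negotiation above intersects the capabilities of the target (host
 * bridge), the master (graphics device) and the caller-supplied mode.
 * For example, if the target advertises 4x/2x/1x, the master only
 * 2x/1x, and the caller asks for 4x, the common rate bits are 2x|1x and
 * the highest one wins, so the command word is written with
 * AGP_MODE_V2_RATE_2x; RQ is likewise clamped to the smallest of the
 * three values, and SBA and FW are enabled only if all three report
 * support for them.
 */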

static int
agp_generic_enable_v3(struct agp_softc *sc, const struct pci_attach_args *pa,
    int capoff, u_int32_t mode)
{
        pcireg_t tstatus, mstatus;
        pcireg_t command;
        int rq, sba, fw, rate, arqsz, cal;

        tstatus = pci_conf_read(sc->as_pc, sc->as_tag,
            sc->as_capoff + PCI_AGP_STATUS);
        mstatus = pci_conf_read(pa->pa_pc, pa->pa_tag,
            capoff + PCI_AGP_STATUS);

        /* Set RQ to the min of mode, tstatus and mstatus */
        rq = AGP_MODE_GET_RQ(mode);
        if (AGP_MODE_GET_RQ(tstatus) < rq)
                rq = AGP_MODE_GET_RQ(tstatus);
        if (AGP_MODE_GET_RQ(mstatus) < rq)
                rq = AGP_MODE_GET_RQ(mstatus);

        /*
         * ARQSZ - Set the value to the maximum one.
         * Don't allow the mode register to override values.
         */
        arqsz = AGP_MODE_GET_ARQSZ(mode);
        if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
                arqsz = AGP_MODE_GET_ARQSZ(tstatus);
        if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
                arqsz = AGP_MODE_GET_ARQSZ(mstatus);

        /* Calibration cycle - don't allow override by mode register */
        cal = AGP_MODE_GET_CAL(tstatus);
        if (AGP_MODE_GET_CAL(mstatus) < cal)
                cal = AGP_MODE_GET_CAL(mstatus);

        /* SBA must be supported for AGP v3. */
        sba = 1;

        /* Set FW if all three support it. */
        fw = (AGP_MODE_GET_FW(tstatus)
            & AGP_MODE_GET_FW(mstatus)
            & AGP_MODE_GET_FW(mode));

        /* Figure out the max rate */
        rate = (AGP_MODE_GET_RATE(tstatus)
            & AGP_MODE_GET_RATE(mstatus)
            & AGP_MODE_GET_RATE(mode));
        if (rate & AGP_MODE_V3_RATE_8x)
                rate = AGP_MODE_V3_RATE_8x;
        else
                rate = AGP_MODE_V3_RATE_4x;

        /* Construct the new mode word and tell the hardware */
        command = AGP_MODE_SET_RQ(0, rq);
        command = AGP_MODE_SET_ARQSZ(command, arqsz);
        command = AGP_MODE_SET_CAL(command, cal);
        command = AGP_MODE_SET_SBA(command, sba);
        command = AGP_MODE_SET_FW(command, fw);
        command = AGP_MODE_SET_RATE(command, rate);
        command = AGP_MODE_SET_AGP(command, 1);
        pci_conf_write(sc->as_pc, sc->as_tag,
            sc->as_capoff + PCI_AGP_COMMAND, command);
        pci_conf_write(pa->pa_pc, pa->pa_tag, capoff + PCI_AGP_COMMAND,
            command);

        return 0;
}

struct agp_memory *
agp_generic_alloc_memory(struct agp_softc *sc, int type, vsize_t size)
{
        struct agp_memory *mem;

        if ((size & (AGP_PAGE_SIZE - 1)) != 0)
                return 0;

        if (sc->as_allocated + size > sc->as_maxmem)
                return 0;

        if (type != 0) {
                printf("agp_generic_alloc_memory: unsupported type %d\n",
                    type);
                return 0;
        }

        mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
        if (mem == NULL)
                return NULL;

        if (bus_dmamap_create(sc->as_dmat, size, size / PAGE_SIZE + 1,
            size, 0, BUS_DMA_NOWAIT, &mem->am_dmamap) != 0) {
                free(mem, M_AGP);
                return NULL;
        }

        mem->am_id = sc->as_nextid++;
        mem->am_size = size;
        mem->am_type = 0;
        mem->am_physical = 0;
        mem->am_offset = 0;
        mem->am_is_bound = 0;
        TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
        sc->as_allocated += size;

        return mem;
}

int
agp_generic_free_memory(struct agp_softc *sc, struct agp_memory *mem)
{
        if (mem->am_is_bound)
                return EBUSY;

        sc->as_allocated -= mem->am_size;
        TAILQ_REMOVE(&sc->as_memory, mem, am_link);
        bus_dmamap_destroy(sc->as_dmat, mem->am_dmamap);
        free(mem, M_AGP);
        return 0;
}

int
agp_generic_bind_memory(struct agp_softc *sc, struct agp_memory *mem,
    off_t offset)
{

        return agp_generic_bind_memory_bounded(sc, mem, offset,
            0, AGP_GET_APERTURE(sc));
}

int
agp_generic_bind_memory_bounded(struct agp_softc *sc, struct agp_memory *mem,
    off_t offset, off_t start, off_t end)
{
        off_t i, k;
        bus_size_t done, j;
        int error;
        bus_dma_segment_t *segs, *seg;
        bus_addr_t pa;
        int contigpages, nseg;

        mutex_enter(&sc->as_mtx);

        if (mem->am_is_bound) {
                aprint_error_dev(sc->as_dev, "memory already bound\n");
                mutex_exit(&sc->as_mtx);
                return EINVAL;
        }

        if (offset < start
            || (offset & (AGP_PAGE_SIZE - 1)) != 0
            || offset > end
            || mem->am_size > (end - offset)) {
                aprint_error_dev(sc->as_dev,
                    "binding memory at bad offset %#lx\n",
                    (unsigned long) offset);
                mutex_exit(&sc->as_mtx);
                return EINVAL;
        }

        /*
         * XXXfvdl
         * The memory here needs to be directly accessible from the
         * AGP video card, so it should be allocated using bus_dma.
         * However, it need not be contiguous, since individual pages
         * are translated using the GATT.
         *
         * Using a large chunk of contiguous memory may get in the way
         * of other subsystems that may need one, so we try to be friendly
         * and ask for allocation in chunks of a minimum of 8 pages
         * of contiguous memory on average, falling back to 4, 2 and 1
         * if really needed. Larger chunks are preferred, since allocating
         * a bus_dma_segment per page would be overkill.
         */
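
        /*
         * Editor's note (illustrative, not part of the original source):
         * with a 4 KB PAGE_SIZE, binding a 1 MB region first tries
         * contigpages = 8, i.e. nseg = 1 MB / (8 * 4 KB) + 1 = 33, so
         * bus_dmamem_alloc() may return at most 33 segments and each
         * segment therefore averages at least 8 contiguous pages; on
         * failure the loop retries with averages of 4, 2 and finally
         * 1 page per chunk.
         */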

        for (contigpages = 8; contigpages > 0; contigpages >>= 1) {
                nseg = (mem->am_size / (contigpages * PAGE_SIZE)) + 1;
                segs = malloc(nseg * sizeof *segs, M_AGP, M_WAITOK);
                if (segs == NULL) {
                        mutex_exit(&sc->as_mtx);
                        return ENOMEM;
                }
                if (bus_dmamem_alloc(sc->as_dmat, mem->am_size, PAGE_SIZE, 0,
                    segs, nseg, &mem->am_nseg,
                    contigpages > 1 ?
                    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) != 0) {
                        free(segs, M_AGP);
                        continue;
                }
                if (bus_dmamem_map(sc->as_dmat, segs, mem->am_nseg,
                    mem->am_size, &mem->am_virtual, BUS_DMA_WAITOK) != 0) {
                        bus_dmamem_free(sc->as_dmat, segs, mem->am_nseg);
                        free(segs, M_AGP);
                        continue;
                }
                if (bus_dmamap_load(sc->as_dmat, mem->am_dmamap,
                    mem->am_virtual, mem->am_size, NULL, BUS_DMA_WAITOK) != 0) {
                        bus_dmamem_unmap(sc->as_dmat, mem->am_virtual,
                            mem->am_size);
                        bus_dmamem_free(sc->as_dmat, segs, mem->am_nseg);
                        free(segs, M_AGP);
                        continue;
                }
                mem->am_dmaseg = segs;
                break;
        }

        if (contigpages == 0) {
                mutex_exit(&sc->as_mtx);
                return ENOMEM;
        }

        /*
         * Bind the individual pages and flush the chipset's
         * TLB.
         */
        done = 0;
        for (i = 0; i < mem->am_dmamap->dm_nsegs; i++) {
                seg = &mem->am_dmamap->dm_segs[i];
                /*
                 * Install entries in the GATT, making sure that if
                 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
                 * aligned to PAGE_SIZE, we don't modify too many GATT
                 * entries.
                 */
                for (j = 0; j < seg->ds_len && (done + j) < mem->am_size;
                    j += AGP_PAGE_SIZE) {
                        pa = seg->ds_addr + j;
                        AGP_DPF(("binding offset %#lx to pa %#lx\n",
                            (unsigned long)(offset + done + j),
                            (unsigned long)pa));
                        error = AGP_BIND_PAGE(sc, offset + done + j, pa);
                        if (error) {
                                /*
                                 * Bail out. Reverse all the mappings
                                 * and unwire the pages.
                                 */
                                for (k = 0; k < done + j; k += AGP_PAGE_SIZE)
                                        AGP_UNBIND_PAGE(sc, offset + k);

                                bus_dmamap_unload(sc->as_dmat, mem->am_dmamap);
                                bus_dmamem_unmap(sc->as_dmat, mem->am_virtual,
                                    mem->am_size);
                                bus_dmamem_free(sc->as_dmat, mem->am_dmaseg,
                                    mem->am_nseg);
                                free(mem->am_dmaseg, M_AGP);
                                mutex_exit(&sc->as_mtx);
                                return error;
                        }
                }
                done += seg->ds_len;
        }

        /*
         * Flush the CPU cache since we are providing a new mapping
         * for these pages.
         */
        agp_flush_cache();

        /*
         * Make sure the chipset gets the new mappings.
         */
        AGP_FLUSH_TLB(sc);

        mem->am_offset = offset;
        mem->am_is_bound = 1;

        mutex_exit(&sc->as_mtx);

        return 0;
}

int
agp_generic_unbind_memory(struct agp_softc *sc, struct agp_memory *mem)
{
        int i;

        mutex_enter(&sc->as_mtx);

        if (!mem->am_is_bound) {
                aprint_error_dev(sc->as_dev, "memory is not bound\n");
                mutex_exit(&sc->as_mtx);
                return EINVAL;
        }

        /*
         * Unbind the individual pages and flush the chipset's
         * TLB. Unwire the pages so they can be swapped.
         */
        for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
                AGP_UNBIND_PAGE(sc, mem->am_offset + i);

        agp_flush_cache();
        AGP_FLUSH_TLB(sc);

        bus_dmamap_unload(sc->as_dmat, mem->am_dmamap);
        bus_dmamem_unmap(sc->as_dmat, mem->am_virtual, mem->am_size);
        bus_dmamem_free(sc->as_dmat, mem->am_dmaseg, mem->am_nseg);

        free(mem->am_dmaseg, M_AGP);

        mem->am_offset = 0;
        mem->am_is_bound = 0;

        mutex_exit(&sc->as_mtx);

        return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(struct agp_softc *sc, enum agp_acquire_state state)
{
        if (sc->as_state != AGP_ACQUIRE_FREE)
                return EBUSY;
        sc->as_state = state;

        return 0;
}

static int
agp_release_helper(struct agp_softc *sc, enum agp_acquire_state state)
{

        if (sc->as_state == AGP_ACQUIRE_FREE)
                return 0;

        if (sc->as_state != state)
                return EBUSY;

        sc->as_state = AGP_ACQUIRE_FREE;
        return 0;
}

static struct agp_memory *
agp_find_memory(struct agp_softc *sc, int id)
{
        struct agp_memory *mem;

        AGP_DPF(("searching for memory block %d\n", id));
        TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
                AGP_DPF(("considering memory block %d\n", mem->am_id));
                if (mem->am_id == id)
                        return mem;
        }
        return 0;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(struct agp_softc *sc, agp_info *info)
{
        memset(info, 0, sizeof *info);
        info->bridge_id = sc->as_id;
        if (sc->as_capoff != 0)
                info->agp_mode = pci_conf_read(sc->as_pc, sc->as_tag,
                    sc->as_capoff + PCI_AGP_STATUS);
        else
                info->agp_mode = 0; /* i810 doesn't have real AGP */
        info->aper_base = sc->as_apaddr;
        info->aper_size = AGP_GET_APERTURE(sc) >> 20;
        info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
        info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

        return 0;
}

static int
agp_setup_user(struct agp_softc *sc, agp_setup *setup)
{
        return AGP_ENABLE(sc, setup->agp_mode);
}

static int
agp_allocate_user(struct agp_softc *sc, agp_allocate *alloc)
{
        struct agp_memory *mem;

        mem = AGP_ALLOC_MEMORY(sc,
            alloc->type,
            alloc->pg_count << AGP_PAGE_SHIFT);
        if (mem) {
                alloc->key = mem->am_id;
                alloc->physical = mem->am_physical;
                return 0;
        } else {
                return ENOMEM;
        }
}

static int
agp_deallocate_user(struct agp_softc *sc, int id)
{
        struct agp_memory *mem = agp_find_memory(sc, id);

        if (mem) {
                AGP_FREE_MEMORY(sc, mem);
                return 0;
        } else {
                return ENOENT;
        }
}

static int
agp_bind_user(struct agp_softc *sc, agp_bind *bind)
{
        struct agp_memory *mem = agp_find_memory(sc, bind->key);

        if (!mem)
                return ENOENT;

        return AGP_BIND_MEMORY(sc, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(struct agp_softc *sc, agp_unbind *unbind)
{
        struct agp_memory *mem = agp_find_memory(sc, unbind->key);

        if (!mem)
                return ENOENT;

        return AGP_UNBIND_MEMORY(sc, mem);
}
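
/*
 * Editor's sketch (not part of the original source): a userland client
 * would typically drive the ioctls above roughly as follows, assuming
 * an agp device node such as /dev/agp0 and the agp_* structures and
 * AGPIOC_* requests from <sys/agpio.h>:
 *
 *	int fd = open("/dev/agp0", O_RDWR);
 *	agp_info info;
 *	ioctl(fd, AGPIOC_INFO, &info);
 *	ioctl(fd, AGPIOC_ACQUIRE);
 *	agp_setup setup;
 *	setup.agp_mode = info.agp_mode;
 *	ioctl(fd, AGPIOC_SETUP, &setup);
 *	agp_allocate alloc;
 *	alloc.type = 0;
 *	alloc.pg_count = 16;
 *	ioctl(fd, AGPIOC_ALLOCATE, &alloc);
 *	agp_bind bind;
 *	bind.key = alloc.key;
 *	bind.pg_start = 0;
 *	ioctl(fd, AGPIOC_BIND, &bind);
 *	...
 *	agp_unbind unbind;
 *	unbind.key = alloc.key;
 *	ioctl(fd, AGPIOC_UNBIND, &unbind);
 *	ioctl(fd, AGPIOC_DEALLOCATE, &alloc.key);
 *	ioctl(fd, AGPIOC_RELEASE);
 *
 * Only the structure members referenced by the code above are used
 * here; any additional members of these structures should be
 * zero-initialized.  Error checking is omitted for brevity.
 */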

static int
agpopen(dev_t dev, int oflags, int devtype, struct lwp *l)
{
        struct agp_softc *sc = device_lookup_private(&agp_cd, AGPUNIT(dev));

        if (sc == NULL)
                return ENXIO;

        if (sc->as_chipc == NULL)
                return ENXIO;

        if (!sc->as_isopen)
                sc->as_isopen = 1;
        else
                return EBUSY;

        return 0;
}

static int
agpclose(dev_t dev, int fflag, int devtype, struct lwp *l)
{
        struct agp_softc *sc = device_lookup_private(&agp_cd, AGPUNIT(dev));
        struct agp_memory *mem;

        if (sc == NULL)
                return ENODEV;

        /*
         * Clear the GATT and force release on last close
         */
        if (sc->as_state == AGP_ACQUIRE_USER) {
                while ((mem = TAILQ_FIRST(&sc->as_memory))) {
                        if (mem->am_is_bound) {
                                printf("agpclose: mem %d is bound\n",
                                    mem->am_id);
                                AGP_UNBIND_MEMORY(sc, mem);
                        }
                        /*
                         * XXX it is not documented, but if the protocol allows
                         * allocate->acquire->bind, it would be possible that
                         * memory ranges are allocated by the kernel here,
                         * which we shouldn't free. We'd have to keep track of
                         * the memory range's owner.
                         * The kernel API is unused as yet, so we get away
                         * with freeing all.
                         */
                        AGP_FREE_MEMORY(sc, mem);
                }
                agp_release_helper(sc, AGP_ACQUIRE_USER);
        }
        sc->as_isopen = 0;

        return 0;
}

static int
agpioctl(dev_t dev, u_long cmd, void *data, int fflag, struct lwp *l)
{
        struct agp_softc *sc = device_lookup_private(&agp_cd, AGPUNIT(dev));

        if (sc == NULL)
                return ENODEV;

        if ((fflag & FWRITE) == 0 && cmd != AGPIOC_INFO)
                return EPERM;

        switch (cmd) {
        case AGPIOC_INFO:
                return agp_info_user(sc, (agp_info *) data);

        case AGPIOC_ACQUIRE:
                return agp_acquire_helper(sc, AGP_ACQUIRE_USER);

        case AGPIOC_RELEASE:
                return agp_release_helper(sc, AGP_ACQUIRE_USER);

        case AGPIOC_SETUP:
                return agp_setup_user(sc, (agp_setup *)data);

#ifdef __x86_64__
{
        /*
         * Handle paddr_t change from 32 bit for non PAE kernels
         * to 64 bit.
         */
#define AGPIOC_OALLOCATE  _IOWR(AGPIOC_BASE, 6, agp_oallocate)

        typedef struct _agp_oallocate {
                int key;                /* tag of allocation            */
                size_t pg_count;        /* number of pages              */
                uint32_t type;          /* 0 == normal, other devspec   */
                u_long physical;        /* device specific (some devices
                                         * need a phys address of the
                                         * actual page behind the gatt
                                         * table) */
        } agp_oallocate;

        case AGPIOC_OALLOCATE: {
                int ret;
                agp_allocate aga;
                agp_oallocate *oaga = data;

                aga.type = oaga->type;
                aga.pg_count = oaga->pg_count;

                if ((ret = agp_allocate_user(sc, &aga)) == 0) {
                        oaga->key = aga.key;
                        oaga->physical = (u_long)aga.physical;
                }

                return ret;
        }
}
#endif
        case AGPIOC_ALLOCATE:
                return agp_allocate_user(sc, (agp_allocate *)data);

        case AGPIOC_DEALLOCATE:
                return agp_deallocate_user(sc, *(int *) data);

        case AGPIOC_BIND:
                return agp_bind_user(sc, (agp_bind *)data);

        case AGPIOC_UNBIND:
                return agp_unbind_user(sc, (agp_unbind *)data);

        }

        return EINVAL;
}

static paddr_t
agpmmap(dev_t dev, off_t offset, int prot)
{
        struct agp_softc *sc = device_lookup_private(&agp_cd, AGPUNIT(dev));

        if (sc == NULL)
                return ENODEV;

        if (offset > AGP_GET_APERTURE(sc))
                return -1;

        return (bus_space_mmap(sc->as_apt, sc->as_apaddr, offset, prot,
            BUS_SPACE_MAP_LINEAR));
}

const struct cdevsw agp_cdevsw = {
        .d_open = agpopen,
        .d_close = agpclose,
        .d_read = noread,
        .d_write = nowrite,
        .d_ioctl = agpioctl,
        .d_stop = nostop,
        .d_tty = notty,
        .d_poll = nopoll,
        .d_mmap = agpmmap,
        .d_kqfilter = nokqfilter,
        .d_discard = nodiscard,
        .d_flag = D_OTHER
};

/* Implementation of the kernel api */

void *
agp_find_device(int unit)
{
        return device_lookup_private(&agp_cd, unit);
}

enum agp_acquire_state
agp_state(void *devcookie)
{
        struct agp_softc *sc = devcookie;

        return sc->as_state;
}

void
agp_get_info(void *devcookie, struct agp_info *info)
{
        struct agp_softc *sc = devcookie;

        info->ai_mode = pci_conf_read(sc->as_pc, sc->as_tag,
            sc->as_capoff + PCI_AGP_STATUS);
        info->ai_aperture_base = sc->as_apaddr;
        info->ai_aperture_size = sc->as_apsize;  /* XXXfvdl inconsistent */
        info->ai_memory_allowed = sc->as_maxmem;
        info->ai_memory_used = sc->as_allocated;
        info->ai_devid = sc->as_id;
}

int
agp_acquire(void *dev)
{
        return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(void *dev)
{
        return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(void *dev, u_int32_t mode)
{
        struct agp_softc *sc = dev;

        return AGP_ENABLE(sc, mode);
}

void *
agp_alloc_memory(void *dev, int type, vsize_t bytes)
{
        struct agp_softc *sc = dev;

        return (void *)AGP_ALLOC_MEMORY(sc, type, bytes);
}

void
agp_free_memory(void *dev, void *handle)
{
        struct agp_softc *sc = dev;
        struct agp_memory *mem = handle;

        AGP_FREE_MEMORY(sc, mem);
}

int
agp_bind_memory(void *dev, void *handle, off_t offset)
{
        struct agp_softc *sc = dev;
        struct agp_memory *mem = handle;

        return AGP_BIND_MEMORY(sc, mem, offset);
}

int
agp_unbind_memory(void *dev, void *handle)
{
        struct agp_softc *sc = dev;
        struct agp_memory *mem = handle;

        return AGP_UNBIND_MEMORY(sc, mem);
}

void
agp_memory_info(void *dev, void *handle, struct agp_memory_info *mi)
{
        struct agp_memory *mem = handle;

        mi->ami_size = mem->am_size;
        mi->ami_physical = mem->am_physical;
        mi->ami_offset = mem->am_offset;
        mi->ami_is_bound = mem->am_is_bound;
}
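
/*
 * Editor's sketch (not part of the original source): an in-kernel
 * consumer such as a graphics driver would use the wrappers above
 * roughly like this:
 *
 *	void *agp = agp_find_device(0);
 *	if (agp != NULL && agp_acquire(agp) == 0) {
 *		void *handle = agp_alloc_memory(agp, 0, 16 * AGP_PAGE_SIZE);
 *		if (handle != NULL &&
 *		    agp_bind_memory(agp, handle, 0) == 0) {
 *			struct agp_memory_info mi;
 *			agp_memory_info(agp, handle, &mi);
 *			... use mi.ami_offset within the aperture ...
 *		}
 *	}
 *
 * The eventual agp_unbind_memory(), agp_free_memory() and agp_release()
 * calls, plus error handling, are omitted for brevity.
 */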

int
agp_alloc_dmamem(bus_dma_tag_t tag, size_t size, int flags,
    bus_dmamap_t *mapp, void **vaddr, bus_addr_t *baddr,
    bus_dma_segment_t *seg, int nseg, int *rseg)

{
        int error, level = 0;

        if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0,
            seg, nseg, rseg, BUS_DMA_NOWAIT)) != 0)
                goto out;
        level++;

        if ((error = bus_dmamem_map(tag, seg, *rseg, size, vaddr,
            BUS_DMA_NOWAIT | flags)) != 0)
                goto out;
        level++;

        if ((error = bus_dmamap_create(tag, size, *rseg, size, 0,
            BUS_DMA_NOWAIT, mapp)) != 0)
                goto out;
        level++;

        if ((error = bus_dmamap_load(tag, *mapp, *vaddr, size, NULL,
            BUS_DMA_NOWAIT)) != 0)
                goto out;

        *baddr = (*mapp)->dm_segs[0].ds_addr;

        return 0;
out:
        switch (level) {
        case 3:
                bus_dmamap_destroy(tag, *mapp);
                /* FALLTHROUGH */
        case 2:
                bus_dmamem_unmap(tag, *vaddr, size);
                /* FALLTHROUGH */
        case 1:
                bus_dmamem_free(tag, seg, *rseg);
                break;
        default:
                break;
        }

        return error;
}

void
agp_free_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t map,
    void *vaddr, bus_dma_segment_t *seg, int nseg)
{
        bus_dmamap_unload(tag, map);
        bus_dmamap_destroy(tag, map);
        bus_dmamem_unmap(tag, vaddr, size);
        bus_dmamem_free(tag, seg, nseg);
}

static bool
agp_resume(device_t dv, const pmf_qual_t *qual)
{
        agp_flush_cache();

        return true;
}