1 1.7 riastrad /* $NetBSD: vmwgfx_drv.c,v 1.7 2022/10/25 23:35:43 riastradh Exp $ */ 2 1.2 riastrad 3 1.4 riastrad // SPDX-License-Identifier: GPL-2.0 OR MIT 4 1.1 riastrad /************************************************************************** 5 1.1 riastrad * 6 1.4 riastrad * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA 7 1.1 riastrad * 8 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 9 1.1 riastrad * copy of this software and associated documentation files (the 10 1.1 riastrad * "Software"), to deal in the Software without restriction, including 11 1.1 riastrad * without limitation the rights to use, copy, modify, merge, publish, 12 1.1 riastrad * distribute, sub license, and/or sell copies of the Software, and to 13 1.1 riastrad * permit persons to whom the Software is furnished to do so, subject to 14 1.1 riastrad * the following conditions: 15 1.1 riastrad * 16 1.1 riastrad * The above copyright notice and this permission notice (including the 17 1.1 riastrad * next paragraph) shall be included in all copies or substantial portions 18 1.1 riastrad * of the Software. 19 1.1 riastrad * 20 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 23 1.1 riastrad * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 24 1.1 riastrad * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 25 1.1 riastrad * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 26 1.1 riastrad * USE OR OTHER DEALINGS IN THE SOFTWARE. 
27 1.1 riastrad * 28 1.1 riastrad **************************************************************************/ 29 1.4 riastrad 30 1.2 riastrad #include <sys/cdefs.h> 31 1.7 riastrad __KERNEL_RCSID(0, "$NetBSD: vmwgfx_drv.c,v 1.7 2022/10/25 23:35:43 riastradh Exp $"); 32 1.2 riastrad 33 1.4 riastrad #include <linux/console.h> 34 1.4 riastrad #include <linux/dma-mapping.h> 35 1.1 riastrad #include <linux/module.h> 36 1.4 riastrad #include <linux/pci.h> 37 1.1 riastrad 38 1.4 riastrad #include <drm/drm_drv.h> 39 1.4 riastrad #include <drm/drm_ioctl.h> 40 1.4 riastrad #include <drm/drm_sysfs.h> 41 1.1 riastrad #include <drm/ttm/ttm_bo_driver.h> 42 1.1 riastrad #include <drm/ttm/ttm_module.h> 43 1.4 riastrad #include <drm/ttm/ttm_placement.h> 44 1.4 riastrad 45 1.4 riastrad #include "ttm_object.h" 46 1.4 riastrad #include "vmwgfx_binding.h" 47 1.4 riastrad #include "vmwgfx_drv.h" 48 1.1 riastrad 49 1.7 riastrad #include <linux/nbsd-namespace.h> 50 1.7 riastrad 51 1.1 riastrad #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 52 1.1 riastrad #define VMWGFX_CHIP_SVGAII 0 53 1.1 riastrad #define VMW_FB_RESERVATION 0 54 1.1 riastrad 55 1.1 riastrad #define VMW_MIN_INITIAL_WIDTH 800 56 1.1 riastrad #define VMW_MIN_INITIAL_HEIGHT 600 57 1.1 riastrad 58 1.4 riastrad #ifndef VMWGFX_GIT_VERSION 59 1.4 riastrad #define VMWGFX_GIT_VERSION "Unknown" 60 1.4 riastrad #endif 61 1.4 riastrad 62 1.4 riastrad #define VMWGFX_REPO "In Tree" 63 1.4 riastrad 64 1.4 riastrad #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) 65 1.4 riastrad 66 1.1 riastrad 67 1.1 riastrad /** 68 1.1 riastrad * Fully encoded drm commands. 
Might move to vmw_drm.h 69 1.1 riastrad */ 70 1.1 riastrad 71 1.1 riastrad #define DRM_IOCTL_VMW_GET_PARAM \ 72 1.1 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ 73 1.1 riastrad struct drm_vmw_getparam_arg) 74 1.1 riastrad #define DRM_IOCTL_VMW_ALLOC_DMABUF \ 75 1.1 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ 76 1.1 riastrad union drm_vmw_alloc_dmabuf_arg) 77 1.1 riastrad #define DRM_IOCTL_VMW_UNREF_DMABUF \ 78 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ 79 1.1 riastrad struct drm_vmw_unref_dmabuf_arg) 80 1.1 riastrad #define DRM_IOCTL_VMW_CURSOR_BYPASS \ 81 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ 82 1.1 riastrad struct drm_vmw_cursor_bypass_arg) 83 1.1 riastrad 84 1.1 riastrad #define DRM_IOCTL_VMW_CONTROL_STREAM \ 85 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ 86 1.1 riastrad struct drm_vmw_control_stream_arg) 87 1.1 riastrad #define DRM_IOCTL_VMW_CLAIM_STREAM \ 88 1.1 riastrad DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ 89 1.1 riastrad struct drm_vmw_stream_arg) 90 1.1 riastrad #define DRM_IOCTL_VMW_UNREF_STREAM \ 91 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ 92 1.1 riastrad struct drm_vmw_stream_arg) 93 1.1 riastrad 94 1.1 riastrad #define DRM_IOCTL_VMW_CREATE_CONTEXT \ 95 1.1 riastrad DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ 96 1.1 riastrad struct drm_vmw_context_arg) 97 1.1 riastrad #define DRM_IOCTL_VMW_UNREF_CONTEXT \ 98 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ 99 1.1 riastrad struct drm_vmw_context_arg) 100 1.1 riastrad #define DRM_IOCTL_VMW_CREATE_SURFACE \ 101 1.1 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ 102 1.1 riastrad union drm_vmw_surface_create_arg) 103 1.1 riastrad #define DRM_IOCTL_VMW_UNREF_SURFACE \ 104 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ 105 1.1 riastrad struct drm_vmw_surface_arg) 106 1.1 riastrad #define DRM_IOCTL_VMW_REF_SURFACE \ 107 
1.1 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ 108 1.1 riastrad union drm_vmw_surface_reference_arg) 109 1.1 riastrad #define DRM_IOCTL_VMW_EXECBUF \ 110 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ 111 1.1 riastrad struct drm_vmw_execbuf_arg) 112 1.1 riastrad #define DRM_IOCTL_VMW_GET_3D_CAP \ 113 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ 114 1.1 riastrad struct drm_vmw_get_3d_cap_arg) 115 1.1 riastrad #define DRM_IOCTL_VMW_FENCE_WAIT \ 116 1.1 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ 117 1.1 riastrad struct drm_vmw_fence_wait_arg) 118 1.1 riastrad #define DRM_IOCTL_VMW_FENCE_SIGNALED \ 119 1.1 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ 120 1.1 riastrad struct drm_vmw_fence_signaled_arg) 121 1.1 riastrad #define DRM_IOCTL_VMW_FENCE_UNREF \ 122 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ 123 1.1 riastrad struct drm_vmw_fence_arg) 124 1.1 riastrad #define DRM_IOCTL_VMW_FENCE_EVENT \ 125 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ 126 1.1 riastrad struct drm_vmw_fence_event_arg) 127 1.1 riastrad #define DRM_IOCTL_VMW_PRESENT \ 128 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ 129 1.1 riastrad struct drm_vmw_present_arg) 130 1.1 riastrad #define DRM_IOCTL_VMW_PRESENT_READBACK \ 131 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ 132 1.1 riastrad struct drm_vmw_present_readback_arg) 133 1.1 riastrad #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ 134 1.1 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ 135 1.1 riastrad struct drm_vmw_update_layout_arg) 136 1.2 riastrad #define DRM_IOCTL_VMW_CREATE_SHADER \ 137 1.2 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ 138 1.2 riastrad struct drm_vmw_shader_create_arg) 139 1.2 riastrad #define DRM_IOCTL_VMW_UNREF_SHADER \ 140 1.2 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ 141 1.2 riastrad struct drm_vmw_shader_arg) 142 1.2 riastrad 
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ 143 1.2 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ 144 1.2 riastrad union drm_vmw_gb_surface_create_arg) 145 1.2 riastrad #define DRM_IOCTL_VMW_GB_SURFACE_REF \ 146 1.2 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ 147 1.2 riastrad union drm_vmw_gb_surface_reference_arg) 148 1.2 riastrad #define DRM_IOCTL_VMW_SYNCCPU \ 149 1.2 riastrad DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ 150 1.2 riastrad struct drm_vmw_synccpu_arg) 151 1.2 riastrad #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \ 152 1.2 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \ 153 1.2 riastrad struct drm_vmw_context_arg) 154 1.4 riastrad #define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \ 155 1.4 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \ 156 1.4 riastrad union drm_vmw_gb_surface_create_ext_arg) 157 1.4 riastrad #define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \ 158 1.4 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \ 159 1.4 riastrad union drm_vmw_gb_surface_reference_ext_arg) 160 1.4 riastrad #define DRM_IOCTL_VMW_MSG \ 161 1.4 riastrad DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \ 162 1.4 riastrad struct drm_vmw_msg_arg) 163 1.1 riastrad 164 1.1 riastrad /** 165 1.1 riastrad * The core DRM version of this macro doesn't account for 166 1.1 riastrad * DRM_COMMAND_BASE. 167 1.1 riastrad */ 168 1.1 riastrad 169 1.1 riastrad #define VMW_IOCTL_DEF(ioctl, func, flags) \ 170 1.2 riastrad [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func} 171 1.1 riastrad 172 1.1 riastrad /** 173 1.1 riastrad * Ioctl definitions. 
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_RENDER_ALLOW),

	/* these allow direct access to the framebuffers mark as master only */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
		      vmw_gb_surface_define_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
		      vmw_gb_surface_reference_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_MSG,
		      vmw_msg_ioctl,
		      DRM_RENDER_ALLOW),
};

/* Single supported device: VMware SVGA II (vendor 0x15ad, device 0x0405). */
static const struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

/* Module parameters; writable at runtime (mode 0600) via module_param_named. */
static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

#ifndef __NetBSD__
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
#endif
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);


/* Log the SVGA_CAP2_* capability bits, one line per set bit. */
static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO("  Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO("  IntraSurface copy.\n");
}

/* Log the SVGA_CAP_* capability bits, one line per set bit. */
static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO("  HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	/* vmw_bo_bo_free is the destructor invoked when the bo is released. */
	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_ne_placement, false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	/* Tryreserve must succeed: the bo was just created (see above). */
	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	/* Map the first (and only) page and seed a pending query result. */
	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	/* Guest-backed objects need the object tables ("Memory OBjects"). */
	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	/*
	 * Enable large command submission. On pool-size failure the command
	 * buffer manager is destroyed and cleared so callers fall back to
	 * operating without it.
	 */
	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

/*
 * vmw_request_device - Bring up fifo, fences, command buffers, otables and
 * the dummy query bo. On error, tears down whatever was set up, in reverse
 * order.
 */
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	/* Command buffer manager is optional; without it DX is disabled. */
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	/* Enforce the minimum supported mode first ... */
	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	/* ... then fall back to the minimum if the host values are bogus. */
	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This functions tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	/* Module parameters take precedence; default is map_populate. */
	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	/* Coherent pages require the TTM DMA page pool to be compiled in. */
	if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
		return -EINVAL;

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev: Pointer to struct drm-device
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
609 1.2 riastrad */ 610 1.2 riastrad static int vmw_dma_masks(struct vmw_private *dev_priv) 611 1.2 riastrad { 612 1.2 riastrad struct drm_device *dev = dev_priv->dev; 613 1.4 riastrad int ret = 0; 614 1.2 riastrad 615 1.7 riastrad #ifdef __NetBSD__ 616 1.7 riastrad ret = drm_limit_dma_space(dev, 0, __BITS(63,0)); 617 1.7 riastrad #else 618 1.4 riastrad ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); 619 1.7 riastrad #endif 620 1.4 riastrad if (dev_priv->map_mode != vmw_dma_phys && 621 1.2 riastrad (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { 622 1.2 riastrad DRM_INFO("Restricting DMA addresses to 44 bits.\n"); 623 1.7 riastrad #ifdef __NetBSD__ 624 1.7 riastrad return drm_limit_dma_space(dev, 0, __BITS(43,0)); 625 1.7 riastrad #else 626 1.4 riastrad return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); 627 1.7 riastrad #endif 628 1.2 riastrad } 629 1.4 riastrad 630 1.4 riastrad return ret; 631 1.2 riastrad } 632 1.2 riastrad 633 1.1 riastrad static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 634 1.1 riastrad { 635 1.1 riastrad struct vmw_private *dev_priv; 636 1.1 riastrad int ret; 637 1.1 riastrad uint32_t svga_id; 638 1.1 riastrad enum vmw_res_type i; 639 1.2 riastrad bool refuse_dma = false; 640 1.4 riastrad char host_log[100] = {0}; 641 1.1 riastrad 642 1.1 riastrad dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 643 1.4 riastrad if (unlikely(!dev_priv)) { 644 1.1 riastrad DRM_ERROR("Failed allocating a device private struct.\n"); 645 1.1 riastrad return -ENOMEM; 646 1.1 riastrad } 647 1.1 riastrad 648 1.1 riastrad pci_set_master(dev->pdev); 649 1.1 riastrad 650 1.1 riastrad dev_priv->dev = dev; 651 1.1 riastrad dev_priv->vmw_chipset = chipset; 652 1.1 riastrad dev_priv->last_read_seqno = (uint32_t) -100; 653 1.1 riastrad mutex_init(&dev_priv->cmdbuf_mutex); 654 1.1 riastrad mutex_init(&dev_priv->release_mutex); 655 1.2 riastrad mutex_init(&dev_priv->binding_mutex); 656 1.4 riastrad 
mutex_init(&dev_priv->global_kms_state_mutex); 657 1.2 riastrad ttm_lock_init(&dev_priv->reservation_sem); 658 1.4 riastrad spin_lock_init(&dev_priv->resource_lock); 659 1.2 riastrad spin_lock_init(&dev_priv->hw_lock); 660 1.2 riastrad spin_lock_init(&dev_priv->waiter_lock); 661 1.2 riastrad spin_lock_init(&dev_priv->cap_lock); 662 1.2 riastrad spin_lock_init(&dev_priv->svga_lock); 663 1.4 riastrad spin_lock_init(&dev_priv->cursor_lock); 664 1.1 riastrad 665 1.1 riastrad for (i = vmw_res_context; i < vmw_res_max; ++i) { 666 1.1 riastrad idr_init(&dev_priv->res_idr[i]); 667 1.1 riastrad INIT_LIST_HEAD(&dev_priv->res_lru[i]); 668 1.1 riastrad } 669 1.1 riastrad 670 1.6 riastrad DRM_INIT_WAITQUEUE(&dev_priv->fence_queue, "vmwgfence"); 671 1.6 riastrad spin_lock_init(&dev_priv->fence_lock); 672 1.6 riastrad DRM_INIT_WAITQUEUE(&dev_priv->fifo_queue, "vmwgfifo"); 673 1.6 riastrad spin_lock_init(&dev_priv->fifo_lock); 674 1.1 riastrad dev_priv->fence_queue_waiters = 0; 675 1.2 riastrad dev_priv->fifo_queue_waiters = 0; 676 1.1 riastrad 677 1.1 riastrad dev_priv->used_memory_size = 0; 678 1.1 riastrad 679 1.1 riastrad dev_priv->io_start = pci_resource_start(dev->pdev, 0); 680 1.1 riastrad dev_priv->vram_start = pci_resource_start(dev->pdev, 1); 681 1.1 riastrad dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); 682 1.1 riastrad 683 1.2 riastrad dev_priv->assume_16bpp = !!vmw_assume_16bpp; 684 1.2 riastrad 685 1.1 riastrad dev_priv->enable_fb = enable_fbdev; 686 1.1 riastrad 687 1.1 riastrad vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 688 1.1 riastrad svga_id = vmw_read(dev_priv, SVGA_REG_ID); 689 1.1 riastrad if (svga_id != SVGA_ID_2) { 690 1.1 riastrad ret = -ENOSYS; 691 1.1 riastrad DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); 692 1.1 riastrad goto out_err0; 693 1.1 riastrad } 694 1.1 riastrad 695 1.1 riastrad dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); 696 1.4 riastrad 697 1.4 riastrad if (dev_priv->capabilities & 
SVGA_CAP_CAP2_REGISTER) { 698 1.4 riastrad dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2); 699 1.4 riastrad } 700 1.4 riastrad 701 1.4 riastrad 702 1.2 riastrad ret = vmw_dma_select_mode(dev_priv); 703 1.2 riastrad if (unlikely(ret != 0)) { 704 1.2 riastrad DRM_INFO("Restricting capabilities due to IOMMU setup.\n"); 705 1.2 riastrad refuse_dma = true; 706 1.2 riastrad } 707 1.1 riastrad 708 1.1 riastrad dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); 709 1.1 riastrad dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); 710 1.1 riastrad dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); 711 1.1 riastrad dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); 712 1.1 riastrad 713 1.1 riastrad vmw_get_initial_size(dev_priv); 714 1.1 riastrad 715 1.2 riastrad if (dev_priv->capabilities & SVGA_CAP_GMR2) { 716 1.1 riastrad dev_priv->max_gmr_ids = 717 1.1 riastrad vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); 718 1.1 riastrad dev_priv->max_gmr_pages = 719 1.1 riastrad vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); 720 1.1 riastrad dev_priv->memory_size = 721 1.1 riastrad vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); 722 1.1 riastrad dev_priv->memory_size -= dev_priv->vram_size; 723 1.1 riastrad } else { 724 1.1 riastrad /* 725 1.1 riastrad * An arbitrary limit of 512MiB on surface 726 1.1 riastrad * memory. But all HWV8 hardware supports GMR2. 
727 1.1 riastrad */ 728 1.1 riastrad dev_priv->memory_size = 512*1024*1024; 729 1.1 riastrad } 730 1.2 riastrad dev_priv->max_mob_pages = 0; 731 1.2 riastrad dev_priv->max_mob_size = 0; 732 1.2 riastrad if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 733 1.2 riastrad uint64_t mem_size = 734 1.2 riastrad vmw_read(dev_priv, 735 1.2 riastrad SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); 736 1.1 riastrad 737 1.2 riastrad /* 738 1.2 riastrad * Workaround for low memory 2D VMs to compensate for the 739 1.2 riastrad * allocation taken by fbdev 740 1.2 riastrad */ 741 1.2 riastrad if (!(dev_priv->capabilities & SVGA_CAP_3D)) 742 1.2 riastrad mem_size *= 3; 743 1.2 riastrad 744 1.2 riastrad dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; 745 1.2 riastrad dev_priv->prim_bb_mem = 746 1.2 riastrad vmw_read(dev_priv, 747 1.2 riastrad SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); 748 1.2 riastrad dev_priv->max_mob_size = 749 1.2 riastrad vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); 750 1.2 riastrad dev_priv->stdu_max_width = 751 1.2 riastrad vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH); 752 1.2 riastrad dev_priv->stdu_max_height = 753 1.2 riastrad vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT); 754 1.2 riastrad 755 1.2 riastrad vmw_write(dev_priv, SVGA_REG_DEV_CAP, 756 1.2 riastrad SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH); 757 1.2 riastrad dev_priv->texture_max_width = vmw_read(dev_priv, 758 1.2 riastrad SVGA_REG_DEV_CAP); 759 1.2 riastrad vmw_write(dev_priv, SVGA_REG_DEV_CAP, 760 1.2 riastrad SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT); 761 1.2 riastrad dev_priv->texture_max_height = vmw_read(dev_priv, 762 1.2 riastrad SVGA_REG_DEV_CAP); 763 1.2 riastrad } else { 764 1.2 riastrad dev_priv->texture_max_width = 8192; 765 1.2 riastrad dev_priv->texture_max_height = 8192; 766 1.2 riastrad dev_priv->prim_bb_mem = dev_priv->vram_size; 767 1.2 riastrad } 768 1.1 riastrad 769 1.1 riastrad vmw_print_capabilities(dev_priv->capabilities); 770 1.4 riastrad if (dev_priv->capabilities & 
SVGA_CAP_CAP2_REGISTER) 771 1.4 riastrad vmw_print_capabilities2(dev_priv->capabilities2); 772 1.1 riastrad 773 1.2 riastrad ret = vmw_dma_masks(dev_priv); 774 1.2 riastrad if (unlikely(ret != 0)) 775 1.2 riastrad goto out_err0; 776 1.2 riastrad 777 1.7 riastrad #ifndef __NetBSD__ /* XXX set bus_dma maxsegsz? */ 778 1.4 riastrad dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK, 779 1.4 riastrad SCATTERLIST_MAX_SEGMENT)); 780 1.7 riastrad #endif 781 1.4 riastrad 782 1.2 riastrad if (dev_priv->capabilities & SVGA_CAP_GMR2) { 783 1.1 riastrad DRM_INFO("Max GMR ids is %u\n", 784 1.1 riastrad (unsigned)dev_priv->max_gmr_ids); 785 1.1 riastrad DRM_INFO("Max number of GMR pages is %u\n", 786 1.1 riastrad (unsigned)dev_priv->max_gmr_pages); 787 1.1 riastrad DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", 788 1.1 riastrad (unsigned)dev_priv->memory_size / 1024); 789 1.1 riastrad } 790 1.2 riastrad DRM_INFO("Maximum display memory size is %u kiB\n", 791 1.2 riastrad dev_priv->prim_bb_mem / 1024); 792 1.1 riastrad DRM_INFO("VRAM at 0x%08x size is %u kiB\n", 793 1.1 riastrad dev_priv->vram_start, dev_priv->vram_size / 1024); 794 1.1 riastrad DRM_INFO("MMIO at 0x%08x size is %u kiB\n", 795 1.1 riastrad dev_priv->mmio_start, dev_priv->mmio_size / 1024); 796 1.1 riastrad 797 1.7 riastrad #ifdef __NetBSD__ 798 1.7 riastrad dev_priv->mmio_bst = dev->bst; 799 1.7 riastrad if (bus_space_map(dev_priv->mmio_bst, dev_priv->mmio_start, 800 1.7 riastrad dev_priv->mmio_size, BUS_SPACE_MAP_LINEAR, 801 1.7 riastrad &dev_priv->mmio_bsh) == 0) { 802 1.7 riastrad dev_priv->mmio_virt = bus_space_vaddr(dev_priv->mmio_bst, 803 1.7 riastrad dev_priv->mmio_bsh); 804 1.7 riastrad } else { 805 1.7 riastrad dev_priv->mmio_virt = NULL; 806 1.7 riastrad } 807 1.7 riastrad #else 808 1.2 riastrad dev_priv->mmio_virt = memremap(dev_priv->mmio_start, 809 1.2 riastrad dev_priv->mmio_size, MEMREMAP_WB); 810 1.7 riastrad #endif 811 1.1 riastrad 812 1.1 riastrad if 
(unlikely(dev_priv->mmio_virt == NULL)) { 813 1.1 riastrad ret = -ENOMEM; 814 1.1 riastrad DRM_ERROR("Failed mapping MMIO.\n"); 815 1.4 riastrad goto out_err0; 816 1.1 riastrad } 817 1.1 riastrad 818 1.7 riastrad #ifdef __NetBSD__ 819 1.7 riastrad dev_priv->iot = dev->pdev->pd_pa.pa_iot; 820 1.7 riastrad 821 1.7 riastrad /* XXX errno NetBSD->Linux */ 822 1.7 riastrad ret = -bus_space_map(dev_priv->iot, dev_priv->io_start, VMWGFX_IOSIZE, 823 1.7 riastrad 0, &dev_priv->ioh); 824 1.7 riastrad if (ret) { 825 1.7 riastrad DRM_ERROR("Failed mapping IO ports.\n"); 826 1.7 riastrad goto out_err3; 827 1.7 riastrad } 828 1.7 riastrad #endif 829 1.7 riastrad 830 1.1 riastrad /* Need mmio memory to check for fifo pitchlock cap. */ 831 1.1 riastrad if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && 832 1.1 riastrad !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && 833 1.1 riastrad !vmw_fifo_have_pitchlock(dev_priv)) { 834 1.1 riastrad ret = -ENOSYS; 835 1.1 riastrad DRM_ERROR("Hardware has no pitchlock\n"); 836 1.1 riastrad goto out_err4; 837 1.1 riastrad } 838 1.1 riastrad 839 1.4 riastrad dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12, 840 1.4 riastrad &vmw_prime_dmabuf_ops); 841 1.1 riastrad 842 1.1 riastrad if (unlikely(dev_priv->tdev == NULL)) { 843 1.1 riastrad DRM_ERROR("Unable to initialize TTM object management.\n"); 844 1.1 riastrad ret = -ENOMEM; 845 1.1 riastrad goto out_err4; 846 1.1 riastrad } 847 1.1 riastrad 848 1.1 riastrad dev->dev_private = dev_priv; 849 1.1 riastrad 850 1.1 riastrad ret = pci_request_regions(dev->pdev, "vmwgfx probe"); 851 1.1 riastrad dev_priv->stealth = (ret != 0); 852 1.1 riastrad if (dev_priv->stealth) { 853 1.1 riastrad /** 854 1.1 riastrad * Request at least the mmio PCI resource. 855 1.1 riastrad */ 856 1.1 riastrad 857 1.1 riastrad DRM_INFO("It appears like vesafb is loaded. 
" 858 1.1 riastrad "Ignore above error if any.\n"); 859 1.1 riastrad ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); 860 1.1 riastrad if (unlikely(ret != 0)) { 861 1.1 riastrad DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); 862 1.1 riastrad goto out_no_device; 863 1.1 riastrad } 864 1.1 riastrad } 865 1.1 riastrad 866 1.1 riastrad if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 867 1.7 riastrad #ifdef __NetBSD__ 868 1.7 riastrad ret = vmw_irq_install(dev, 0); 869 1.7 riastrad #else 870 1.4 riastrad ret = vmw_irq_install(dev, dev->pdev->irq); 871 1.7 riastrad #endif 872 1.1 riastrad if (ret != 0) { 873 1.1 riastrad DRM_ERROR("Failed installing irq: %d\n", ret); 874 1.1 riastrad goto out_no_irq; 875 1.1 riastrad } 876 1.1 riastrad } 877 1.1 riastrad 878 1.1 riastrad dev_priv->fman = vmw_fence_manager_init(dev_priv); 879 1.2 riastrad if (unlikely(dev_priv->fman == NULL)) { 880 1.2 riastrad ret = -ENOMEM; 881 1.1 riastrad goto out_no_fman; 882 1.2 riastrad } 883 1.2 riastrad 884 1.4 riastrad drm_vma_offset_manager_init(&dev_priv->vma_manager, 885 1.4 riastrad DRM_FILE_PAGE_OFFSET_START, 886 1.4 riastrad DRM_FILE_PAGE_OFFSET_SIZE); 887 1.2 riastrad ret = ttm_bo_device_init(&dev_priv->bdev, 888 1.2 riastrad &vmw_bo_driver, 889 1.7 riastrad #ifdef __NetBSD__ 890 1.7 riastrad dev->bst, 891 1.7 riastrad dev->dmat, 892 1.7 riastrad #else 893 1.2 riastrad dev->anon_inode->i_mapping, 894 1.7 riastrad #endif 895 1.4 riastrad &dev_priv->vma_manager, 896 1.2 riastrad false); 897 1.2 riastrad if (unlikely(ret != 0)) { 898 1.2 riastrad DRM_ERROR("Failed initializing TTM buffer object driver.\n"); 899 1.2 riastrad goto out_no_bdev; 900 1.2 riastrad } 901 1.2 riastrad 902 1.2 riastrad /* 903 1.2 riastrad * Enable VRAM, but initially don't use it until SVGA is enabled and 904 1.2 riastrad * unhidden. 
905 1.2 riastrad */ 906 1.2 riastrad ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, 907 1.2 riastrad (dev_priv->vram_size >> PAGE_SHIFT)); 908 1.2 riastrad if (unlikely(ret != 0)) { 909 1.2 riastrad DRM_ERROR("Failed initializing memory manager for VRAM.\n"); 910 1.2 riastrad goto out_no_vram; 911 1.2 riastrad } 912 1.2 riastrad dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; 913 1.2 riastrad 914 1.2 riastrad dev_priv->has_gmr = true; 915 1.2 riastrad if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || 916 1.2 riastrad refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, 917 1.2 riastrad VMW_PL_GMR) != 0) { 918 1.2 riastrad DRM_INFO("No GMR memory available. " 919 1.2 riastrad "Graphics memory resources are very limited.\n"); 920 1.2 riastrad dev_priv->has_gmr = false; 921 1.2 riastrad } 922 1.2 riastrad 923 1.2 riastrad if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 924 1.2 riastrad dev_priv->has_mob = true; 925 1.2 riastrad if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, 926 1.2 riastrad VMW_PL_MOB) != 0) { 927 1.2 riastrad DRM_INFO("No MOB memory available. 
" 928 1.2 riastrad "3D will be disabled.\n"); 929 1.2 riastrad dev_priv->has_mob = false; 930 1.2 riastrad } 931 1.2 riastrad } 932 1.2 riastrad 933 1.2 riastrad if (dev_priv->has_mob) { 934 1.2 riastrad spin_lock(&dev_priv->cap_lock); 935 1.4 riastrad vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT); 936 1.2 riastrad dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP); 937 1.2 riastrad spin_unlock(&dev_priv->cap_lock); 938 1.2 riastrad } 939 1.1 riastrad 940 1.4 riastrad vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN); 941 1.1 riastrad ret = vmw_kms_init(dev_priv); 942 1.1 riastrad if (unlikely(ret != 0)) 943 1.1 riastrad goto out_no_kms; 944 1.1 riastrad vmw_overlay_init(dev_priv); 945 1.1 riastrad 946 1.2 riastrad ret = vmw_request_device(dev_priv); 947 1.2 riastrad if (ret) 948 1.2 riastrad goto out_no_fifo; 949 1.2 riastrad 950 1.4 riastrad if (dev_priv->has_dx) { 951 1.4 riastrad /* 952 1.4 riastrad * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 953 1.4 riastrad * support 954 1.4 riastrad */ 955 1.4 riastrad if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) { 956 1.4 riastrad vmw_write(dev_priv, SVGA_REG_DEV_CAP, 957 1.4 riastrad SVGA3D_DEVCAP_SM41); 958 1.4 riastrad dev_priv->has_sm4_1 = vmw_read(dev_priv, 959 1.4 riastrad SVGA_REG_DEV_CAP); 960 1.4 riastrad } 961 1.4 riastrad } 962 1.4 riastrad 963 1.2 riastrad DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no."); 964 1.4 riastrad DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC) 965 1.4 riastrad ? "yes." : "no."); 966 1.4 riastrad DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." 
: "no."); 967 1.4 riastrad 968 1.4 riastrad snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s", 969 1.4 riastrad VMWGFX_REPO, VMWGFX_GIT_VERSION); 970 1.4 riastrad vmw_host_log(host_log); 971 1.4 riastrad 972 1.4 riastrad memset(host_log, 0, sizeof(host_log)); 973 1.4 riastrad snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d", 974 1.4 riastrad VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR, 975 1.4 riastrad VMWGFX_DRIVER_PATCHLEVEL); 976 1.4 riastrad vmw_host_log(host_log); 977 1.2 riastrad 978 1.1 riastrad if (dev_priv->enable_fb) { 979 1.2 riastrad vmw_fifo_resource_inc(dev_priv); 980 1.2 riastrad vmw_svga_enable(dev_priv); 981 1.1 riastrad vmw_fb_init(dev_priv); 982 1.1 riastrad } 983 1.1 riastrad 984 1.1 riastrad dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 985 1.1 riastrad register_pm_notifier(&dev_priv->pm_nb); 986 1.1 riastrad 987 1.1 riastrad return 0; 988 1.1 riastrad 989 1.1 riastrad out_no_fifo: 990 1.1 riastrad vmw_overlay_close(dev_priv); 991 1.1 riastrad vmw_kms_close(dev_priv); 992 1.1 riastrad out_no_kms: 993 1.2 riastrad if (dev_priv->has_mob) 994 1.2 riastrad (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); 995 1.2 riastrad if (dev_priv->has_gmr) 996 1.2 riastrad (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 997 1.2 riastrad (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 998 1.2 riastrad out_no_vram: 999 1.2 riastrad (void)ttm_bo_device_release(&dev_priv->bdev); 1000 1.2 riastrad out_no_bdev: 1001 1.1 riastrad vmw_fence_manager_takedown(dev_priv->fman); 1002 1.1 riastrad out_no_fman: 1003 1.1 riastrad if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 1004 1.4 riastrad vmw_irq_uninstall(dev_priv->dev); 1005 1.1 riastrad out_no_irq: 1006 1.1 riastrad if (dev_priv->stealth) 1007 1.1 riastrad pci_release_region(dev->pdev, 2); 1008 1.1 riastrad else 1009 1.1 riastrad pci_release_regions(dev->pdev); 1010 1.1 riastrad out_no_device: 1011 1.1 riastrad ttm_object_device_release(&dev_priv->tdev); 1012 1.1 riastrad 
out_err4: 1013 1.7 riastrad #ifdef __NetBSD__ 1014 1.7 riastrad bus_space_unmap(dev_priv->iot, dev_priv->ioh, VMWGFX_IOSIZE); 1015 1.7 riastrad out_err3: 1016 1.7 riastrad dev_priv->mmio_virt = NULL; 1017 1.7 riastrad bus_space_unmap(dev_priv->mmio_bst, dev_priv->mmio_bsh, 1018 1.7 riastrad dev_priv->mmio_size); 1019 1.7 riastrad #else 1020 1.2 riastrad memunmap(dev_priv->mmio_virt); 1021 1.7 riastrad #endif 1022 1.1 riastrad out_err0: 1023 1.6 riastrad spin_lock_destroy(&dev_priv->fifo_lock); 1024 1.6 riastrad DRM_DESTROY_WAITQUEUE(&dev_priv->fifo_queue); 1025 1.6 riastrad spin_lock_destroy(&dev_priv->fence_lock); 1026 1.6 riastrad DRM_DESTROY_WAITQUEUE(&dev_priv->fence_queue); 1027 1.6 riastrad 1028 1.1 riastrad for (i = vmw_res_context; i < vmw_res_max; ++i) 1029 1.1 riastrad idr_destroy(&dev_priv->res_idr[i]); 1030 1.1 riastrad 1031 1.2 riastrad if (dev_priv->ctx.staged_bindings) 1032 1.2 riastrad vmw_binding_state_free(dev_priv->ctx.staged_bindings); 1033 1.1 riastrad kfree(dev_priv); 1034 1.1 riastrad return ret; 1035 1.1 riastrad } 1036 1.1 riastrad 1037 1.4 riastrad static void vmw_driver_unload(struct drm_device *dev) 1038 1.1 riastrad { 1039 1.1 riastrad struct vmw_private *dev_priv = vmw_priv(dev); 1040 1.1 riastrad enum vmw_res_type i; 1041 1.1 riastrad 1042 1.1 riastrad unregister_pm_notifier(&dev_priv->pm_nb); 1043 1.1 riastrad 1044 1.1 riastrad if (dev_priv->ctx.res_ht_initialized) 1045 1.1 riastrad drm_ht_remove(&dev_priv->ctx.res_ht); 1046 1.2 riastrad vfree(dev_priv->ctx.cmd_bounce); 1047 1.1 riastrad if (dev_priv->enable_fb) { 1048 1.2 riastrad vmw_fb_off(dev_priv); 1049 1.1 riastrad vmw_fb_close(dev_priv); 1050 1.2 riastrad vmw_fifo_resource_dec(dev_priv); 1051 1.2 riastrad vmw_svga_disable(dev_priv); 1052 1.1 riastrad } 1053 1.2 riastrad 1054 1.1 riastrad vmw_kms_close(dev_priv); 1055 1.1 riastrad vmw_overlay_close(dev_priv); 1056 1.2 riastrad 1057 1.2 riastrad if (dev_priv->has_gmr) 1058 1.2 riastrad (void)ttm_bo_clean_mm(&dev_priv->bdev, 
VMW_PL_GMR); 1059 1.2 riastrad (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 1060 1.2 riastrad 1061 1.2 riastrad vmw_release_device_early(dev_priv); 1062 1.2 riastrad if (dev_priv->has_mob) 1063 1.2 riastrad (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); 1064 1.2 riastrad (void) ttm_bo_device_release(&dev_priv->bdev); 1065 1.4 riastrad drm_vma_offset_manager_destroy(&dev_priv->vma_manager); 1066 1.2 riastrad vmw_release_device_late(dev_priv); 1067 1.1 riastrad vmw_fence_manager_takedown(dev_priv->fman); 1068 1.1 riastrad if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 1069 1.4 riastrad vmw_irq_uninstall(dev_priv->dev); 1070 1.1 riastrad if (dev_priv->stealth) 1071 1.1 riastrad pci_release_region(dev->pdev, 2); 1072 1.1 riastrad else 1073 1.1 riastrad pci_release_regions(dev->pdev); 1074 1.1 riastrad 1075 1.1 riastrad ttm_object_device_release(&dev_priv->tdev); 1076 1.7 riastrad #ifdef __NetBSD__ 1077 1.7 riastrad dev_priv->mmio_virt = NULL; 1078 1.7 riastrad bus_space_unmap(dev_priv->mmio_bst, dev_priv->mmio_bsh, 1079 1.7 riastrad dev_priv->mmio_size); 1080 1.7 riastrad #else 1081 1.2 riastrad memunmap(dev_priv->mmio_virt); 1082 1.7 riastrad #endif 1083 1.2 riastrad if (dev_priv->ctx.staged_bindings) 1084 1.2 riastrad vmw_binding_state_free(dev_priv->ctx.staged_bindings); 1085 1.1 riastrad 1086 1.6 riastrad spin_lock_destroy(&dev_priv->fifo_lock); 1087 1.6 riastrad DRM_DESTROY_WAITQUEUE(&dev_priv->fifo_queue); 1088 1.6 riastrad spin_lock_destroy(&dev_priv->fence_lock); 1089 1.6 riastrad DRM_DESTROY_WAITQUEUE(&dev_priv->fence_queue); 1090 1.6 riastrad 1091 1.1 riastrad for (i = vmw_res_context; i < vmw_res_max; ++i) 1092 1.1 riastrad idr_destroy(&dev_priv->res_idr[i]); 1093 1.1 riastrad 1094 1.1 riastrad kfree(dev_priv); 1095 1.1 riastrad } 1096 1.1 riastrad 1097 1.4 riastrad static void vmw_postclose(struct drm_device *dev, 1098 1.1 riastrad struct drm_file *file_priv) 1099 1.1 riastrad { 1100 1.1 riastrad struct vmw_fpriv *vmw_fp = 
vmw_fpriv(file_priv); 1101 1.2 riastrad 1102 1.1 riastrad ttm_object_file_release(&vmw_fp->tfile); 1103 1.1 riastrad kfree(vmw_fp); 1104 1.1 riastrad } 1105 1.1 riastrad 1106 1.1 riastrad static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) 1107 1.1 riastrad { 1108 1.1 riastrad struct vmw_private *dev_priv = vmw_priv(dev); 1109 1.1 riastrad struct vmw_fpriv *vmw_fp; 1110 1.1 riastrad int ret = -ENOMEM; 1111 1.1 riastrad 1112 1.1 riastrad vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); 1113 1.4 riastrad if (unlikely(!vmw_fp)) 1114 1.1 riastrad return ret; 1115 1.1 riastrad 1116 1.1 riastrad vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); 1117 1.1 riastrad if (unlikely(vmw_fp->tfile == NULL)) 1118 1.1 riastrad goto out_no_tfile; 1119 1.1 riastrad 1120 1.1 riastrad file_priv->driver_priv = vmw_fp; 1121 1.1 riastrad 1122 1.1 riastrad return 0; 1123 1.1 riastrad 1124 1.1 riastrad out_no_tfile: 1125 1.1 riastrad kfree(vmw_fp); 1126 1.1 riastrad return ret; 1127 1.1 riastrad } 1128 1.1 riastrad 1129 1.7 riastrad #ifdef __NetBSD__ 1130 1.7 riastrad static int vmw_generic_ioctl(struct file *filp, unsigned long cmd, 1131 1.7 riastrad void *arg, 1132 1.7 riastrad int (*ioctl_func)(struct file *, unsigned long, 1133 1.7 riastrad void *)) 1134 1.7 riastrad #else 1135 1.2 riastrad static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, 1136 1.2 riastrad unsigned long arg, 1137 1.2 riastrad long (*ioctl_func)(struct file *, unsigned int, 1138 1.2 riastrad unsigned long)) 1139 1.7 riastrad #endif 1140 1.1 riastrad { 1141 1.7 riastrad #ifdef __NetBSD__ 1142 1.7 riastrad struct drm_file *file_priv = filp->f_data; 1143 1.7 riastrad #else 1144 1.1 riastrad struct drm_file *file_priv = filp->private_data; 1145 1.7 riastrad #endif 1146 1.1 riastrad struct drm_device *dev = file_priv->minor->dev; 1147 1.1 riastrad unsigned int nr = DRM_IOCTL_NR(cmd); 1148 1.2 riastrad unsigned int flags; 1149 1.1 riastrad 1150 1.1 riastrad /* 1151 1.1 
riastrad * Do extra checking on driver private ioctls. 1152 1.1 riastrad */ 1153 1.1 riastrad 1154 1.1 riastrad if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) 1155 1.1 riastrad && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { 1156 1.2 riastrad const struct drm_ioctl_desc *ioctl = 1157 1.2 riastrad &vmw_ioctls[nr - DRM_COMMAND_BASE]; 1158 1.2 riastrad 1159 1.2 riastrad if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) { 1160 1.4 riastrad return ioctl_func(filp, cmd, arg); 1161 1.4 riastrad } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) { 1162 1.4 riastrad if (!drm_is_current_master(file_priv) && 1163 1.4 riastrad !capable(CAP_SYS_ADMIN)) 1164 1.4 riastrad return -EACCES; 1165 1.1 riastrad } 1166 1.2 riastrad 1167 1.2 riastrad if (unlikely(ioctl->cmd != cmd)) 1168 1.2 riastrad goto out_io_encoding; 1169 1.2 riastrad 1170 1.2 riastrad flags = ioctl->flags; 1171 1.2 riastrad } else if (!drm_ioctl_flags(nr, &flags)) 1172 1.2 riastrad return -EINVAL; 1173 1.2 riastrad 1174 1.4 riastrad return ioctl_func(filp, cmd, arg); 1175 1.2 riastrad 1176 1.2 riastrad out_io_encoding: 1177 1.2 riastrad DRM_ERROR("Invalid command format, ioctl %d\n", 1178 1.2 riastrad nr - DRM_COMMAND_BASE); 1179 1.2 riastrad 1180 1.2 riastrad return -EINVAL; 1181 1.1 riastrad } 1182 1.1 riastrad 1183 1.7 riastrad #ifdef __NetBSD__ 1184 1.7 riastrad static int vmw_unlocked_ioctl(struct file *filp, unsigned long cmd, void *arg) 1185 1.7 riastrad #else 1186 1.2 riastrad static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, 1187 1.2 riastrad unsigned long arg) 1188 1.7 riastrad #endif 1189 1.1 riastrad { 1190 1.2 riastrad return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl); 1191 1.2 riastrad } 1192 1.1 riastrad 1193 1.2 riastrad #ifdef CONFIG_COMPAT 1194 1.2 riastrad static long vmw_compat_ioctl(struct file *filp, unsigned int cmd, 1195 1.2 riastrad unsigned long arg) 1196 1.2 riastrad { 1197 1.2 riastrad return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl); 
1198 1.1 riastrad } 1199 1.2 riastrad #endif 1200 1.1 riastrad 1201 1.1 riastrad static int vmw_master_set(struct drm_device *dev, 1202 1.1 riastrad struct drm_file *file_priv, 1203 1.1 riastrad bool from_open) 1204 1.1 riastrad { 1205 1.4 riastrad /* 1206 1.4 riastrad * Inform a new master that the layout may have changed while 1207 1.4 riastrad * it was gone. 1208 1.4 riastrad */ 1209 1.4 riastrad if (!from_open) 1210 1.4 riastrad drm_sysfs_hotplug_event(dev); 1211 1.1 riastrad 1212 1.1 riastrad return 0; 1213 1.1 riastrad } 1214 1.1 riastrad 1215 1.1 riastrad static void vmw_master_drop(struct drm_device *dev, 1216 1.4 riastrad struct drm_file *file_priv) 1217 1.1 riastrad { 1218 1.1 riastrad struct vmw_private *dev_priv = vmw_priv(dev); 1219 1.1 riastrad 1220 1.2 riastrad vmw_kms_legacy_hotspot_clear(dev_priv); 1221 1.2 riastrad if (!dev_priv->enable_fb) 1222 1.2 riastrad vmw_svga_disable(dev_priv); 1223 1.1 riastrad } 1224 1.1 riastrad 1225 1.2 riastrad /** 1226 1.2 riastrad * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1227 1.2 riastrad * 1228 1.2 riastrad * @dev_priv: Pointer to device private struct. 1229 1.2 riastrad * Needs the reservation sem to be held in non-exclusive mode. 1230 1.2 riastrad */ 1231 1.2 riastrad static void __vmw_svga_enable(struct vmw_private *dev_priv) 1232 1.2 riastrad { 1233 1.2 riastrad spin_lock(&dev_priv->svga_lock); 1234 1.2 riastrad if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) { 1235 1.2 riastrad vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE); 1236 1.2 riastrad dev_priv->bdev.man[TTM_PL_VRAM].use_type = true; 1237 1.2 riastrad } 1238 1.2 riastrad spin_unlock(&dev_priv->svga_lock); 1239 1.2 riastrad } 1240 1.2 riastrad 1241 1.2 riastrad /** 1242 1.2 riastrad * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1243 1.2 riastrad * 1244 1.2 riastrad * @dev_priv: Pointer to device private struct. 
1245 1.2 riastrad */ 1246 1.2 riastrad void vmw_svga_enable(struct vmw_private *dev_priv) 1247 1.2 riastrad { 1248 1.4 riastrad (void) ttm_read_lock(&dev_priv->reservation_sem, false); 1249 1.2 riastrad __vmw_svga_enable(dev_priv); 1250 1.2 riastrad ttm_read_unlock(&dev_priv->reservation_sem); 1251 1.2 riastrad } 1252 1.2 riastrad 1253 1.2 riastrad /** 1254 1.2 riastrad * __vmw_svga_disable - Disable SVGA mode and use of VRAM. 1255 1.2 riastrad * 1256 1.2 riastrad * @dev_priv: Pointer to device private struct. 1257 1.2 riastrad * Needs the reservation sem to be held in exclusive mode. 1258 1.2 riastrad * Will not empty VRAM. VRAM must be emptied by caller. 1259 1.2 riastrad */ 1260 1.2 riastrad static void __vmw_svga_disable(struct vmw_private *dev_priv) 1261 1.2 riastrad { 1262 1.2 riastrad spin_lock(&dev_priv->svga_lock); 1263 1.2 riastrad if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { 1264 1.2 riastrad dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; 1265 1.2 riastrad vmw_write(dev_priv, SVGA_REG_ENABLE, 1266 1.2 riastrad SVGA_REG_ENABLE_HIDE | 1267 1.2 riastrad SVGA_REG_ENABLE_ENABLE); 1268 1.2 riastrad } 1269 1.2 riastrad spin_unlock(&dev_priv->svga_lock); 1270 1.2 riastrad } 1271 1.2 riastrad 1272 1.2 riastrad /** 1273 1.2 riastrad * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo 1274 1.2 riastrad * running. 1275 1.2 riastrad * 1276 1.2 riastrad * @dev_priv: Pointer to device private struct. 1277 1.2 riastrad * Will empty VRAM. 1278 1.2 riastrad */ 1279 1.2 riastrad void vmw_svga_disable(struct vmw_private *dev_priv) 1280 1.2 riastrad { 1281 1.4 riastrad /* 1282 1.4 riastrad * Disabling SVGA will turn off device modesetting capabilities, so 1283 1.4 riastrad * notify KMS about that so that it doesn't cache atomic state that 1284 1.4 riastrad * isn't valid anymore, for example crtcs turned on. 
1285 1.4 riastrad * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex), 1286 1.4 riastrad * but vmw_kms_lost_device() takes the reservation sem and thus we'll 1287 1.4 riastrad * end up with lock order reversal. Thus, a master may actually perform 1288 1.4 riastrad * a new modeset just after we call vmw_kms_lost_device() and race with 1289 1.4 riastrad * vmw_svga_disable(), but that should at worst cause atomic KMS state 1290 1.4 riastrad * to be inconsistent with the device, causing modesetting problems. 1291 1.4 riastrad * 1292 1.4 riastrad */ 1293 1.4 riastrad vmw_kms_lost_device(dev_priv->dev); 1294 1.2 riastrad ttm_write_lock(&dev_priv->reservation_sem, false); 1295 1.2 riastrad spin_lock(&dev_priv->svga_lock); 1296 1.2 riastrad if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { 1297 1.2 riastrad dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; 1298 1.2 riastrad spin_unlock(&dev_priv->svga_lock); 1299 1.2 riastrad if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM)) 1300 1.2 riastrad DRM_ERROR("Failed evicting VRAM buffers.\n"); 1301 1.2 riastrad vmw_write(dev_priv, SVGA_REG_ENABLE, 1302 1.2 riastrad SVGA_REG_ENABLE_HIDE | 1303 1.2 riastrad SVGA_REG_ENABLE_ENABLE); 1304 1.2 riastrad } else 1305 1.2 riastrad spin_unlock(&dev_priv->svga_lock); 1306 1.2 riastrad ttm_write_unlock(&dev_priv->reservation_sem); 1307 1.2 riastrad } 1308 1.1 riastrad 1309 1.7 riastrad #ifndef __NetBSD__ 1310 1.1 riastrad static void vmw_remove(struct pci_dev *pdev) 1311 1.1 riastrad { 1312 1.1 riastrad struct drm_device *dev = pci_get_drvdata(pdev); 1313 1.1 riastrad 1314 1.4 riastrad drm_dev_unregister(dev); 1315 1.4 riastrad vmw_driver_unload(dev); 1316 1.4 riastrad drm_dev_put(dev); 1317 1.2 riastrad pci_disable_device(pdev); 1318 1.1 riastrad } 1319 1.7 riastrad #endif 1320 1.1 riastrad 1321 1.1 riastrad static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 1322 1.1 riastrad void *ptr) 1323 1.1 riastrad { 1324 1.7 riastrad #ifndef __NetBSD__ 1325 
1.1 riastrad struct vmw_private *dev_priv = 1326 1.1 riastrad container_of(nb, struct vmw_private, pm_nb); 1327 1.1 riastrad 1328 1.1 riastrad switch (val) { 1329 1.1 riastrad case PM_HIBERNATION_PREPARE: 1330 1.2 riastrad /* 1331 1.4 riastrad * Take the reservation sem in write mode, which will make sure 1332 1.4 riastrad * there are no other processes holding a buffer object 1333 1.4 riastrad * reservation, meaning we should be able to evict all buffer 1334 1.4 riastrad * objects if needed. 1335 1.4 riastrad * Once user-space processes have been frozen, we can release 1336 1.4 riastrad * the lock again. 1337 1.1 riastrad */ 1338 1.4 riastrad ttm_suspend_lock(&dev_priv->reservation_sem); 1339 1.4 riastrad dev_priv->suspend_locked = true; 1340 1.1 riastrad break; 1341 1.1 riastrad case PM_POST_HIBERNATION: 1342 1.1 riastrad case PM_POST_RESTORE: 1343 1.4 riastrad if (READ_ONCE(dev_priv->suspend_locked)) { 1344 1.4 riastrad dev_priv->suspend_locked = false; 1345 1.4 riastrad ttm_suspend_unlock(&dev_priv->reservation_sem); 1346 1.4 riastrad } 1347 1.1 riastrad break; 1348 1.1 riastrad default: 1349 1.1 riastrad break; 1350 1.1 riastrad } 1351 1.7 riastrad #endif 1352 1.1 riastrad return 0; 1353 1.1 riastrad } 1354 1.1 riastrad 1355 1.7 riastrad #ifndef __NetBSD__ 1356 1.7 riastrad 1357 1.1 riastrad static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1358 1.1 riastrad { 1359 1.1 riastrad struct drm_device *dev = pci_get_drvdata(pdev); 1360 1.1 riastrad struct vmw_private *dev_priv = vmw_priv(dev); 1361 1.1 riastrad 1362 1.2 riastrad if (dev_priv->refuse_hibernation) 1363 1.1 riastrad return -EBUSY; 1364 1.1 riastrad 1365 1.1 riastrad pci_save_state(pdev); 1366 1.1 riastrad pci_disable_device(pdev); 1367 1.1 riastrad pci_set_power_state(pdev, PCI_D3hot); 1368 1.1 riastrad return 0; 1369 1.1 riastrad } 1370 1.1 riastrad 1371 1.1 riastrad static int vmw_pci_resume(struct pci_dev *pdev) 1372 1.1 riastrad { 1373 1.1 riastrad pci_set_power_state(pdev, 
/*
 * Tail of vmw_pci_resume() -- its definition begins above this chunk.
 * Restores full power (D0), reloads saved PCI config space, and
 * re-enables the device.
 */
				       PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

/*
 * vmw_pm_suspend - dev_pm_ops .suspend hook (S3 entry).
 *
 * Thin adapter from the struct device PM callback to the PCI-style
 * vmw_pci_suspend(); the pm_message argument is a dummy with event 0.
 */
static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

/*
 * vmw_pm_resume - dev_pm_ops .resume hook (S3 exit).
 *
 * Adapter to vmw_pci_resume(); returns its status (0 on success).
 */
static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

/*
 * vmw_pm_freeze - dev_pm_ops .freeze hook (hibernation entry).
 *
 * Quiesces the device: suspends KMS, turns off the fbdev emulation,
 * evicts resources and releases the device early, then swaps out
 * buffer objects.  Fails with -EBUSY (after rolling everything back)
 * if any 3D FIFO resources are still in use, since the device state
 * cannot be saved across hibernation in that case.
 *
 * NOTE(review): reservation_sem appears to be held (suspend-locked) on
 * entry -- it is unlocked first and re-taken below; confirm against the
 * prepare callback outside this view.
 */
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	/*
	 * Unlock for vmw_kms_suspend.
	 * No user-space processes should be running now.
	 */
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	ret = vmw_kms_suspend(dev_priv->dev);
	if (ret) {
		/* KMS suspend failed: re-take the lock and bail out. */
		ttm_suspend_lock(&dev_priv->reservation_sem);
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	ttm_suspend_lock(&dev_priv->reservation_sem);
	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	/* Push all buffer objects out to swappable memory. */
	ttm_bo_swapout_all(&dev_priv->bdev);
	/* Drop the fifo resource the fb emulation holds, if any. */
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		/*
		 * Roll back everything done above: re-take the fb fifo
		 * resource, re-request the device, drop the suspend lock,
		 * and resume KMS/fb so the system keeps running.
		 */
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

/*
 * vmw_pm_restore - dev_pm_ops .thaw and .restore hook (hibernation exit).
 *
 * Re-identifies the SVGA device (writes SVGA_ID_2 and reads it back),
 * re-acquires the device, re-enables SVGA/fb if fb emulation is on,
 * brings the fence FIFO back up, releases the suspend lock, and
 * resumes KMS from any saved suspend state.
 *
 * Return: 0 on success, or the error from vmw_request_device().
 */
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	/* Renegotiate the SVGA device version after power loss. */
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	if (dev_priv->suspend_state)
		vmw_kms_resume(dev_priv->dev);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}

/*
 * Power-management callbacks: freeze/thaw/restore implement the
 * hibernation path, suspend/resume the S3 path.  Note .thaw and
 * .restore deliberately share vmw_pm_restore.
 */
static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

/* Character-device file operations for /dev/dri nodes (Linux path). */
static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

/* Closes a conditional opened before this chunk -- presumably
 * !__NetBSD__ guarding the fops/pm blocks above; confirm upstream. */
#endif

static struct drm_driver driver = {
	/* Continuation of the drm_driver definition opened just above. */
	.driver_features =
	    DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.load = vmw_driver_load,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.unload = vmw_driver_unload,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

#ifdef __NetBSD__
	/* NetBSD routes ioctls through the driver, not a Linux fops. */
	.ioctl_override = &vmw_unlocked_ioctl,
#else
	.fops = &vmwgfx_driver_fops,
#endif
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

#ifdef __NetBSD__

/*
 * NetBSD attachment glue: export the driver object and the PCI ID
 * match table so the native autoconf driver (outside this file) can
 * attach instead of the Linux pci_driver/module path below.
 */
const struct drm_driver *const vmwgfx_driver = &driver;
const struct pci_device_id *const vmwgfx_pci_ids = vmw_pci_id_list;
const size_t vmwgfx_n_pci_ids = __arraycount(vmw_pci_id_list);

#else

/* Linux PCI driver glue: probe/remove plus the PM ops defined above. */
static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

/*
 * vmw_probe - PCI probe callback.
 *
 * Enables the PCI device, allocates the DRM device, loads the driver
 * state, and registers the device with DRM core.  Unwinds in reverse
 * order via gotos on any failure.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_device *dev;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	dev = drm_dev_alloc(&driver, &pdev->dev);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto err_pci_disable_device;
	}

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = vmw_driver_load(dev, ent->driver_data);
	if (ret)
		goto err_drm_dev_put;

	ret = drm_dev_register(dev, ent->driver_data);
	if (ret)
		goto err_vmw_driver_unload;

	return 0;

err_vmw_driver_unload:
	vmw_driver_unload(dev);
err_drm_dev_put:
	drm_dev_put(dev);
err_pci_disable_device:
	pci_disable_device(pdev);
	return ret;
}

/*
 * vmwgfx_init - module init: register the PCI driver.
 *
 * Refuses to load (-EINVAL) when the VGA text console was forced on
 * the kernel command line, since a modesetting driver would fight it.
 */
static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

/* vmwgfx_exit - module exit: unregister the PCI driver. */
static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

#endif

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");