/* $NetBSD: amdgpu_fb.c,v 1.1.1.2 2021/12/18 20:11:06 riastradh Exp $ */

/*
 * Copyright 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_fb.c,v 1.1.1.2 2021/12/18 20:11:06 riastradh Exp $");

#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>

#include "amdgpu.h"
#include "cikd.h"
#include "amdgpu_gem.h"

#include "amdgpu_display.h"

/* object hierarchy -
   this contains a helper + an amdgpu fb
   the helper contains a pointer to the amdgpu framebuffer baseclass.
*/

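/*
 * fbdev open/close bracket a runtime-PM reference on the DRM device so the
 * GPU stays powered while the console is in use.  -EACCES from
 * pm_runtime_get_sync() typically just means runtime PM is disabled for this
 * device, so it is not treated as a failure here.
 */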
static int
amdgpufb_open(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;
	int ret = pm_runtime_get_sync(fb_helper->dev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_mark_last_busy(fb_helper->dev->dev);
		pm_runtime_put_autosuspend(fb_helper->dev->dev);
		return ret;
	}
	return 0;
}

static int
amdgpufb_release(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	pm_runtime_mark_last_busy(fb_helper->dev->dev);
	pm_runtime_put_autosuspend(fb_helper->dev->dev);
	return 0;
}

static const struct fb_ops amdgpufb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open = amdgpufb_open,
	.fb_release = amdgpufb_release,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
};


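/*
 * Round the width up so the resulting pitch meets the display engine's
 * alignment limits: 256 pixels for 1 byte/pixel, 128 pixels for
 * 2 bytes/pixel, 64 pixels for 3-4 bytes/pixel; the pitch is returned in
 * bytes.  For example, a 1366-pixel-wide, 32 bpp mode is padded to
 * 1408 pixels, giving a pitch of 5632 bytes.
 */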
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
{
	int aligned = width;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

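/*
 * Undo amdgpufb_create_pinned_object(): unmap and unpin the buffer object
 * (under its reservation lock) and drop the GEM reference.
 */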
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
	int ret;

	ret = amdgpu_bo_reserve(abo, true);
	if (likely(ret == 0)) {
		amdgpu_bo_kunmap(abo);
		amdgpu_bo_unpin(abo);
		amdgpu_bo_unreserve(abo);
	}
	drm_gem_object_put_unlocked(gobj);
}

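/*
 * Allocate the buffer object backing the fbdev console: a CPU-accessible,
 * contiguous, cleared BO sized for the requested mode, pinned in a
 * scanout-capable domain and kmapped so the fbdev layer can draw into it
 * directly.
 */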
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
					 struct drm_mode_fb_cmd2 *mode_cmd,
					 struct drm_gem_object **gobj_p)
{
	const struct drm_format_info *info;
	struct amdgpu_device *adev = rfbdev->adev;
	struct drm_gem_object *gobj = NULL;
	struct amdgpu_bo *abo = NULL;
	bool fb_tiled = false; /* useful for testing */
	u32 tiling_flags = 0, domain;
	int ret;
	int aligned_size, size;
	int height = mode_cmd->height;
	u32 cpp;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		    AMDGPU_GEM_CREATE_VRAM_CLEARED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	info = drm_get_format_info(adev->ddev, mode_cmd);
	cpp = info->cpp[0];

	/* need to align pitch with crtc limits */
	mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
						  fb_tiled);
	domain = amdgpu_display_supported_domains(adev, flags);
	height = ALIGN(mode_cmd->height, 8);
	size = mode_cmd->pitches[0] * height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
				       ttm_bo_type_kernel, NULL, &gobj);
	if (ret) {
		pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
		return -ENOMEM;
	}
	abo = gem_to_amdgpu_bo(gobj);

	if (fb_tiled)
		tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);

	ret = amdgpu_bo_reserve(abo, false);
	if (unlikely(ret != 0))
		goto out_unref;

	if (tiling_flags) {
		ret = amdgpu_bo_set_tiling_flags(abo,
						 tiling_flags);
		if (ret)
			dev_err(adev->dev, "FB failed to set tiling flags\n");
	}

	ret = amdgpu_bo_pin(abo, domain);
	if (ret) {
		amdgpu_bo_unreserve(abo);
		goto out_unref;
	}

	ret = amdgpu_ttm_alloc_gart(&abo->tbo);
	if (ret) {
		amdgpu_bo_unreserve(abo);
		dev_err(adev->dev, "%p bind failed\n", abo);
		goto out_unref;
	}

	ret = amdgpu_bo_kmap(abo, NULL);
	amdgpu_bo_unreserve(abo);
	if (ret) {
		goto out_unref;
	}

	*gobj_p = gobj;
	return 0;
out_unref:
	amdgpufb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}

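/*
 * .fb_probe callback for the DRM fbdev helper: allocates and pins the
 * scanout buffer, wraps it in an amdgpu/drm framebuffer, and fills in the
 * fb_info that the fbdev console will use.
 */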
static int amdgpufb_create(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
	struct amdgpu_device *adev = rfbdev->adev;
	struct fb_info *info;
	struct drm_framebuffer *fb = NULL;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct amdgpu_bo *abo = NULL;
	int ret;
	unsigned long tmp;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon object %d\n", ret);
		return ret;
	}

	abo = gem_to_amdgpu_bo(gobj);

	/* okay we have an object now allocate the framebuffer */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}

	ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
					      &mode_cmd, gobj);
	if (ret) {
		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
		goto out;
	}

	fb = &rfbdev->rfb.base;

	/* setup helper */
	rfbdev->helper.fb = fb;

	info->fbops = &amdgpufb_ops;

	tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
	info->fix.smem_start = adev->gmc.aper_base + tmp;
	info->fix.smem_len = amdgpu_bo_size(abo);
	info->screen_base = amdgpu_bo_kptr(abo);
	info->screen_size = amdgpu_bo_size(abo);

	drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
	info->apertures->ranges[0].size = adev->gmc.aper_size;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out;
	}

	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)adev->gmc.aper_base);
	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
	DRM_INFO("fb depth is %d\n", fb->format->depth);
	DRM_INFO(" pitch is %d\n", fb->pitches[0]);

	vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
	return 0;

out:
	if (abo) {

	}
	if (fb && ret) {
		drm_gem_object_put_unlocked(gobj);
		drm_framebuffer_unregister_private(fb);
		drm_framebuffer_cleanup(fb);
		kfree(fb);
	}
	return ret;
}

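/*
 * Tear down the fbdev emulation: unregister the fb_info, release the pinned
 * scanout buffer, and clean up the framebuffer and helper state.
 */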
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;

	drm_fb_helper_unregister_fbi(&rfbdev->helper);

	if (rfb->base.obj[0]) {
		amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
		rfb->base.obj[0] = NULL;
		drm_framebuffer_unregister_private(&rfb->base);
		drm_framebuffer_cleanup(&rfb->base);
	}
	drm_fb_helper_fini(&rfbdev->helper);

	return 0;
}

static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
	.fb_probe = amdgpufb_create,
};

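/*
 * Set up fbdev emulation at driver load.  Skipped when there is no display
 * hardware or no connectors; an 8 bpp console is used on cards with very
 * little VRAM, 32 bpp otherwise.
 */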
int amdgpu_fbdev_init(struct amdgpu_device *adev)
{
	struct amdgpu_fbdev *rfbdev;
	int bpp_sel = 32;
	int ret;

	/* don't init fbdev on hw without DCE */
	if (!adev->mode_info.mode_config_initialized)
		return 0;

	/* don't init fbdev if there are no connectors */
	if (list_empty(&adev->ddev->mode_config.connector_list))
		return 0;

	/* select 8 bpp console on low vram cards */
	if (adev->gmc.real_vram_size <= (32*1024*1024))
		bpp_sel = 8;

	rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
	if (!rfbdev)
		return -ENOMEM;

	rfbdev->adev = adev;
	adev->mode_info.rfbdev = rfbdev;

	drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
			      &amdgpu_fb_helper_funcs);

	ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
				 AMDGPUFB_CONN_LIMIT);
	if (ret) {
		kfree(rfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);

	/* disable all the possible outputs/crtcs before entering KMS mode */
	if (!amdgpu_device_has_dc_support(adev))
		drm_helper_disable_unused_functions(adev->ddev);

	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
	return 0;
}

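/* Tear down fbdev emulation at driver unload, if it was set up. */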
void amdgpu_fbdev_fini(struct amdgpu_device *adev)
{
	if (!adev->mode_info.rfbdev)
		return;

	amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
	kfree(adev->mode_info.rfbdev);
	adev->mode_info.rfbdev = NULL;
}

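/* Suspend or resume the fbdev console around device power transitions. */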
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
{
	if (adev->mode_info.rfbdev)
		drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
						   state);
}

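/* Return the size in bytes of the buffer object backing the fbdev console. */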
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
{
	struct amdgpu_bo *robj;
	int size = 0;

	if (!adev->mode_info.rfbdev)
		return 0;

	robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]);
	size += amdgpu_bo_size(robj);
	return size;
}

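/* Check whether the given buffer object is the one backing the fbdev console. */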
bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
{
	if (!adev->mode_info.rfbdev)
		return false;
	if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]))
		return true;
	return false;
}