/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include <asm/div64.h>

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
static int amdgpu_display_framebuffer_init(struct drm_device *dev,
                                           struct amdgpu_framebuffer *rfb,
                                           const struct drm_mode_fb_cmd2 *mode_cmd,
                                           struct drm_gem_object *obj);
static void amdgpu_display_flip_callback(struct dma_fence *f,
                                         struct dma_fence_cb *cb)
        struct amdgpu_flip_work *work =
                container_of(cb, struct amdgpu_flip_work, cb);

        schedule_work(&work->flip_work.work);
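/*
 * Arm the flip work to run again once the fence in @f signals: if a callback
 * can still be installed on the fence, amdgpu_display_flip_callback()
 * reschedules the work when it fires and this returns true; otherwise the
 * fence has already signaled and the flip may proceed immediately.
 */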
static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
                                             struct dma_fence **f)
        struct dma_fence *fence = *f;

        if (!dma_fence_add_callback(fence, &work->cb,
                                    amdgpu_display_flip_callback))
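/*
 * Delayed-work handler that performs the actual flip: wait for every fence
 * the new buffer depends on, wait until the CRTC has left the vblank before
 * the one targeted by the flip, then program the flip through the mmio
 * page_flip hook while holding the event lock.
 */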
static void amdgpu_display_flip_work_func(struct work_struct *__work)
        struct delayed_work *delayed_work =
                container_of(__work, struct delayed_work, work);
        struct amdgpu_flip_work *work =
                container_of(delayed_work, struct amdgpu_flip_work, flip_work);
        struct amdgpu_device *adev = work->adev;
        struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

        struct drm_crtc *crtc = &amdgpu_crtc->base;

        for (i = 0; i < work->shared_count; ++i)
                if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))

        /* Wait until we're out of the vertical blank period before the one
         * targeted by the flip
         */
        if (amdgpu_crtc->enabled &&
            (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
                                                &vpos, &hpos, NULL, NULL,
             & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
            (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
            (int)(work->target_vblank -
                  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
                schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));

        /* We borrow the event spin lock for protecting flip_status */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);

        /* Do the flip (mmio) */
        adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

        /* Set the flip status */
        amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

        drm_dbg_vbl(adev_to_drm(adev),
                    "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
                    amdgpu_crtc->crtc_id, amdgpu_crtc, work);
/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_display_unpin_work_func(struct work_struct *__work)
        struct amdgpu_flip_work *work =
                container_of(__work, struct amdgpu_flip_work, unpin_work);

        /* unpin of the old buffer */
        r = amdgpu_bo_reserve(work->old_abo, true);
        if (likely(r == 0)) {
                amdgpu_bo_unpin(work->old_abo);
                amdgpu_bo_unreserve(work->old_abo);
        } else
                DRM_ERROR("failed to reserve buffer after flip\n");

        amdgpu_bo_unref(&work->old_abo);
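/*
 * DRM page_flip_target hook: pin the new framebuffer's BO, collect the
 * fences the flip has to wait for, and queue amdgpu_flip_work aimed at the
 * requested target vblank; the mmio flip itself happens from the work item.
 */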
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
                                         struct drm_framebuffer *fb,
                                         struct drm_pending_vblank_event *event,
                                         uint32_t page_flip_flags, uint32_t target,
                                         struct drm_modeset_acquire_ctx *ctx)
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_gem_object *obj;
        struct amdgpu_flip_work *work;
        struct amdgpu_bo *new_abo;

        work = kzalloc(sizeof *work, GFP_KERNEL);

        INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
        INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

        work->crtc_id = amdgpu_crtc->crtc_id;
        work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

        /* schedule unpin of the old buffer */
        obj = crtc->primary->fb->obj[0];

        /* take a reference to the old object */
        work->old_abo = gem_to_amdgpu_bo(obj);
        amdgpu_bo_ref(work->old_abo);

        new_abo = gem_to_amdgpu_bo(obj);

        /* pin the new buffer */
        r = amdgpu_bo_reserve(new_abo, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("failed to reserve new abo buffer before flip\n");

        if (!adev->enable_virtual_display) {
                r = amdgpu_bo_pin(new_abo,
                                  amdgpu_display_supported_domains(adev, new_abo->flags));
                if (unlikely(r != 0)) {
                        DRM_ERROR("failed to pin new abo buffer before flip\n");

        r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
        if (unlikely(r != 0)) {
                DRM_ERROR("%p bind failed\n", new_abo);

        r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
        if (unlikely(r != 0)) {
                DRM_ERROR("failed to get fences for buffer\n");

        amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
        amdgpu_bo_unreserve(new_abo);

        if (!adev->enable_virtual_display)
                work->base = amdgpu_bo_gpu_offset(new_abo);
        work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
                amdgpu_get_vblank_counter_kms(crtc);
        /* we borrow the event spin lock for protecting flip_work */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
        amdgpu_crtc->pflip_works = work;

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc, work);

        crtc->primary->fb = fb;
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
        amdgpu_display_flip_work_func(&work->flip_work.work);

        if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
                DRM_ERROR("failed to reserve new abo in error path\n");

        if (!adev->enable_virtual_display)
                amdgpu_bo_unpin(new_abo);

        amdgpu_bo_unreserve(new_abo);

        amdgpu_bo_unref(&work->old_abo);
        for (i = 0; i < work->shared_count; ++i)
                dma_fence_put(work->shared[i]);
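/*
 * DRM set_config hook: wrap drm_crtc_helper_set_config() with runtime PM
 * handling, keeping a display power reference while any CRTC is enabled and
 * dropping it again once all CRTCs are off.
 */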
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
                                   struct drm_modeset_acquire_ctx *ctx)
        struct drm_device *dev;
        struct amdgpu_device *adev;
        struct drm_crtc *crtc;

        if (!set || !set->crtc)

        dev = set->crtc->dev;

        ret = pm_runtime_get_sync(dev->dev);

        ret = drm_crtc_helper_set_config(set, ctx);

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)

        pm_runtime_mark_last_busy(dev->dev);

        adev = drm_to_adev(dev);
        /* if we have active crtcs and we don't have a power ref,
           take the current one */
        if (active && !adev->have_disp_power_ref) {
                adev->have_disp_power_ref = true;

        /* if we have no active crtcs, then drop the power ref */
        if (!active && adev->have_disp_power_ref) {
                pm_runtime_put_autosuspend(dev->dev);
                adev->have_disp_power_ref = false;

        /* drop the power reference we got coming in here */
        pm_runtime_put_autosuspend(dev->dev);
static const char *encoder_names[41] = {
        "INTERNAL_KLDSCP_TMDS1",
        "INTERNAL_KLDSCP_DVO1",
        "INTERNAL_KLDSCP_DAC1",
        "INTERNAL_KLDSCP_DAC2",
        "INTERNAL_KLDSCP_LVTMA",

static const char *hpd_names[6] = {
void amdgpu_display_print_display_setup(struct drm_device *dev)
        struct drm_connector *connector;
        struct amdgpu_connector *amdgpu_connector;
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;
        struct drm_connector_list_iter iter;

        drm_connector_list_iter_begin(dev, &iter);
        DRM_INFO("AMDGPU Display Connectors\n");
        drm_for_each_connector_iter(connector, &iter) {
                amdgpu_connector = to_amdgpu_connector(connector);
                DRM_INFO("Connector %d:\n", i);
                DRM_INFO(" %s\n", connector->name);
                if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
                        DRM_INFO(" %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
                if (amdgpu_connector->ddc_bus) {
                        DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
                                 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
                                 amdgpu_connector->ddc_bus->rec.mask_data_reg,
                                 amdgpu_connector->ddc_bus->rec.a_clk_reg,
                                 amdgpu_connector->ddc_bus->rec.a_data_reg,
                                 amdgpu_connector->ddc_bus->rec.en_clk_reg,
                                 amdgpu_connector->ddc_bus->rec.en_data_reg,
                                 amdgpu_connector->ddc_bus->rec.y_clk_reg,
                                 amdgpu_connector->ddc_bus->rec.y_data_reg);
                        if (amdgpu_connector->router.ddc_valid)
                                DRM_INFO(" DDC Router 0x%x/0x%x\n",
                                         amdgpu_connector->router.ddc_mux_control_pin,
                                         amdgpu_connector->router.ddc_mux_state);
                        if (amdgpu_connector->router.cd_valid)
                                DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
                                         amdgpu_connector->router.cd_mux_control_pin,
                                         amdgpu_connector->router.cd_mux_state);
                } else {
                        if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
                            connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
                            connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
                            connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
                            connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
                            connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
                                DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
                }
                DRM_INFO(" Encoders:\n");
                list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                        amdgpu_encoder = to_amdgpu_encoder(encoder);
                        devices = amdgpu_encoder->devices & amdgpu_connector->devices;

                        if (devices & ATOM_DEVICE_CRT1_SUPPORT)
                                DRM_INFO(" CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
                        if (devices & ATOM_DEVICE_CRT2_SUPPORT)
                                DRM_INFO(" CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
                        if (devices & ATOM_DEVICE_LCD1_SUPPORT)
                                DRM_INFO(" LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
                        if (devices & ATOM_DEVICE_DFP1_SUPPORT)
                                DRM_INFO(" DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
                        if (devices & ATOM_DEVICE_DFP2_SUPPORT)
                                DRM_INFO(" DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
                        if (devices & ATOM_DEVICE_DFP3_SUPPORT)
                                DRM_INFO(" DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
                        if (devices & ATOM_DEVICE_DFP4_SUPPORT)
                                DRM_INFO(" DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
                        if (devices & ATOM_DEVICE_DFP5_SUPPORT)
                                DRM_INFO(" DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
                        if (devices & ATOM_DEVICE_DFP6_SUPPORT)
                                DRM_INFO(" DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
                        if (devices & ATOM_DEVICE_TV1_SUPPORT)
                                DRM_INFO(" TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
                        if (devices & ATOM_DEVICE_CV_SUPPORT)
                                DRM_INFO(" CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);

        drm_connector_list_iter_end(&iter);
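/*
 * Probe the connector's DDC bus with a small i2c transfer and sanity-check
 * that the reply starts like a valid EDID header; used to decide whether a
 * display is actually attached to the connector.
 */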
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
        struct i2c_msg msgs[] = {

        /* on hw with routers, select right port */
        if (amdgpu_connector->router.ddc_valid)
                amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

        ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);

        ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);

        /* Couldn't find an accessible DDC on this connector */

        /* Probe also for valid EDID header
         * EDID header starts with:
         * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
         * Only the first 6 bytes must be valid as
         * drm_edid_block_valid() can fix the last 2 bytes */
        if (drm_edid_header_is_valid(buf) < 6) {
                /* Couldn't find an accessible EDID on this connector */
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
        .destroy = drm_gem_fb_destroy,
        .create_handle = drm_gem_fb_create_handle,
};

static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
        .destroy = drm_gem_fb_destroy,
        .create_handle = drm_gem_fb_create_handle,
        .dirty = drm_atomic_helper_dirtyfb,
};
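/*
 * Return the set of memory domains (VRAM, and in some configurations GTT)
 * that a buffer created with @bo_flags may live in while it is scanned out;
 * the comment below explains why GTT is only allowed in specific cases.
 */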
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
                                          uint64_t bo_flags)
        uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
        /*
         * if amdgpu_bo_support_uswc returns false it means that USWC mappings
         * is not supported for this board. But this mapping is required
         * to avoid hang caused by placement of scanout BO in GTT on certain
         * APUs. So force the BO placement to VRAM in case this architecture
         * will not allow USWC mappings.
         * Also, don't allow GTT domain if the BO doesn't have USWC flag set.
         */
        if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
            amdgpu_bo_support_uswc(bo_flags) &&
            amdgpu_device_asic_has_dc_support(adev->asic_type) &&
            adev->mode_info.gpu_vm_support)
                domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif
static const struct drm_format_info dcc_formats[] = {
        { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
          .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
        { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
          .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
        { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
          .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
          .has_alpha = true, },
        { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
          .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
          .has_alpha = true, },
        { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
          .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
          .has_alpha = true, },
        { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
          .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
        { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
          .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
        { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
          .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
          .has_alpha = true, },
        { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
          .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
          .has_alpha = true, },
        { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
          .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};
static const struct drm_format_info dcc_retile_formats[] = {
        { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
          .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
        { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
          .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
        { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
          .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
          .has_alpha = true, },
        { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
          .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
          .has_alpha = true, },
        { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
          .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
          .has_alpha = true, },
        { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
          .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
        { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
          .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
        { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
          .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
          .has_alpha = true, },
        { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
          .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
          .has_alpha = true, },
        { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
          .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
                   int num_formats, u32 format)
        for (i = 0; i < num_formats; i++) {
                if (formats[i].format == format)

const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier)
        if (!IS_AMD_FMT_MOD(modifier))

        if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
                return lookup_format_info(dcc_retile_formats,
                                          ARRAY_SIZE(dcc_retile_formats),

        if (AMD_FMT_MOD_GET(DCC, modifier))
                return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),

        /* returning NULL will cause the default format structs to be used. */
/*
 * Tries to extract the renderable DCC offset from the opaque metadata attached
 * to the buffer.
 */
static int
extract_render_dcc_offset(struct amdgpu_device *adev,
                          struct drm_gem_object *obj,
                          uint64_t *offset)
        struct amdgpu_bo *rbo;

        uint32_t metadata[10]; /* Something that fits a descriptor + header. */

        rbo = gem_to_amdgpu_bo(obj);
        r = amdgpu_bo_reserve(rbo, false);

        /* Don't show error message when returning -ERESTARTSYS */
        if (r != -ERESTARTSYS)
                DRM_ERROR("Unable to reserve buffer: %d\n", r);

        r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
        amdgpu_bo_unreserve(rbo);

        /*
         * The first word is the metadata version, and we need space for at least
         * the version + pci vendor+device id + 8 words for a descriptor, i.e.
         * 10 dwords or 40 bytes in total.
         */
        if (size < 40 || metadata[0] != 1)

        if (adev->family >= AMDGPU_FAMILY_NV) {
                /* resource word 6/7 META_DATA_ADDRESS{_LO} */
                *offset = ((u64)metadata[9] << 16u) |
                          ((metadata[8] & 0xFF000000u) >> 16);
        } else {
                /* resource word 5/7 META_DATA_ADDRESS */
                *offset = ((u64)metadata[9] << 8u) |
                          ((u64)(metadata[7] & 0x1FE0000u) << 23);
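/*
 * Translate the legacy per-BO tiling flags (swizzle mode, DCC offsets, ...)
 * carried in the amdgpu_framebuffer into an equivalent DRM format modifier,
 * so that getfb2 and other modifier-aware paths can describe the buffer
 * layout to userspace.
 */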
static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
        struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
        uint64_t modifier = 0;

        if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
                modifier = DRM_FORMAT_MOD_LINEAR;
        } else {
                int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
                bool has_xor = swizzle >= 16;

                int pipe_xor_bits = 0;
                int bank_xor_bits = 0;

                int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
                uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);

                switch (swizzle >> 2) {
                case 5: /* 4KiB _X */
                        block_size_bits = 12;
                case 4: /* 64 KiB _T */
                case 6: /* 64 KiB _X */
                        block_size_bits = 16;
                        /* RESERVED or VAR */

                if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
                        version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
                else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
                        version = AMD_FMT_MOD_TILE_VER_GFX10;
                else
                        version = AMD_FMT_MOD_TILE_VER_GFX9;

                switch (swizzle & 3) {
                case 0: /* Z microtiling */
                case 1: /* S microtiling */
                        version = AMD_FMT_MOD_TILE_VER_GFX9;
                        if (!has_xor && afb->base.format->cpp[0] != 4)
                                version = AMD_FMT_MOD_TILE_VER_GFX9;

                case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
                        pipe_xor_bits = min(block_size_bits - 8, pipes);
                        packers = min(block_size_bits - 8 - pipe_xor_bits,
                                      ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
                case AMD_FMT_MOD_TILE_VER_GFX10:
                        pipe_xor_bits = min(block_size_bits - 8, pipes);
                case AMD_FMT_MOD_TILE_VER_GFX9:
                        rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
                             ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
                        pipe_xor_bits = min(block_size_bits - 8, pipes +
                                            ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
                        bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
                                            ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));

                modifier = AMD_FMT_MOD |
                           AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
                           AMD_FMT_MOD_SET(TILE_VERSION, version) |
                           AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                           AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
                           AMD_FMT_MOD_SET(PACKERS, packers);
                if (dcc_offset != 0) {
                        bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
                        bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
                        const struct drm_format_info *format_info;
                        u64 render_dcc_offset;

                        /* Enable constant encode on RAVEN2 and later. */
                        bool dcc_constant_encode = adev->asic_type > CHIP_RAVEN ||
                                                   (adev->asic_type == CHIP_RAVEN &&
                                                    adev->external_rev_id >= 0x81);

                        int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
                                              dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
                                              AMD_FMT_MOD_DCC_BLOCK_256B;

                        modifier |= AMD_FMT_MOD_SET(DCC, 1) |
                                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
                                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
                                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
                                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);

                        afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
                        afb->base.pitches[1] =
                                AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;

                        /*
                         * If the userspace driver uses retiling the tiling flags do not contain
                         * info on the renderable DCC buffer. Luckily the opaque metadata contains
                         * the info so we can try to extract it. The kernel does not use this info
                         * but we should convert it to a modifier plane for getfb2, so the
                         * userspace driver that gets it doesn't have to juggle around another DCC
                         * plane.
                         */
                        if (extract_render_dcc_offset(adev, afb->base.obj[0],
                                                      &render_dcc_offset) == 0 &&
                            render_dcc_offset != 0 &&
                            render_dcc_offset != afb->base.offsets[1] &&
                            render_dcc_offset < UINT_MAX) {
                                uint32_t dcc_block_bits; /* of base surface data */

                                modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
                                afb->base.offsets[2] = render_dcc_offset;

                                if (adev->family >= AMDGPU_FAMILY_NV) {
                                        if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) &&
                                            pipes == packers && pipes > 1)

                                        dcc_block_bits = max(20, 16 + pipes + extra_pipe);
                                } else {
                                        modifier |= AMD_FMT_MOD_SET(RB, rb) |
                                                    AMD_FMT_MOD_SET(PIPE, pipes);
                                        dcc_block_bits = max(20, 18 + rb);
                                }

                                dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
                                afb->base.pitches[2] = ALIGN(afb->base.width,
                                                             1u << ((dcc_block_bits + 1) / 2));

                                format_info = amdgpu_lookup_format_info(afb->base.format->format,

                                afb->base.format = format_info;

        afb->base.modifier = modifier;
        afb->base.flags |= DRM_MODE_FB_MODIFIERS;
/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)

        /* Zero swizzle mode means linear */
        if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)

        micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
        switch (micro_tile_mode) {
        case 0: /* DISPLAY */

        drm_dbg_kms(afb->base.dev,
                    "Micro tile mode %llu not supported for scanout\n",
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
                                 unsigned int *width, unsigned int *height)
        unsigned int cpp_log2 = ilog2(cpp);
        unsigned int pixel_log2 = block_log2 - cpp_log2;
        unsigned int width_log2 = (pixel_log2 + 1) / 2;
        unsigned int height_log2 = pixel_log2 - width_log2;

        *width = 1 << width_log2;
        *height = 1 << height_log2;
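/*
 * Worked example: a 64 KiB block (block_log2 = 16) at 4 bytes per pixel
 * holds 2^14 pixels, which the split above yields as 128x128; at 2 bytes
 * per pixel the same block becomes 256x128.
 */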
static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
                                       bool pipe_aligned)
        unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);

        case AMD_FMT_MOD_TILE_VER_GFX9: {
                /*
                 * TODO: for pipe aligned we may need to check the alignment of the
                 * total size of the surface, which may need to be bigger than the
                 * natural alignment due to some HW workarounds
                 */
                return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
        case AMD_FMT_MOD_TILE_VER_GFX10:
        case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: {
                int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);

                if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
                    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)

                return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
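/*
 * Validate one plane of a framebuffer against its block layout: the pitch
 * must be a whole number of blocks and at least cover the plane width, the
 * offset must be block-aligned, and the backing GEM object must be large
 * enough for the plane size those values imply.
 */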
static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
                                       const struct drm_format_info *format,
                                       unsigned int block_width, unsigned int block_height,
                                       unsigned int block_size_log2)
        unsigned int width = rfb->base.width /
                ((plane && plane < format->num_planes) ? format->hsub : 1);
        unsigned int height = rfb->base.height /
                ((plane && plane < format->num_planes) ? format->vsub : 1);
        unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
        unsigned int block_pitch = block_width * cpp;
        unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
        unsigned int block_size = 1 << block_size_log2;

        if (rfb->base.pitches[plane] % block_pitch) {
                drm_dbg_kms(rfb->base.dev,
                            "pitch %d for plane %d is not a multiple of block pitch %d\n",
                            rfb->base.pitches[plane], plane, block_pitch);

        if (rfb->base.pitches[plane] < min_pitch) {
                drm_dbg_kms(rfb->base.dev,
                            "pitch %d for plane %d is less than minimum pitch %d\n",
                            rfb->base.pitches[plane], plane, min_pitch);

        /* Force at least natural alignment. */
        if (rfb->base.offsets[plane] % block_size) {
                drm_dbg_kms(rfb->base.dev,
                            "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
                            rfb->base.offsets[plane], plane, block_size);

        size = rfb->base.offsets[plane] +
               (uint64_t)rfb->base.pitches[plane] / block_pitch *
               block_size * DIV_ROUND_UP(height, block_height);

        if (rfb->base.obj[0]->size < size) {
                drm_dbg_kms(rfb->base.dev,
                            "BO size 0x%zx is less than 0x%llx required for plane %d\n",
                            rfb->base.obj[0]->size, size, plane);
static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
        const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
        uint64_t modifier = rfb->base.modifier;

        unsigned int i, block_width, block_height, block_size_log2;

        if (rfb->base.dev->mode_config.fb_modifiers_not_supported)

        for (i = 0; i < format_info->num_planes; ++i) {
                if (modifier == DRM_FORMAT_MOD_LINEAR) {
                        block_width = 256 / format_info->cpp[i];
                } else {
                        int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

                        switch ((swizzle & ~3) + 1) {
                                block_size_log2 = 12;
                                block_size_log2 = 16;
                                drm_dbg_kms(rfb->base.dev,
                                            "Swizzle mode with unknown block size: %d\n", swizzle);

                        get_block_dimensions(block_size_log2, format_info->cpp[i],
                                             &block_width, &block_height);

                ret = amdgpu_display_verify_plane(rfb, i, format_info,
                                                  block_width, block_height, block_size_log2);

        if (AMD_FMT_MOD_GET(DCC, modifier)) {
                if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
                        block_size_log2 = get_dcc_block_size(modifier, false, false);
                        get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
                                             &block_width, &block_height);
                        ret = amdgpu_display_verify_plane(rfb, i, format_info,
                                                          block_width, block_height,

                        block_size_log2 = get_dcc_block_size(modifier, true, true);
                } else {
                        bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);

                        block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
                }
                get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
                                     &block_width, &block_height);
                ret = amdgpu_display_verify_plane(rfb, i, format_info,
                                                  block_width, block_height, block_size_log2);
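/*
 * Look up the information the display code needs about the BO backing a
 * framebuffer: its legacy tiling flags and whether it is a TMZ (encrypted)
 * surface.
 */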
static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
                                      uint64_t *tiling_flags, bool *tmz_surface)
        struct amdgpu_bo *rbo;

        *tmz_surface = false;

        rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
        r = amdgpu_bo_reserve(rbo, false);

        /* Don't show error message when returning -ERESTARTSYS */
        if (r != -ERESTARTSYS)
                DRM_ERROR("Unable to reserve buffer: %d\n", r);

        amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

        *tmz_surface = amdgpu_bo_encrypted(rbo);

        amdgpu_bo_unreserve(rbo);
static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
                                                 struct amdgpu_framebuffer *rfb,
                                                 struct drm_file *file_priv,
                                                 const struct drm_mode_fb_cmd2 *mode_cmd,
                                                 struct drm_gem_object *obj)
        rfb->base.obj[0] = obj;
        drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
        /* Verify that the modifier is supported. */
        if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
                drm_dbg_kms(dev,
                            "unsupported pixel format %p4cc / modifier 0x%llx\n",
                            &mode_cmd->pixel_format, mode_cmd->modifier[0]);

        ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);

        if (drm_drv_uses_atomic_modeset(dev))
                ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
        else
                ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);

        drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
        rfb->base.obj[0] = NULL;
static int amdgpu_display_framebuffer_init(struct drm_device *dev,
                                           struct amdgpu_framebuffer *rfb,
                                           const struct drm_mode_fb_cmd2 *mode_cmd,
                                           struct drm_gem_object *obj)
        struct amdgpu_device *adev = drm_to_adev(dev);

        /*
         * This needs to happen before modifier conversion as that might change
         * the number of planes.
         */
        for (i = 1; i < rfb->base.format->num_planes; ++i) {
                if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
                        drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
                                    i, mode_cmd->handles[0], mode_cmd->handles[i]);

        ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);

        if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) {
                drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
                              "GFX9+ requires FB check based on format modifier\n");
                ret = check_tiling_flags_gfx6(rfb);

        if (!dev->mode_config.fb_modifiers_not_supported &&
            !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
                ret = convert_tiling_flags_to_modifier(rfb);
                        drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",

        ret = amdgpu_display_verify_sizes(rfb);

        for (i = 0; i < rfb->base.format->num_planes; ++i) {
                drm_gem_object_get(rfb->base.obj[0]);
                rfb->base.obj[i] = rfb->base.obj[0];
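/*
 * .fb_create hook: look up the GEM object behind the request, reject
 * imported dma-bufs that cannot be scanned out from GTT, and wrap the
 * object in an amdgpu_framebuffer after verifying format, modifier and
 * sizes.
 */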
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
                                       struct drm_file *file_priv,
                                       const struct drm_mode_fb_cmd2 *mode_cmd)
        struct amdgpu_framebuffer *amdgpu_fb;
        struct drm_gem_object *obj;
        struct amdgpu_bo *bo;

        obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
                drm_dbg_kms(dev, "No GEM object associated to handle 0x%08X, "
                            "can't create framebuffer\n", mode_cmd->handles[0]);
                return ERR_PTR(-ENOENT);

        /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
        bo = gem_to_amdgpu_bo(obj);
        domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
        if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
                drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
                drm_gem_object_put(obj);
                return ERR_PTR(-EINVAL);

        amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
        if (amdgpu_fb == NULL) {
                drm_gem_object_put(obj);
                return ERR_PTR(-ENOMEM);

        ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
                drm_gem_object_put(obj);
                return ERR_PTR(ret);

        drm_gem_object_put(obj);
        return &amdgpu_fb->base;
const struct drm_mode_config_funcs amdgpu_mode_funcs = {
        .fb_create = amdgpu_display_user_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{       { UNDERSCAN_OFF, "off" },
        { UNDERSCAN_ON, "on" },
        { UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{       { AMDGPU_AUDIO_DISABLE, "off" },
        { AMDGPU_AUDIO_ENABLE, "on" },
        { AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{       { AMDGPU_FMT_DITHER_DISABLE, "off" },
        { AMDGPU_FMT_DITHER_ENABLE, "on" },
};
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
        adev->mode_info.coherent_mode_property =
                drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
        if (!adev->mode_info.coherent_mode_property)

        adev->mode_info.load_detect_property =
                drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
        if (!adev->mode_info.load_detect_property)

        drm_mode_create_scaling_mode_property(adev_to_drm(adev));

        sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
        adev->mode_info.underscan_property =
                drm_property_create_enum(adev_to_drm(adev), 0,
                                         amdgpu_underscan_enum_list, sz);

        adev->mode_info.underscan_hborder_property =
                drm_property_create_range(adev_to_drm(adev), 0,
                                          "underscan hborder", 0, 128);
        if (!adev->mode_info.underscan_hborder_property)

        adev->mode_info.underscan_vborder_property =
                drm_property_create_range(adev_to_drm(adev), 0,
                                          "underscan vborder", 0, 128);
        if (!adev->mode_info.underscan_vborder_property)

        sz = ARRAY_SIZE(amdgpu_audio_enum_list);
        adev->mode_info.audio_property =
                drm_property_create_enum(adev_to_drm(adev), 0,
                                         amdgpu_audio_enum_list, sz);

        sz = ARRAY_SIZE(amdgpu_dither_enum_list);
        adev->mode_info.dither_property =
                drm_property_create_enum(adev_to_drm(adev), 0,
                                         amdgpu_dither_enum_list, sz);

        if (amdgpu_device_has_dc_support(adev)) {
                adev->mode_info.abm_level_property =
                        drm_property_create_range(adev_to_drm(adev), 0,
                if (!adev->mode_info.abm_level_property)
void amdgpu_display_update_priority(struct amdgpu_device *adev)
        /* adjustment options for the display watermarks */
        if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
                adev->mode_info.disp_priority = 0;
        else
                adev->mode_info.disp_priority = amdgpu_disp_priority;

static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
        /* try and guess if this is a tv or a monitor */
        if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
            (mode->vdisplay == 576) || /* 576p */
            (mode->vdisplay == 720) || /* 720p */
            (mode->vdisplay == 1080)) /* 1080p */
bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
                                            const struct drm_display_mode *mode,
                                            struct drm_display_mode *adjusted_mode)
        struct drm_device *dev = crtc->dev;
        struct drm_encoder *encoder;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_encoder *amdgpu_encoder;
        struct drm_connector *connector;
        u32 src_v = 1, dst_v = 1;
        u32 src_h = 1, dst_h = 1;

        amdgpu_crtc->h_border = 0;
        amdgpu_crtc->v_border = 0;

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                if (encoder->crtc != crtc)

                amdgpu_encoder = to_amdgpu_encoder(encoder);
                connector = amdgpu_get_connector_for_encoder(encoder);

                if (amdgpu_encoder->rmx_type == RMX_OFF)
                        amdgpu_crtc->rmx_type = RMX_OFF;
                else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
                         mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
                        amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
                else
                        amdgpu_crtc->rmx_type = RMX_OFF;
                /* copy native mode */
                memcpy(&amdgpu_crtc->native_mode,
                       &amdgpu_encoder->native_mode,
                       sizeof(struct drm_display_mode));
                src_v = crtc->mode.vdisplay;
                dst_v = amdgpu_crtc->native_mode.vdisplay;
                src_h = crtc->mode.hdisplay;
                dst_h = amdgpu_crtc->native_mode.hdisplay;

                /* fix up for overscan on hdmi */
                if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
                    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
                     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
                      connector->display_info.is_hdmi &&
                      amdgpu_display_is_hdtv_mode(mode)))) {
                        if (amdgpu_encoder->underscan_hborder != 0)
                                amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
                        else
                                amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
                        if (amdgpu_encoder->underscan_vborder != 0)
                                amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
                        else
                                amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
                        amdgpu_crtc->rmx_type = RMX_FULL;
                        src_v = crtc->mode.vdisplay;
                        dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
                        src_h = crtc->mode.hdisplay;
                        dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);

        if (amdgpu_crtc->rmx_type != RMX_OFF) {
                a.full = dfixed_const(src_v);
                b.full = dfixed_const(dst_v);
                amdgpu_crtc->vsc.full = dfixed_div(a, b);
                a.full = dfixed_const(src_h);
                b.full = dfixed_const(dst_h);
                amdgpu_crtc->hsc.full = dfixed_div(a, b);
        } else {
                amdgpu_crtc->vsc.full = dfixed_const(1);
                amdgpu_crtc->hsc.full = dfixed_const(1);
/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when query happened.
 *
 * \param dev Device to query.
 * \param pipe Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 *              For driver internal use only also supports these flags:
 *
 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
 *              of a fudged earlier start of vblank.
 *
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_INVBL = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 */
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
                        unsigned int pipe, unsigned int flags, int *vpos,
                        int *hpos, ktime_t *stime, ktime_t *etime,
                        const struct drm_display_mode *mode)
        u32 vbl = 0, position = 0;
        int vbl_start, vbl_end, vtotal, ret = 0;

        struct amdgpu_device *adev = drm_to_adev(dev);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
                *stime = ktime_get();

        if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
                ret |= DRM_SCANOUTPOS_VALID;

        /* Get optional system timestamp after query. */
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        /* Decode into vertical and horizontal scanout position. */
        *vpos = position & 0x1fff;
        *hpos = (position >> 16) & 0x1fff;

        /* Valid vblank area boundaries from gpu retrieved? */
                ret |= DRM_SCANOUTPOS_ACCURATE;
                vbl_start = vbl & 0x1fff;
                vbl_end = (vbl >> 16) & 0x1fff;

                /* No: Fake something reasonable which gives at least ok results. */
                vbl_start = mode->crtc_vdisplay;

        /* Called from driver internal vblank counter query code? */
        if (flags & GET_DISTANCE_TO_VBLANKSTART) {
                /* Caller wants distance from real vbl_start in *hpos */
                *hpos = *vpos - vbl_start;

        /* Fudge vblank to start a few scanlines earlier to handle the
         * problem that vblank irqs fire a few scanlines before start
         * of vblank. Some driver internal callers need the true vblank
         * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
         *
         * The cause of the "early" vblank irq is that the irq is triggered
         * by the line buffer logic when the line buffer read position enters
         * the vblank, whereas our crtc scanout position naturally lags the
         * line buffer read position.
         */
        if (!(flags & USE_REAL_VBLANKSTART))
                vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

        /* Test scanout position against vblank region. */
        if ((*vpos < vbl_start) && (*vpos >= vbl_end))

                ret |= DRM_SCANOUTPOS_IN_VBLANK;

        /* Called from driver internal vblank counter query code? */
        if (flags & GET_DISTANCE_TO_VBLANKSTART) {
                /* Caller wants distance from fudged earlier vbl_start */

        /* Check if inside vblank area and apply corrective offsets:
         * vpos will then be >=0 in video scanout area, but negative
         * within vblank area, counting down the number of lines until
         * the start of vblank.
         */

        /* Inside "upper part" of vblank area? Apply corrective offset if so: */
        if (in_vbl && (*vpos >= vbl_start)) {
                vtotal = mode->crtc_vtotal;

                /* With variable refresh rate displays the vpos can exceed
                 * the vtotal value. Clamp to 0 to return -vbl_end instead
                 * of guessing the remaining number of lines until scanout.
                 */
                *vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;

        /* Correct for shifted end of vbl at vbl_end. */
        *vpos = *vpos - vbl_end;
int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
        if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
                return AMDGPU_CRTC_IRQ_NONE;

                return AMDGPU_CRTC_IRQ_VBLANK1;
                return AMDGPU_CRTC_IRQ_VBLANK2;
                return AMDGPU_CRTC_IRQ_VBLANK3;
                return AMDGPU_CRTC_IRQ_VBLANK4;
                return AMDGPU_CRTC_IRQ_VBLANK5;
                return AMDGPU_CRTC_IRQ_VBLANK6;
                return AMDGPU_CRTC_IRQ_NONE;
bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
                                      bool in_vblank_irq, int *vpos,
                                      int *hpos, ktime_t *stime, ktime_t *etime,
                                      const struct drm_display_mode *mode)
        struct drm_device *dev = crtc->dev;
        unsigned int pipe = crtc->index;

        return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
                                                  stime, etime, mode);
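/*
 * Returns true when @robj is the BO backing the fbdev emulation framebuffer,
 * so the suspend path below knows it must not unpin that buffer.
 */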
static bool
amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_fb_helper *fb_helper = dev->fb_helper;

        if (!fb_helper || !fb_helper->buffer)

        if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;

        /* turn off display hw */
        drm_modeset_lock_all(dev);
        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter)
                drm_helper_connector_dpms(connector,
        drm_connector_list_iter_end(&iter);
        drm_modeset_unlock_all(dev);
        /* unpin the front buffers and cursors */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
                struct drm_framebuffer *fb = crtc->primary->fb;
                struct amdgpu_bo *robj;

                if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
                        struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

                        r = amdgpu_bo_reserve(aobj, true);
                                amdgpu_bo_unpin(aobj);
                                amdgpu_bo_unreserve(aobj);

                if (fb == NULL || fb->obj[0] == NULL) {

                robj = gem_to_amdgpu_bo(fb->obj[0]);
                if (!amdgpu_display_robj_is_fb(adev, robj)) {
                        r = amdgpu_bo_reserve(robj, true);
                                amdgpu_bo_unpin(robj);
                                amdgpu_bo_unreserve(robj);
int amdgpu_display_resume_helper(struct amdgpu_device *adev)
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_crtc *crtc;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
                        struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

                        r = amdgpu_bo_reserve(aobj, true);
                                r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
                                        dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
                                amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
                                amdgpu_bo_unreserve(aobj);

        drm_helper_resume_force_mode(dev);

        /* turn on display hw */
        drm_modeset_lock_all(dev);

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter)
                drm_helper_connector_dpms(connector,
        drm_connector_list_iter_end(&iter);

        drm_modeset_unlock_all(dev);