drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include <asm/div64.h>

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

static int amdgpu_display_framebuffer_init(struct drm_device *dev,
					   struct amdgpu_framebuffer *rfb,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   struct drm_gem_object *obj);

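/*
 * Fence callback: drop the fence reference and kick the flip worker, so
 * the actual mmio flip does not run in fence-signalling context.
 */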
static void amdgpu_display_flip_callback(struct dma_fence *f,
					 struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}

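/*
 * Consume one fence out of work->shared. Returns true if a callback was
 * installed (the worker will be rescheduled when the fence signals),
 * false if the fence was NULL or has already signalled.
 */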
static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
					     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb,
				    amdgpu_display_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}

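/*
 * Deferred flip worker: waits for all shared fences, then reschedules
 * itself in 1 ms steps until the vblank before the target one has
 * passed, and finally programs the flip registers under the event lock.
 */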
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned int i;
	int vpos, hpos;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	drm_dbg_vbl(adev_to_drm(adev),
		    "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
		    amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}

/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(work->old_abo);
		amdgpu_bo_unreserve(work->old_abo);
	} else {
		DRM_ERROR("failed to reserve buffer after flip\n");
	}

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}

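/*
 * Legacy page-flip entry point: pins the new buffer, collects its write
 * fences and queues amdgpu_display_flip_work_func() to do the actual
 * register programming once the target vblank is reached.
 */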
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags, uint32_t target,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	if (!adev->enable_virtual_display) {
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
				&work->shared_count,
				&work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(crtc);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		amdgpu_bo_unpin(new_abo);

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}

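/*
 * Legacy (non-atomic) modeset entry point: wraps
 * drm_crtc_helper_set_config() with runtime-PM bookkeeping so a display
 * power reference is held while any CRTC is enabled and dropped again
 * once the last CRTC is turned off.
 */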
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = drm_to_adev(dev);
	/*
	 * if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/*
	 * if we have no active crtcs, then drop the power ref
	 * we got before
	 */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

out:
	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

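/* Dump the connector, DDC and encoder routing to the kernel log. */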
void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector_list_iter iter;
	uint32_t devices;
	int i = 0;

	drm_connector_list_iter_begin(dev, &iter);
	DRM_INFO("AMDGPU Display Connectors\n");
	drm_for_each_connector_iter(connector, &iter) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
	drm_connector_list_iter_end(&iter);
}

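/*
 * Probe for a sink by reading the first EDID bytes over DDC (or over
 * the DP AUX channel when @use_aux is set); succeeds only if the
 * transfer works and the data starts with a plausible EDID header.
 */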
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux)
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	else
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;
	/*
	 * Probe also for valid EDID header
	 * EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid as
	 * drm_edid_block_valid() can fix the last 2 bytes
	 */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this connector */
		return false;
	}
	return true;
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
	.dirty = drm_atomic_helper_dirtyfb,
};

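/*
 * Return the memory domains a buffer must be placed in for scanout:
 * VRAM always qualifies, GTT only on DC-enabled configurations with
 * GPU VM scanout support and only for USWC-mapped BOs.
 */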
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
	/*
	 * if amdgpu_bo_support_uswc returns false it means that USWC mappings
	 * are not supported for this board. But this mapping is required
	 * to avoid a hang caused by placement of scanout BO in GTT on certain
	 * APUs. So force the BO placement to VRAM in case this architecture
	 * will not allow USWC mappings.
	 * Also, don't allow GTT domain if the BO doesn't have USWC flag set.
	 */
	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
	    amdgpu_bo_support_uswc(bo_flags) &&
	    amdgpu_device_asic_has_dc_support(adev->asic_type) &&
	    adev->mode_info.gpu_vm_support)
		domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif

	return domain;
}

static const struct drm_format_info dcc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
	  .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info dcc_retile_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
	  .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

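/*
 * DCC modifiers carry extra metadata planes, so they need the special
 * format_info structs above: two planes for plain DCC, three when the
 * DCC data is retiled.
 */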
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier)
{
	if (!IS_AMD_FMT_MOD(modifier))
		return NULL;

	if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
		return lookup_format_info(dcc_retile_formats,
					  ARRAY_SIZE(dcc_retile_formats),
					  format);

	if (AMD_FMT_MOD_GET(DCC, modifier))
		return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
					  format);

	/* returning NULL will cause the default format structs to be used. */
	return NULL;
}

/*
 * Tries to extract the renderable DCC offset from the opaque metadata attached
 * to the buffer.
 */
static int
extract_render_dcc_offset(struct amdgpu_device *adev,
			  struct drm_gem_object *obj,
			  uint64_t *offset)
{
	struct amdgpu_bo *rbo;
	int r = 0;
	uint32_t metadata[10]; /* Something that fits a descriptor + header. */
	uint32_t size;

	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
	amdgpu_bo_unreserve(rbo);

	if (r)
		return r;

	/*
	 * The first word is the metadata version, and we need space for at least
	 * the version + pci vendor+device id + 8 words for a descriptor.
	 */
	if (size < 40 || metadata[0] != 1)
		return -EINVAL;

	if (adev->family >= AMDGPU_FAMILY_NV) {
		/* resource word 6/7 META_DATA_ADDRESS{_LO} */
		*offset = ((u64)metadata[9] << 16u) |
			  ((metadata[8] & 0xFF000000u) >> 16);
	} else {
		/* resource word 5/7 META_DATA_ADDRESS */
		*offset = ((u64)metadata[9] << 8u) |
			  ((u64)(metadata[7] & 0x1FE0000u) << 23);
	}

	return 0;
}

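/*
 * Reconstruct a format modifier from the legacy GFX9+ tiling flags, so
 * that buffers from userspace which does not speak modifiers still get
 * a well-formed modifier (and DCC planes) reported through getfb2.
 */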
static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
{
	struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
	uint64_t modifier = 0;

	if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
		modifier = DRM_FORMAT_MOD_LINEAR;
	} else {
		int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
		bool has_xor = swizzle >= 16;
		int block_size_bits;
		int version;
		int pipe_xor_bits = 0;
		int bank_xor_bits = 0;
		int packers = 0;
		int rb = 0;
		int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
		uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);

		switch (swizzle >> 2) {
		case 0: /* 256B */
			block_size_bits = 8;
			break;
		case 1: /* 4KiB */
		case 5: /* 4KiB _X */
			block_size_bits = 12;
			break;
		case 2: /* 64KiB */
		case 4: /* 64KiB _T */
		case 6: /* 64KiB _X */
			block_size_bits = 16;
			break;
		default:
			/* RESERVED or VAR */
			return -EINVAL;
		}

		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
		else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10;
		else
			version = AMD_FMT_MOD_TILE_VER_GFX9;

		switch (swizzle & 3) {
		case 0: /* Z microtiling */
			return -EINVAL;
		case 1: /* S microtiling */
			if (!has_xor)
				version = AMD_FMT_MOD_TILE_VER_GFX9;
			break;
		case 2: /* D microtiling */
			if (!has_xor && afb->base.format->cpp[0] != 4)
				version = AMD_FMT_MOD_TILE_VER_GFX9;
			break;
		case 3: /* R microtiling */
			break;
		}

		if (has_xor) {
			switch (version) {
			case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = min(block_size_bits - 8 - pipe_xor_bits,
					      ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX9:
				rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
				     ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
				pipe_xor_bits = min(block_size_bits - 8, pipes +
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
				bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
				break;
			}
		}

		modifier = AMD_FMT_MOD |
			   AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
			   AMD_FMT_MOD_SET(TILE_VERSION, version) |
			   AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			   AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			   AMD_FMT_MOD_SET(PACKERS, packers);

		if (dcc_offset != 0) {
			bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
			bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
			const struct drm_format_info *format_info;
			u64 render_dcc_offset;

			/* Enable constant encode on RAVEN2 and later. */
			bool dcc_constant_encode = adev->asic_type > CHIP_RAVEN ||
						   (adev->asic_type == CHIP_RAVEN &&
						    adev->external_rev_id >= 0x81);

			int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
					      dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
					      AMD_FMT_MOD_DCC_BLOCK_256B;

			modifier |= AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);

			afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
			afb->base.pitches[1] =
				AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;

			/*
			 * If the userspace driver uses retiling the tiling flags do not contain
			 * info on the renderable DCC buffer. Luckily the opaque metadata contains
			 * the info so we can try to extract it. The kernel does not use this info
			 * but we should convert it to a modifier plane for getfb2, so the
			 * userspace driver that gets it doesn't have to juggle around another DCC
			 * plane internally.
			 */
			if (extract_render_dcc_offset(adev, afb->base.obj[0],
						      &render_dcc_offset) == 0 &&
			    render_dcc_offset != 0 &&
			    render_dcc_offset != afb->base.offsets[1] &&
			    render_dcc_offset < UINT_MAX) {
				uint32_t dcc_block_bits;  /* of base surface data */

				modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
				afb->base.offsets[2] = render_dcc_offset;

				if (adev->family >= AMDGPU_FAMILY_NV) {
					int extra_pipe = 0;

					if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) &&
					    pipes == packers && pipes > 1)
						extra_pipe = 1;

					dcc_block_bits = max(20, 16 + pipes + extra_pipe);
				} else {
					modifier |= AMD_FMT_MOD_SET(RB, rb) |
						    AMD_FMT_MOD_SET(PIPE, pipes);
					dcc_block_bits = max(20, 18 + rb);
				}

				dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
				afb->base.pitches[2] = ALIGN(afb->base.width,
							     1u << ((dcc_block_bits + 1) / 2));
			}
			format_info = amdgpu_lookup_format_info(afb->base.format->format,
								modifier);
			if (!format_info)
				return -EINVAL;

			afb->base.format = format_info;
		}
	}

	afb->base.modifier = modifier;
	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
	return 0;
}

/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
{
	u64 micro_tile_mode;

	/* Zero swizzle mode means linear */
	if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
		return 0;

	micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
	switch (micro_tile_mode) {
	case 0: /* DISPLAY */
	case 3: /* RENDER */
		return 0;
	default:
		drm_dbg_kms(afb->base.dev,
			    "Micro tile mode %llu not supported for scanout\n",
			    micro_tile_mode);
		return -EINVAL;
	}
}

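/*
 * Convert a block size (log2, in bytes) into its dimensions in pixels.
 * Blocks are kept as square as possible, with the width taking the
 * extra bit when the pixel count log2 is odd.
 */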
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
				 unsigned int *width, unsigned int *height)
{
	unsigned int cpp_log2 = ilog2(cpp);
	unsigned int pixel_log2 = block_log2 - cpp_log2;
	unsigned int width_log2 = (pixel_log2 + 1) / 2;
	unsigned int height_log2 = pixel_log2 - width_log2;

	*width = 1 << width_log2;
	*height = 1 << height_log2;
}

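/*
 * Return log2 of the byte size of one DCC metadata block for the given
 * modifier, accounting for RB/pipe alignment on the respective tiling
 * versions (callers add 8 since each DCC byte covers 256 base bytes).
 */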
static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
				       bool pipe_aligned)
{
	unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);

	switch (ver) {
	case AMD_FMT_MOD_TILE_VER_GFX9: {
		/*
		 * TODO: for pipe aligned we may need to check the alignment of the
		 * total size of the surface, which may need to be bigger than the
		 * natural alignment due to some HW workarounds
		 */
		return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
	}
	case AMD_FMT_MOD_TILE_VER_GFX10:
	case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: {
		int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);

		if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
		    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
			++pipes_log2;

		return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
	}
	default:
		return 0;
	}
}

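/*
 * Check one FB plane against the block geometry implied by the
 * modifier: the pitch must be a block-aligned cover of the plane width,
 * the offset must be naturally aligned, and the BO must be big enough
 * to hold the whole plane.
 */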
static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
				       const struct drm_format_info *format,
				       unsigned int block_width, unsigned int block_height,
				       unsigned int block_size_log2)
{
	unsigned int width = rfb->base.width /
		((plane && plane < format->num_planes) ? format->hsub : 1);
	unsigned int height = rfb->base.height /
		((plane && plane < format->num_planes) ? format->vsub : 1);
	unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
	unsigned int block_pitch = block_width * cpp;
	unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
	unsigned int block_size = 1 << block_size_log2;
	uint64_t size;

	if (rfb->base.pitches[plane] % block_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is not a multiple of block pitch %d\n",
			    rfb->base.pitches[plane], plane, block_pitch);
		return -EINVAL;
	}
	if (rfb->base.pitches[plane] < min_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is less than minimum pitch %d\n",
			    rfb->base.pitches[plane], plane, min_pitch);
		return -EINVAL;
	}

	/* Force at least natural alignment. */
	if (rfb->base.offsets[plane] % block_size) {
		drm_dbg_kms(rfb->base.dev,
			    "offset 0x%x for plane %d is not a multiple of block size 0x%x\n",
			    rfb->base.offsets[plane], plane, block_size);
		return -EINVAL;
	}

	size = rfb->base.offsets[plane] +
		(uint64_t)rfb->base.pitches[plane] / block_pitch *
		block_size * DIV_ROUND_UP(height, block_height);

	if (rfb->base.obj[0]->size < size) {
		drm_dbg_kms(rfb->base.dev,
			    "BO size 0x%zx is less than 0x%llx required for plane %d\n",
			    rfb->base.obj[0]->size, size, plane);
		return -EINVAL;
	}

	return 0;
}

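/*
 * Validate all planes of the FB, including the implicit DCC metadata
 * planes implied by the modifier, against the pitches, offsets and BO
 * size supplied by userspace.
 */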
static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
{
	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
	uint64_t modifier = rfb->base.modifier;
	int ret;
	unsigned int i, block_width, block_height, block_size_log2;

	if (rfb->base.dev->mode_config.fb_modifiers_not_supported)
		return 0;

	for (i = 0; i < format_info->num_planes; ++i) {
		if (modifier == DRM_FORMAT_MOD_LINEAR) {
			block_width = 256 / format_info->cpp[i];
			block_height = 1;
			block_size_log2 = 8;
		} else {
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			switch ((swizzle & ~3) + 1) {
			case DC_SW_256B_S:
				block_size_log2 = 8;
				break;
			case DC_SW_4KB_S:
			case DC_SW_4KB_S_X:
				block_size_log2 = 12;
				break;
			case DC_SW_64KB_S:
			case DC_SW_64KB_S_T:
			case DC_SW_64KB_S_X:
				block_size_log2 = 16;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		}

		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	if (AMD_FMT_MOD_GET(DCC, modifier)) {
		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
			block_size_log2 = get_dcc_block_size(modifier, false, false);
			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
					     &block_width, &block_height);
			ret = amdgpu_display_verify_plane(rfb, i, format_info,
							  block_width, block_height,
							  block_size_log2);
			if (ret)
				return ret;

			++i;
			block_size_log2 = get_dcc_block_size(modifier, true, true);
		} else {
			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);

			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
		}
		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
				     &block_width, &block_height);
		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	return 0;
}

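/* Read the tiling flags and TMZ (encryption) state from the FB's BO. */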
static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
				      uint64_t *tiling_flags, bool *tmz_surface)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!amdgpu_fb) {
		*tiling_flags = 0;
		*tmz_surface = false;
		return 0;
	}

	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	if (tmz_surface)
		*tmz_surface = amdgpu_bo_encrypted(rbo);

	amdgpu_bo_unreserve(rbo);

	return r;
}

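/*
 * Fill in and register an amdgpu_framebuffer: checks that the
 * format/modifier combination is supported by at least one plane before
 * handing off to amdgpu_display_framebuffer_init().
 */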
static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
						 struct amdgpu_framebuffer *rfb,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd,
						 struct drm_gem_object *obj)
{
	int ret;

	rfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
	/* Verify that the modifier is supported. */
	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg_kms(dev,
			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);

		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret)
		goto err;

	return 0;
err:
	drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
	rfb->base.obj[0] = NULL;
	return ret;
}

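/*
 * Core FB initialization: enforces the single-BO restriction, converts
 * legacy tiling flags to a modifier when needed and verifies the plane
 * layout against the BO.
 */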
static int amdgpu_display_framebuffer_init(struct drm_device *dev,
					   struct amdgpu_framebuffer *rfb,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   struct drm_gem_object *obj)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int ret, i;

	/*
	 * This needs to happen before modifier conversion as that might change
	 * the number of planes.
	 */
	for (i = 1; i < rfb->base.format->num_planes; ++i) {
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
				    i, mode_cmd->handles[0], mode_cmd->handles[i]);
			return -EINVAL;
		}
	}

	ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
	if (ret)
		return ret;

	if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) {
		drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
			      "GFX9+ requires FB check based on format modifier\n");
		ret = check_tiling_flags_gfx6(rfb);
		if (ret)
			return ret;
	}

	if (!dev->mode_config.fb_modifiers_not_supported &&
	    !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
		ret = convert_tiling_flags_to_modifier(rfb);
		if (ret) {
			drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier\n",
				    rfb->tiling_flags);
			return ret;
		}
	}

	ret = amdgpu_display_verify_sizes(rfb);
	if (ret)
		return ret;

	for (i = 0; i < rfb->base.format->num_planes; ++i) {
		drm_gem_object_get(rfb->base.obj[0]);
		rfb->base.obj[i] = rfb->base.obj[0];
	}

	return 0;
}

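/*
 * .fb_create hook: looks up the GEM handle, rejects imported dma-bufs
 * that could never be scanned out directly, and wraps the BO in an
 * amdgpu_framebuffer.
 */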
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
				       struct drm_file *file_priv,
				       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint32_t domains;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		drm_dbg_kms(dev,
			    "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
			    mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
	bo = gem_to_amdgpu_bo(obj);
	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
		drm_gem_object_put(obj);
		return ERR_PTR(-EINVAL);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_put(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
						    mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_put(obj);
		return ERR_PTR(ret);
	}

	drm_gem_object_put(obj);
	return &amdgpu_fb->base;
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

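/* Create the driver-private KMS properties (underscan, audio, dither, ...). */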
1208 int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
1209 {
1210         int sz;
1211
1212         adev->mode_info.coherent_mode_property =
1213                 drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
1214         if (!adev->mode_info.coherent_mode_property)
1215                 return -ENOMEM;
1216
1217         adev->mode_info.load_detect_property =
1218                 drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
1219         if (!adev->mode_info.load_detect_property)
1220                 return -ENOMEM;
1221
1222         drm_mode_create_scaling_mode_property(adev_to_drm(adev));
1223
1224         sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
1225         adev->mode_info.underscan_property =
1226                 drm_property_create_enum(adev_to_drm(adev), 0,
1227                                          "underscan",
1228                                          amdgpu_underscan_enum_list, sz);
1229
1230         adev->mode_info.underscan_hborder_property =
1231                 drm_property_create_range(adev_to_drm(adev), 0,
1232                                           "underscan hborder", 0, 128);
1233         if (!adev->mode_info.underscan_hborder_property)
1234                 return -ENOMEM;
1235
1236         adev->mode_info.underscan_vborder_property =
1237                 drm_property_create_range(adev_to_drm(adev), 0,
1238                                           "underscan vborder", 0, 128);
1239         if (!adev->mode_info.underscan_vborder_property)
1240                 return -ENOMEM;
1241
1242         sz = ARRAY_SIZE(amdgpu_audio_enum_list);
1243         adev->mode_info.audio_property =
1244                 drm_property_create_enum(adev_to_drm(adev), 0,
1245                                          "audio",
1246                                          amdgpu_audio_enum_list, sz);
1247
1248         sz = ARRAY_SIZE(amdgpu_dither_enum_list);
1249         adev->mode_info.dither_property =
1250                 drm_property_create_enum(adev_to_drm(adev), 0,
1251                                          "dither",
1252                                          amdgpu_dither_enum_list, sz);
1253
1254         if (amdgpu_device_has_dc_support(adev)) {
1255                 adev->mode_info.abm_level_property =
1256                         drm_property_create_range(adev_to_drm(adev), 0,
1257                                                   "abm level", 0, 4);
1258                 if (!adev->mode_info.abm_level_property)
1259                         return -ENOMEM;
1260         }
1261
1262         return 0;
1263 }
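
/*
 * The properties created above are only definitions; each connector still
 * has to opt in to them.  A sketch of how a connector might attach one,
 * using the standard DRM object-property API (the real per-connector setup
 * lives in the connector/encoder code):
 *
 *	drm_object_attach_property(&connector->base,
 *				   adev->mode_info.underscan_property,
 *				   UNDERSCAN_OFF);
 *	drm_object_attach_property(&connector->base,
 *				   adev->mode_info.underscan_hborder_property, 0);
 */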

void amdgpu_display_update_priority(struct amdgpu_device *adev)
{
	/*
	 * Adjustment options for the display watermarks: the disp_priority
	 * module parameter selects 0 = auto, 1 = normal, 2 = high; anything
	 * out of range falls back to auto.
	 */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}

static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}
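
/*
 * Note that only the 480p entry also checks the width; 576-, 720- and
 * 1080-line modes are classified as HDTV regardless of hdisplay, so e.g. a
 * 1920x1080 monitor mode also counts as HDTV for the underscan
 * auto-detection below.
 */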

bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					const struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      connector && connector->display_info.is_hdmi &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}
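
/*
 * vsc/hsc above are 20.12 fixed-point (fixed20_12) ratios of source size
 * to destination size.  A worked example (the border value is assumed,
 * e.g. set via the "underscan vborder" property):
 *
 *	src_v = 1080, underscan_vborder = 28
 *	dst_v = 1080 - 2 * 28 = 1024
 *	vsc   = 1080 / 1024 = 1.0546875
 *	vsc.full = (1080 * 4096) / 1024 = 4320 = 0x10e0
 *
 * i.e. the scaler compresses the image by roughly 5.5% vertically so that
 * the full frame fits inside the underscan borders.
 */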

/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when the query happened.
 *
 * \param dev Device to query.
 * \param pipe Crtc to query.
 * \param flags Flags from caller (0 for a plain query).
 *              For driver-internal use it also supports these flags:
 *
 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
 *              of a fudged earlier start of vblank.
 *
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, ORed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that the returned position may be offset from the real
 * scanout position by a small, constant, but unknown number of scanlines.
 */
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
			unsigned int pipe, unsigned int flags, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = drm_to_adev(dev);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;

		/* With variable refresh rate displays the vpos can exceed
		 * the vtotal value. Clamp to 0 to return -vbl_end instead
		 * of guessing the remaining number of lines until scanout.
		 */
		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}
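
/*
 * A sketch of how a caller can interpret the result, modelled on the page
 * flip path elsewhere in this file (variable names here are illustrative):
 *
 *	int vpos, hpos;
 *	int stat = amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0,
 *						      &vpos, &hpos,
 *						      NULL, NULL,
 *						      &crtc->hwmode);
 *
 *	if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
 *	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK))
 *		... the CRTC is known to be inside vblank right now ...
 */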

int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}
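
/*
 * Typical use: translate a CRTC index into the interrupt source type before
 * arming the vblank interrupt.  A hedged sketch of that pattern (the real
 * callers live in the per-ASIC DCE/IRQ code):
 *
 *	int type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc_id);
 *
 *	if (type != AMDGPU_CRTC_IRQ_NONE)
 *		amdgpu_irq_get(adev, &adev->crtc_irq, type);
 */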

bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
			bool in_vblank_irq, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;

	return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
						  stime, etime, mode);
}
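
/*
 * This is the drm_crtc_helper_funcs.get_scanout_position hook (the
 * in_vblank_irq argument is unused here; the flags-based internal query
 * above covers the vblank-irq special cases).  A sketch of the wiring in a
 * per-ASIC helper table:
 *
 *	static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
 *		...
 *		.get_scanout_position = amdgpu_crtc_get_scanout_position,
 *	};
 */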

static bool
amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_fb_helper *fb_helper = dev->fb_helper;

	if (!fb_helper || !fb_helper->buffer)
		return false;

	if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
		return false;

	return true;
}
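
/*
 * amdgpu_display_robj_is_fb() checks whether a BO is the one backing the
 * fbdev emulation framebuffer.  The suspend helper below uses it so that
 * the fbdev scanout buffer stays pinned across suspend, while every other
 * front buffer gets unpinned.
 */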

int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int r;

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector,
					  DRM_MODE_DPMS_OFF);
	drm_connector_list_iter_end(&iter);
	drm_modeset_unlock_all(dev);
	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct drm_framebuffer *fb = crtc->primary->fb;
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (fb == NULL || fb->obj[0] == NULL)
			continue;

		robj = gem_to_amdgpu_bo(fb->obj[0]);
		if (!amdgpu_display_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	return 0;
}

int amdgpu_display_resume_helper(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	int r;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
				if (r != 0)
					dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
				else
					/* only read the GPU offset once the pin succeeded */
					amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}

	drm_helper_resume_force_mode(dev);

	/* turn on display hw */
	drm_modeset_lock_all(dev);

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector,
					  DRM_MODE_DPMS_ON);
	drm_connector_list_iter_end(&iter);

	drm_modeset_unlock_all(dev);

	return 0;
}
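
/*
 * The suspend/resume helpers above are called from the per-ASIC display IP
 * blocks rather than from common code; a hedged sketch of such a call site
 * ("dce_vX_0" is a placeholder, exact signatures vary by IP version):
 *
 *	static int dce_vX_0_suspend(void *handle)
 *	{
 *		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 *		int r;
 *
 *		r = amdgpu_display_suspend_helper(adev);
 *		if (r)
 *			return r;
 *		...
 *	}
 */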