| author | Thomas Zimmermann <tzimmermann@suse.de> | 2021-11-18 09:36:39 +0100 |
| --- | --- | --- |
| committer | Thomas Zimmermann <tzimmermann@suse.de> | 2021-11-18 09:36:39 +0100 |
| commit | a713ca234ea9d946235ac7248995c5fddfd9e523 (patch) | |
| tree | 708f72ee1c76360aa80c926f1defc8301aef1a23 /drivers/gpu | |
| parent | 37fe0cf5fb803d98efd7feb64b408c9b029c1085 (diff) | |
| parent | fa55b7dcdc43c1aa1ba12bca9d2dd4318c2a0dbf (diff) | |
| download | linux-a713ca234ea9d946235ac7248995c5fddfd9e523.tar.gz | |
Merge drm/drm-next into drm-misc-next
Backmerging from drm/drm-next for v5.16-rc1.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Diffstat (limited to 'drivers/gpu')
363 files changed, 6946 insertions, 5146 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index fb144617055b..9bb456b49b59 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -117,9 +117,8 @@ config DRM_DEBUG_MODESET_LOCK
 config DRM_FBDEV_EMULATION
 	bool "Enable legacy fbdev support for your modesetting driver"
-	depends on DRM
-	depends on FB=y || FB=DRM
-	select DRM_KMS_HELPER
+	depends on DRM_KMS_HELPER
+	depends on FB=y || FB=DRM_KMS_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 8d0748184a14..653726588956 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -73,10 +73,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
 amdgpu-y += \
 	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
-	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
-	arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
-	nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o \
-	beige_goby_reg_init.o yellow_carp_reg_init.o cyan_skillfish_reg_init.o
+	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
+	nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o
 
 # add DF block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 148f6c3343ab..bcfdb63b1d42 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -307,6 +307,8 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
 		adev->ip_blocks[i].status.late_initialized = true;
 	}
 
+	amdgpu_ras_set_error_query_ready(adev, true);
+
 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4da7eb65e744..9f017663ac50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -205,6 +205,7 @@ extern struct amdgpu_mgpu_info mgpu_info;
 extern int amdgpu_ras_enable;
 extern uint amdgpu_ras_mask;
 extern int amdgpu_bad_page_threshold;
+extern bool amdgpu_ignore_bad_page_threshold;
 extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
 extern int amdgpu_async_gfx_ring;
 extern int amdgpu_mcbp;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 69de31754907..a15a4787c7ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -279,6 +279,8 @@ int amdgpu_amdkfd_gpuvm_sync_memory(
 		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
 		struct kgd_mem *mem, void **kptr, uint64_t *size);
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem);
+
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
 					    struct dma_fence **ef);
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
@@ -295,7 +297,7 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd);
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
 void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
 #else
 static inline
@@ -310,7 +312,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 }
 
 static inline
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
 {
 }
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 054c1a224def..71acd577803e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -207,7 +207,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev,
 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 }
 
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	u32 domain = bo->preferred_domains;
@@ -219,6 +219,8 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 	}
 
 	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
+
+	kfree(bo->kfd_bo);
 }
@@ -734,14 +736,19 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 		}
 
 		/* Add BO to VM internal data structures */
+		ret = amdgpu_bo_reserve(bo[i], false);
+		if (ret) {
+			pr_debug("Unable to reserve BO during memory attach");
+			goto unwind;
+		}
 		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
+		amdgpu_bo_unreserve(bo[i]);
 		if (unlikely(!attachment[i]->bo_va)) {
 			ret = -ENOMEM;
 			pr_err("Failed to add BO object to VM. ret == %d\n", ret);
 			goto unwind;
 		}
-
 		attachment[i]->va = va;
 		attachment[i]->pte_flags = get_pte_flags(adev, mem);
 		attachment[i]->adev = adev;
@@ -757,7 +764,9 @@ unwind:
 		if (!attachment[i])
 			continue;
 		if (attachment[i]->bo_va) {
+			amdgpu_bo_reserve(bo[i], true);
 			amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
+			amdgpu_bo_unreserve(bo[i]);
 			list_del(&attachment[i]->list);
 		}
 		if (bo[i])
@@ -1503,7 +1512,7 @@ allocate_init_user_pages_failed:
 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
 	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
 err_node_allow:
-	amdgpu_bo_unref(&bo);
+	drm_gem_object_put(gobj);
 	/* Don't unreserve system mem limit twice */
 	goto err_reserve_limit;
 err_bo_create:
@@ -1568,12 +1577,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
 		mem->va + bo_size * (1 + mem->aql_queue));
 
-	ret = unreserve_bo_and_vms(&ctx, false, false);
-
 	/* Remove from VM internal data structures */
 	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
 		kfd_mem_detach(entry);
 
+	ret = unreserve_bo_and_vms(&ctx, false, false);
+
 	/* Free the sync object */
 	amdgpu_sync_free(&mem->sync);
 
@@ -1600,9 +1609,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
 	if (mem->dmabuf)
 		dma_buf_put(mem->dmabuf);
-	drm_gem_object_put(&mem->bo->tbo.base);
 	mutex_destroy(&mem->lock);
-	kfree(mem);
+
+	/* If this releases the last reference, it will end up calling
+	 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
+	 * this needs to be the last call here.
+	 */
+	drm_gem_object_put(&mem->bo->tbo.base);
 
 	return ret;
 }
@@ -1871,6 +1884,16 @@ bo_reserve_failed:
 	return ret;
 }
 
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem)
+{
+	struct amdgpu_bo *bo = mem->bo;
+
+	amdgpu_bo_reserve(bo, true);
+	amdgpu_bo_kunmap(bo);
+	amdgpu_bo_unpin(bo);
+	amdgpu_bo_unreserve(bo);
+}
+
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
 					  struct kfd_vm_fault_info *mem)
 {
@@ -2041,19 +2064,26 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 		/* Get updated user pages */
 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
 		if (ret) {
-			pr_debug("%s: Failed to get user pages: %d\n",
-				__func__, ret);
+			pr_debug("Failed %d to get user pages\n", ret);
+
+			/* Return -EFAULT bad address error as success. It will
+			 * fail later with a VM fault if the GPU tries to access
+			 * it. Better than hanging indefinitely with stalled
+			 * user mode queues.
+			 *
+			 * Return other error -EBUSY or -ENOMEM to retry restore
+			 */
+			if (ret != -EFAULT)
+				return ret;
+		} else {
-			/* Return error -EBUSY or -ENOMEM, retry restore */
-			return ret;
+			/*
+			 * FIXME: Cannot ignore the return code, must hold
+			 * notifier_lock
+			 */
+			amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
 		}
-
-		/*
-		 * FIXME: Cannot ignore the return code, must hold
-		 * notifier_lock
-		 */
-		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
-
 		/* Mark the BO as valid unless it was invalidated
 		 * again concurrently.
 		 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 239e71174855..5625f7736e37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2398,10 +2398,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	if (!adev->gmc.xgmi.pending_reset)
 		amdgpu_amdkfd_device_init(adev);
 
-	r = amdgpu_amdkfd_resume_iommu(adev);
-	if (r)
-		goto init_failed;
-
 	amdgpu_fru_get_product_info(adev);
 
 init_failed:
@@ -3171,11 +3167,21 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 {
 	switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
-#if defined(CONFIG_DRM_AMD_DC_SI)
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
 	case CHIP_OLAND:
+		/*
+		 * We have systems in the wild with these ASICs that require
+		 * LVDS and VGA support which is not supported with DC.
+		 *
+		 * Fallback to the non-DC driver here by default so as not to
+		 * cause regressions.
+		 */
+#if defined(CONFIG_DRM_AMD_DC_SI)
+		return amdgpu_dc > 0;
+#else
+		return false;
 #endif
 	case CHIP_BONAIRE:
 	case CHIP_KAVERI:
@@ -3839,10 +3845,10 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 
 	amdgpu_fbdev_fini(adev);
 
-	amdgpu_irq_fini_hw(adev);
-
 	amdgpu_device_ip_fini_early(adev);
 
+	amdgpu_irq_fini_hw(adev);
+
 	ttm_device_clear_dma_mappings(&adev->mman.bdev);
 
 	amdgpu_gart_dummy_page_fini(adev);
@@ -3852,8 +3858,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 
 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 {
-	amdgpu_device_ip_fini(adev);
 	amdgpu_fence_driver_sw_fini(adev);
+	amdgpu_device_ip_fini(adev);
 	release_firmware(adev->firmware.gpu_info_fw);
 	adev->firmware.gpu_info_fw = NULL;
 	adev->accel_working = false;
@@ -4287,8 +4293,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	amdgpu_amdkfd_pre_reset(adev);
-
 	/* Resume IP prior to SMC */
 	r = amdgpu_device_ip_reinit_early_sriov(adev);
 	if (r)
@@ -4850,6 +4854,9 @@ static void amdgpu_device_recheck_guilty_jobs(
 
 		/* clear job's guilty and depend the folowing step to decide the real one */
 		drm_sched_reset_karma(s_job);
+		/* for the real bad job, it will be resubmitted twice, adding a dma_fence_get
+		 * to make sure fence is balanced */
+		dma_fence_get(s_job->s_fence->parent);
 		drm_sched_resubmit_jobs_ext(&ring->sched, 1);
 
 		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
@@ -4885,6 +4892,7 @@ retry:
 
 		/* got the hw fence, signal finished fence */
 		atomic_dec(ring->sched.score);
+		dma_fence_put(s_job->s_fence->parent);
 		dma_fence_get(&s_job->s_fence->finished);
 		dma_fence_signal(&s_job->s_fence->finished);
 		dma_fence_put(&s_job->s_fence->finished);
@@ -5020,8 +5028,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
 
-		if (!amdgpu_sriov_vf(tmp_adev))
-			amdgpu_amdkfd_pre_reset(tmp_adev);
+		amdgpu_amdkfd_pre_reset(tmp_adev);
 
 		/*
 		 * Mark these ASICs to be reseted as untracked first
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 2bebd2ce6474..ff70bc233489 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -108,6 +108,8 @@ static const char *hw_id_names[HW_ID_MAX] = {
 	[HDP_HWID]	= "HDP",
 	[SDMA0_HWID]	= "SDMA0",
 	[SDMA1_HWID]	= "SDMA1",
+	[SDMA2_HWID]	= "SDMA2",
+	[SDMA3_HWID]	= "SDMA3",
 	[ISP_HWID]	= "ISP",
 	[DBGU_IO_HWID]	= "DBGU_IO",
 	[DF_HWID]	= "DF",
@@ -505,6 +507,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 			break;
 		}
 	}
+	/* some IP discovery tables on Navy Flounder don't have this set correctly */
+	if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
+	    (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)))
+		adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
 	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
 		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
 		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
@@ -736,6 +742,7 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
 		case IP_VERSION(1, 0, 1):
 		case IP_VERSION(2, 0, 2):
 		case IP_VERSION(2, 0, 0):
+		case IP_VERSION(2, 0, 3):
 		case IP_VERSION(2, 1, 0):
 		case IP_VERSION(3, 0, 0):
 		case IP_VERSION(3, 0, 2):
@@ -745,8 +752,6 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
 		case IP_VERSION(3, 1, 3):
 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
 			break;
-		case IP_VERSION(2, 0, 3):
-			break;
 		default:
 			return -EINVAL;
 		}
@@ -862,7 +867,8 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
 	case IP_VERSION(2, 0, 2):
 	case IP_VERSION(2, 2, 0):
 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+		if (!amdgpu_sriov_vf(adev))
+			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
 		break;
 	case IP_VERSION(2, 0, 3):
 		break;
@@ -876,6 +882,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
 		break;
 	case IP_VERSION(3, 0, 0):
 	case IP_VERSION(3, 0, 16):
+	case IP_VERSION(3, 0, 64):
 	case IP_VERSION(3, 1, 1):
 	case IP_VERSION(3, 0, 2):
 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
@@ -926,6 +933,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
+		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
@@ -946,6 +954,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
+		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
@@ -1004,6 +1013,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
+		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
@@ -1013,6 +1023,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
+		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
 		break;
@@ -1025,6 +1036,13 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
+		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
+		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
+		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
+		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
+		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
+		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
+		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
@@ -1034,6 +1052,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
+		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
 		break;
 	case CHIP_ALDEBARAN:
 		aldebaran_reg_base_init(adev);
@@ -1044,6 +1063,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
+		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
+		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
+		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
+		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
@@ -1053,6 +1076,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
+		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
 		break;
 	default:
@@ -1120,10 +1144,13 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 		break;
 	case IP_VERSION(7, 4, 0):
 	case IP_VERSION(7, 4, 1):
-	case IP_VERSION(7, 4, 4):
 		adev->nbio.funcs = &nbio_v7_4_funcs;
 		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
 		break;
+	case IP_VERSION(7, 4, 4):
+		adev->nbio.funcs = &nbio_v7_4_funcs;
+		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg_ald;
+		break;
 	case IP_VERSION(7, 2, 0):
 	case IP_VERSION(7, 2, 1):
 	case IP_VERSION(7, 5, 0):
@@ -1134,12 +1161,15 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 	case IP_VERSION(2, 3, 0):
 	case IP_VERSION(2, 3, 1):
 	case IP_VERSION(2, 3, 2):
+		adev->nbio.funcs = &nbio_v2_3_funcs;
+		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
+		break;
 	case IP_VERSION(3, 3, 0):
 	case IP_VERSION(3, 3, 1):
 	case IP_VERSION(3, 3, 2):
 	case IP_VERSION(3, 3, 3):
 		adev->nbio.funcs = &nbio_v2_3_funcs;
-		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
+		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg_sc;
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index c718fb5f3f8a..ad95de6399af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -38,6 +38,7 @@
 #include <drm/drm_probe_helper.h>
 #include <linux/mmu_notifier.h>
 #include <linux/suspend.h>
+#include <linux/cc_platform.h>
 
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
@@ -877,7 +878,7 @@ module_param_named(reset_method, amdgpu_reset_method, int, 0444);
 * result in the GPU entering bad status when the number of total
 * faulty pages by ECC exceeds the threshold value.
 */
-MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement)");
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement, -2 = ignore bad page threshold)");
 module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
 
 MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
@@ -1922,7 +1923,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	 * however, SME requires an indirect IOMMU mapping because the encryption
 	 * bit is beyond the DMA mask of the chip.
 	 */
-	if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
+	    ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
 		dev_info(&pdev->dev,
 			 "SME is not compatible with RAVEN\n");
 		return -ENOTSUPP;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a573424a6e0b..a1e63ba4c54a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -60,9 +60,10 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
 			goto unlock;
 		}
 
-		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-					       TTM_BO_VM_NUM_PREFAULT, 1);
-		drm_dev_exit(idx);
+		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+					       TTM_BO_VM_NUM_PREFAULT);
+
+		drm_dev_exit(idx);
 	} else {
 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d2955ea4a62b..651c7abfde03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -340,32 +340,32 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 	case AMDGPU_INFO_FW_TA:
 		switch (query_fw->index) {
 		case TA_FW_TYPE_PSP_XGMI:
-			fw_info->ver = adev->psp.ta_fw_version;
+			fw_info->ver = adev->psp.xgmi_context.context.bin_desc.fw_version;
 			fw_info->feature = adev->psp.xgmi_context.context
 						   .bin_desc.feature_version;
 			break;
 		case TA_FW_TYPE_PSP_RAS:
-			fw_info->ver = adev->psp.ta_fw_version;
+			fw_info->ver = adev->psp.ras_context.context.bin_desc.fw_version;
 			fw_info->feature = adev->psp.ras_context.context
 						   .bin_desc.feature_version;
 			break;
 		case TA_FW_TYPE_PSP_HDCP:
-			fw_info->ver = adev->psp.ta_fw_version;
+			fw_info->ver = adev->psp.hdcp_context.context.bin_desc.fw_version;
 			fw_info->feature = adev->psp.hdcp_context.context
 						   .bin_desc.feature_version;
 			break;
 		case TA_FW_TYPE_PSP_DTM:
-			fw_info->ver = adev->psp.ta_fw_version;
+			fw_info->ver = adev->psp.dtm_context.context.bin_desc.fw_version;
 			fw_info->feature = adev->psp.dtm_context.context
 						   .bin_desc.feature_version;
 			break;
 		case TA_FW_TYPE_PSP_RAP:
-			fw_info->ver = adev->psp.ta_fw_version;
+			fw_info->ver = adev->psp.rap_context.context.bin_desc.fw_version;
 			fw_info->feature = adev->psp.rap_context.context
 						   .bin_desc.feature_version;
 			break;
 		case TA_FW_TYPE_PSP_SECUREDISPLAY:
-			fw_info->ver = adev->psp.ta_fw_version;
+			fw_info->ver = adev->psp.securedisplay_context.context.bin_desc.fw_version;
 			fw_info->feature =
 				adev->psp.securedisplay_context.context.bin_desc
 					.feature_version;
@@ -1423,6 +1423,8 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
 	struct drm_amdgpu_info_firmware fw_info;
 	struct drm_amdgpu_query_fw query_fw;
 	struct atom_context *ctx = adev->mode_info.atom_context;
+	uint8_t smu_minor, smu_debug;
+	uint16_t smu_major;
 	int ret, i;
 
 	static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
@@ -1568,8 +1570,11 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
 	if (ret)
 		return ret;
-	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
-		   fw_info.feature, fw_info.ver);
+	smu_major = (fw_info.ver >> 16) & 0xffff;
+	smu_minor = (fw_info.ver >> 8) & 0xff;
+	smu_debug = (fw_info.ver >> 0) & 0xff;
+	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x (%d.%d.%d)\n",
+		   fw_info.feature, fw_info.ver, smu_major, smu_minor, smu_debug);
 
 	/* SDMA */
 	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index aeb92e5677ac..4fcfc2313b8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1274,7 +1274,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	abo = ttm_to_amdgpu_bo(bo);
 
 	if (abo->kfd_bo)
-		amdgpu_amdkfd_unreserve_memory_limit(abo);
+		amdgpu_amdkfd_release_notify(abo);
 
 	/* We only remove the fence if the resv has individualized. */
 	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 6b39e6c02dd8..c641f84649d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -65,7 +65,6 @@ static int psp_securedisplay_terminate(struct psp_context *psp);
 *
 * This new sequence is required for
 *   - Arcturus and onwards
- *   - Navi12 and onwards
 */
 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
 {
@@ -77,7 +76,9 @@ static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
 	}
 
 	switch (adev->ip_versions[MP0_HWIP][0]) {
+	case IP_VERSION(11, 0, 0):
 	case IP_VERSION(11, 0, 4):
+	case IP_VERSION(11, 0, 5):
 	case IP_VERSION(11, 0, 7):
 	case IP_VERSION(11, 0, 9):
 	case IP_VERSION(11, 0, 11):
@@ -1114,7 +1115,7 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
 {
 	return psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
-		psp->xgmi_context.context.bin_desc.feature_version >= 0x2000000b;
+		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b;
 }
 
 /*
@@ -1291,6 +1292,29 @@ static int psp_ras_unload(struct psp_context *psp)
 	return psp_ta_unload(psp, &psp->ras_context.context);
 }
 
+static void psp_ras_ta_check_status(struct psp_context *psp)
+{
+	struct ta_ras_shared_memory *ras_cmd =
+		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+
+	switch (ras_cmd->ras_status) {
+	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
+		dev_warn(psp->adev->dev,
+			 "RAS WARNING: cmd failed due to unsupported ip\n");
+		break;
+	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
+		dev_warn(psp->adev->dev,
+			 "RAS WARNING: cmd failed due to unsupported error injection\n");
+		break;
+	case TA_RAS_STATUS__SUCCESS:
+		break;
+	default:
+		dev_warn(psp->adev->dev,
+			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+		break;
+	}
+}
+
 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
 	struct ta_ras_shared_memory *ras_cmd;
@@ -1325,10 +1349,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 			dev_warn(psp->adev->dev,
 				 "RAS internal register access blocked\n");
 
-		if (ras_cmd->ras_status == TA_RAS_STATUS__ERROR_UNSUPPORTED_IP)
-			dev_warn(psp->adev->dev, "RAS WARNING: cmd failed due to unsupported ip\n");
-		else if (ras_cmd->ras_status)
-			dev_warn(psp->adev->dev, "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+		psp_ras_ta_check_status(psp);
 	}
 
 	return ret;
@@ -2622,6 +2643,12 @@ static int psp_resume(void *handle)
 		goto failed;
 	}
 
+	ret = psp_rl_load(adev);
+	if (ret) {
+		dev_err(adev->dev, "PSP load RL failed!\n");
+		goto failed;
+	}
+
 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
 		ret = psp_xgmi_initialize(psp, false, true);
 		/* Warning the XGMI seesion initialize failure
@@ -3081,32 +3108,32 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
 		psp->asd_context.bin_desc.start_addr = ucode_start_addr;
 		break;
 	case TA_FW_TYPE_PSP_XGMI:
-		psp->xgmi_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
+		psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
 		psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
 		psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
 		break;
 	case TA_FW_TYPE_PSP_RAS:
-		psp->ras_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
+		psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
 		psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
 		psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
 		break;
 	case TA_FW_TYPE_PSP_HDCP:
-		psp->hdcp_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
+		psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
 		psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
 		psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
 		break;
 	case TA_FW_TYPE_PSP_DTM:
-		psp->dtm_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
+		psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
 		psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
 		psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
 		break;
 	case TA_FW_TYPE_PSP_RAP:
-		psp->rap_context.context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
+		psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
 		psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
 		psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
 		break;
 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
-		psp->securedisplay_context.context.bin_desc.feature_version =
+		psp->securedisplay_context.context.bin_desc.fw_version =
 			le32_to_cpu(desc->fw_version);
 		psp->securedisplay_context.context.bin_desc.size_bytes =
 			le32_to_cpu(desc->size_bytes);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index e8875351967e..08133de21fdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -112,7 +112,12 @@ static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
 				uint64_t addr);
 #ifdef CONFIG_X86_MCE_AMD
-static void amdgpu_register_bad_pages_mca_notifier(void);
+static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
+struct mce_notifier_adev_list {
+	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
+	int num_gpu;
+};
+static struct mce_notifier_adev_list mce_adev_list;
 #endif
 
 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
@@ -2108,7 +2113,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
 #ifdef CONFIG_X86_MCE_AMD
 	if ((adev->asic_type == CHIP_ALDEBARAN) &&
 	    (adev->gmc.xgmi.connected_to_cpu))
-		amdgpu_register_bad_pages_mca_notifier();
+		amdgpu_register_bad_pages_mca_notifier(adev);
 #endif
 	return 0;
 
@@ -2605,24 +2610,18 @@ void amdgpu_release_ras_context(struct amdgpu_device *adev)
 #ifdef CONFIG_X86_MCE_AMD
 static struct amdgpu_device *find_adev(uint32_t node_id)
 {
-	struct amdgpu_gpu_instance *gpu_instance;
 	int i;
 	struct amdgpu_device *adev = NULL;
 
-	mutex_lock(&mgpu_info.mutex);
-
-	for (i = 0; i < mgpu_info.num_gpu; i++) {
-		gpu_instance = &(mgpu_info.gpu_ins[i]);
-		adev = gpu_instance->adev;
+	for (i = 0; i < mce_adev_list.num_gpu; i++) {
+		adev = mce_adev_list.devs[i];
 
-		if (adev->gmc.xgmi.connected_to_cpu &&
+		if (adev && adev->gmc.xgmi.connected_to_cpu &&
 		    adev->gmc.xgmi.physical_node_id == node_id)
 			break;
 		adev = NULL;
 	}
 
-	mutex_unlock(&mgpu_info.mutex);
-
 	return adev;
 }
 
@@ -2718,9 +2717,19 @@ static struct notifier_block amdgpu_bad_page_nb = {
 	.priority = MCE_PRIO_UC,
 };
 
-static void amdgpu_register_bad_pages_mca_notifier(void)
+static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
 {
 	/*
+	 * Add the adev to the mce_adev_list.
+	 * During mode2 reset, amdgpu device is temporarily
+	 * removed from the mgpu_info list which can cause
+	 * page retirement to fail.
+	 * Use this list instead of mgpu_info to find the amdgpu
+	 * device on which the UMC error was reported.
+	 */
+	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
+
+	/*
 	 * Register the x86 notifier only once
 	 * with MCE subsystem.
 	 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 98732518543e..05117eda105b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -1077,6 +1077,13 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
 		if (res)
 			DRM_ERROR("RAS table incorrect checksum or error:%d\n",
 				  res);
+
+		/* Warn if we are at 90% of the threshold or above
+		 */
+		if (10 * control->ras_num_recs >= 9 * ras->bad_page_cnt_threshold)
+			dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d",
+				 control->ras_num_recs,
+				 ras->bad_page_cnt_threshold);
 	} else if (hdr->header == RAS_TABLE_HDR_BAD &&
 		   amdgpu_bad_page_threshold != 0) {
 		res = __verify_ras_table_checksum(control);
@@ -1098,11 +1105,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
 			res = amdgpu_ras_eeprom_correct_header_tag(control,
 								   RAS_TABLE_HDR_VAL);
 		} else {
-			*exceed_err_limit = true;
-			dev_err(adev->dev,
-				"RAS records:%d exceed threshold:%d, "
-				"maybe retire this GPU?",
+			dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
 				control->ras_num_recs, ras->bad_page_cnt_threshold);
+			if (amdgpu_bad_page_threshold == -2) {
+				dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -2.");
+				res = 0;
+			} else {
+				*exceed_err_limit = true;
+				dev_err(adev->dev,
+					"RAS records:%d exceed threshold:%d, "
+					"GPU will not be initialized. Replace this GPU or increase the threshold",
+					control->ras_num_recs, ras->bad_page_cnt_threshold);
+			}
 		}
 	} else {
 		DRM_INFO("Creating a new EEPROM table");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 30b7dde496fc..eab4380f28e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -41,6 +41,7 @@
 #include <linux/swiotlb.h>
 #include <linux/dma-buf.h>
 #include <linux/sizes.h>
+#include <linux/module.h>
 
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
@@ -59,6 +60,8 @@
 #include "amdgpu_res_cursor.h"
 #include "bif/bif_4_1_d.h"
 
+MODULE_IMPORT_NS(DMA_BUF);
+
 #define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128
 
 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
@@ -696,6 +699,9 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 				       true, NULL);
 out_unlock:
 	mmap_read_unlock(mm);
+	if (r)
+		pr_debug("failed %d to get user pages 0x%lx\n", r, start);
+
 	mmput(mm);
 
 	return r;
@@ -1235,7 +1241,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
 *
 */
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
-				  unsigned long end)
+				  unsigned long end, unsigned long *userptr)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	unsigned long size;
@@ -1250,6 +1256,8 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 	if (gtt->userptr > end || gtt->userptr + size <= start)
 		return false;
 
+	if (userptr)
+		*userptr = gtt->userptr;
 	return true;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 639c7b41e30b..7346ecff4438 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -182,7 +182,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
-				  unsigned long end);
+				  unsigned long end, unsigned long *userptr);
 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 				       int *last_invalidated);
 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 0c3127f37686..ca3350502618 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -509,7 +509,7 @@ static ssize_t show_##name(struct device *dev,		\
 	struct drm_device *ddev = dev_get_drvdata(dev);		\
 	struct amdgpu_device *adev = drm_to_adev(ddev);		\
 								\
-	return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->field);	\
+	return sysfs_emit(buf, "0x%08x\n", adev->field);	\
 }								\
 static DEVICE_ATTR(name, mode, show_##name, NULL)
 
@@ -527,8 +527,8 @@ FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
 FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
 FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
 FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version);
-FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.feature_version);
-FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi_context.context.bin_desc.feature_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.fw_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi_context.context.bin_desc.fw_version);
 FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
 FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
 FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index c7d316850570..4f7c70845785 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -134,6 +134,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 			adev->vcn.indirect_sram = true;
 		break;
 	case IP_VERSION(3, 0, 0):
+	case IP_VERSION(3, 0, 64):
 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
 			fw_name = FIRMWARE_SIENNA_CICHLID;
 		else
@@ -949,3 +950,30 @@ enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
 		return AMDGPU_RING_PRIO_0;
 	}
 }
+
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
+{
+	int i;
+	unsigned int idx;
+
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		const struct common_firmware_header *hdr;
+		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+			if (adev->vcn.harvest_config & (1 << i))
+				continue;
+			/* currently only support 2 FW instances */
+			if (i >= 2) {
+				dev_info(adev->dev, "More then 2 VCN FW instances!\n");
+				break;
+			}
+			idx = AMDGPU_UCODE_ID_VCN + i;
+			adev->firmware.ucode[idx].ucode_id = idx;
+			adev->firmware.ucode[idx].fw = adev->vcn.fw;
+			adev->firmware.fw_size +=
+				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+		}
+		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
+	}
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 795cbaa02ff8..bfa27ea94804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -310,4 +310,6 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 
 enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);
 
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 88c4177b708a..04cf9b207e62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -535,9 +535,9 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
 			    adev->psp.asd_context.bin_desc.fw_version);
 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
-			    adev->psp.ras_context.context.bin_desc.feature_version);
+			    adev->psp.ras_context.context.bin_desc.fw_version);
 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
-			    adev->psp.xgmi_context.context.bin_desc.feature_version);
+			    adev->psp.xgmi_context.context.bin_desc.fw_version);
 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
 	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
@@ -584,6 +584,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
 	vf2pf_info->encode_usage = 0;
 	vf2pf_info->decode_usage = 0;
 
+	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
 	vf2pf_info->checksum =
 		amd_sriov_msg_checksum(
 		vf2pf_info, vf2pf_info->header.size, 0, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 978ac927ac11..0fad2bf854ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -806,9 +806,9 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
 	for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
 		pcs_clear_status(adev,
 				 xgmi23_pcs_err_status_reg_aldebaran[i]);
-	for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
+	for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++)
 		pcs_clear_status(adev,
-				 xgmi23_pcs_err_status_reg_aldebaran[i]);
+				 xgmi3x16_pcs_err_status_reg_aldebaran[i]);
 	for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++)
 		pcs_clear_status(adev,
 				 walf_pcs_err_status_reg_aldebaran[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 995899191288..7326b6c1b71c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -261,9 +261,10 @@ struct amd_sriov_msg_vf2pf_info {
 		uint8_t id;
 		uint32_t version;
 	} ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE];
+	uint64_t dummy_page_addr;
 
 	/* reserved */
-	uint32_t reserved[256-68];
+	uint32_t reserved[256-70];
 };
 
 /* mailbox message send from guest to host */
diff --git a/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c b/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c
deleted file mode 100644
index 608a113ce354..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "soc15_hw_ip.h"
-#include "beige_goby_ip_offset.h"
-
-int beige_goby_reg_base_init(struct amdgpu_device *adev)
-{
-	/* HW has more IP blocks, only initialize the block needed by driver */
-	uint32_t i;
-	for (i = 0 ; i < MAX_INSTANCE ; ++i) {
-		adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
-		adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
-		adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
-		adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
-		adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
-		adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
-		adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
-		adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN0_BASE.instance[i]));
-		adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
-		adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
-		adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
-		adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
-		adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
-		adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
-		adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
-		adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
-		adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
-	}
-	return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
deleted file mode 100644
index 58808814d8fb..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "soc15_hw_ip.h"
-#include "cyan_skillfish_ip_offset.h"
-
-int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
-{
-	/* HW has more IP blocks, only initialized the blocke needed by driver */
-	uint32_t i;
-	for (i = 0 ; i < MAX_INSTANCE ; ++i) {
-		adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
-		adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
-		adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
-		adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
-		adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
-		adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
-		adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
-		adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
-		adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
-		adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
-		adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
-		adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
-		adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
-		adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
-	}
-	return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 71bb3c0dc1da..e7dfeb466a0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -270,25 +270,6 @@ MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec.bin");
 MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec2.bin");
 MODULE_FIRMWARE("amdgpu/cyan_skillfish2_rlc.bin");
 
-static const struct soc15_reg_golden golden_settings_gc_10_0[] =
-{
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
-	/* TA_GRAD_ADJ_UCONFIG -> TA_GRAD_ADJ */
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382),
-	/* VGT_TF_RING_SIZE_UMD -> VGT_TF_RING_SIZE */
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2262c24e),
-	/* VGT_HS_OFFCHIP_PARAM_UMD -> VGT_HS_OFFCHIP_PARAM */
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x226cc24f),
-	/* VGT_TF_MEMORY_BASE_UMD -> VGT_TF_MEMORY_BASE */
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x226ec250),
-	/* VGT_TF_MEMORY_BASE_HI_UMD -> VGT_TF_MEMORY_BASE_HI */
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2278c261),
-	/* VGT_ESGS_RING_SIZE_UMD -> VGT_ESGS_RING_SIZE */
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2232c240),
-	/* VGT_GSVS_RING_SIZE_UMD -> VGT_GSVS_RING_SIZE */
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2233c241),
-};
-
 static const struct soc15_reg_golden golden_settings_gc_10_1[] =
 {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
@@ -3810,9 +3791,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 		break;
 	case IP_VERSION(10, 1, 3):
 		soc15_program_register_sequence(adev,
-						golden_settings_gc_10_0,
-						(const u32)ARRAY_SIZE(golden_settings_gc_10_0));
-		soc15_program_register_sequence(adev,
 						golden_settings_gc_10_0_cyan_skillfish,
 						(const u32)ARRAY_SIZE(golden_settings_gc_10_0_cyan_skillfish));
 		break;
@@ -8238,8 +8216,9 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 		/* ===  CGCG + CGLS === */
 		gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
 
-		if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 1, 10)) &&
-		    (adev->ip_versions[GC_HWIP][0] <= IP_VERSION(10, 1, 2)))
+		if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10)) ||
+		    (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1)) ||
+		    (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)))
 			gfx_v10_0_apply_medium_grain_clock_gating_workaround(adev);
 	} else {
 		/* CGCG/CGLS should be disabled before MGCG/MGLS
@@ -8270,6 +8249,9 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
 	u32 reg, data;
+
+	amdgpu_gfx_off_ctrl(adev, false);
+
 	/* not for *_SOC15 */
 	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
 	if (amdgpu_sriov_is_pp_one_vf(adev))
@@ -8284,6 +8266,8 @@ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
 	else
 		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -8337,11 +8321,8 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
 		switch (adev->ip_versions[GC_HWIP][0]) {
 		case IP_VERSION(10, 3, 1):
-			data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
-			WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
-			break;
 		case IP_VERSION(10, 3, 3):
-			data = 0x1388 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
+			data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
 			WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
 			break;
 		default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 37b4a3db6360..d17a6f399347 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3575,12 +3575,16 @@ static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
 	u32 data;
 
+	amdgpu_gfx_off_ctrl(adev, false);
+
 	data = RREG32(mmRLC_SPM_VMID);
 
 	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
 	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
 
 	WREG32(mmRLC_SPM_VMID, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e0302c23e9a7..5f112efda634 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5624,6 +5624,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
 	u32 data;
 
+	amdgpu_gfx_off_ctrl(adev, false);
+
 	if (amdgpu_sriov_is_pp_one_vf(adev))
 		data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
 	else
@@ -5636,6 +5638,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 		WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
 	else
 		WREG32(mmRLC_SPM_VMID, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7f944bb11298..b4b80f27b894 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2462,7 +2462,9 @@ static int gfx_v9_0_sw_fini(void *handle)
 	amdgpu_gfx_kiq_fini(adev);
 
 	gfx_v9_0_mec_fini(adev);
-	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+				&adev->gfx.rlc.clear_state_gpu_addr,
+				(void **)&adev->gfx.rlc.cs_ptr);
 	if (adev->flags & AMD_IS_APU) {
 		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
 				&adev->gfx.rlc.cp_table_gpu_addr,
@@ -5102,6 +5104,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
 	u32 reg, data;
 
+	amdgpu_gfx_off_ctrl(adev, false);
+
 	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
 	if (amdgpu_sriov_is_pp_one_vf(adev))
 		data = RREG32_NO_KIQ(reg);
@@ -5115,6 +5119,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
 	else
 		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 00a2b36a24b3..c4f37a161875 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -706,6 +706,11 @@ int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 		return 0;
 
+	/* Workaround for ALDEBARAN, skip GPRs init in GPU reset.
+	   Will remove it once GPRs init algorithm works for all CU settings. */
+	if (amdgpu_in_reset(adev))
+		return 0;
+
 	gfx_v9_4_2_do_sgprs_init(adev);
 
 	gfx_v9_4_2_do_vgprs_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index bda1542ef1dd..480e41847d7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -348,6 +348,10 @@ static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
 		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL,
 				    i * hub->ctx_distance, 0);
 
+	if (amdgpu_sriov_vf(adev))
+		/* Avoid write to GMC registers */
+		return;
+
 	/* Setup TLB control */
 	tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL);
 	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
index 497b86c376c6..90f0aefbdb39 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
@@ -54,15 +54,17 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
 		seg_size = REG_GET_FIELD(
 			RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE_ALDE),
 			MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+		max_region =
+			REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL_ALDE, PF_MAX_REGION);
 	} else {
 		xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
 		seg_size = REG_GET_FIELD(
 			RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE),
 			MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+		max_region =
+			REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
 	}
 
-	max_region =
-		REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
 
 	switch (adev->asic_type) {
@@ -89,9 +91,15 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
 		if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
 			return -EINVAL;
 
-		adev->gmc.xgmi.physical_node_id =
-			REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
-					PF_LFB_REGION);
+		if (adev->asic_type == CHIP_ALDEBARAN) {
+			adev->gmc.xgmi.physical_node_id =
+				REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL_ALDE,
+						PF_LFB_REGION);
+		} else {
+			adev->gmc.xgmi.physical_node_id =
+				REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
+						PF_LFB_REGION);
+		}
 
 		if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
 			return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 0e81e03e9b49..0fe714f54cca 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -841,12 +841,12 @@ static int gmc_v6_0_sw_init(void *handle)
 
 	adev->gmc.mc_mask = 0xffffffffffULL;
 
-	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
+	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
 	if (r) {
 		dev_warn(adev->dev, "No suitable DMA available.\n");
 		return r;
 	}
-	adev->need_swiotlb = drm_need_swiotlb(44);
+	adev->need_swiotlb = drm_need_swiotlb(40);
 
 	r = gmc_v6_0_init_microcode(adev);
 	if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
deleted file mode 100644
index 88efaecf9f70..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "navi10_ip_offset.h" - -int navi10_reg_base_init(struct amdgpu_device *adev) -{ - int i; - - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); - } - - return 0; -} - - diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c deleted file mode 100644 index a786d159e5e9..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
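/*
 * Context for the navi10_reg_base_init() deletion that ends here: the table
 * it filled is what turns a (HWIP, instance, segment, register) tuple into
 * an absolute MMIO offset. Roughly, the lookup that the SOC15_REG_OFFSET()
 * macro in soc15_common.h performs reduces to the helper below (a simplified
 * sketch, not the verbatim macro):
 */
static inline u32 example_reg_offset(struct amdgpu_device *adev,
				     int hwip, int inst, int seg, u32 reg)
{
	/* per-IP base segment table filled at init + register offset */
	return adev->reg_offset[hwip][inst][seg] + reg;
}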
- * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "navi12_ip_offset.h" - -int navi12_reg_base_init(struct amdgpu_device *adev) -{ - /* HW has more IP blocks, only initialized the blocks needed by driver */ - uint32_t i; - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); - } - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c deleted file mode 100644 index 4ea1e8fbb601..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "navi14_ip_offset.h" - -int navi14_reg_base_init(struct amdgpu_device *adev) -{ - int i; - - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); - } - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index b184b656b9b6..4ecd2b5808ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -53,6 +53,16 @@ #define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 +#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. 
Firmware uses this bit internally */ +#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L + static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev) { WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, @@ -318,6 +328,27 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = { .ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK, }; +const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg_sc = { + .ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK, + .ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK, + .ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK, + .ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK, + .ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK, + .ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK, + .ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK, + .ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK, + .ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK, + .ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK, + .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK, + .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK, + .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK, + .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK, + .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, + .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK, + .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK, + .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK, +}; + static void nbio_v2_3_init_registers(struct amdgpu_device *adev) { uint32_t def, data; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h index a43b60acf7f6..6074dd3a1ed8 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h @@ -27,6 +27,7 @@ #include "soc15_common.h" extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg; +extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg_sc; extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 91b3afa946f5..b8bd03d16dba 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -56,12 +56,15 @@ * These are nbio v7_4_1 registers mask. Temporarily define these here since * nbio v7_4_1 header is incomplete. */ -#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. 
Firmware uses this bit internally */ #define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L #define mmBIF_MMSCH1_DOORBELL_RANGE 0x01dc #define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2 @@ -334,12 +337,27 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK, .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK, .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK, - .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK, - .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK, - .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK, - .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK, - .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK, - .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, +}; + +const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = { + .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK, + .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK, + .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK, + .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK, + .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK, + .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK, + .ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK, + .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK, + .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK, + .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK, + .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK, + .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK, + .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK, + .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK, + .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, + .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK, + .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK, + .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK, }; static void nbio_v7_4_init_registers(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h index b8216581ec8d..cc5692db6f98 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h @@ -27,6 +27,7 @@ #include "soc15_common.h" extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg; +extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald; extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs; extern const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 898e688be63c..59eafa31c626 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -182,6 +182,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, { switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 64): if (amdgpu_sriov_vf(adev)) { if (encode) *codecs = &sriov_sc_video_codecs_encode; @@ -607,304 +608,11 @@ const struct amdgpu_ip_block_version nv_common_ip_block = .funcs = &nv_common_ip_funcs, }; -static int nv_reg_base_init(struct amdgpu_device *adev) -{ - int r; - - if (amdgpu_discovery) { - r = amdgpu_discovery_reg_base_init(adev); - 
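/*
 * Sketch of how the ref_and_mask_* values defined above are consumed: on an
 * HDP flush the ring writes its engine's bit to GPU_HDP_FLUSH_REQ and then
 * polls GPU_HDP_FLUSH_DONE until the same bit reads back, which is why
 * Aldebaran's eight SDMA engines are remapped onto RSVD_ENG1..RSVD_ENG8
 * (RSVD_ENG0 stays unused because firmware owns that bit). The helper is
 * hypothetical; the nbio callbacks and amdgpu_ring_emit_reg_write_reg_wait()
 * are existing amdgpu interfaces.
 */
static void example_emit_hdp_flush(struct amdgpu_ring *ring, u32 ref_and_mask)
{
	struct amdgpu_device *adev = ring->adev;

	/* write ref_and_mask to the REQ register, then wait for it in DONE */
	amdgpu_ring_emit_reg_write_reg_wait(ring,
			adev->nbio.funcs->get_hdp_flush_req_offset(adev),
			adev->nbio.funcs->get_hdp_flush_done_offset(adev),
			ref_and_mask, ref_and_mask);
}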
if (r) { - DRM_WARN("failed to init reg base from ip discovery table, " - "fallback to legacy init method\n"); - goto legacy_init; - } - - amdgpu_discovery_harvest_ip(adev); - - return 0; - } - -legacy_init: - switch (adev->asic_type) { - case CHIP_NAVI10: - navi10_reg_base_init(adev); - break; - case CHIP_NAVI14: - navi14_reg_base_init(adev); - break; - case CHIP_NAVI12: - navi12_reg_base_init(adev); - break; - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - sienna_cichlid_reg_base_init(adev); - break; - case CHIP_VANGOGH: - vangogh_reg_base_init(adev); - break; - case CHIP_DIMGREY_CAVEFISH: - dimgrey_cavefish_reg_base_init(adev); - break; - case CHIP_BEIGE_GOBY: - beige_goby_reg_base_init(adev); - break; - case CHIP_YELLOW_CARP: - yellow_carp_reg_base_init(adev); - break; - case CHIP_CYAN_SKILLFISH: - cyan_skillfish_reg_base_init(adev); - break; - default: - return -EINVAL; - } - - return 0; -} - void nv_set_virt_ops(struct amdgpu_device *adev) { adev->virt.ops = &xgpu_nv_virt_ops; } -int nv_set_ip_blocks(struct amdgpu_device *adev) -{ - int r; - - if (adev->asic_type == CHIP_CYAN_SKILLFISH) { - adev->nbio.funcs = &nbio_v2_3_funcs; - adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; - } else if (adev->flags & AMD_IS_APU) { - adev->nbio.funcs = &nbio_v7_2_funcs; - adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg; - } else { - adev->nbio.funcs = &nbio_v2_3_funcs; - adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; - } - adev->hdp.funcs = &hdp_v5_0_funcs; - - if (adev->asic_type >= CHIP_SIENNA_CICHLID) - adev->smuio.funcs = &smuio_v11_0_6_funcs; - else - adev->smuio.funcs = &smuio_v11_0_funcs; - - if (adev->asic_type == CHIP_SIENNA_CICHLID) - adev->gmc.xgmi.supported = true; - - /* Set IP register base before any HW register access */ - r = nv_reg_base_init(adev); - if (r) - return r; - - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - !amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - !amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); - if (adev->enable_mes) - amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); - break; - case CHIP_NAVI12: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - if (!amdgpu_sriov_vf(adev)) { - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - } else { - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - } - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || 
amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - !amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); - break; - case CHIP_SIENNA_CICHLID: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - if (!amdgpu_sriov_vf(adev)) { - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - } else { - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - } - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); - if (adev->enable_mes) - amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); - break; - case CHIP_NAVY_FLOUNDER: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - break; - case CHIP_VANGOGH: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if 
(amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); - break; - case CHIP_DIMGREY_CAVEFISH: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); - break; - case CHIP_BEIGE_GOBY: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); - break; - case CHIP_YELLOW_CARP: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); - break; - case CHIP_CYAN_SKILLFISH: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { 
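/*
 * Context for the nv_set_ip_blocks() removal in progress here: in this merge
 * window the per-ASIC switch statements were superseded by IP-discovery
 * driven setup, where block selection keys off the versions read from the
 * discovery table (adev->ip_versions[...]) rather than adev->asic_type.
 * A condensed sketch of that dispatch shape, assuming the amdgpu_discovery
 * based replacement; illustrative only, not the verbatim replacement code:
 */
static int example_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):	/* e.g. Navi10 */
	case IP_VERSION(10, 3, 0):	/* e.g. Sienna Cichlid */
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		return 0;
	default:
		return -EINVAL;
	}
}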
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - } - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); - break; - default: - return -EINVAL; - } - - return 0; -} - static uint32_t nv_get_rev_id(struct amdgpu_device *adev) { return adev->nbio.funcs->get_rev_id(adev); @@ -1248,7 +956,7 @@ static int nv_common_early_init(void *handle) AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_JPEG; if (adev->pdev->device == 0x1681) - adev->external_rev_id = adev->rev_id + 0x19; + adev->external_rev_id = 0x20; else adev->external_rev_id = adev->rev_id + 0x01; break; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h index 7df2f85bbcd0..83e9782aef39 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.h +++ b/drivers/gpu/drm/amd/amdgpu/nv.h @@ -31,15 +31,5 @@ extern const struct amdgpu_ip_block_version nv_common_ip_block; void nv_grbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); void nv_set_virt_ops(struct amdgpu_device *adev); -int nv_set_ip_blocks(struct amdgpu_device *adev); -int navi10_reg_base_init(struct amdgpu_device *adev); -int navi14_reg_base_init(struct amdgpu_device *adev); -int navi12_reg_base_init(struct amdgpu_device *adev); -int sienna_cichlid_reg_base_init(struct amdgpu_device *adev); -void vangogh_reg_base_init(struct amdgpu_device *adev); -int dimgrey_cavefish_reg_base_init(struct amdgpu_device *adev); -int beige_goby_reg_base_init(struct amdgpu_device *adev); -int yellow_carp_reg_base_init(struct amdgpu_device *adev); -int cyan_skillfish_reg_base_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 59644015dfc3..ed2293686f0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -84,7 +84,7 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; - adev->psp.hdcp_context.context.bin_desc.feature_version = + adev->psp.hdcp_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->hdcp.fw_version); adev->psp.hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes); @@ -92,7 +92,7 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) (uint8_t *)ta_hdr + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); - adev->psp.dtm_context.context.bin_desc.feature_version = + adev->psp.dtm_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->dtm.fw_version); adev->psp.dtm_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes); @@ -100,7 +100,7 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + le32_to_cpu(ta_hdr->dtm.offset_bytes); - adev->psp.securedisplay_context.context.bin_desc.feature_version = + adev->psp.securedisplay_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->securedisplay.fw_version); adev->psp.securedisplay_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->securedisplay.size_bytes); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 
b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 21a325ea49cb..2176ef85f137 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -151,7 +151,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) goto out2; ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; - adev->psp.xgmi_context.context.bin_desc.feature_version = + adev->psp.xgmi_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->xgmi.fw_version); adev->psp.xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes); @@ -159,7 +159,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) (uint8_t *)ta_hdr + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.ras_context.context.bin_desc.feature_version = + adev->psp.ras_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->ras.fw_version); adev->psp.ras_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->ras.size_bytes); @@ -192,7 +192,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) goto out2; ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; - adev->psp.hdcp_context.context.bin_desc.feature_version = + adev->psp.hdcp_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->hdcp.fw_version); adev->psp.hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes); @@ -203,7 +203,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.dtm_context.context.bin_desc.feature_version = + adev->psp.dtm_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->dtm.fw_version); adev->psp.dtm_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index 281bc4d7f0a1..a2588200ea58 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -84,7 +84,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp) ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; - adev->psp.hdcp_context.context.bin_desc.feature_version = + adev->psp.hdcp_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->hdcp.fw_version); adev->psp.hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes); @@ -94,7 +94,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp) adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.dtm_context.context.bin_desc.feature_version = + adev->psp.dtm_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->dtm.fw_version); adev->psp.dtm_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes); diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c deleted file mode 100644 index 5ee69f70c49b..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. 
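/*
 * The psp_v10/v11/v12 hunks above are a field fix: the values read from
 * struct ta_firmware_header_v1_0 are firmware versions, so they belong in
 * bin_desc.fw_version, not bin_desc.feature_version. A sketch of the parse
 * step for one TA binary; the helper and its __le32 parameters are
 * hypothetical, struct psp_bin_desc and le32_to_cpu() follow the code above.
 */
static void example_fill_ta_bin_desc(struct psp_bin_desc *desc,
				     const u8 *fw_data,
				     __le32 fw_version, __le32 size_bytes,
				     __le32 offset_bytes)
{
	/* On-disk header fields are little-endian; convert before storing. */
	desc->fw_version = le32_to_cpu(fw_version);
	desc->size_bytes = le32_to_cpu(size_bytes);
	desc->start_addr = (u8 *)fw_data + le32_to_cpu(offset_bytes);
}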
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "soc15_hw_ip.h" -#include "sienna_cichlid_ip_offset.h" - -int sienna_cichlid_reg_base_init(struct amdgpu_device *adev) -{ - /* HW has more IP blocks, only initialized the blocke needed by driver */ - uint32_t i; - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - } - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0640e143e7a5..0c316a2d42ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -780,185 +780,6 @@ void soc15_set_virt_ops(struct amdgpu_device *adev) soc15_reg_base_init(adev); } -int soc15_set_ip_blocks(struct amdgpu_device *adev) -{ - /* for bare metal case */ - if (!amdgpu_sriov_vf(adev)) - soc15_reg_base_init(adev); - - if (adev->flags & AMD_IS_APU) { - adev->nbio.funcs = &nbio_v7_0_funcs; - adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; - } else if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_ALDEBARAN) { - adev->nbio.funcs = &nbio_v7_4_funcs; - adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; - } else { - adev->nbio.funcs = 
&nbio_v6_1_funcs; - adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; - } - adev->hdp.funcs = &hdp_v4_0_funcs; - - if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_ALDEBARAN) - adev->df.funcs = &df_v3_6_funcs; - else - adev->df.funcs = &df_v1_7_funcs; - - if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS) - adev->smuio.funcs = &smuio_v11_0_funcs; - else if (adev->asic_type == CHIP_ALDEBARAN) - adev->smuio.funcs = &smuio_v13_0_funcs; - else - adev->smuio.funcs = &smuio_v9_0_funcs; - - adev->rev_id = soc15_get_rev_id(adev); - - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - - /* For Vega10 SR-IOV, PSP need to be initialized before IH */ - if (amdgpu_sriov_vf(adev)) { - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { - if (adev->asic_type == CHIP_VEGA20) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - else - amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); - } - if (adev->asic_type == CHIP_VEGA20) - amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); - else - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - } else { - if (adev->asic_type == CHIP_VEGA20) - amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); - else - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { - if (adev->asic_type == CHIP_VEGA20) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - else - amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); - } - } - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - if (is_support_sw_smu(adev)) { - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - } else { - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); - } - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) { - amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); - amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); - } - break; - case CHIP_RAVEN: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); - break; - case CHIP_ARCTURUS: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - - if (amdgpu_sriov_vf(adev)) { - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - 
amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); - } else { - amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - } - - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - - if (amdgpu_sriov_vf(adev)) { - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); - } else { - amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); - } - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); - break; - case CHIP_RENOIR: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); - break; - case CHIP_ALDEBARAN: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - - if (amdgpu_sriov_vf(adev)) { - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); - } else { - amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); - } - - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - - amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block); - break; - default: - return -EINVAL; - } - - return 0; -} - static bool soc15_need_full_reset(struct amdgpu_device *adev) { /* change this when we implement soft reset */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index f9359003385d..efc2a253e8db 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -102,7 +102,6 @@ struct soc15_ras_field_entry { void soc15_grbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); void soc15_set_virt_ops(struct amdgpu_device *adev); -int soc15_set_ip_blocks(struct amdgpu_device *adev); void soc15_program_register_sequence(struct amdgpu_device *adev, const struct soc15_reg_golden *registers, diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index de24a0a97d5e..5093826a43d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -59,7 +59,12 @@ enum ta_ras_status { 
TA_RAS_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011, TA_RAS_STATUS__ERROR_RAS_READ_WRITE = 0xA012, TA_RAS_STATUS__ERROR_NULL_PTR = 0xA013, - TA_RAS_STATUS__ERROR_UNSUPPORTED_IP = 0xA014 + TA_RAS_STATUS__ERROR_UNSUPPORTED_IP = 0xA014, + TA_RAS_STATUS__ERROR_PCS_STATE_QUIET = 0xA015, + TA_RAS_STATUS__ERROR_PCS_STATE_ERROR = 0xA016, + TA_RAS_STATUS__ERROR_PCS_STATE_HANG = 0xA017, + TA_RAS_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018, + TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019 }; enum ta_ras_block { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index d5d023a24269..2d558c2f417d 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -534,6 +534,19 @@ static int uvd_v6_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (RREG32(mmUVD_STATUS) != 0) + uvd_v6_0_stop(adev); + + return 0; +} + +static int uvd_v6_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -558,17 +571,6 @@ static int uvd_v6_0_hw_fini(void *handle) AMD_CG_STATE_GATE); } - if (RREG32(mmUVD_STATUS) != 0) - uvd_v6_0_stop(adev); - - return 0; -} - -static int uvd_v6_0_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v6_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c deleted file mode 100644 index d64d681a05dc..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
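/*
 * Shape of the uvd_v6_0 split above, condensed: hw_fini is now limited to
 * quiescing the engine (cancel the idle worker, stop the block if it is
 * still running), while the clockgating/powergating rollback stays in the
 * suspend path, which performs it first and then calls hw_fini. Condensed
 * from the hunk above, not a verbatim copy:
 */
static int example_uvd6_hw_fini(struct amdgpu_device *adev)
{
	/* make sure no idle work fires while the engine goes down */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)	/* engine still active */
		uvd_v6_0_stop(adev);

	return 0;
}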
- * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "soc15_hw_ip.h" -#include "vangogh_ip_offset.h" - -void vangogh_reg_base_init(struct amdgpu_device *adev) -{ - /* HW has more IP blocks, only initialized the blocke needed by driver */ - uint32_t i; - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - } -} diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index ad0d2564087c..d54d720b3cf6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -111,15 +111,7 @@ static int vcn_v1_0_sw_init(void *handle) /* Override the work func */ adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); - } + amdgpu_vcn_setup_ucode(adev); r = amdgpu_vcn_resume(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 091d8c0f6801..313fc1b53999 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -115,15 +115,7 @@ static int vcn_v2_0_sw_init(void *handle) if (r) return r; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); - } + amdgpu_vcn_setup_ucode(adev); r = amdgpu_vcn_resume(adev); if (r) @@ -1884,15 +1876,14 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev) /* mc resume*/ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - tmp = AMDGPU_UCODE_ID_VCN; MMSCH_V2_0_INSERT_DIRECT_WT( SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - adev->firmware.ucode[tmp].tmr_mc_addr_lo); + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo); MMSCH_V2_0_INSERT_DIRECT_WT( SOC15_REG_OFFSET(UVD, i, 
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - adev->firmware.ucode[tmp].tmr_mc_addr_hi); + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi); offset = 0; } else { MMSCH_V2_0_INSERT_DIRECT_WT( diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 59f469bab005..44fc4c218433 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -139,22 +139,7 @@ static int vcn_v2_5_sw_init(void *handle) if (r) return r; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - - if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) { - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - } - dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); - } + amdgpu_vcn_setup_ucode(adev); r = amdgpu_vcn_resume(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index dbfd92984655..da11ceba0698 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -60,11 +60,6 @@ static int amdgpu_ih_clientid_vcns[] = { SOC15_IH_CLIENTID_VCN1 }; -static int amdgpu_ucode_id_vcns[] = { - AMDGPU_UCODE_ID_VCN, - AMDGPU_UCODE_ID_VCN1 -}; - static int vcn_v3_0_start_sriov(struct amdgpu_device *adev); static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev); @@ -130,22 +125,7 @@ static int vcn_v3_0_sw_init(void *handle) if (r) return r; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - - if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) { - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - } - dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); - } + amdgpu_vcn_setup_ucode(adev); r = amdgpu_vcn_resume(adev); if (r) @@ -1293,7 +1273,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) uint32_t param, resp, expected; uint32_t offset, cache_size; uint32_t tmp, timeout; - uint32_t id; struct amdgpu_mm_table *table = &adev->virt.mm_table; uint32_t *table_loc; @@ -1337,13 +1316,12 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - id = amdgpu_ucode_id_vcns[i]; MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - adev->firmware.ucode[id].tmr_mc_addr_lo); + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, 
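/*
 * The vcn_v1_0/v2_0/v2_5/v3_0 hunks above collapse four copies of the same
 * PSP-load bookkeeping into amdgpu_vcn_setup_ucode(). Reconstructed from the
 * deleted code, the shared helper plausibly reduces to the loop below (a
 * sketch, not the verbatim helper): register one AMDGPU_UCODE_ID_VCN + i
 * entry per VCN instance and grow the reserved firmware size, but only when
 * the PSP front-door load path is used. This indexing is also why the SR-IOV
 * path above can address ucode[AMDGPU_UCODE_ID_VCN + i] directly.
 */
static void example_vcn_setup_ucode(struct amdgpu_device *adev)
{
	const struct common_firmware_header *hdr;
	int i;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].ucode_id =
				AMDGPU_UCODE_ID_VCN + i;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
	}
	dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
}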
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - adev->firmware.ucode[id].tmr_mc_addr_hi); + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); offset = 0; MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), diff --git a/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c b/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c deleted file mode 100644 index 3d89421275ed..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "soc15_hw_ip.h" -#include "yellow_carp_offset.h" - -int yellow_carp_reg_base_init(struct amdgpu_device *adev) -{ - /* HW has more IP blocks, only initialized the block needed by driver */ - uint32_t i; - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - } - return 0; -} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 86afd37b098d..24ebd61395d8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -405,7 +405,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, mutex_lock(&p->mutex); - retval = pqm_update_queue(&p->pqm, args->queue_id, &properties); + retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties); 
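/*
 * The kfd_chardev changes starting here stop smuggling the CU mask through
 * struct queue_properties: queue property updates go through
 * pqm_update_queue_properties(), while MQD-only tweaks such as the CU mask
 * go through pqm_update_mqd() with a small descriptor (see the set_cu_mask
 * hunk that follows). Assumed shape of that descriptor, reconstructed from
 * the minfo.cu_mask.count / minfo.cu_mask.ptr / minfo.update_flag accesses
 * below; the real definition lives in kfd_priv.h:
 */
struct example_mqd_update_info {
	union {
		struct {
			void *ptr;	/* kzalloc'd copy of the user CU mask */
			u32 count;	/* mask bits, capped at 1024 */
		} cu_mask;
	};
	enum mqd_update_flag update_flag;	/* e.g. UPDATE_FLAG_CU_MASK */
};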
mutex_unlock(&p->mutex); @@ -418,7 +418,7 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p, int retval; const int max_num_cus = 1024; struct kfd_ioctl_set_cu_mask_args *args = data; - struct queue_properties properties; + struct mqd_update_info minfo = {0}; uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr; size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32); @@ -428,8 +428,8 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p, return -EINVAL; } - properties.cu_mask_count = args->num_cu_mask; - if (properties.cu_mask_count == 0) { + minfo.cu_mask.count = args->num_cu_mask; + if (minfo.cu_mask.count == 0) { pr_debug("CU mask cannot be 0"); return -EINVAL; } @@ -438,32 +438,33 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p, * limit of max_num_cus bits. We can then just drop any CU mask bits * past max_num_cus bits and just use the first max_num_cus bits. */ - if (properties.cu_mask_count > max_num_cus) { + if (minfo.cu_mask.count > max_num_cus) { pr_debug("CU mask cannot be greater than 1024 bits"); - properties.cu_mask_count = max_num_cus; + minfo.cu_mask.count = max_num_cus; cu_mask_size = sizeof(uint32_t) * (max_num_cus/32); } - properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL); - if (!properties.cu_mask) + minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL); + if (!minfo.cu_mask.ptr) return -ENOMEM; - retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size); + retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size); if (retval) { pr_debug("Could not copy CU mask from userspace"); - kfree(properties.cu_mask); - return -EFAULT; + retval = -EFAULT; + goto out; } + minfo.update_flag = UPDATE_FLAG_CU_MASK; + mutex_lock(&p->mutex); - retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties); + retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo); mutex_unlock(&p->mutex); - if (retval) - kfree(properties.cu_mask); - +out: + kfree(minfo.cu_mask.ptr); return retval; } @@ -1011,11 +1012,6 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, void *mem, *kern_addr; uint64_t size; - if (p->signal_page) { - pr_err("Event page is already set\n"); - return -EINVAL; - } - kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset)); if (!kfd) { pr_err("Getting device by id failed in %s\n", __func__); @@ -1023,6 +1019,13 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, } mutex_lock(&p->mutex); + + if (p->signal_page) { + pr_err("Event page is already set\n"); + err = -EINVAL; + goto out_unlock; + } + pdd = kfd_bind_process_to_device(kfd, p); if (IS_ERR(pdd)) { err = PTR_ERR(pdd); @@ -1037,20 +1040,24 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, err = -EINVAL; goto out_unlock; } - mutex_unlock(&p->mutex); err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd, mem, &kern_addr, &size); if (err) { pr_err("Failed to map event page to kernel\n"); - return err; + goto out_unlock; } err = kfd_event_page_set(p, kern_addr, size); if (err) { pr_err("Failed to set event page\n"); - return err; + amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->kgd, mem); + goto out_unlock; } + + p->signal_handle = args->event_page_offset; + + mutex_unlock(&p->mutex); } err = kfd_event_create(filp, p, args->event_type, @@ -1259,6 +1266,23 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, if (args->size == 0) return -EINVAL; +#if IS_ENABLED(CONFIG_HSA_AMD_SVM) + /* Flush 
pending deferred work to avoid racing with deferred actions + * from previous memory map changes (e.g. munmap). + */ + svm_range_list_lock_and_flush_work(&p->svms, current->mm); + mutex_lock(&p->svms.lock); + mmap_write_unlock(current->mm); + if (interval_tree_iter_first(&p->svms.objects, + args->va_addr >> PAGE_SHIFT, + (args->va_addr + args->size - 1) >> PAGE_SHIFT)) { + pr_err("Address: 0x%llx already allocated by SVM\n", + args->va_addr); + mutex_unlock(&p->svms.lock); + return -EADDRINUSE; + } + mutex_unlock(&p->svms.lock); +#endif dev = kfd_device_by_id(args->gpu_id); if (!dev) return -EINVAL; @@ -1351,6 +1375,15 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep, return -EINVAL; mutex_lock(&p->mutex); + /* + * Safeguard to prevent user space from freeing the signal BO. + * It will be freed at process termination. + */ + if (p->signal_handle && (p->signal_handle == args->handle)) { + pr_err("Freeing the signal BO is not allowed\n"); + ret = -EPERM; + goto err_unlock; + } pdd = kfd_get_process_device_data(dev, p); if (!pdd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 064d42acd54e..3b119db16003 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -93,7 +93,6 @@ static const struct kfd_device_info carrizo_device_info = { .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; -#endif static const struct kfd_device_info raven_device_info = { .asic_family = CHIP_RAVEN, @@ -113,7 +112,9 @@ static const struct kfd_device_info raven_device_info = { .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK static const struct kfd_device_info hawaii_device_info = { .asic_family = CHIP_HAWAII, .asic_name = "hawaii", @@ -133,6 +134,7 @@ static const struct kfd_device_info hawaii_device_info = { .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; +#endif static const struct kfd_device_info tonga_device_info = { .asic_family = CHIP_TONGA, @@ -404,7 +406,7 @@ static const struct kfd_device_info aldebaran_device_info = { static const struct kfd_device_info renoir_device_info = { .asic_family = CHIP_RENOIR, .asic_name = "renoir", - .gfx_target_version = 90002, + .gfx_target_version = 90012, .max_pasid_bits = 16, .max_no_of_hqd = 24, .doorbell_size = 8, @@ -1021,6 +1023,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_double_confirm_iommu_support(kfd); if (kfd_iommu_device_init(kfd)) { + kfd->use_iommu_v2 = false; dev_err(kfd_device, "Error initializing iommuv2\n"); goto device_iommu_error; } @@ -1029,6 +1032,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, svm_migrate_init((struct amdgpu_device *)kfd->kgd); + if (kgd2kfd_resume_iommu(kfd)) + goto device_iommu_error; + if (kfd_resume(kfd)) goto kfd_resume_error; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index f8fce9d05f50..003ba6a373ff 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -557,7 +557,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, return retval; } -static int update_queue(struct device_queue_manager *dqm, struct queue *q) +static int update_queue(struct device_queue_manager *dqm, struct queue *q, + struct mqd_update_info *minfo) { int retval = 0; struct mqd_manager *mqd_mgr; @@ -605,7 +606,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) } } - 
mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties); + mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo); /* * check active state vs. the previous state and modify @@ -1429,7 +1430,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, if (!dqm->sched_running) return 0; - if (dqm->is_hws_hang) + if (dqm->is_hws_hang || dqm->is_resetting) return -EIO; if (!dqm->active_runlist) return retval; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index c8719682c4da..499fc0ea387f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -93,7 +93,7 @@ struct device_queue_manager_ops { struct queue *q); int (*update_queue)(struct device_queue_manager *dqm, - struct queue *q); + struct queue *q, struct mqd_update_info *minfo); int (*register_process)(struct device_queue_manager *dqm, struct qcm_process_device *qpd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 2e86692def19..d1388896f9c1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -308,7 +308,7 @@ * 16MB are reserved for kernel use (CWSR trap handler and kernel IB * for now). */ -#define SVM_USER_BASE 0x1000000ull +#define SVM_USER_BASE (u64)(KFD_CWSR_TBA_TMA_SIZE + 2*PAGE_SIZE) #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE) #define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index a2b77d1df854..64b4ac339904 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -136,7 +136,6 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr; prop.eop_ring_buffer_address = kq->eop_gpu_addr; prop.eop_ring_buffer_size = PAGE_SIZE; - prop.cu_mask = NULL; if (init_queue(&kq->queue, &prop) != 0) goto err_init_queue; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index f53e17a94ad8..9b9c2b9bf2ef 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -20,7 +20,6 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ - #include <linux/types.h> #include <linux/hmm.h> #include <linux/dma-direction.h> @@ -34,6 +33,11 @@ #include "kfd_svm.h" #include "kfd_migrate.h" +#ifdef dev_fmt +#undef dev_fmt +#endif +#define dev_fmt(fmt) "kfd_migrate: %s: " fmt, __func__ + static uint64_t svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr) { @@ -151,14 +155,14 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys, gart_d = svm_migrate_direct_mapping_addr(adev, *vram); } if (r) { - pr_debug("failed %d to create gart mapping\n", r); + dev_err(adev->dev, "fail %d create gart mapping\n", r); goto out_unlock; } r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE, NULL, &next, false, true, false); if (r) { - pr_debug("failed %d to copy memory\n", r); + dev_err(adev->dev, "fail %d to copy memory\n", r); goto out_unlock; } @@ -264,6 +268,32 @@ static void svm_migrate_put_sys_page(unsigned long addr) put_page(page); } +static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate) +{ + unsigned long cpages = 0; + unsigned long i; + + for (i = 0; i < migrate->npages; i++) { + if (migrate->src[i] & MIGRATE_PFN_VALID && + migrate->src[i] & MIGRATE_PFN_MIGRATE) + cpages++; + } + return cpages; +} + +static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate) +{ + unsigned long upages = 0; + unsigned long i; + + for (i = 0; i < migrate->npages; i++) { + if (migrate->src[i] & MIGRATE_PFN_VALID && + !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) + upages++; + } + return upages; +} + static int svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, @@ -285,7 +315,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, r = svm_range_vram_node_new(adev, prange, true); if (r) { - pr_debug("failed %d get 0x%llx pages from vram\n", r, npages); + dev_err(adev->dev, "fail %d to alloc vram\n", r); goto out; } @@ -300,12 +330,11 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]); svm_migrate_get_vram_page(prange, migrate->dst[i]); migrate->dst[i] = migrate_pfn(migrate->dst[i]); - migrate->dst[i] |= MIGRATE_PFN_LOCKED; src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_TO_DEVICE); r = dma_mapping_error(dev, src[i]); if (r) { - pr_debug("failed %d dma_map_page\n", r); + dev_err(adev->dev, "fail %d dma_map_page\n", r); goto out_free_vram_pages; } } else { @@ -325,8 +354,8 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, continue; } - pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n", - src[i] >> PAGE_SHIFT, page_to_pfn(spage)); + pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n", + src[i] >> PAGE_SHIFT, page_to_pfn(spage)); if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) { r = svm_migrate_copy_memory_gart(adev, src + i - j, @@ -372,7 +401,7 @@ out: return r; } -static int +static long svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end) @@ -381,6 +410,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate; + unsigned long cpages = 0; dma_addr_t *scratch; size_t size; void *buf; @@ -405,23 +435,31 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, r = 
migrate_vma_setup(&migrate); if (r) { - pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n", - r, prange->svms, prange->start, prange->last); + dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r, + prange->start, prange->last); goto out_free; } - if (migrate.cpages != npages) { - pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n", - migrate.cpages, - npages); - } - if (migrate.cpages) { - r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, - scratch); - migrate_vma_pages(&migrate); - svm_migrate_copy_done(adev, mfence); - migrate_vma_finalize(&migrate); + cpages = migrate.cpages; + if (!cpages) { + pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n", + prange->start, prange->last); + goto out_free; } + if (cpages != npages) + pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n", + cpages, npages); + else + pr_debug("0x%lx pages migrated\n", cpages); + + r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch); + migrate_vma_pages(&migrate); + + pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", + svm_migrate_successful_pages(&migrate), cpages, migrate.npages); + + svm_migrate_copy_done(adev, mfence); + migrate_vma_finalize(&migrate); svm_range_dma_unmap(adev->dev, scratch, 0, npages); svm_range_free_dma_mappings(prange); @@ -429,12 +467,13 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, out_free: kvfree(buf); out: - if (!r) { + if (!r && cpages) { pdd = svm_range_get_pdd_by_adev(prange, adev); if (pdd) - WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages); - } + WRITE_ONCE(pdd->page_in, pdd->page_in + cpages); + return cpages; + } return r; } @@ -456,7 +495,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, unsigned long addr, start, end; struct vm_area_struct *vma; struct amdgpu_device *adev; - int r = 0; + unsigned long cpages = 0; + long r = 0; if (prange->actual_loc == best_loc) { pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n", @@ -488,17 +528,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, next = min(vma->vm_end, end); r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next); - if (r) { - pr_debug("failed to migrate\n"); + if (r < 0) { + pr_debug("failed %ld to migrate\n", r); break; + } else { + cpages += r; } addr = next; } - if (!r) + if (cpages) prange->actual_loc = best_loc; - return r; + return r < 0 ? 
r : 0; } static void svm_migrate_page_free(struct page *page) @@ -506,7 +548,7 @@ static void svm_migrate_page_free(struct page *page) struct svm_range_bo *svm_bo = page->zone_device_data; if (svm_bo) { - pr_debug("svm_bo ref left: %d\n", kref_read(&svm_bo->kref)); + pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref)); svm_range_bo_unref(svm_bo); } } @@ -572,15 +614,14 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE); r = dma_mapping_error(dev, dst[i]); if (r) { - pr_debug("failed %d dma_map_page\n", r); + dev_err(adev->dev, "fail %d dma_map_page\n", r); goto out_oom; } - pr_debug("dma mapping dst to 0x%llx, page_to_pfn 0x%lx\n", - dst[i] >> PAGE_SHIFT, page_to_pfn(dpage)); + pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n", + dst[i] >> PAGE_SHIFT, page_to_pfn(dpage)); migrate->dst[i] = migrate_pfn(page_to_pfn(dpage)); - migrate->dst[i] |= MIGRATE_PFN_LOCKED; j++; } @@ -599,11 +640,13 @@ out_oom: return r; } -static int +static long svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end) { uint64_t npages = (end - start) >> PAGE_SHIFT; + unsigned long upages = npages; + unsigned long cpages = 0; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate; @@ -631,36 +674,47 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, r = migrate_vma_setup(&migrate); if (r) { - pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n", - r, prange->svms, prange->start, prange->last); + dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r, + prange->start, prange->last); goto out_free; } - pr_debug("cpages %ld\n", migrate.cpages); - - if (migrate.cpages) { - r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence, - scratch, npages); - migrate_vma_pages(&migrate); - svm_migrate_copy_done(adev, mfence); - migrate_vma_finalize(&migrate); - } else { + cpages = migrate.cpages; + if (!cpages) { pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n", prange->start, prange->last); + upages = svm_migrate_unsuccessful_pages(&migrate); + goto out_free; } + if (cpages != npages) + pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n", + cpages, npages); + else + pr_debug("0x%lx pages migrated\n", cpages); + + r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence, + scratch, npages); + migrate_vma_pages(&migrate); + + upages = svm_migrate_unsuccessful_pages(&migrate); + pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n", + upages, cpages, migrate.npages); + svm_migrate_copy_done(adev, mfence); + migrate_vma_finalize(&migrate); svm_range_dma_unmap(adev->dev, scratch, 0, npages); out_free: kvfree(buf); out: - if (!r) { + if (!r && cpages) { pdd = svm_range_get_pdd_by_adev(prange, adev); if (pdd) - WRITE_ONCE(pdd->page_out, - pdd->page_out + migrate.cpages); + WRITE_ONCE(pdd->page_out, pdd->page_out + cpages); + + return upages; } - return r; + return r ? 
r : upages; } /** @@ -680,7 +734,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm) unsigned long addr; unsigned long start; unsigned long end; - int r = 0; + unsigned long upages = 0; + long r = 0; if (!prange->actual_loc) { pr_debug("[0x%lx 0x%lx] already migrated to ram\n", @@ -711,18 +766,21 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm) next = min(vma->vm_end, end); r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next); - if (r) { - pr_debug("failed %d to migrate\n", r); + if (r < 0) { + pr_debug("failed %ld to migrate\n", r); break; + } else { + upages += r; } addr = next; } - if (!r) { + if (!upages) { svm_range_vram_node_free(prange); prange->actual_loc = 0; } - return r; + + return r < 0 ? r : 0; } /** @@ -740,7 +798,7 @@ static int svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, struct mm_struct *mm) { - int r; + int r, retries = 3; /* * TODO: for both devices with PCIe large bar or on same xgmi hive, skip @@ -749,9 +807,14 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc); - r = svm_migrate_vram_to_ram(prange, mm); - if (r) - return r; + do { + r = svm_migrate_vram_to_ram(prange, mm); + if (r) + return r; + } while (prange->actual_loc && --retries); + + if (prange->actual_loc) + return -EDEADLK; return svm_migrate_ram_to_vram(prange, best_loc, mm); } @@ -796,6 +859,11 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) pr_debug("failed find process at fault address 0x%lx\n", addr); return VM_FAULT_SIGBUS; } + if (READ_ONCE(p->svms.faulting_task) == current) { + pr_debug("skipping ram migration\n"); + kfd_unref_process(p); + return 0; + } addr >>= PAGE_SHIFT; pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index 6e6918ccedfd..965e17c5dbb4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -80,7 +80,8 @@ struct mqd_manager { struct mm_struct *mms); void (*update_mqd)(struct mqd_manager *mm, void *mqd, - struct queue_properties *q); + struct queue_properties *q, + struct mqd_update_info *minfo); int (*destroy_mqd)(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 064914e1e8d6..8128f4d312f1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -42,16 +42,17 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) } static void update_cu_mask(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct mqd_update_info *minfo) { struct cik_mqd *m; uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ - if (q->cu_mask_count == 0) + if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || + !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, - q->cu_mask, q->cu_mask_count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; @@ -135,7 +136,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, *mqd = m; if (gart_addr) *gart_addr = addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, @@ -152,7 +153,7 @@ static void 
init_mqd_sdma(struct mqd_manager *mm, void **mqd, if (gart_addr) *gart_addr = mqd_mem_obj->gpu_addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static void free_mqd(struct mqd_manager *mm, void *mqd, @@ -185,7 +186,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, } static void __update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q, unsigned int atc_bit) + struct queue_properties *q, struct mqd_update_info *minfo, + unsigned int atc_bit) { struct cik_mqd *m; @@ -214,16 +216,17 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, if (q->format == KFD_QUEUE_FORMAT_AQL) m->cp_hqd_pq_control |= NO_UPDATE_RPTR; - update_cu_mask(mm, mqd, q); + update_cu_mask(mm, mqd, minfo); set_priority(m, q); q->is_active = QUEUE_IS_ACTIVE(*q); } static void update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { - __update_mqd(mm, mqd, q, 1); + __update_mqd(mm, mqd, q, minfo, 1); } static uint32_t read_doorbell_id(void *mqd) @@ -234,13 +237,15 @@ static uint32_t read_doorbell_id(void *mqd) } static void update_mqd_hawaii(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { - __update_mqd(mm, mqd, q, 0); + __update_mqd(mm, mqd, q, minfo, 0); } static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct cik_sdma_rlc_registers *m; @@ -318,7 +323,8 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, } static void update_mqd_hiq(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct cik_mqd *m; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index c7fb59ca597f..270160fc401b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -42,16 +42,17 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) } static void update_cu_mask(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct mqd_update_info *minfo) { struct v10_compute_mqd *m; uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ - if (q->cu_mask_count == 0) + if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || + !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, - q->cu_mask, q->cu_mask_count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; @@ -136,7 +137,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, *mqd = m; if (gart_addr) *gart_addr = addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static int load_mqd(struct mqd_manager *mm, void *mqd, @@ -162,7 +163,8 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, } static void update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct v10_compute_mqd *m; @@ -218,7 +220,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, if (mm->dev->cwsr_enabled) m->cp_hqd_ctx_save_control = 0; - update_cu_mask(mm, mqd, q); + update_cu_mask(mm, mqd, minfo); set_priority(m, q); q->is_active = QUEUE_IS_ACTIVE(*q); @@ -311,7 +313,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, if (gart_addr) 
*gart_addr = mqd_mem_obj->gpu_addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, @@ -326,7 +328,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, #define SDMA_RLC_DUMMY_DEFAULT 0xf static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct v10_sdma_mqd *m; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 7f4e102ff4bd..4e5932f54b5a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -43,16 +43,17 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) } static void update_cu_mask(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct mqd_update_info *minfo) { struct v9_mqd *m; uint32_t se_mask[KFD_MAX_NUM_SE] = {0}; - if (q->cu_mask_count == 0) + if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || + !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, - q->cu_mask, q->cu_mask_count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; @@ -188,7 +189,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, *mqd = m; if (gart_addr) *gart_addr = addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static int load_mqd(struct mqd_manager *mm, void *mqd, @@ -212,7 +213,8 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, } static void update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct v9_mqd *m; @@ -269,7 +271,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) m->cp_hqd_ctx_save_control = 0; - update_cu_mask(mm, mqd, q); + update_cu_mask(mm, mqd, minfo); set_priority(m, q); q->is_active = QUEUE_IS_ACTIVE(*q); @@ -366,7 +368,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, if (gart_addr) *gart_addr = mqd_mem_obj->gpu_addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, @@ -381,7 +383,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, #define SDMA_RLC_DUMMY_DEFAULT 0xf static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct v9_sdma_mqd *m; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 33dbd22d290f..cd9220eb8a7a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -45,16 +45,17 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd) } static void update_cu_mask(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct mqd_update_info *minfo) { struct vi_mqd *m; uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ - if (q->cu_mask_count == 0) + if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || + !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, - q->cu_mask, q->cu_mask_count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; @@ -150,7 +151,7 @@ static void init_mqd(struct 
mqd_manager *mm, void **mqd, *mqd = m; if (gart_addr) *gart_addr = addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static int load_mqd(struct mqd_manager *mm, void *mqd, @@ -167,8 +168,8 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, } static void __update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q, unsigned int mtype, - unsigned int atc_bit) + struct queue_properties *q, struct mqd_update_info *minfo, + unsigned int mtype, unsigned int atc_bit) { struct vi_mqd *m; @@ -230,7 +231,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT | mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT; - update_cu_mask(mm, mqd, q); + update_cu_mask(mm, mqd, minfo); set_priority(m, q); q->is_active = QUEUE_IS_ACTIVE(*q); @@ -238,9 +239,10 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, static void update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { - __update_mqd(mm, mqd, q, MTYPE_CC, 1); + __update_mqd(mm, mqd, q, minfo, MTYPE_CC, 1); } static uint32_t read_doorbell_id(void *mqd) @@ -251,9 +253,10 @@ static uint32_t read_doorbell_id(void *mqd) } static void update_mqd_tonga(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { - __update_mqd(mm, mqd, q, MTYPE_UC, 0); + __update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0); } static int destroy_mqd(struct mqd_manager *mm, void *mqd, @@ -317,9 +320,10 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, } static void update_mqd_hiq(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { - __update_mqd(mm, mqd, q, MTYPE_UC, 0); + __update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0); } static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, @@ -336,7 +340,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, if (gart_addr) *gart_addr = mqd_mem_obj->gpu_addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, @@ -349,7 +353,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, } static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct vi_sdma_mqd *m; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 6d8f9bb2d905..94e92c0812db 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -472,9 +472,6 @@ struct queue_properties { uint32_t ctl_stack_size; uint64_t tba_addr; uint64_t tma_addr; - /* Relevant for CU */ - uint32_t cu_mask_count; /* Must be a multiple of 32 */ - uint32_t *cu_mask; }; #define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \ @@ -482,6 +479,20 @@ struct queue_properties { (q).queue_percent > 0 && \ !(q).is_evicted) +enum mqd_update_flag { + UPDATE_FLAG_CU_MASK = 0, +}; + +struct mqd_update_info { + union { + struct { + uint32_t count; /* Must be a multiple of 32 */ + uint32_t *ptr; + } cu_mask; + }; + enum mqd_update_flag update_flag; +}; + /** * struct queue * @@ -608,12 +619,14 @@ struct qcm_process_device { uint32_t sh_hidden_private_base; /* CWSR memory */ + struct kgd_mem *cwsr_mem; void *cwsr_kaddr; uint64_t cwsr_base; uint64_t tba_addr; uint64_t tma_addr; /* IB memory */ + struct kgd_mem 
*ib_mem; uint64_t ib_base; void *ib_kaddr; @@ -753,8 +766,10 @@ struct svm_range_list { struct list_head deferred_range_list; spinlock_t deferred_list_lock; atomic_t evicted_ranges; + bool drain_pagefaults; struct delayed_work restore_work; DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE); + struct task_struct *faulting_task; }; /* Process data */ @@ -808,6 +823,7 @@ struct kfd_process { /* Event ID allocator and lookup */ struct idr event_idr; /* Event page */ + u64 signal_handle; struct kfd_signal_page *signal_page; size_t signal_mapped_size; size_t signal_event_count; @@ -1031,10 +1047,10 @@ int pqm_create_queue(struct process_queue_manager *pqm, unsigned int *qid, uint32_t *p_doorbell_offset_in_process); int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid); -int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, - struct queue_properties *p); -int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid, +int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid, struct queue_properties *p); +int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid, + struct mqd_update_info *minfo); int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, void *gws); struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 21ec8a18cad2..b993011cfa64 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -72,6 +72,8 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep); static void evict_process_worker(struct work_struct *work); static void restore_process_worker(struct work_struct *work); +static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd); + struct kfd_procfs_tree { struct kobject *kobj; }; @@ -685,10 +687,15 @@ void kfd_process_destroy_wq(void) } static void kfd_process_free_gpuvm(struct kgd_mem *mem, - struct kfd_process_device *pdd) + struct kfd_process_device *pdd, void *kptr) { struct kfd_dev *dev = pdd->dev; + if (kptr) { + amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->kgd, mem); + kptr = NULL; + } + amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv); amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv, NULL); @@ -702,63 +709,46 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem, */ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, uint64_t gpu_va, uint32_t size, - uint32_t flags, void **kptr) + uint32_t flags, struct kgd_mem **mem, void **kptr) { struct kfd_dev *kdev = pdd->dev; - struct kgd_mem *mem = NULL; - int handle; int err; err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size, - pdd->drm_priv, &mem, NULL, flags); + pdd->drm_priv, mem, NULL, flags); if (err) goto err_alloc_mem; - err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, + err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, *mem, pdd->drm_priv, NULL); if (err) goto err_map_mem; - err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true); + err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, *mem, true); if (err) { pr_debug("Sync memory failed, wait interrupted by user signal\n"); goto sync_memory_failed; } - /* Create an obj handle so kfd_process_device_remove_obj_handle - * will take care of the bo removal when the process finishes. 
- * We do not need to take p->mutex, because the process is just - * created and the ioctls have not had the chance to run. - */ - handle = kfd_process_device_create_obj_handle(pdd, mem); - - if (handle < 0) { - err = handle; - goto free_gpuvm; - } - if (kptr) { err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd, - (struct kgd_mem *)mem, kptr, NULL); + (struct kgd_mem *)*mem, kptr, NULL); if (err) { pr_debug("Map GTT BO to kernel failed\n"); - goto free_obj_handle; + goto sync_memory_failed; } } return err; -free_obj_handle: - kfd_process_device_remove_obj_handle(pdd, handle); -free_gpuvm: sync_memory_failed: - kfd_process_free_gpuvm(mem, pdd); - return err; + amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->kgd, *mem, pdd->drm_priv); err_map_mem: - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, pdd->drm_priv, + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, *mem, pdd->drm_priv, NULL); err_alloc_mem: + *mem = NULL; *kptr = NULL; return err; } @@ -776,6 +766,7 @@ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; + struct kgd_mem *mem; void *kaddr; int ret; @@ -784,15 +775,26 @@ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) /* ib_base is only set for dGPU */ ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags, - &kaddr); + &mem, &kaddr); if (ret) return ret; + qpd->ib_mem = mem; qpd->ib_kaddr = kaddr; return 0; } +static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd) +{ + struct qcm_process_device *qpd = &pdd->qpd; + + if (!qpd->ib_kaddr || !qpd->ib_base) + return; + + kfd_process_free_gpuvm(qpd->ib_mem, pdd, qpd->ib_kaddr); +} + struct kfd_process *kfd_create_process(struct file *filep) { struct kfd_process *process; @@ -947,6 +949,37 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd) } } +/* + * Just kunmap and unpin signal BO here. 
It will be freed in + kfd_process_free_outstanding_kfd_bos() + */ +static void kfd_process_kunmap_signal_bo(struct kfd_process *p) +{ + struct kfd_process_device *pdd; + struct kfd_dev *kdev; + void *mem; + + kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle)); + if (!kdev) + return; + + mutex_lock(&p->mutex); + + pdd = kfd_get_process_device_data(kdev, p); + if (!pdd) + goto out; + + mem = kfd_process_device_translate_handle( + pdd, GET_IDR_HANDLE(p->signal_handle)); + if (!mem) + goto out; + + amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->kgd, mem); + +out: + mutex_unlock(&p->mutex); +} + static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p) { int i; @@ -965,6 +998,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n", pdd->dev->id, p->pasid); + kfd_process_device_destroy_cwsr_dgpu(pdd); + kfd_process_device_destroy_ib_mem(pdd); + if (pdd->drm_file) { amdgpu_amdkfd_gpuvm_release_process_vm( pdd->dev->kgd, pdd->drm_priv); @@ -1049,9 +1085,11 @@ static void kfd_process_wq_release(struct work_struct *work) { struct kfd_process *p = container_of(work, struct kfd_process, release_work); + kfd_process_remove_sysfs(p); kfd_iommu_unbind_process(p); + kfd_process_kunmap_signal_bo(p); kfd_process_free_outstanding_kfd_bos(p); svm_range_list_fini(p); @@ -1198,6 +1236,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; + struct kgd_mem *mem; void *kaddr; int ret; @@ -1206,10 +1245,11 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) /* cwsr_base is only set for dGPU */ ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base, - KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr); + KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr); if (ret) return ret; + qpd->cwsr_mem = mem; qpd->cwsr_kaddr = kaddr; qpd->tba_addr = qpd->cwsr_base; @@ -1222,6 +1262,17 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) return 0; } +static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd) +{ + struct kfd_dev *dev = pdd->dev; + struct qcm_process_device *qpd = &pdd->qpd; + + if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base) + return; + + kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, qpd->cwsr_kaddr); +} + void kfd_process_set_trap_handler(struct qcm_process_device *qpd, uint64_t tba_addr, uint64_t tma_addr) @@ -1664,7 +1715,11 @@ int kfd_process_evict_queues(struct kfd_process *p) r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, &pdd->qpd); - if (r) { + /* evict_process_queues returns -EIO if HWS is hung or the ASIC is + * resetting. In this case we still want to set all the queues to the + * evicted state, to prevent them from being added back, since their + * state was not actually saved. + */ + if (r && r != -EIO) { pr_err("Failed to evict process queues\n"); goto fail; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 243dd1efcdbf..3627e7ac161b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -121,7 +121,7 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, pdd->qpd.num_gws = gws ? 
amdgpu_amdkfd_get_num_gws(dev->kgd) : 0; return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, - pqn->q); + pqn->q, NULL); } void kfd_process_dequeue_from_all_devices(struct kfd_process *p) @@ -394,8 +394,6 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) pdd->qpd.num_gws = 0; } - kfree(pqn->q->properties.cu_mask); - pqn->q->properties.cu_mask = NULL; uninit_queue(pqn->q); } @@ -411,8 +409,8 @@ err_destroy_queue: return retval; } -int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, - struct queue_properties *p) +int pqm_update_queue_properties(struct process_queue_manager *pqm, + unsigned int qid, struct queue_properties *p) { int retval; struct process_queue_node *pqn; @@ -429,15 +427,15 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, pqn->q->properties.priority = p->priority; retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, - pqn->q); + pqn->q, NULL); if (retval != 0) return retval; return 0; } -int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid, - struct queue_properties *p) +int pqm_update_mqd(struct process_queue_manager *pqm, + unsigned int qid, struct mqd_update_info *minfo) { int retval; struct process_queue_node *pqn; @@ -448,16 +446,8 @@ int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid, return -EFAULT; } - /* Free the old CU mask memory if it is already allocated, then - * allocate memory for the new CU mask. - */ - kfree(pqn->q->properties.cu_mask); - - pqn->q->properties.cu_mask_count = p->cu_mask_count; - pqn->q->properties.cu_mask = p->cu_mask; - retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, - pqn->q); + pqn->q, minfo); if (retval != 0) return retval; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 179080329af8..16137c4247bb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -33,6 +33,11 @@ #include "kfd_svm.h" #include "kfd_migrate.h" +#ifdef dev_fmt +#undef dev_fmt +#endif +#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__ + #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1 /* Long enough to ensure no retry fault comes after svm range is restored and @@ -45,7 +50,9 @@ static bool svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq); - +static int +svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last, + uint64_t *bo_s, uint64_t *bo_l); static const struct mmu_interval_notifier_ops svm_range_mn_ops = { .invalidate = svm_range_cpu_invalidate_pagetables, }; @@ -158,17 +165,17 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, bo_adev->vm_manager.vram_base_offset - bo_adev->kfd.dev->pgmap.range.start; addr[i] |= SVM_RANGE_VRAM_DOMAIN; - pr_debug("vram address detected: 0x%llx\n", addr[i]); + pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]); continue; } addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir); r = dma_mapping_error(dev, addr[i]); if (r) { - pr_debug("failed %d dma_map_page\n", r); + dev_err(dev, "failed %d dma_map_page\n", r); return r; } - pr_debug("dma mapping 0x%llx for page addr 0x%lx\n", - addr[i] >> PAGE_SHIFT, page_to_pfn(page)); + pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n", + addr[i] >> PAGE_SHIFT, page_to_pfn(page)); } return 0; } @@ -217,7 +224,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr, for (i = offset; i < offset + 
npages; i++) { if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i])) continue; - pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT); + pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT); dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir); dma_addr[i] = 0; } @@ -1454,7 +1461,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, /* This should never happen. actual_loc gets set by * svm_migrate_ram_to_vram after allocating a BO. */ - WARN(1, "VRAM BO missing during validation\n"); + WARN_ONCE(1, "VRAM BO missing during validation\n"); return -EINVAL; } @@ -1489,9 +1496,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm, next = min(vma->vm_end, end); npages = (next - addr) >> PAGE_SHIFT; + WRITE_ONCE(p->svms.faulting_task, current); r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL, addr, npages, &hmm_range, readonly, true, owner); + WRITE_ONCE(p->svms.faulting_task, NULL); if (r) { pr_debug("failed %d to get svm range pages\n", r); goto unreserve_out; @@ -1547,7 +1556,7 @@ unreserve_out: * Context: Returns with mmap write lock held, pending deferred work flushed * */ -static void +void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm) { @@ -1993,20 +2002,28 @@ static void svm_range_deferred_list_work(struct work_struct *work) pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange, prange->start, prange->last, prange->work_item.op); - /* Make sure no stale retry fault coming after range is freed */ - if (prange->work_item.op == SVM_OP_UNMAP_RANGE) - svm_range_drain_retry_fault(prange->svms); - mm = prange->work_item.mm; +retry: mmap_write_lock(mm); mutex_lock(&svms->lock); - /* Remove from deferred_list must be inside mmap write lock, + /* Checking for the need to drain retry faults must be in + * mmap write lock to serialize with munmap notifiers. + * + * Remove from deferred_list must be inside mmap write lock, * otherwise, svm_range_list_lock_and_flush_work may hold mmap * write lock, and continue because deferred_list is empty, then * deferred_list handle is blocked by mmap write lock. */ spin_lock(&svms->deferred_list_lock); + if (unlikely(svms->drain_pagefaults)) { + svms->drain_pagefaults = false; + spin_unlock(&svms->deferred_list_lock); + mutex_unlock(&svms->lock); + mmap_write_unlock(mm); + svm_range_drain_retry_fault(svms); + goto retry; + } list_del_init(&prange->deferred_list); spin_unlock(&svms->deferred_list_lock); @@ -2039,6 +2056,12 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, struct mm_struct *mm, enum svm_work_list_ops op) { spin_lock(&svms->deferred_list_lock); + /* Make sure pending page faults are drained in the deferred worker + * before the range is freed to avoid straggler interrupts on + * unmapped memory causing "phantom faults". + */ + if (op == SVM_OP_UNMAP_RANGE) + svms->drain_pagefaults = true; /* if prange is on the deferred list */ if (!list_empty(&prange->deferred_list)) { pr_debug("update exist prange 0x%p work op %d\n", prange, op); @@ -2254,7 +2277,7 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, * migration if actual loc is not best location, then update GPU page table * mapping to the best location. * - * If vm fault gpu is range preferred loc, the best_loc is preferred loc. + * If the preferred loc is accessible by faulting GPU, use preferred loc. 
* If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then * if range actual loc is cpu, best_loc is cpu @@ -2271,7 +2294,7 @@ svm_range_best_restore_location(struct svm_range *prange, struct amdgpu_device *adev, int32_t *gpuidx) { - struct amdgpu_device *bo_adev; + struct amdgpu_device *bo_adev, *preferred_adev; struct kfd_process *p; uint32_t gpuid; int r; @@ -2284,8 +2307,16 @@ svm_range_best_restore_location(struct svm_range *prange, return -1; } - if (prange->preferred_loc == gpuid) + if (prange->preferred_loc == gpuid || + prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) { return prange->preferred_loc; + } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) { + preferred_adev = svm_range_get_adev_by_id(prange, + prange->preferred_loc); + if (amdgpu_xgmi_same_hive(adev, preferred_adev)) + return prange->preferred_loc; + /* fall through */ + } if (test_bit(*gpuidx, prange->bitmap_access)) return gpuid; @@ -2303,9 +2334,11 @@ svm_range_best_restore_location(struct svm_range *prange, return -1; } + static int svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, - unsigned long *start, unsigned long *last) + unsigned long *start, unsigned long *last, + bool *is_heap_stack) { struct vm_area_struct *vma; struct interval_tree_node *node; @@ -2316,6 +2349,12 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, pr_debug("VMA does not exist in address [0x%llx]\n", addr); return -EFAULT; } + + *is_heap_stack = (vma->vm_start <= vma->vm_mm->brk && + vma->vm_end >= vma->vm_mm->start_brk) || + (vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack); + start_limit = max(vma->vm_start >> PAGE_SHIFT, (unsigned long)ALIGN_DOWN(addr, 2UL << 8)); end_limit = min(vma->vm_end >> PAGE_SHIFT, @@ -2345,13 +2384,64 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, *start = start_limit; *last = end_limit - 1; - pr_debug("vma start: 0x%lx start: 0x%lx vma end: 0x%lx last: 0x%lx\n", - vma->vm_start >> PAGE_SHIFT, *start, - vma->vm_end >> PAGE_SHIFT, *last); + pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n", + vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT, + *start, *last, *is_heap_stack); return 0; +} + +static int +svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last, + uint64_t *bo_s, uint64_t *bo_l) +{ + struct amdgpu_bo_va_mapping *mapping; + struct interval_tree_node *node; + struct amdgpu_bo *bo = NULL; + unsigned long userptr; + uint32_t i; + int r; + for (i = 0; i < p->n_pdds; i++) { + struct amdgpu_vm *vm; + + if (!p->pdds[i]->drm_priv) + continue; + + vm = drm_priv_to_vm(p->pdds[i]->drm_priv); + r = amdgpu_bo_reserve(vm->root.bo, false); + if (r) + return r; + + /* Check userptr by searching entire vm->va interval tree */ + node = interval_tree_iter_first(&vm->va, 0, ~0ULL); + while (node) { + mapping = container_of((struct rb_node *)node, + struct amdgpu_bo_va_mapping, rb); + bo = mapping->bo_va->base.bo; + + if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, + start << PAGE_SHIFT, + last << PAGE_SHIFT, + &userptr)) { + node = interval_tree_iter_next(node, 0, ~0ULL); + continue; + } + + pr_debug("[0x%llx 0x%llx] already userptr mapped\n", + start, last); + if (bo_s && bo_l) { + *bo_s = userptr >> PAGE_SHIFT; + *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1; + } + amdgpu_bo_unreserve(vm->root.bo); + return -EADDRINUSE; + } + amdgpu_bo_unreserve(vm->root.bo); + } + 
return 0; } + static struct svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, struct kfd_process *p, @@ -2361,10 +2451,28 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, struct svm_range *prange = NULL; unsigned long start, last; uint32_t gpuid, gpuidx; + bool is_heap_stack; + uint64_t bo_s = 0; + uint64_t bo_l = 0; + int r; - if (svm_range_get_range_boundaries(p, addr, &start, &last)) + if (svm_range_get_range_boundaries(p, addr, &start, &last, + &is_heap_stack)) return NULL; + r = svm_range_check_vm(p, start, last, &bo_s, &bo_l); + if (r != -EADDRINUSE) + r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l); + + if (r == -EADDRINUSE) { + if (addr >= bo_s && addr <= bo_l) + return NULL; + + /* Create one page svm range if 2MB range overlapping */ + start = addr; + last = addr; + } + prange = svm_range_new(&p->svms, start, last); if (!prange) { pr_debug("Failed to create prange in address [0x%llx]\n", addr); @@ -2376,6 +2484,9 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, return NULL; } + if (is_heap_stack) + prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM; + svm_range_add_to_svms(prange); svm_range_add_notifier_locked(mm, prange); @@ -2663,8 +2774,67 @@ int svm_range_list_init(struct kfd_process *p) } /** + * svm_range_check_vm - check if virtual address range mapped already + * @p: current kfd_process + * @start: range start address, in pages + * @last: range last address, in pages + * @bo_s: mapping start address in pages if address range already mapped + * @bo_l: mapping last address in pages if address range already mapped + * + * The purpose is to avoid virtual address ranges already allocated by + * kfd_ioctl_alloc_memory_of_gpu ioctl. + * It looks for each pdd in the kfd_process. + * + * Context: Process context + * + * Return 0 - OK, if the range is not mapped. + * Otherwise error code: + * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu + * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by + * a signal. Release all buffer reservations and return to user-space. 
+ */ +static int +svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last, + uint64_t *bo_s, uint64_t *bo_l) +{ + struct amdgpu_bo_va_mapping *mapping; + struct interval_tree_node *node; + uint32_t i; + int r; + + for (i = 0; i < p->n_pdds; i++) { + struct amdgpu_vm *vm; + + if (!p->pdds[i]->drm_priv) + continue; + + vm = drm_priv_to_vm(p->pdds[i]->drm_priv); + r = amdgpu_bo_reserve(vm->root.bo, false); + if (r) + return r; + + node = interval_tree_iter_first(&vm->va, start, last); + if (node) { + pr_debug("range [0x%llx 0x%llx] already TTM mapped\n", + start, last); + mapping = container_of((struct rb_node *)node, + struct amdgpu_bo_va_mapping, rb); + if (bo_s && bo_l) { + *bo_s = mapping->start; + *bo_l = mapping->last; + } + amdgpu_bo_unreserve(vm->root.bo); + return -EADDRINUSE; + } + amdgpu_bo_unreserve(vm->root.bo); + } + + return 0; +} + +/** * svm_range_is_valid - check if virtual address range is valid - * @mm: current process mm_struct + * @p: current kfd_process * @start: range start address, in pages * @size: range size, in pages * @@ -2673,28 +2843,28 @@ int svm_range_list_init(struct kfd_process *p) * Context: Process context * * Return: - * true - valid svm range - * false - invalid svm range + * 0 - OK, otherwise error code */ -static bool -svm_range_is_valid(struct mm_struct *mm, uint64_t start, uint64_t size) +static int +svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size) { const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP; struct vm_area_struct *vma; unsigned long end; + unsigned long start_unchg = start; start <<= PAGE_SHIFT; end = start + (size << PAGE_SHIFT); - do { - vma = find_vma(mm, start); + vma = find_vma(p->mm, start); if (!vma || start < vma->vm_start || (vma->vm_flags & device_vma)) - return false; + return -EFAULT; start = min(end, vma->vm_end); } while (start < end); - return true; + return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL, + NULL); } /** @@ -2942,6 +3112,8 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) struct svm_range *prange = list_first_entry(&svm_bo->range_list, struct svm_range, svm_bo_list); + int retries = 3; + list_del_init(&prange->svm_bo_list); spin_unlock(&svm_bo->list_lock); @@ -2949,7 +3121,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) prange->start, prange->last); mutex_lock(&prange->migrate_mutex); - svm_migrate_vram_to_ram(prange, svm_bo->eviction_fence->mm); + do { + svm_migrate_vram_to_ram(prange, + svm_bo->eviction_fence->mm); + } while (prange->actual_loc && --retries); + WARN(prange->actual_loc, "Migration failed during eviction"); mutex_lock(&prange->lock); prange->svm_bo = NULL; @@ -2997,9 +3173,9 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, svm_range_list_lock_and_flush_work(svms, mm); - if (!svm_range_is_valid(mm, start, size)) { - pr_debug("invalid range\n"); - r = -EFAULT; + r = svm_range_is_valid(p, start, size); + if (r) { + pr_debug("invalid range r=%d\n", r); mmap_write_unlock(mm); goto out; } @@ -3101,6 +3277,7 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size, uint32_t flags_or = 0; int gpuidx; uint32_t i; + int r = 0; pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start, start + size - 1, nattr); @@ -3114,12 +3291,12 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size, flush_work(&p->svms.deferred_list_work); mmap_read_lock(mm); - if (!svm_range_is_valid(mm, start, size)) { - pr_debug("invalid 
range\n"); - mmap_read_unlock(mm); - return -EINVAL; - } + r = svm_range_is_valid(p, start, size); mmap_read_unlock(mm); + if (r) { + pr_debug("invalid range r=%d\n", r); + return r; + } for (i = 0; i < nattr; i++) { switch (attrs[i].type) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index c6ec55354c7b..6dc91c33e80f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -188,6 +188,7 @@ void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm, void *owner); struct kfd_process_device * svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev); +void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm); /* SVM API and HMM page migration work together, device memory type * is initialized to not 0 when page migration register device memory. diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 98cca5f2b27f..dd593ad0614a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1296,6 +1296,24 @@ int kfd_topology_add_device(struct kfd_dev *gpu) proximity_domain = atomic_inc_return(&topology_crat_proximity_domain); + adev = (struct amdgpu_device *)(gpu->kgd); + + /* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. */ + if (gpu->hive_id && adev->gmc.xgmi.connected_to_cpu) { + struct kfd_topology_device *top_dev; + + down_read(&topology_lock); + + list_for_each_entry(top_dev, &topology_device_list, list) { + if (top_dev->gpu) + break; + + top_dev->node_props.hive_id = gpu->hive_id; + } + + up_read(&topology_lock); + } + /* Check to see if this gpu device exists in the topology_device_list. * If so, assign the gpu to that device, * else create a Virtual CRAT for this gpu device and then parse that @@ -1457,7 +1475,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.max_waves_per_simd = 10; } - adev = (struct amdgpu_device *)(dev->gpu->kgd); /* kfd only concerns sram ecc on GFX and HBM ecc on UMC */ dev->node_props.capability |= ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ? diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c42b7f50beb8..4130082c5873 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -217,6 +217,7 @@ static const struct drm_format_info * amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); +static void handle_hpd_rx_irq(void *param); static bool is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, @@ -619,7 +620,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base); } -#endif +#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ /** * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command. 
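The svm_range_is_valid() change a few hunks above is a bool-to-errno conversion: instead of collapsing every failure into a single code, the int return lets the specific cause from svm_range_check_vm() (-EADDRINUSE for ranges already mapped by the alloc ioctl, -ERESTARTSYS for an interrupted reservation wait) propagate to user space. A condensed before/after sketch of the caller side, paraphrased from the hunks above rather than quoted verbatim:

	/* Before: predicate, every failure collapsed to one errno */
	if (!svm_range_is_valid(mm, start, size)) {
		r = -EFAULT;
		goto out;
	}

	/* After: errno-returning check, the specific cause is preserved */
	r = svm_range_is_valid(p, start, size);
	if (r) {
		pr_debug("invalid range r=%d\n", r);
		goto out;
	}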
@@ -669,10 +670,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not return; } - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - link_index = notify->link_index; - link = adev->dm.dc->links[link_index]; drm_connector_list_iter_begin(dev, &iter); @@ -685,10 +683,13 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not } } drm_connector_list_iter_end(&iter); - drm_modeset_unlock(&dev->mode_config.connection_mutex); - if (hpd_aconnector) - handle_hpd_irq_helper(hpd_aconnector); + if (hpd_aconnector) { + if (notify->type == DMUB_NOTIFICATION_HPD) + handle_hpd_irq_helper(hpd_aconnector); + else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) + handle_hpd_rx_irq(hpd_aconnector); + } } /** @@ -730,6 +731,8 @@ static void dm_handle_hpd_work(struct work_struct *work) dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev, dmub_hpd_wrk->dmub_notify); } + + kfree(dmub_hpd_wrk->dmub_notify); kfree(dmub_hpd_wrk); } @@ -755,12 +758,6 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) if (dc_enable_dmub_notifications(adev->dm.dc) && irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { - dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); - if (!dmub_hpd_wrk) { - DRM_ERROR("Failed to allocate dmub_hpd_wrk"); - return; - } - INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); do { dc_stat_get_dmub_notification(adev->dm.dc, &notify); @@ -768,8 +765,25 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) DRM_ERROR("DM: notify type %d invalid!", notify.type); continue; } + if (!dm->dmub_callback[notify.type]) { + DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type); + continue; + } if (dm->dmub_thread_offload[notify.type] == true) { - dmub_hpd_wrk->dmub_notify = &notify; + dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); + if (!dmub_hpd_wrk) { + DRM_ERROR("Failed to allocate dmub_hpd_wrk"); + return; + } + dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC); + if (!dmub_hpd_wrk->dmub_notify) { + kfree(dmub_hpd_wrk); + DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify"); + return; + } + INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); + if (dmub_hpd_wrk->dmub_notify) + memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification)); dmub_hpd_wrk->adev = adev; if (notify.type == DMUB_NOTIFICATION_HPD) { plink = adev->dm.dc->links[notify.link_index]; @@ -804,7 +818,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) if (count > DMUB_TRACE_MAX_READ) DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); } -#endif +#endif /* CONFIG_DRM_AMD_DC_DCN */ static int dm_set_clockgating_state(void *handle, enum amd_clockgating_state state) @@ -1008,6 +1022,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) const unsigned char *fw_inst_const, *fw_bss_data; uint32_t i, fw_inst_const_size, fw_bss_data_size; bool has_hw_support; + struct dc *dc = adev->dm.dc; if (!dmub_srv) /* DMUB isn't supported on the ASIC. 
*/ @@ -1094,6 +1109,19 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) for (i = 0; i < fb_info->num_fb; ++i) hw_params.fb[i] = &fb_info->fb[i]; + switch (adev->asic_type) { + case CHIP_YELLOW_CARP: + if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) { + hw_params.dpia_supported = true; +#if defined(CONFIG_DRM_AMD_DC_DCN) + hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia; +#endif + } + break; + default: + break; + } + status = dmub_srv_hw_init(dmub_srv, &hw_params); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error initializing DMUB HW: %d\n", status); @@ -1295,6 +1323,37 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct return hpd_rx_offload_wq; } +struct amdgpu_stutter_quirk { + u16 chip_vendor; + u16 chip_device; + u16 subsys_vendor; + u16 subsys_device; + u8 revision; +}; + +static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = { + /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */ + { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, + { 0, 0, 0, 0, 0 }, +}; + +static bool dm_should_disable_stutter(struct pci_dev *pdev) +{ + const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list; + + while (p && p->chip_device != 0) { + if (pdev->vendor == p->chip_vendor && + pdev->device == p->chip_device && + pdev->subsystem_vendor == p->subsys_vendor && + pdev->subsystem_device == p->subsys_device && + pdev->revision == p->revision) { + return true; + } + ++p; + } + return false; +} + static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; @@ -1356,8 +1415,15 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(2, 1, 0): init_data.flags.gpu_vm_support = true; - if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) + switch (adev->dm.dmcub_fw_version) { + case 0: /* development */ + case 0x1: /* linux-firmware.git hash 6d9f399 */ + case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ + init_data.flags.disable_dmcu = false; + break; + default: init_data.flags.disable_dmcu = true; + } break; case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): @@ -1407,6 +1473,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; + if (dm_should_disable_stutter(adev->pdev)) + adev->dm.dc->debug.disable_stutter = true; if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) adev->dm.dc->debug.disable_stutter = true; @@ -1501,7 +1569,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu: fail to register dmub hpd callback"); goto error; } -#endif + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + goto error; + } +#endif /* CONFIG_DRM_AMD_DC_DCN */ } if (amdgpu_dm_initialize_drm_device(adev)) { @@ -1793,7 +1865,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) break; case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): - dmub_asic = DMUB_ASIC_DCN31; + dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? 
DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; break; @@ -4031,6 +4103,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) int32_t primary_planes; enum dc_connection_type new_connection_type = dc_connection_none; const struct dc_plane_cap *plane; + bool psr_feature_enabled = false; dm->display_indexes_num = dm->dc->caps.max_streams; /* Update the actual used number of crtc */ @@ -4113,6 +4186,19 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", adev->ip_versions[DCE_HWIP][0]); } + + /* Determine whether to enable PSR support by default. */ + if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + psr_feature_enabled = true; + break; + default: + psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; + break; + } + } #endif /* loops over all connectors on the board */ @@ -4156,7 +4242,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); - if (amdgpu_dc_feature_mask & DC_PSR_MASK) + + if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); } @@ -4495,7 +4582,8 @@ static void get_min_max_dc_plane_scaling(struct drm_device *dev, } -static int fill_dc_scaling_info(const struct drm_plane_state *state, +static int fill_dc_scaling_info(struct amdgpu_device *adev, + const struct drm_plane_state *state, struct dc_scaling_info *scaling_info) { int scale_w, scale_h, min_downscale, max_upscale; @@ -4509,7 +4597,8 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state, /* * For reasons we don't (yet) fully understand a non-zero * src_y coordinate into an NV12 buffer can cause a - * system hang. To avoid hangs (and maybe be overly cautious) + * system hang on DCN1x. + * To avoid hangs (and maybe be overly cautious) * let's reject both non-zero src_x and src_y. * * We currently know of only one use-case to reproduce a @@ -4517,10 +4606,10 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state, * is to gesture the YouTube Android app into full screen * on ChromeOS. 
*/ - if (state->fb && - state->fb->format->format == DRM_FORMAT_NV12 && - (scaling_info->src_rect.x != 0 || - scaling_info->src_rect.y != 0)) + if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || + (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && + (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && + (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) return -EINVAL; scaling_info->src_rect.width = state->src_w >> 16; @@ -5426,7 +5515,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, int ret; bool force_disable_dcc = false; - ret = fill_dc_scaling_info(plane_state, &scaling_info); + ret = fill_dc_scaling_info(adev, plane_state, &scaling_info); if (ret) return ret; @@ -6000,7 +6089,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; } -#endif +#endif /* CONFIG_DRM_AMD_DC_DCN */ /** * DOC: FreeSync Video @@ -7171,8 +7260,8 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, struct drm_connector_state *new_con_state; struct amdgpu_dm_connector *aconnector; struct dm_connector_state *dm_conn_state; - int i, j, clock; - int vcpi, pbn_div, pbn = 0; + int i, j; + int vcpi, pbn_div, pbn, slot_num = 0; for_each_new_connector_in_state(state, connector, new_con_state, i) { @@ -7200,17 +7289,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, if (!stream) continue; - if (stream->timing.flags.DSC != 1) { - drm_dp_mst_atomic_enable_dsc(state, - aconnector->port, - dm_conn_state->pbn, - 0, - false); - continue; - } - pbn_div = dm_mst_get_pbn_divider(stream->link); - clock = stream->timing.pix_clk_100hz / 10; /* pbn is calculated by compute_mst_dsc_configs_for_state*/ for (j = 0; j < dc_state->stream_count; j++) { if (vars[j].aconnector == aconnector) { @@ -7219,6 +7298,23 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, } } + if (j == dc_state->stream_count) + continue; + + slot_num = DIV_ROUND_UP(pbn, pbn_div); + + if (stream->timing.flags.DSC != 1) { + dm_conn_state->pbn = pbn; + dm_conn_state->vcpi_slots = slot_num; + + drm_dp_mst_atomic_enable_dsc(state, + aconnector->port, + dm_conn_state->pbn, + 0, + false); + continue; + } + vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, pbn_div, @@ -7482,7 +7578,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane, if (ret) return ret; - ret = fill_dc_scaling_info(new_plane_state, &scaling_info); + ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info); if (ret) return ret; @@ -8930,7 +9026,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; } - fill_dc_scaling_info(new_plane_state, + fill_dc_scaling_info(dm->adev, new_plane_state, &bundle->scaling_infos[planes_count]); bundle->surface_updates[planes_count].scaling_info = @@ -10535,18 +10631,18 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, struct drm_crtc *crtc, struct drm_crtc_state *new_crtc_state) { - struct drm_plane_state *new_cursor_state, *new_primary_state; - int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h; + struct drm_plane *cursor = crtc->cursor, *underlying; + struct drm_plane_state *new_cursor_state, *new_underlying_state; + int i; + int cursor_scale_w, cursor_scale_h, 
underlying_scale_w, underlying_scale_h; /* On DCE and DCN there is no dedicated hardware cursor plane. We get a * cursor per pipe but it's going to inherit the scaling and * positioning from the underlying pipe. Check the cursor plane's - * blending properties match the primary plane's. */ + * blending properties match the underlying planes'. */ - new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor); - new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary); - if (!new_cursor_state || !new_primary_state || - !new_cursor_state->fb || !new_primary_state->fb) { + new_cursor_state = drm_atomic_get_new_plane_state(state, cursor); + if (!new_cursor_state || !new_cursor_state->fb) { return 0; } @@ -10555,15 +10651,34 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, cursor_scale_h = new_cursor_state->crtc_h * 1000 / (new_cursor_state->src_h >> 16); - primary_scale_w = new_primary_state->crtc_w * 1000 / - (new_primary_state->src_w >> 16); - primary_scale_h = new_primary_state->crtc_h * 1000 / - (new_primary_state->src_h >> 16); + for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { + /* Narrow down to non-cursor planes on the same CRTC as the cursor */ + if (new_underlying_state->crtc != crtc || underlying == crtc->cursor) + continue; - if (cursor_scale_w != primary_scale_w || - cursor_scale_h != primary_scale_h) { - drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n"); - return -EINVAL; + /* Ignore disabled planes */ + if (!new_underlying_state->fb) + continue; + + underlying_scale_w = new_underlying_state->crtc_w * 1000 / + (new_underlying_state->src_w >> 16); + underlying_scale_h = new_underlying_state->crtc_h * 1000 / + (new_underlying_state->src_h >> 16); + + if (cursor_scale_w != underlying_scale_w || + cursor_scale_h != underlying_scale_h) { + drm_dbg_atomic(crtc->dev, + "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n", + cursor->base.id, cursor->name, underlying->base.id, underlying->name); + return -EINVAL; + } + + /* If this plane covers the whole CRTC, no need to check planes underneath */ + if (new_underlying_state->crtc_x <= 0 && + new_underlying_state->crtc_y <= 0 && + new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay && + new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay) + break; } return 0; @@ -10594,53 +10709,6 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm } #endif -static int validate_overlay(struct drm_atomic_state *state) -{ - int i; - struct drm_plane *plane; - struct drm_plane_state *new_plane_state; - struct drm_plane_state *primary_state, *overlay_state = NULL; - - /* Check if primary plane is contained inside overlay */ - for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) { - if (plane->type == DRM_PLANE_TYPE_OVERLAY) { - if (drm_atomic_plane_disabling(plane->state, new_plane_state)) - return 0; - - overlay_state = new_plane_state; - continue; - } - } - - /* check if we're making changes to the overlay plane */ - if (!overlay_state) - return 0; - - /* check if overlay plane is enabled */ - if (!overlay_state->crtc) - return 0; - - /* find the primary plane for the CRTC that the overlay is enabled on */ - primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary); - if (IS_ERR(primary_state)) - return PTR_ERR(primary_state); - - /* check if primary plane is enabled */ - if 
(!primary_state->crtc) - return 0; - - /* Perform the bounds check to ensure the overlay plane covers the primary */ - if (primary_state->crtc_x < overlay_state->crtc_x || - primary_state->crtc_y < overlay_state->crtc_y || - primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w || - primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) { - DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n"); - return -EINVAL; - } - - return 0; -} - /** * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. * @dev: The DRM device @@ -10683,6 +10751,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct dm_crtc_state *dm_old_crtc_state; #if defined(CONFIG_DRM_AMD_DC_DCN) struct dsc_mst_fairness_vars vars[MAX_PIPES]; + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_topology_mgr *mgr; #endif trace_amdgpu_dm_atomic_check_begin(state); @@ -10743,7 +10813,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ret = drm_atomic_add_affected_connectors(state, crtc); if (ret) - return ret; + goto fail; ret = drm_atomic_add_affected_planes(state, crtc); if (ret) @@ -10822,10 +10892,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } - ret = validate_overlay(state); - if (ret) - goto fail; - /* Add new/modified planes */ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { ret = dm_update_plane_state(dc, state, plane, @@ -10891,6 +10957,33 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, lock_and_validation_needed = true; } +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* set the slot info for each mst_state based on the link encoding format */ + for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { + struct amdgpu_dm_connector *aconnector; + struct drm_connector *connector; + struct drm_connector_list_iter iter; + u8 link_coding_cap; + + if (!mgr->mst_state ) + continue; + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { + int id = connector->index; + + if (id == mst_state->mgr->conn_base_id) { + aconnector = to_amdgpu_dm_connector(connector); + link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); + drm_dp_mst_update_slots(mst_state, link_coding_cap); + + break; + } + } + drm_connector_list_iter_end(&iter); + + } +#endif /** * Streams and planes are reset when there are changes that affect * bandwidth. Anything that affects bandwidth needs to go through diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index f8c8122e15ed..0277685864c5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -78,12 +78,10 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size, wr_buf_ptr = wr_buf; - r = copy_from_user(wr_buf_ptr, buf, wr_buf_size); - - /* r is bytes not be copied */ - if (r >= wr_buf_size) { - DRM_DEBUG_DRIVER("user data not be read\n"); - return -EINVAL; + /* r is bytes not be copied */ + if (copy_from_user(wr_buf_ptr, buf, wr_buf_size)) { + DRM_DEBUG_DRIVER("user data could not be read successfully\n"); + return -EFAULT; } /* check number of parameters. 
isspace could not differ space and \n */ @@ -264,7 +262,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, if (!wr_buf) return -ENOSPC; - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { @@ -294,6 +292,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, case LINK_RATE_RBR2: case LINK_RATE_HIGH2: case LINK_RATE_HIGH3: +#if defined(CONFIG_DRM_AMD_DC_DCN) + case LINK_RATE_UHBR10: +#endif break; default: valid_input = false; @@ -488,7 +489,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf, if (!wr_buf) return -ENOSPC; - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { @@ -640,7 +641,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us if (!wr_buf) return -ENOSPC; - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { @@ -915,7 +916,7 @@ static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf, return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, &param, buf, max_param_num, &param_nums)) { @@ -1212,7 +1213,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf, return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { @@ -1397,7 +1398,7 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf, return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { @@ -1582,7 +1583,7 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf, return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { @@ -1767,7 +1768,7 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf, return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { @@ -1945,7 +1946,7 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { @@ -2383,7 +2384,7 @@ static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf, return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, &param_nums)) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index ff0f91c93ba4..8cbeeb7c986d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -219,6 +219,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_port *mst_port; bool ret; + u8 link_coding_cap = 
DP_8b_10b_ENCODING; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; /* Accessing the connector state is required for vcpi_slots allocation @@ -238,6 +239,10 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( mst_port = aconnector->port; +#if defined(CONFIG_DRM_AMD_DC_DCN) + link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); +#endif + if (enable) { ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, @@ -251,7 +256,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( } /* It's OK for this to fail */ - drm_dp_update_payload_part1(mst_mgr); + drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1); /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. The sequence is slot 1-63 allocated sequence for each diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 1a99fcc27078..32a5ce09a62a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -64,6 +64,8 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0; payload.write = (msg->request & DP_AUX_I2C_READ) == 0; payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0; + payload.write_status_update = + (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0; payload.defer_delay = 0; result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, @@ -532,13 +534,14 @@ static int kbps_to_peak_pbn(int kbps) static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, - int count) + int count, + int k) { int i; for (i = 0; i < count; i++) { memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); - if (vars[i].dsc_enabled && dc_dsc_compute_config( + if (vars[i + k].dsc_enabled && dc_dsc_compute_config( params[i].sink->ctx->dc->res_pool->dscs[0], ¶ms[i].sink->dsc_caps.dsc_dec_caps, params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, @@ -551,7 +554,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p if (params[i].bpp_overwrite) params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite; else - params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16; + params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16; if (params[i].num_slices_h) params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h; @@ -584,7 +587,8 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, - int count) + int count, + int k) { int i; bool bpp_increased[MAX_PIPES]; @@ -599,8 +603,9 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link); for (i = 0; i < count; i++) { - if (vars[i].dsc_enabled) { - initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn; + if (vars[i + k].dsc_enabled) { + initial_slack[i] = + kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn; bpp_increased[i] = false; remaining_to_increase += 1; } else { @@ -627,7 +632,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, link_timeslots_used = 0; for (i = 0; i < count; i++) - link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot); + link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, 
pbn_per_timeslot); fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot; @@ -680,7 +685,8 @@ static void try_disable_dsc(struct drm_atomic_state *state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, - int count) + int count, + int k) { int i; bool tried[MAX_PIPES]; @@ -690,8 +696,8 @@ static void try_disable_dsc(struct drm_atomic_state *state, int remaining_to_try = 0; for (i = 0; i < count; i++) { - if (vars[i].dsc_enabled - && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16 + if (vars[i + k].dsc_enabled + && vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16 && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) { kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; tried[i] = false; @@ -746,9 +752,10 @@ static void try_disable_dsc(struct drm_atomic_state *state, static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, struct dc_state *dc_state, struct dc_link *dc_link, - struct dsc_mst_fairness_vars *vars) + struct dsc_mst_fairness_vars *vars, + int *link_vars_start_index) { - int i; + int i, k; struct dc_stream_state *stream; struct dsc_mst_fairness_params params[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; @@ -766,11 +773,17 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (stream->link != dc_link) continue; + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + if (!aconnector) + continue; + + if (!aconnector->port) + continue; + stream->timing.flags.DSC = 0; params[count].timing = &stream->timing; params[count].sink = stream->sink; - aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; params[count].aconnector = aconnector; params[count].port = aconnector->port; params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; @@ -792,44 +805,55 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, count++; } + + if (count == 0) { + ASSERT(0); + return true; + } + + /* k is start index of vars for current phy link used by mst hub */ + k = *link_vars_start_index; + /* set vars start index for next mst hub phy link */ + *link_vars_start_index += count; + /* Try no compression */ for (i = 0; i < count; i++) { - vars[i].aconnector = params[i].aconnector; - vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); - vars[i].dsc_enabled = false; - vars[i].bpp_x16 = 0; + vars[i + k].aconnector = params[i].aconnector; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); + vars[i + k].dsc_enabled = false; + vars[i + k].bpp_x16 = 0; if (drm_dp_atomic_find_vcpi_slots(state, params[i].port->mgr, params[i].port, - vars[i].pbn, + vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return false; } if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { - set_dsc_configs_from_fairness_vars(params, vars, count); + set_dsc_configs_from_fairness_vars(params, vars, count, k); return true; } /* Try max compression */ for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { - vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); - vars[i].dsc_enabled = true; - vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); + vars[i + k].dsc_enabled = true; + vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; if (drm_dp_atomic_find_vcpi_slots(state, 
params[i].port->mgr, params[i].port, - vars[i].pbn, + vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return false; } else { - vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); - vars[i].dsc_enabled = false; - vars[i].bpp_x16 = 0; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); + vars[i + k].dsc_enabled = false; + vars[i + k].bpp_x16 = 0; if (drm_dp_atomic_find_vcpi_slots(state, params[i].port->mgr, params[i].port, - vars[i].pbn, + vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return false; } @@ -838,15 +862,76 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return false; /* Optimize degree of compression */ - increase_dsc_bpp(state, dc_link, params, vars, count); + increase_dsc_bpp(state, dc_link, params, vars, count, k); - try_disable_dsc(state, dc_link, params, vars, count); + try_disable_dsc(state, dc_link, params, vars, count, k); - set_dsc_configs_from_fairness_vars(params, vars, count); + set_dsc_configs_from_fairness_vars(params, vars, count, k); return true; } +static bool is_dsc_need_re_compute( + struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dc_link *dc_link) +{ + int i; + bool is_dsc_need_re_compute = false; + + /* only check phy used by mst branch */ + if (dc_link->type != dc_connection_mst_branch) + return false; + + /* check if there is mode change in new request */ + for (i = 0; i < dc_state->stream_count; i++) { + struct amdgpu_dm_connector *aconnector; + struct dc_stream_state *stream; + struct drm_crtc_state *new_crtc_state; + struct drm_connector_state *new_conn_state; + + stream = dc_state->streams[i]; + + if (!stream) + continue; + + /* check if stream using the same link for mst */ + if (stream->link != dc_link) + continue; + + aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; + if (!aconnector) + continue; + + new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); + + if (!new_conn_state) + continue; + + if (IS_ERR(new_conn_state)) + continue; + + if (!new_conn_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + + if (!new_crtc_state) + continue; + + if (IS_ERR(new_crtc_state)) + continue; + + if (new_crtc_state->enable && new_crtc_state->active) { + if (new_crtc_state->mode_changed || new_crtc_state->active_changed || + new_crtc_state->connectors_changed) + is_dsc_need_re_compute = true; + } + } + + return is_dsc_need_re_compute; +} + bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, struct dc_state *dc_state, struct dsc_mst_fairness_vars *vars) @@ -855,6 +940,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, struct dc_stream_state *stream; bool computed_streams[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; + int link_vars_start_index = 0; for (i = 0; i < dc_state->stream_count; i++) computed_streams[i] = false; @@ -879,8 +965,12 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) return false; + if (!is_dsc_need_re_compute(state, dc_state, stream->link)) + continue; + mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) { + if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, + vars, &link_vars_start_index)) { mutex_unlock(&aconnector->mst_mgr.lock); return false; } diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 70a554f1e725..c022e56f9459 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -107,6 +107,8 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) */ // Init fail safe of 2 frames static unsigned int num_frames_static = 2; + unsigned int power_opt = 0; + bool psr_enable = true; DRM_DEBUG_DRIVER("Enabling psr...\n"); @@ -133,7 +135,9 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) &stream, 1, ¶ms); - return dc_link_set_psr_allow_active(link, true, false, false); + power_opt |= psr_power_opt_z10_static_screen; + + return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); } /* @@ -144,10 +148,12 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) */ bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) { + unsigned int power_opt = 0; + bool psr_enable = false; DRM_DEBUG_DRIVER("Disabling psr...\n"); - return dc_link_set_psr_allow_active(stream->link, false, true, false); + return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt); } /* diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index cdb5c027411a..a4bef4364afd 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -99,6 +99,10 @@ static enum bp_result get_firmware_info_v3_2( struct bios_parser *bp, struct dc_firmware_info *info); +static enum bp_result get_firmware_info_v3_4( + struct bios_parser *bp, + struct dc_firmware_info *info); + static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp, struct atom_display_object_path_v2 *object); @@ -1426,8 +1430,10 @@ static enum bp_result bios_parser_get_firmware_info( break; case 2: case 3: - case 4: result = get_firmware_info_v3_2(bp, info); + break; + case 4: + result = get_firmware_info_v3_4(bp, info); break; default: break; @@ -1575,6 +1581,88 @@ static enum bp_result get_firmware_info_v3_2( return BP_RESULT_OK; } +static enum bp_result get_firmware_info_v3_4( + struct bios_parser *bp, + struct dc_firmware_info *info) +{ + struct atom_firmware_info_v3_4 *firmware_info; + struct atom_common_table_header *header; + struct atom_data_revision revision; + struct atom_display_controller_info_v4_1 *dce_info_v4_1 = NULL; + struct atom_display_controller_info_v4_4 *dce_info_v4_4 = NULL; + if (!info) + return BP_RESULT_BADINPUT; + + firmware_info = GET_IMAGE(struct atom_firmware_info_v3_4, + DATA_TABLES(firmwareinfo)); + + if (!firmware_info) + return BP_RESULT_BADBIOSTABLE; + + memset(info, 0, sizeof(*info)); + + header = GET_IMAGE(struct atom_common_table_header, + DATA_TABLES(dce_info)); + + get_atom_data_table_revision(header, &revision); + + switch (revision.major) { + case 4: + switch (revision.minor) { + case 4: + dce_info_v4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4, + DATA_TABLES(dce_info)); + + if (!dce_info_v4_4) + return BP_RESULT_BADBIOSTABLE; + + /* 100MHz expected */ + info->pll_info.crystal_frequency = dce_info_v4_4->dce_refclk_10khz * 10; + info->dp_phy_ref_clk = dce_info_v4_4->dpphy_refclk_10khz * 10; + /* 50MHz expected */ + info->i2c_engine_ref_clk = dce_info_v4_4->i2c_engine_refclk_10khz * 10; + + /* Get SMU Display PLL VCO Frequency in KHz*/ + info->smu_gpu_pll_output_freq = dce_info_v4_4->dispclk_pll_vco_freq * 10; + break; + + 
default: + /* should not come here, keep as backup, as was before */ + dce_info_v4_1 = GET_IMAGE(struct atom_display_controller_info_v4_1, + DATA_TABLES(dce_info)); + + if (!dce_info_v4_1) + return BP_RESULT_BADBIOSTABLE; + + info->pll_info.crystal_frequency = dce_info_v4_1->dce_refclk_10khz * 10; + info->dp_phy_ref_clk = dce_info_v4_1->dpphy_refclk_10khz * 10; + info->i2c_engine_ref_clk = dce_info_v4_1->i2c_engine_refclk_10khz * 10; + break; + } + break; + + default: + ASSERT(0); + break; + } + + header = GET_IMAGE(struct atom_common_table_header, + DATA_TABLES(smu_info)); + get_atom_data_table_revision(header, &revision); + + // We need to convert from 10KHz units into KHz units. + info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10; + + if (firmware_info->board_i2c_feature_id == 0x2) { + info->oem_i2c_present = true; + info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id; + } else { + info->oem_i2c_present = false; + } + + return BP_RESULT_OK; +} + static enum bp_result bios_parser_get_encoder_cap_info( struct dc_bios *dcb, struct graphics_object_id object_id, @@ -2233,6 +2321,8 @@ static enum bp_result get_integrated_info_v2_2( info->ext_disp_conn_info.checksum = info_v2_2->extdispconninfo.checksum; + info->ext_disp_conn_info.fixdpvoltageswing = + info_v2_2->extdispconninfo.fixdpvoltageswing; info->edp1_info.edp_backlight_pwm_hz = le16_to_cpu(info_v2_2->edp1_info.edp_backlight_pwm_hz); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 1548b2a3fe03..26f96ee32472 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -100,11 +100,13 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m if (edp_num) { for (panel_inst = 0; panel_inst < edp_num; panel_inst++) { + bool allow_active = false; + edp_link = edp_links[panel_inst]; if (!edp_link->psr_settings.psr_feature_enabled) continue; clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active; - dc_link_set_psr_allow_active(edp_link, false, false, false); + dc_link_set_psr_allow_active(edp_link, &allow_active, false, false, NULL); } } @@ -124,7 +126,7 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) if (!edp_link->psr_settings.psr_feature_enabled) continue; dc_link_set_psr_allow_active(edp_link, - clk_mgr->psr_allow_active_cache, false, false); + &clk_mgr->psr_allow_active_cache, false, false, NULL); } } @@ -283,13 +285,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p BREAK_TO_DEBUGGER(); return NULL; } - if (ASICREV_IS_YELLOW_CARP(asic_id.hw_internal_rev)) { - /* TODO: to add DCN31 clk_mgr support, once CLK IP header files are available, - * for now use DCN3.0 clk mgr. 
- */ - dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); - return &clk_mgr->base.base; - } + + dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base.base; } #endif @@ -326,7 +323,6 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base) break; case FAMILY_YELLOW_CARP: - if (ASICREV_IS_YELLOW_CARP(clk_mgr_base->ctx->asic_id.hw_internal_rev)) dcn31_clk_mgr_destroy(clk_mgr); break; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index d7bf9283dc90..f4c9a458ace8 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -219,14 +219,17 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, update_dispclk = true; } - /* TODO: add back DTO programming when DPPCLK restore is fixed in FSDL*/ if (dpp_clock_lowered) { // increase per DPP DTO before lowering global dppclk + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); } else { // increase global DPPCLK before lowering per DPP DTO if (update_dppclk || update_dispclk) dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); + // always update dtos unless clock is lowered and not safe to lower + if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } // notify DMCUB of latest clocks @@ -368,32 +371,32 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 5.32, - .sr_enter_plus_exit_time_us = 6.38, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 9.82, - .sr_enter_plus_exit_time_us = 11.196, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 9.89, - .sr_enter_plus_exit_time_us = 11.24, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 9.748, - .sr_enter_plus_exit_time_us = 11.102, + .sr_exit_time_us = 11.5, + .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, } @@ -520,14 +523,21 @@ static unsigned int find_clk_for_voltage( unsigned int voltage) { int i; + int max_voltage = 0; + int clock = 0; for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) { - if (clock_table->SocVoltage[i] == voltage) + if (clock_table->SocVoltage[i] == voltage) { return clocks[i]; + } else if (clock_table->SocVoltage[i] >= max_voltage && + clock_table->SocVoltage[i] < voltage) { + max_voltage = clock_table->SocVoltage[i]; + clock = clocks[i]; + } } - ASSERT(0); - return 0; + ASSERT(clock); + return clock; } void dcn31_clk_mgr_helper_populate_bw_params( diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index da942e9f5142..0ded4decee05 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -71,8 +71,6 @@ #include "dmub/dmub_srv.h" -#include "dcn30/dcn30_vpg.h" - #include "i2caux_interface.h" #include "dce/dmub_hw_lock_mgr.h" @@ -1087,6 +1085,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state 
*context) struct dc_stream_state *old_stream = dc->current_state->res_ctx.pipe_ctx[i].stream; bool should_disable = true; + bool pipe_split_change = + context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe; for (j = 0; j < context->stream_count; j++) { if (old_stream == context->streams[j]) { @@ -1094,6 +1094,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) break; } } + if (!should_disable && pipe_split_change) + should_disable = true; + if (should_disable && old_stream) { dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); @@ -1889,6 +1892,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) return false; } +#ifdef CONFIG_DRM_AMD_DC_DCN /* Perform updates here which need to be deferred until next vupdate * * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered @@ -1898,15 +1902,16 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) */ static void process_deferred_updates(struct dc *dc) { -#ifdef CONFIG_DRM_AMD_DC_DCN - int i; + int i = 0; - if (dc->debug.enable_mem_low_power.bits.cm) + if (dc->debug.enable_mem_low_power.bits.cm) { + ASSERT(dc->dcn_ip->max_num_dpp); for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); -#endif + } } +#endif /* CONFIG_DRM_AMD_DC_DCN */ void dc_post_update_surfaces_to_stream(struct dc *dc) { @@ -1933,7 +1938,9 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); } +#ifdef CONFIG_DRM_AMD_DC_DCN process_deferred_updates(dc); +#endif dc->hwss.optimize_bandwidth(dc, context); @@ -2285,6 +2292,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc, update_flags->bits.gamma_change = 1; } + if (u->lut3d_func || u->func_shaper) + update_flags->bits.lut_3d = 1; + if (u->hdr_mult.value) if (u->hdr_mult.value != u->surface->hdr_mult.value) { update_flags->bits.hdr_mult = 1; @@ -2298,6 +2308,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc, if (update_flags->bits.input_csc_change || update_flags->bits.coeff_reduction_change + || update_flags->bits.lut_3d || update_flags->bits.gamma_change || update_flags->bits.gamut_remap_change) { type = UPDATE_TYPE_FULL; @@ -2356,6 +2367,11 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->dsc_config) su_flags->bits.dsc_changed = 1; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (stream_update->mst_bw_update) + su_flags->bits.mst_bw = 1; +#endif + if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; @@ -2674,9 +2690,6 @@ static void commit_planes_do_stream_update(struct dc *dc, enum surface_update_type update_type, struct dc_state *context) { -#if defined(CONFIG_DRM_AMD_DC_DCN) - struct vpg *vpg; -#endif int j; // Stream updates @@ -2697,11 +2710,6 @@ static void commit_planes_do_stream_update(struct dc *dc, stream_update->vrr_infopacket || stream_update->vsc_infopacket || stream_update->vsp_infopacket) { -#if defined(CONFIG_DRM_AMD_DC_DCN) - vpg = pipe_ctx->stream_res.stream_enc->vpg; - if (vpg && vpg->funcs->vpg_poweron) - vpg->funcs->vpg_poweron(vpg); -#endif resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); } @@ -2741,6 +2749,15 @@ static void commit_planes_do_stream_update(struct dc *dc, if (stream_update->dsc_config) 
dp_update_dsc_config(pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (stream_update->mst_bw_update) { + if (stream_update->mst_bw_update->is_increase) + dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); + else + dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); + } +#endif + if (stream_update->pending_test_pattern) { dc_link_dp_set_test_pattern(stream->link, stream->test_pattern.type, @@ -3118,8 +3135,13 @@ void dc_commit_updates_for_stream(struct dc *dc, if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) new_pipe->plane_state->force_full_update = true; } - } else if (update_type == UPDATE_TYPE_FAST) { - /* Previous frame finished and HW is ready for optimization. */ + } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) { + /* + * Previous frame finished and HW is ready for optimization. + * + * Only relevant for DCN behavior where we can guarantee the optimization + * is safe to apply - retain the legacy behavior for DCE. + */ dc_post_update_surfaces_to_stream(dc); } @@ -3178,6 +3200,12 @@ void dc_commit_updates_for_stream(struct dc *dc, } } + /* Legacy optimization path for DCE. */ + if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { + dc_post_update_surfaces_to_stream(dc); + TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); + } + return; } @@ -3478,6 +3506,7 @@ void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_ bool dc_set_psr_allow_active(struct dc *dc, bool enable) { int i; + bool allow_active; for (i = 0; i < dc->current_state->stream_count ; i++) { struct dc_link *link; @@ -3489,10 +3518,12 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable) if (link->psr_settings.psr_feature_enabled) { if (enable && !link->psr_settings.psr_allow_active) { - if (!dc_link_set_psr_allow_active(link, true, false, false)) + allow_active = true; + if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL)) return false; } else if (!enable && link->psr_settings.psr_allow_active) { - if (!dc_link_set_psr_allow_active(link, false, true, false)) + allow_active = false; + if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL)) return false; } } @@ -3579,7 +3610,8 @@ bool dc_enable_dmub_notifications(struct dc *dc) #if defined(CONFIG_DRM_AMD_DC_DCN) /* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */ if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && - dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) + dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && + !dc->debug.dpia_debug.bits.disable_dpia) return true; #endif /* dmub aux needs dmub notifications to be enabled */ @@ -3727,6 +3759,60 @@ bool dc_process_dmub_set_config_async(struct dc *dc, } /** + ***************************************************************************** + * Function: dc_process_dmub_set_mst_slots + * + * @brief + * Submits mst slot allocation command to dmub via inbox message + * + * @param + * [in] dc: dc structure + * [in] link_index: link index + * [in] mst_alloc_slots: mst slots to be allotted + * [out] mst_slots_in_use: mst slots in use returned in failure case + * + * @return + * DC_OK if successful, DC_ERROR if failure + ***************************************************************************** + */ +enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, + uint32_t link_index, + uint8_t mst_alloc_slots, + uint8_t *mst_slots_in_use) +{ + union dmub_rb_cmd cmd 
= {0}; + struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; + + /* prepare MST_ALLOC_SLOTS command */ + cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; + cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS; + + cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; + cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; + + if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) + /* command is not processed by dmub */ + return DC_ERROR_UNEXPECTED; + + /* command processed by dmub, if ret_status is 1 */ + if (cmd.set_config_access.header.ret_status != 1) + /* command processing error */ + return DC_ERROR_UNEXPECTED; + + /* command processed and we have a status of 2, mst not enabled in dpia */ + if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2) + return DC_FAIL_UNSUPPORTED_1; + + /* previously configured mst alloc and used slots did not match */ + if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) { + *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use; + return DC_NOT_SUPPORTED; + } + + return DC_OK; +} + +/** * dc_disable_accelerated_mode - disable accelerated mode * @dc: dc structure */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index ca5dc3c168ec..60544788e911 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -674,13 +674,13 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link) static void read_current_link_settings_on_detect(struct dc_link *link) { - union lane_count_set lane_count_set = { {0} }; + union lane_count_set lane_count_set = {0}; uint8_t link_bw_set; uint8_t link_rate_set; uint32_t read_dpcd_retry_cnt = 10; enum dc_status status = DC_ERROR_UNEXPECTED; int i; - union max_down_spread max_down_spread = { {0} }; + union max_down_spread max_down_spread = {0}; // Read DPCD 00101h to find out the number of lanes currently set for (i = 0; i < read_dpcd_retry_cnt; i++) { @@ -1660,6 +1660,14 @@ static bool dc_link_construct_legacy(struct dc_link *link, DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw); DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps); } + + if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) { + link->bios_forced_drive_settings.VOLTAGE_SWING = + (info->ext_disp_conn_info.fixdpvoltageswing & 0x3); + link->bios_forced_drive_settings.PRE_EMPHASIS = + ((info->ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3); + } + break; } } @@ -1757,6 +1765,9 @@ static bool dc_link_construct_dpia(struct dc_link *link, link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */ + link->wa_flags.dp_mot_reset_segment = true; + return true; ddc_create_fail: @@ -1869,8 +1880,13 @@ static enum dc_status enable_link_dp(struct dc_state *state, do_fallback = true; #if defined(CONFIG_DRM_AMD_DC_DCN) + /* + * Temporary w/a to get DP2.0 link rates to work with SST. + * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved. 
+ */ if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING && - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + link->dc->debug.set_mst_en_for_sst) { dp_enable_mst_on_sink(link, true); } #endif @@ -1983,51 +1999,6 @@ static enum dc_status enable_link_dp_mst( return enable_link_dp(state, pipe_ctx); } -void blank_all_dp_displays(struct dc *dc, bool hw_init) -{ - unsigned int i, j, fe; - uint8_t dpcd_power_state = '\0'; - enum dc_status status = DC_ERROR_UNEXPECTED; - - for (i = 0; i < dc->link_count; i++) { - enum signal_type signal = dc->links[i]->connector_signal; - - if ((signal == SIGNAL_TYPE_EDP) || - (signal == SIGNAL_TYPE_DISPLAY_PORT)) { - if (hw_init && signal != SIGNAL_TYPE_EDP && dc->links[i]->priv != NULL) { - /* DP 2.0 spec requires that we read LTTPR caps first */ - dp_retrieve_lttpr_cap(dc->links[i]); - /* if any of the displays are lit up turn them off */ - status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - } - - if ((signal != SIGNAL_TYPE_EDP && status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) || - (!hw_init && dc->links[i]->link_enc && - dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc))) { - if (dc->links[i]->link_enc->funcs->get_dig_frontend) { - fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc); - if (fe == ENGINE_ID_UNKNOWN) - continue; - - for (j = 0; j < dc->res_pool->stream_enc_count; j++) { - if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], - dc->res_pool->stream_enc[j]); - break; - } - } - } - - if (!dc->links[i]->wa_flags.dp_keep_receiver_powered || - (hw_init && signal != SIGNAL_TYPE_EDP && dc->links[i]->priv != NULL)) - dp_receiver_power_ctrl(dc->links[i], false); - } - } - } - -} - static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx, enum engine_id eng_id, struct ext_hdmi_settings *settings) @@ -2956,8 +2927,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link, return true; } -bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, - bool wait, bool force_static) +bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active, + bool wait, bool force_static, const unsigned int *power_opts) { struct dc *dc = link->ctx->dc; struct dmcu *dmcu = dc->res_pool->dmcu; @@ -2970,20 +2941,33 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; - link->psr_settings.psr_allow_active = allow_active; + /* Set power optimization flag */ + if (power_opts && link->psr_settings.psr_power_opt != *power_opts) { + link->psr_settings.psr_power_opt = *power_opts; + + if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt) + psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt); + } + + /* Enable or Disable PSR */ + if (allow_active && link->psr_settings.psr_allow_active != *allow_active) { + link->psr_settings.psr_allow_active = *allow_active; + #if defined(CONFIG_DRM_AMD_DC_DCN) - if (!allow_active) - dc_z10_restore(dc); + if (!link->psr_settings.psr_allow_active) + dc_z10_restore(dc); #endif - if (psr != NULL && link->psr_settings.psr_feature_enabled) { - if (force_static && psr->funcs->psr_force_static) - psr->funcs->psr_force_static(psr, panel_inst); - psr->funcs->psr_enable(psr, allow_active, wait, panel_inst); - } else if 
((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled) - dmcu->funcs->set_psr_enable(dmcu, allow_active, wait); - else - return false; + if (psr != NULL && link->psr_settings.psr_feature_enabled) { + if (force_static && psr->funcs->psr_force_static) + psr->funcs->psr_force_static(psr, panel_inst); + psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst); + } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && + link->psr_settings.psr_feature_enabled) + dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait); + else + return false; + } return true; } @@ -3272,10 +3256,12 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) static void update_mst_stream_alloc_table( struct dc_link *link, struct stream_encoder *stream_enc, +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc? +#endif const struct dp_mst_stream_allocation_table *proposed_table) { - struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { - { 0 } }; + struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 }; struct link_mst_stream_allocation *dc_alloc; int i; @@ -3308,6 +3294,9 @@ static void update_mst_stream_alloc_table( work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count; work_table[i].stream_enc = stream_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc; +#endif } } @@ -3430,11 +3419,15 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) struct dc_link *link = stream->link; struct link_encoder *link_encoder = NULL; struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; +#endif struct dp_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp; struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; - uint8_t i; + int i; enum act_return_status ret; DC_LOGGER_INIT(link->ctx->logger); @@ -3457,7 +3450,14 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) &proposed_table, true)) { update_mst_stream_alloc_table( +#if defined(CONFIG_DRM_AMD_DC_DCN) + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); +#else link, pipe_ctx->stream_res.stream_enc, &proposed_table); +#endif } else DC_LOG_WARNING("Failed to update" @@ -3471,23 +3471,70 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { +#if defined(CONFIG_DRM_AMD_DC_DCN) DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].hpo_dp_stream_enc: %p " "stream[%d].vcp_id: %d " "stream[%d].slot_count: %d\n", i, (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, + i, link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count); +#else + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + 
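/*
 * Editor's note: under CONFIG_DRM_AMD_DC_DCN the software allocation
 * table entries now carry both encoder types, so callers hand the DIO
 * and HPO DP stream encoders over together (as in the hunk above):
 *
 *	update_mst_stream_alloc_table(link,
 *			pipe_ctx->stream_res.stream_enc,
 *			pipe_ctx->stream_res.hpo_dp_stream_enc,
 *			&proposed_table);
 */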
i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); +#endif } ASSERT(proposed_table.stream_count > 0); + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + static enum dc_status status; + uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF; + + for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++) + mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count; + + status = dc_process_dmub_set_mst_slots(link->dc, link->link_index, + mst_alloc_slots, &prev_mst_slots_in_use); + ASSERT(status == DC_OK); + DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n", + status, mst_alloc_slots, prev_mst_slots_in_use); + } + /* program DP source TX for payload */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + switch (dp_get_link_encoding_format(&link->cur_link_settings)) { + case DP_8b_10b_ENCODING: + link_encoder->funcs->update_mst_stream_allocation_table( + link_encoder, + &link->mst_stream_alloc_table); + break; + case DP_128b_132b_ENCODING: + hpo_dp_link_encoder->funcs->update_stream_allocation_table( + hpo_dp_link_encoder, + &link->mst_stream_alloc_table); + break; + case DP_UNKNOWN_ENCODING: + DC_LOG_ERROR("Failure: unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } +#else link_encoder->funcs->update_mst_stream_allocation_table( link_encoder, &link->mst_stream_alloc_table); +#endif /* send down message */ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( @@ -3510,23 +3557,205 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) pbn = get_pbn_from_timing(pipe_ctx); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); +#if defined(CONFIG_DRM_AMD_DC_DCN) + switch (dp_get_link_encoding_format(&link->cur_link_settings)) { + case DP_8b_10b_ENCODING: + stream_encoder->funcs->set_throttled_vcp_size( + stream_encoder, + avg_time_slots_per_mtp); + break; + case DP_128b_132b_ENCODING: + hpo_dp_link_encoder->funcs->set_throttled_vcp_size( + hpo_dp_link_encoder, + hpo_dp_stream_encoder->inst, + avg_time_slots_per_mtp); + break; + case DP_UNKNOWN_ENCODING: + DC_LOG_ERROR("Failure: unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } +#else stream_encoder->funcs->set_throttled_vcp_size( stream_encoder, avg_time_slots_per_mtp); +#endif return DC_OK; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct fixed31_32 avg_time_slots_per_mtp; + struct fixed31_32 pbn; + struct fixed31_32 pbn_per_slot; + struct link_encoder *link_encoder = link->link_enc; + struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; + struct dp_mst_stream_allocation_table proposed_table = {0}; + uint8_t i; + enum act_return_status ret; + DC_LOGGER_INIT(link->ctx->logger); + + /* decrease throttled vcp size */ + pbn_per_slot = get_pbn_per_slot(stream); + pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); + avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); + + stream_encoder->funcs->set_throttled_vcp_size( + stream_encoder, + avg_time_slots_per_mtp); + + /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + true); + + /* notify immediate branch device table update */ + if (dm_helpers_dp_mst_write_payload_allocation_table( + stream->ctx, + stream, + &proposed_table, + true)) { + /* update mst stream allocation table software state */ + 
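/*
 * Editor's note: for USB4 DPIA links the payload paths also tell DMUB
 * how many time slots are in use in total; the pattern used earlier in
 * this hunk sums the software table and sends a single command:
 *
 *	uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
 *
 *	for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++)
 *		mst_alloc_slots +=
 *			link->mst_stream_alloc_table.stream_allocations[i].slot_count;
 *	status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
 *			mst_alloc_slots, &prev_mst_slots_in_use);
 *
 * A DC_NOT_SUPPORTED return reports the previously configured slot
 * count through the out parameter (see the status decoding at the top
 * of this section).
 */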
update_mst_stream_alloc_table( + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); + } else { + DC_LOG_WARNING("Failed to update" + "MST allocation table for" + "pipe idx:%d\n", + pipe_ctx->pipe_idx); + } + + DC_LOG_MST("%s " + "stream_count: %d: \n ", + __func__, + link->mst_stream_alloc_table.stream_count); + + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); + } + + ASSERT(proposed_table.stream_count > 0); + + /* update mst stream allocation table hardware state */ + link_encoder->funcs->update_mst_stream_allocation_table( + link_encoder, + &link->mst_stream_alloc_table); + + /* poll for immediate branch device ACT handled */ + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( + stream->ctx, + stream); + + return DC_OK; +} + +enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct fixed31_32 avg_time_slots_per_mtp; + struct fixed31_32 pbn; + struct fixed31_32 pbn_per_slot; + struct link_encoder *link_encoder = link->link_enc; + struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; + struct dp_mst_stream_allocation_table proposed_table = {0}; + uint8_t i; + enum act_return_status ret; + DC_LOGGER_INIT(link->ctx->logger); + + /* notify immediate branch device table update */ + if (dm_helpers_dp_mst_write_payload_allocation_table( + stream->ctx, + stream, + &proposed_table, + true)) { + /* update mst stream allocation table software state */ + update_mst_stream_alloc_table( + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); + } + + DC_LOG_MST("%s " + "stream_count: %d: \n ", + __func__, + link->mst_stream_alloc_table.stream_count); + + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); + } + + ASSERT(proposed_table.stream_count > 0); + + /* update mst stream allocation table hardware state */ + link_encoder->funcs->update_mst_stream_allocation_table( + link_encoder, + &link->mst_stream_alloc_table); + + /* poll for immediate branch device ACT handled */ + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( + stream->ctx, + stream); + + if (ret != ACT_LINK_LOST) { + /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + true); + } + + /* increase throttled vcp size */ + pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); + pbn_per_slot = get_pbn_per_slot(stream); + avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); + + stream_encoder->funcs->set_throttled_vcp_size( + stream_encoder, + avg_time_slots_per_mtp); + + return DC_OK; +} +#endif + static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct link_encoder *link_encoder = NULL; 
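/*
 * Editor's note: dc_link_reduce_mst_payload() and
 * dc_link_increase_mst_payload() above are deliberate mirror images:
 *
 *	// reduce:   VCP size down -> ALLOCATE_PAYLOAD -> table update -> ACT
 *	// increase: table update -> ACT -> ALLOCATE_PAYLOAD -> VCP size up
 *
 * so a stream never occupies more link bandwidth than the branch
 * device has acknowledged slots for in either direction.
 */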
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; +#endif struct dp_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); - uint8_t i; + int i; bool mst_mode = (link->type == dc_connection_mst_branch); DC_LOGGER_INIT(link->ctx->logger); @@ -3545,9 +3774,28 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) */ /* slot X.Y */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + switch (dp_get_link_encoding_format(&link->cur_link_settings)) { + case DP_8b_10b_ENCODING: + stream_encoder->funcs->set_throttled_vcp_size( + stream_encoder, + avg_time_slots_per_mtp); + break; + case DP_128b_132b_ENCODING: + hpo_dp_link_encoder->funcs->set_throttled_vcp_size( + hpo_dp_link_encoder, + hpo_dp_stream_encoder->inst, + avg_time_slots_per_mtp); + break; + case DP_UNKNOWN_ENCODING: + DC_LOG_ERROR("Failure: unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } +#else stream_encoder->funcs->set_throttled_vcp_size( stream_encoder, avg_time_slots_per_mtp); +#endif /* TODO: which component is responsible for remove payload table? */ if (mst_mode) { @@ -3557,8 +3805,16 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) &proposed_table, false)) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + update_mst_stream_alloc_table( + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); +#else update_mst_stream_alloc_table( link, pipe_ctx->stream_res.stream_enc, &proposed_table); +#endif } else { DC_LOG_WARNING("Failed to update" @@ -3574,6 +3830,20 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].hpo_dp_stream_enc: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); +#else DC_LOG_MST("stream_enc[%d]: %p " "stream[%d].vcp_id: %d " "stream[%d].slot_count: %d\n", @@ -3583,11 +3853,44 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count); +#endif } + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + enum dc_status status; + uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF; + + for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++) + mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count; + + status = dc_process_dmub_set_mst_slots(link->dc, link->link_index, + mst_alloc_slots, &prev_mst_slots_in_use); + ASSERT(status != DC_NOT_SUPPORTED); + DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n", + status, mst_alloc_slots, prev_mst_slots_in_use); + } + +#if defined(CONFIG_DRM_AMD_DC_DCN) + switch (dp_get_link_encoding_format(&link->cur_link_settings)) { + case DP_8b_10b_ENCODING: + link_encoder->funcs->update_mst_stream_allocation_table( + link_encoder, + 
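/*
 * Editor's note: deallocation reuses the allocation path's
 * encoding-format dispatch, but the MTP slot count is zeroed earlier
 * in this function before the table entry is removed:
 *
 *	avg_time_slots_per_mtp = dc_fixpt_from_int(0); // idle the VCP first
 */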
&link->mst_stream_alloc_table); + break; + case DP_128b_132b_ENCODING: + hpo_dp_link_encoder->funcs->update_stream_allocation_table( + hpo_dp_link_encoder, + &link->mst_stream_alloc_table); + break; + case DP_UNKNOWN_ENCODING: + DC_LOG_ERROR("Failure: unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } +#else link_encoder->funcs->update_mst_stream_allocation_table( link_encoder, &link->mst_stream_alloc_table); +#endif if (mst_mode) { dm_helpers_dp_mst_poll_for_allocation_change_trigger( @@ -3610,6 +3913,9 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; #if defined(CONFIG_DRM_AMD_DC_DCN) struct link_encoder *link_enc = NULL; + struct dc_state *state = pipe_ctx->stream->ctx->dc->current_state; + struct link_enc_assignment link_enc_assign; + int i; #endif if (cp_psp && cp_psp->funcs.update_stream_config) { @@ -3623,9 +3929,72 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; #if defined(CONFIG_DRM_AMD_DC_DCN) config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) { + + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY || + pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { link_enc = pipe_ctx->stream->link->link_enc; + config.dio_output_type = pipe_ctx->stream->link->ep_type; + config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) + link_enc = pipe_ctx->stream->link->link_enc; + else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_stream( + pipe_ctx->stream->ctx->dc, + pipe_ctx->stream); + } + // Initialize PHY ID with ABCDE - 01234 mapping except when it is B0 config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + + //look up the link_enc_assignment for the current pipe_ctx + for (i = 0; i < state->stream_count; i++) { + if (pipe_ctx->stream == state->streams[i]) { + link_enc_assign = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + } + } + // Add flag to guard new A0 DIG mapping + if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true) { + config.dig_be = link_enc_assign.eng_id; + config.dio_output_type = pipe_ctx->stream->link->ep_type; + config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + } else { + config.dio_output_type = 0; + config.dio_output_idx = 0; + } + + // Add flag to guard B0 implementation + if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && + link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + link_enc = link_enc_assign.stream->link_enc; + + // enum ID 1-4 maps to DPIA PHY ID 0-3 + config.phy_idx = link_enc_assign.ep_id.link_id.enum_id - ENUM_ID_1; + } else { // for non DPIA mode over B0, ABCDE maps to 01564 + + switch (link_enc->transmitter) { + case TRANSMITTER_UNIPHY_A: + config.phy_idx = 0; + break; + case TRANSMITTER_UNIPHY_B: + config.phy_idx = 1; + break; + case TRANSMITTER_UNIPHY_C: + config.phy_idx = 5; + break; + case TRANSMITTER_UNIPHY_D: + config.phy_idx = 6; + break; + case TRANSMITTER_UNIPHY_E: + config.phy_idx = 4; + break; + default: + config.phy_idx = 0; + break; + } + + } + } } else if 
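/*
 * Editor's note: the B0 remap above sends UNIPHY A,B,C,D,E to PHY ids
 * 0,1,5,6,4; an equivalent lookup-table form (hypothetical, not part
 * of this patch) would be:
 *
 *	static const uint8_t b0_phy_idx[] = {
 *		[TRANSMITTER_UNIPHY_A] = 0,
 *		[TRANSMITTER_UNIPHY_B] = 1,
 *		[TRANSMITTER_UNIPHY_C] = 5,
 *		[TRANSMITTER_UNIPHY_D] = 6,
 *		[TRANSMITTER_UNIPHY_E] = 4,
 *	};
 *
 * while DPIA endpoints map enum IDs 1-4 straight to PHY ids 0-3 via
 * link_enc_assign.ep_id.link_id.enum_id - ENUM_ID_1.
 */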
(pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) { link_enc = link_enc_cfg_get_link_enc_used_by_stream( pipe_ctx->stream->ctx->dc, @@ -3910,6 +4279,8 @@ void core_link_enable_stream( */ if (status != DC_FAIL_DP_LINK_TRAINING || pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (false == stream->link->link_status.link_active) + disable_link(stream->link, pipe_ctx->stream->signal); BREAK_TO_DEBUGGER(); return; } @@ -4399,7 +4770,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing( timing->dsc_cfg.bits_per_pixel, timing->dsc_cfg.num_slices_h, timing->dsc_cfg.is_dp); -#endif +#endif /* CONFIG_DRM_AMD_DC_DCN */ switch (timing->display_color_depth) { case COLOR_DEPTH_666: diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index b0f1cd7268c8..60539b1f2a80 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -554,6 +554,7 @@ bool dal_ddc_service_query_ddc_data( payload.address = address; payload.reply = NULL; payload.defer_delay = get_defer_delay(ddc); + payload.write_status_update = false; if (write_size != 0) { payload.write = true; @@ -625,24 +626,24 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc, do { struct aux_payload current_payload; bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= - payload->length; + payload->length ? true : false; + uint32_t payload_length = is_end_of_payload ? + payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; current_payload.address = payload->address; current_payload.data = &payload->data[retrieved]; current_payload.defer_delay = payload->defer_delay; current_payload.i2c_over_aux = payload->i2c_over_aux; - current_payload.length = is_end_of_payload ? - payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; - /* set mot (middle of transaction) to false - * if it is the last payload - */ + current_payload.length = payload_length; + /* set mot (middle of transaction) to false if it is the last payload */ current_payload.mot = is_end_of_payload ? 
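/*
 * Editor's note: dal_ddc_submit_aux_command() splits large requests
 * into DEFAULT_AUX_MAX_DATA_SIZE chunks; every chunk but the last is
 * sent with mot (middle of transaction) = true, and the last chunk
 * inherits the caller's mot so an enclosing transaction can stay open.
 * Loop skeleton, mirroring the hunk below:
 *
 *	do {
 *		// fill current_payload for bytes [retrieved, retrieved + payload_length)
 *		ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
 *		retrieved += payload_length;
 *	} while (retrieved < payload->length && ret == true);
 */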
payload->mot:true; + current_payload.write_status_update = false; current_payload.reply = payload->reply; current_payload.write = payload->write; ret = dc_link_aux_transfer_with_retries(ddc, &current_payload); - retrieved += current_payload.length; + retrieved += payload_length; } while (retrieved < payload->length && ret == true); return ret; @@ -763,7 +764,7 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service) dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &tmds_config, sizeof(tmds_config)); if (tmds_config & 0x1) { - union hdmi_scdc_status_flags_data status_data = { {0} }; + union hdmi_scdc_status_flags_data status_data = {0}; uint8_t scramble_status = 0; offset = HDMI_SCDC_SCRAMBLER_STATUS; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 54662d74c65a..cb7bf9148904 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -106,6 +106,10 @@ static bool decide_fallback_link_setting( static struct dc_link_settings get_common_supported_link_settings( struct dc_link_settings link_setting_a, struct dc_link_settings link_setting_b); +static void maximize_lane_settings(const struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); +static void override_lane_settings(const struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link, const struct dc_link_settings *link_settings) @@ -259,7 +263,7 @@ static void dpcd_set_training_pattern( struct dc_link *link, enum dc_dp_training_pattern training_pattern) { - union dpcd_training_pattern dpcd_pattern = { {0} }; + union dpcd_training_pattern dpcd_pattern = {0}; dpcd_pattern.v1_4.TRAINING_PATTERN_SET = dc_dp_training_pattern_to_dpcd_training_pattern( @@ -401,8 +405,8 @@ enum dc_status dpcd_set_link_settings( uint8_t rate; enum dc_status status; - union down_spread_ctrl downspread = { {0} }; - union lane_count_set lane_count_set = { {0} }; + union down_spread_ctrl downspread = {0}; + union lane_count_set lane_count_set = {0}; downspread.raw = (uint8_t) (lt_settings->link_settings.link_spread); @@ -515,12 +519,10 @@ static void dpcd_set_lt_pattern_and_lane_settings( enum dc_dp_training_pattern pattern, uint32_t offset) { - union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } }; - uint32_t dpcd_base_lt_offset; uint8_t dpcd_lt_buffer[5] = {0}; - union dpcd_training_pattern dpcd_pattern = { {0} }; + union dpcd_training_pattern dpcd_pattern = { 0 }; uint32_t size_in_bytes; bool edp_workaround = false; /* TODO link_prop.INTERNAL */ dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; @@ -554,16 +556,14 @@ static void dpcd_set_lt_pattern_and_lane_settings( dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } - dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->lane_settings, dpcd_lane); - /* concatenate everything into one buffer*/ - - size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]); + size_in_bytes = lt_settings->link_settings.lane_count * + sizeof(lt_settings->dpcd_lane_settings[0]); // 0x00103 - 0x00102 memmove( &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], - dpcd_lane, + lt_settings->dpcd_lane_settings, size_in_bytes); if (is_repeater(link, offset)) { @@ -575,7 +575,7 @@ static void dpcd_set_lt_pattern_and_lane_settings( __func__, offset, dpcd_base_lt_offset, -
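/*
 * Editor's note: with the lane settings cached in DPCD layout, the
 * training pattern and per-lane values go out as one burst; the buffer
 * assembled above lays out as (sketch, four-lane case):
 *
 *	dpcd_lt_buffer[0]    -> DP_TRAINING_PATTERN_SET (0x102)
 *	dpcd_lt_buffer[1..4] -> DP_TRAINING_LANEx_SET   (0x103..0x106)
 */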
dpcd_lane[0].tx_ffe.PRESET_VALUE); + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) #endif @@ -584,10 +584,10 @@ static void dpcd_set_lt_pattern_and_lane_settings( __func__, offset, dpcd_base_lt_offset, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } else { #if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&lt_settings->link_settings) == @@ -595,17 +595,17 @@ static void dpcd_set_lt_pattern_and_lane_settings( DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", __func__, dpcd_base_lt_offset, - dpcd_lane[0].tx_ffe.PRESET_VALUE); + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) #endif DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, dpcd_base_lt_offset, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } if (edp_workaround) { /* for eDP write in 2 parts because the 5-byte burst is @@ -620,7 +620,7 @@ static void dpcd_set_lt_pattern_and_lane_settings( core_link_write_dpcd( link, DP_TRAINING_LANE0_SET, - (uint8_t *)(dpcd_lane), + (uint8_t *)(lt_settings->dpcd_lane_settings), size_in_bytes); #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -711,34 +711,44 @@ void dp_hw_to_dpcd_lane_settings( } } -void dp_update_drive_settings( - struct link_training_settings *dest, - struct link_training_settings src) +void dp_decide_lane_settings( + const struct link_training_settings *lt_settings, + const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) { uint32_t lane; - for (lane = 0; lane < src.link_settings.lane_count; lane++) { - if (dest->voltage_swing == NULL) - dest->lane_settings[lane].VOLTAGE_SWING = src.lane_settings[lane].VOLTAGE_SWING; - else - dest->lane_settings[lane].VOLTAGE_SWING = *dest->voltage_swing; - - if (dest->pre_emphasis == NULL) - dest->lane_settings[lane].PRE_EMPHASIS = src.lane_settings[lane].PRE_EMPHASIS; - else - dest->lane_settings[lane].PRE_EMPHASIS = *dest->pre_emphasis; - - if (dest->post_cursor2 == NULL) - dest->lane_settings[lane].POST_CURSOR2 = src.lane_settings[lane].POST_CURSOR2; - else - dest->lane_settings[lane].POST_CURSOR2 = *dest->post_cursor2; + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + if (dp_get_link_encoding_format(&lt_settings->link_settings) == + DP_8b_10b_ENCODING) { + hw_lane_settings[lane].VOLTAGE_SWING = + (enum dc_voltage_swing)(ln_adjust[lane].bits. + VOLTAGE_SWING_LANE); + hw_lane_settings[lane].PRE_EMPHASIS = + (enum dc_pre_emphasis)(ln_adjust[lane].bits.
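/*
 * Editor's note: the sink's adjust request is encoding-specific;
 * 8b/10b asks for VS/PE per lane, while 128b/132b (handled just below)
 * asks for a TX FFE preset instead:
 *
 *	// 8b/10b:    ln_adjust[lane].bits.VOLTAGE_SWING_LANE / PRE_EMPHASIS_LANE
 *	// 128b/132b: ln_adjust[lane].tx_ffe.PRESET_VALUE
 *
 * after which dp_hw_to_dpcd_lane_settings() mirrors the decided HW
 * values into the DPCD-formatted cache.
 */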
+ PRE_EMPHASIS_LANE); + } #if defined(CONFIG_DRM_AMD_DC_DCN) - if (dest->ffe_preset == NULL) - dest->lane_settings[lane].FFE_PRESET = src.lane_settings[lane].FFE_PRESET; - else - dest->lane_settings[lane].FFE_PRESET = *dest->ffe_preset; + else if (dp_get_link_encoding_format(&lt_settings->link_settings) == + DP_128b_132b_ENCODING) { + hw_lane_settings[lane].FFE_PRESET.raw = + ln_adjust[lane].tx_ffe.PRESET_VALUE; + } #endif } + dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); + + if (lt_settings->disallow_per_lane_settings) { + /* we find the maximum of the requested settings across all lanes*/ + /* and set this maximum for all lanes*/ + maximize_lane_settings(lt_settings, hw_lane_settings); + override_lane_settings(lt_settings, hw_lane_settings); + + if (lt_settings->always_match_dpcd_with_hw_lane_settings) + dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); + } + } static uint8_t get_nibble_at_index(const uint8_t *buf, @@ -768,55 +778,29 @@ static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing( } -static void find_max_drive_settings( - const struct link_training_settings *link_training_setting, - struct link_training_settings *max_lt_setting) +static void maximize_lane_settings(const struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) { uint32_t lane; struct dc_lane_settings max_requested; - max_requested.VOLTAGE_SWING = - link_training_setting-> - lane_settings[0].VOLTAGE_SWING; - max_requested.PRE_EMPHASIS = - link_training_setting-> - lane_settings[0].PRE_EMPHASIS; - /*max_requested.postCursor2 = - * link_training_setting->laneSettings[0].postCursor2;*/ + max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING; + max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS; #if defined(CONFIG_DRM_AMD_DC_DCN) - max_requested.FFE_PRESET = - link_training_setting->lane_settings[0].FFE_PRESET; + max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET; #endif /* Determine what the maximum of the requested settings are*/ - for (lane = 1; lane < link_training_setting->link_settings.lane_count; - lane++) { - if (link_training_setting->lane_settings[lane].VOLTAGE_SWING > - max_requested.VOLTAGE_SWING) + for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) { + if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING) + max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING; - max_requested.VOLTAGE_SWING = - link_training_setting-> - lane_settings[lane].VOLTAGE_SWING; - - if (link_training_setting->lane_settings[lane].PRE_EMPHASIS > - max_requested.PRE_EMPHASIS) - max_requested.PRE_EMPHASIS = - link_training_setting-> - lane_settings[lane].PRE_EMPHASIS; - - /* - if (link_training_setting->laneSettings[lane].postCursor2 > - max_requested.postCursor2) - { - max_requested.postCursor2 = - link_training_setting->laneSettings[lane].postCursor2; - } - */ + if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS) + max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS; #if defined(CONFIG_DRM_AMD_DC_DCN) - if (link_training_setting->lane_settings[lane].FFE_PRESET.settings.level > + if (lane_settings[lane].FFE_PRESET.settings.level > max_requested.FFE_PRESET.settings.level) max_requested.FFE_PRESET.settings.level = - link_training_setting-> lane_settings[lane].FFE_PRESET.settings.level; #endif } @@ -828,10 +812,6 @@ static void find_max_drive_settings( if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL; - /* - if (max_requested.postCursor2 > PostCursor2_MaxLevel) - max_requested.postCursor2 = PostCursor2_MaxLevel; - */ #if defined(CONFIG_DRM_AMD_DC_DCN) if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL) max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL; @@ -845,61 +825,58 @@ static void find_max_drive_settings( get_max_pre_emphasis_for_voltage_swing( max_requested.VOLTAGE_SWING); - /* - * Post Cursor2 levels are completely independent from - * pre-emphasis (Post Cursor1) levels. But Post Cursor2 levels - * can only be applied to each allowable combination of voltage - * swing and pre-emphasis levels */ - /* if ( max_requested.postCursor2 > - * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing)) - * max_requested.postCursor2 = - * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing); - */ - - max_lt_setting->link_settings.link_rate = - link_training_setting->link_settings.link_rate; - max_lt_setting->link_settings.lane_count = - link_training_setting->link_settings.lane_count; - max_lt_setting->link_settings.link_spread = - link_training_setting->link_settings.link_spread; - - for (lane = 0; lane < - link_training_setting->link_settings.lane_count; - lane++) { - max_lt_setting->lane_settings[lane].VOLTAGE_SWING = - max_requested.VOLTAGE_SWING; - max_lt_setting->lane_settings[lane].PRE_EMPHASIS = - max_requested.PRE_EMPHASIS; - /*max_lt_setting->laneSettings[lane].postCursor2 = - * max_requested.postCursor2; - */ + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING; + lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS; #if defined(CONFIG_DRM_AMD_DC_DCN) - max_lt_setting->lane_settings[lane].FFE_PRESET = - max_requested.FFE_PRESET; + lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET; #endif } +} + +static void override_lane_settings(const struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + uint32_t lane; + + if (lt_settings->voltage_swing == NULL && + lt_settings->pre_emphasis == NULL && +#if defined(CONFIG_DRM_AMD_DC_DCN) + lt_settings->ffe_preset == NULL && +#endif + lt_settings->post_cursor2 == NULL) + return; + + for (lane = 1; lane < LANE_COUNT_DP_MAX; lane++) { + if (lt_settings->voltage_swing) + lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; + if (lt_settings->pre_emphasis) + lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis; + if (lt_settings->post_cursor2) + lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (lt_settings->ffe_preset) + lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset; +#endif + } } -enum dc_status dp_get_lane_status_and_drive_settings( +enum dc_status dp_get_lane_status_and_lane_adjust( struct dc_link *link, const struct link_training_settings *link_training_setting, - union lane_status *ln_status, - union lane_align_status_updated *ln_status_updated, - struct link_training_settings *req_settings, + union lane_status ln_status[LANE_COUNT_DP_MAX], + union lane_align_status_updated *ln_align, + union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], uint32_t offset) { unsigned int lane01_status_address = DP_LANE0_1_STATUS; uint8_t lane_adjust_offset = 4; unsigned int lane01_adjust_address; uint8_t dpcd_buf[6] = {0}; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; - struct link_training_settings request_settings = { {0} 
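/*
 * Editor's note: the renamed helper above returns the sink's raw
 * adjust requests instead of a pre-digested link_training_settings;
 * the polling pattern used throughout the rest of this patch is:
 *
 *	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
 *	union lane_align_status_updated dpcd_lane_status_updated;
 *	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
 *
 *	status = dp_get_lane_status_and_lane_adjust(link, lt_settings,
 *			dpcd_lane_status, &dpcd_lane_status_updated,
 *			dpcd_lane_adjust, DPRX);
 *	dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
 *			lt_settings->hw_lane_settings,
 *			lt_settings->dpcd_lane_settings);
 */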
}; uint32_t lane; enum dc_status status; - memset(req_settings, '\0', sizeof(struct link_training_settings)); - if (is_repeater(link, offset)) { lane01_status_address = DP_LANE0_1_STATUS_PHY_REPEATER1 + @@ -919,11 +896,11 @@ enum dc_status dp_get_lane_status_and_drive_settings( ln_status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane); - dpcd_lane_adjust[lane].raw = + ln_adjust[lane].raw = get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); } - ln_status_updated->raw = dpcd_buf[2]; + ln_align->raw = dpcd_buf[2]; if (is_repeater(link, offset)) { DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" @@ -962,55 +939,6 @@ enum dc_status dp_get_lane_status_and_drive_settings( dpcd_buf[lane_adjust_offset + 1]); } - /*copy to req_settings*/ - request_settings.link_settings.lane_count = - link_training_setting->link_settings.lane_count; - request_settings.link_settings.link_rate = - link_training_setting->link_settings.link_rate; - request_settings.link_settings.link_spread = - link_training_setting->link_settings.link_spread; - - for (lane = 0; lane < - (uint32_t)(link_training_setting->link_settings.lane_count); - lane++) { - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(&link_training_setting->link_settings) == - DP_128b_132b_ENCODING) { - request_settings.lane_settings[lane].FFE_PRESET.raw = - dpcd_lane_adjust[lane].tx_ffe.PRESET_VALUE; - } else if (dp_get_link_encoding_format(&link_training_setting->link_settings) == - DP_8b_10b_ENCODING) { - request_settings.lane_settings[lane].VOLTAGE_SWING = - (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits. - VOLTAGE_SWING_LANE); - request_settings.lane_settings[lane].PRE_EMPHASIS = - (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits. - PRE_EMPHASIS_LANE); - } -#else - request_settings.lane_settings[lane].VOLTAGE_SWING = - (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits. - VOLTAGE_SWING_LANE); - request_settings.lane_settings[lane].PRE_EMPHASIS = - (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits. 
- PRE_EMPHASIS_LANE); -#endif - } - - /*Note: for postcursor2, read adjusted - * postcursor2 settings from*/ - /*DpcdAddress_AdjustRequestPostCursor2 = - *0x020C (not implemented yet)*/ - - /* we find the maximum of the requested settings across all lanes*/ - /* and set this maximum for all lanes*/ - find_max_drive_settings(&request_settings, req_settings); - - /* if post cursor 2 is needed in the future, - * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C - */ - return status; } @@ -1019,7 +947,6 @@ enum dc_status dpcd_set_lane_settings( const struct link_training_settings *link_training_setting, uint32_t offset) { - union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}}; unsigned int lane0_set_address; enum dc_status status; @@ -1029,34 +956,11 @@ enum dc_status dpcd_set_lane_settings( lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - dp_hw_to_dpcd_lane_settings(link_training_setting, - link_training_setting->lane_settings, - dpcd_lane); - status = core_link_write_dpcd(link, lane0_set_address, - (uint8_t *)(dpcd_lane), + (uint8_t *)(link_training_setting->dpcd_lane_settings), link_training_setting->link_settings.lane_count); - /* - if (LTSettings.link.rate == LinkRate_High2) - { - DpcdTrainingLaneSet2 dpcd_lane2[lane_count_DPMax] = {0}; - for ( uint32_t lane = 0; - lane < lane_count_DPMax; lane++) - { - dpcd_lane2[lane].bits.post_cursor2_set = - static_cast<unsigned char>( - LTSettings.laneSettings[lane].postCursor2); - dpcd_lane2[lane].bits.max_post_cursor2_reached = 0; - } - m_pDpcdAccessSrv->WriteDpcdData( - DpcdAddress_Lane0Set2, - reinterpret_cast<unsigned char*>(dpcd_lane2), - LTSettings.link.lanes); - } - */ - if (is_repeater(link, offset)) { #if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_training_setting->link_settings) == @@ -1066,7 +970,7 @@ enum dc_status dpcd_set_lane_settings( __func__, offset, lane0_set_address, - dpcd_lane[0].tx_ffe.PRESET_VALUE); + link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (dp_get_link_encoding_format(&link_training_setting->link_settings) == DP_8b_10b_ENCODING) #endif @@ -1075,10 +979,10 @@ enum dc_status dpcd_set_lane_settings( __func__, offset, lane0_set_address, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } else { #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -1087,17 +991,17 @@ enum dc_status dpcd_set_lane_settings( DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", __func__, lane0_set_address, - dpcd_lane[0].tx_ffe.PRESET_VALUE); + link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (dp_get_link_encoding_format(&link_training_setting->link_settings) == DP_8b_10b_ENCODING) #endif DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, lane0_set_address, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + 
link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } return status; @@ -1110,7 +1014,7 @@ bool dp_is_max_vs_reached( for (lane = 0; lane < (uint32_t)(lt_settings->link_settings.lane_count); lane++) { - if (lt_settings->lane_settings[lane].VOLTAGE_SWING + if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET == VOLTAGE_SWING_MAX_LEVEL) return true; } @@ -1140,17 +1044,17 @@ static bool perform_post_lt_adj_req_sequence( adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT; adj_req_timer++) { - struct link_training_settings req_settings; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; - dp_get_lane_status_and_drive_settings( + dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings, + dpcd_lane_adjust, DPRX); if (dpcd_lane_status_updated.bits. @@ -1168,11 +1072,10 @@ static bool perform_post_lt_adj_req_sequence( for (lane = 0; lane < (uint32_t)(lane_count); lane++) { if (lt_settings-> - lane_settings[lane].VOLTAGE_SWING != - req_settings.lane_settings[lane]. - VOLTAGE_SWING || - lt_settings->lane_settings[lane].PRE_EMPHASIS != - req_settings.lane_settings[lane].PRE_EMPHASIS) { + dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET != + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE || + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET != + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) { req_drv_setting_changed = true; break; @@ -1180,8 +1083,8 @@ static bool perform_post_lt_adj_req_sequence( } if (req_drv_setting_changed) { - dp_update_drive_settings( - lt_settings, req_settings); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); dc_link_dp_set_drive_settings(link, lt_settings); @@ -1261,16 +1164,15 @@ static enum link_training_result perform_channel_equalization_sequence( struct link_training_settings *lt_settings, uint32_t offset) { - struct link_training_settings req_settings; enum dc_dp_training_pattern tr_pattern; uint32_t retries_ch_eq; uint32_t wait_time_microsec; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_align_status_updated dpcd_lane_status_updated = { {0} }; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; /* Note: also check that TPS4 is a supported feature*/ - tr_pattern = lt_settings->pattern_for_eq; #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -1316,12 +1218,12 @@ static enum link_training_result perform_channel_equalization_sequence( /* 4. Read lane status and requested * drive settings as set by the sink*/ - dp_get_lane_status_and_drive_settings( + dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings, + dpcd_lane_adjust, offset); /* 5. check CR done*/ @@ -1335,7 +1237,8 @@ static enum link_training_result perform_channel_equalization_sequence( return LINK_TRAINING_SUCCESS; /* 7. 
update VS/PE/PC2 in lt_settings*/ - dp_update_drive_settings(lt_settings, req_settings); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } return LINK_TRAINING_EQ_FAIL_EQ; @@ -1361,10 +1264,10 @@ static enum link_training_result perform_clock_recovery_sequence( uint32_t retries_cr; uint32_t retry_count; uint32_t wait_time_microsec; - struct link_training_settings req_settings; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; retries_cr = 0; retry_count = 0; @@ -1418,12 +1321,12 @@ static enum link_training_result perform_clock_recovery_sequence( /* 4. Read lane status and requested drive * settings as set by the sink */ - dp_get_lane_status_and_drive_settings( + dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings, + dpcd_lane_adjust, offset); /* 5. check CR done*/ @@ -1441,33 +1344,25 @@ static enum link_training_result perform_clock_recovery_sequence( break; #endif -#if defined(CONFIG_DRM_AMD_DC_DCN) - if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) && - lt_settings->lane_settings[0].FFE_PRESET.settings.level == - req_settings.lane_settings[0].FFE_PRESET.settings.level) - retries_cr++; - else if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) && - lt_settings->lane_settings[0].VOLTAGE_SWING == - req_settings.lane_settings[0].VOLTAGE_SWING) - retries_cr++; - else - retries_cr = 0; -#else /* 7. same lane settings*/ /* Note: settings are the same for all lanes, * so comparing first lane is sufficient*/ - if ((lt_settings->lane_settings[0].VOLTAGE_SWING == - req_settings.lane_settings[0].VOLTAGE_SWING) - && (lt_settings->lane_settings[0].PRE_EMPHASIS == - req_settings.lane_settings[0].PRE_EMPHASIS)) + if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) && + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + retries_cr++; +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) && + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE == + dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE) retries_cr++; +#endif else retries_cr = 0; -#endif /* 8. update VS/PE/PC2 in lt_settings*/ - dp_update_drive_settings(lt_settings, req_settings); - + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); retry_count++; } @@ -1487,7 +1382,7 @@ static inline enum link_training_result dp_transition_to_video_idle( struct link_training_settings *lt_settings, enum link_training_result status) { - union lane_count_set lane_count_set = { {0} }; + union lane_count_set lane_count_set = {0}; /* 4.
mainlink output idle pattern*/ dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); @@ -1596,6 +1491,9 @@ static inline void decide_8b_10b_training_settings( lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); lt_settings->enhanced_framing = 1; lt_settings->should_set_fec_ready = true; + lt_settings->disallow_per_lane_settings = true; + lt_settings->always_match_dpcd_with_hw_lane_settings = true; + dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -1621,6 +1519,9 @@ static inline void decide_128b_132b_training_settings(struct dc_link *link, link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000; lt_settings->lttpr_mode = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ? LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT; + lt_settings->disallow_per_lane_settings = true; + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } #endif @@ -1661,7 +1562,13 @@ static void override_training_settings( if (overrides->ffe_preset != NULL) lt_settings->ffe_preset = overrides->ffe_preset; #endif - + /* Override HW lane settings with BIOS forced values if present */ + if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING; + lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS; + lt_settings->always_match_dpcd_with_hw_lane_settings = false; + } for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { lt_settings->lane_settings[lane].VOLTAGE_SWING = lt_settings->voltage_swing != NULL ? @@ -1677,6 +1584,9 @@ static void override_training_settings( : POST_CURSOR2_DISABLED; } + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + /* Initialize training timings */ if (overrides->cr_pattern_time != NULL) lt_settings->cr_pattern_time = *overrides->cr_pattern_time; @@ -1800,7 +1710,7 @@ static enum dc_status configure_lttpr_mode_non_transparent( static void repeater_training_done(struct dc_link *link, uint32_t offset) { - union dpcd_training_pattern dpcd_pattern = { {0} }; + union dpcd_training_pattern dpcd_pattern = {0}; const uint32_t dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + @@ -1947,6 +1857,9 @@ void dc_link_dp_set_drive_settings( /* program ASIC PHY settings*/ dp_set_hw_lane_settings(link, lt_settings, DPRX); + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + /* Notify DP sink the PHY settings from source */ dpcd_set_lane_settings(link, lt_settings, DPRX); } @@ -2074,38 +1987,43 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( struct dc_link *link, struct link_training_settings *lt_settings) { - uint8_t loop_count = 0; + uint8_t loop_count; uint32_t aux_rd_interval = 0; uint32_t wait_time = 0; - struct link_training_settings req_settings; - union lane_align_status_updated dpcd_lane_status_updated = { {0} }; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; enum link_training_result status = LINK_TRAINING_SUCCESS; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - /* Transmit 128b/132b_TPS1 over Main-Link and Set 
TRAINING_PATTERN_SET to 01h */ + /* Transmit 128b/132b_TPS1 over Main-Link */ dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, DPRX); + /* Set TRAINING_PATTERN_SET to 01h */ dpcd_set_training_pattern(link, lt_settings->pattern_for_cr); - /* Adjust TX_FFE_PRESET_VALUE as requested */ - dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status, - &dpcd_lane_status_updated, &req_settings, DPRX); - dp_update_drive_settings(lt_settings, req_settings); + /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */ dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); + dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); dp_set_hw_lane_settings(link, lt_settings, DPRX); - dpcd_set_lane_settings(link, lt_settings, DPRX); - - /* Transmit 128b/132b_TPS2 over Main-Link and Set TRAINING_PATTERN_SET to 02h */ dp_set_hw_training_pattern(link, lt_settings->pattern_for_eq, DPRX); - dpcd_set_training_pattern(link, lt_settings->pattern_for_eq); + + /* Set loop counter to start from 1 */ + loop_count = 1; + + /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */ + dpcd_set_lt_pattern_and_lane_settings(link, lt_settings, + lt_settings->pattern_for_eq, DPRX); /* poll for channel EQ done */ while (status == LINK_TRAINING_SUCCESS) { - loop_count++; dp_wait_for_training_aux_rd_interval(link, aux_rd_interval); wait_time += aux_rd_interval; - dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status, - &dpcd_lane_status_updated, &req_settings, DPRX); - dp_update_drive_settings(lt_settings, req_settings); + dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count, dpcd_lane_status)) { @@ -2119,6 +2037,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( dp_set_hw_lane_settings(link, lt_settings, DPRX); dpcd_set_lane_settings(link, lt_settings, DPRX); } + loop_count++; } /* poll for EQ interlane align done */ @@ -2134,8 +2053,8 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( dp_wait_for_training_aux_rd_interval(link, lt_settings->eq_pattern_time); wait_time += lt_settings->eq_pattern_time; - dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status, - &dpcd_lane_status_updated, &req_settings, DPRX); + dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); } } @@ -2148,9 +2067,9 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence( { /* Assumption: assume hardware has transmitted eq pattern */ enum link_training_result status = LINK_TRAINING_SUCCESS; - struct link_training_settings req_settings; - union lane_align_status_updated dpcd_lane_status_updated = { {0} }; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; uint32_t wait_time = 
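/*
 * Editor's note: the reworked 128b/132b EQ sequence above folds
 * TRAINING_PATTERN_SET and the TX FFE presets into a single AUX write
 * via dpcd_set_lt_pattern_and_lane_settings(), and counts polls
 * starting from 1:
 *
 *	loop_count = 1;
 *	dpcd_set_lt_pattern_and_lane_settings(link, lt_settings,
 *			lt_settings->pattern_for_eq, DPRX);
 *	while (status == LINK_TRAINING_SUCCESS) {
 *		// wait aux_rd_interval, read status, re-decide lane settings
 *		loop_count++;
 *	}
 */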
0; /* initiate CDS done sequence */ @@ -2161,8 +2080,8 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence( dp_wait_for_training_aux_rd_interval(link, lt_settings->cds_pattern_time); wait_time += lt_settings->cds_pattern_time; - dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status, - &dpcd_lane_status_updated, &req_settings, DPRX); + dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) && dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) { /* pass */ @@ -2219,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training( } for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++) - lt_settings->lane_settings[lane].VOLTAGE_SWING = VOLTAGE_SWING_LEVEL0; + lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0; } if (status == LINK_TRAINING_SUCCESS) { @@ -2863,7 +2782,7 @@ bool dp_verify_link_cap( link->verified_link_cap = *known_limit_link_setting; return true; } else if (link->link_enc && link->dc->res_pool->funcs->link_encs_assign && - !link_enc_cfg_is_link_enc_avail(link->ctx->dc, link->link_enc->preferred_engine)) { + !link_enc_cfg_is_link_enc_avail(link->ctx->dc, link->link_enc->preferred_engine, link)) { link->verified_link_cap = initial_link_settings; return true; } @@ -3523,6 +3442,8 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link) if (psr_error_status.bits.LINK_CRC_ERROR || psr_error_status.bits.RFB_STORAGE_ERROR || psr_error_status.bits.VSC_SDP_ERROR) { + bool allow_active; + /* Acknowledge and clear error bits */ dm_helpers_dp_write_dpcd( link->ctx, @@ -3532,8 +3453,10 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link) sizeof(psr_error_status.raw)); /* PSR error, disable and re-enable PSR */ - dc_link_set_psr_allow_active(link, false, true, false); - dc_link_set_psr_allow_active(link, true, true, false); + allow_active = false; + dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); + allow_active = true; + dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); return true; } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS == @@ -3591,15 +3514,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) #endif unsigned int test_pattern_size = 0; enum dp_test_pattern test_pattern; - struct dc_link_training_settings link_settings; union lane_adjust dpcd_lane_adjust; unsigned int lane; struct link_training_settings link_training_settings; - int i = 0; dpcd_test_pattern.raw = 0; memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment)); - memset(&link_settings, 0, sizeof(link_settings)); + memset(&link_training_settings, 0, sizeof(link_training_settings)); /* get phy test pattern and pattern parameters from DP receiver */ core_link_read_dpcd( @@ -3720,48 +3641,37 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) #endif /* prepare link training settings */ - link_settings.link = link->cur_link_settings; + link_training_settings.link_settings = link->cur_link_settings; for (lane = 0; lane < (unsigned int)(link->cur_link_settings.lane_count); lane++) { dpcd_lane_adjust.raw = get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link->cur_link_settings) == - DP_128b_132b_ENCODING) { - link_settings.lane_settings[lane].FFE_PRESET.raw = - 
dpcd_lane_adjust.tx_ffe.PRESET_VALUE; - } else if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_8b_10b_ENCODING) { - link_settings.lane_settings[lane].VOLTAGE_SWING = + link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING = (enum dc_voltage_swing) (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE); - link_settings.lane_settings[lane].PRE_EMPHASIS = + link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS = (enum dc_pre_emphasis) (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE); - link_settings.lane_settings[lane].POST_CURSOR2 = + link_training_settings.hw_lane_settings[lane].POST_CURSOR2 = (enum dc_post_cursor2) ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); } -#else - link_settings.lane_settings[lane].VOLTAGE_SWING = - (enum dc_voltage_swing) - (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE); - link_settings.lane_settings[lane].PRE_EMPHASIS = - (enum dc_pre_emphasis) - (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE); - link_settings.lane_settings[lane].POST_CURSOR2 = - (enum dc_post_cursor2) - ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if (dp_get_link_encoding_format(&link->cur_link_settings) == + DP_128b_132b_ENCODING) { + link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw = + dpcd_lane_adjust.tx_ffe.PRESET_VALUE; + } #endif } - for (i = 0; i < 4; i++) - link_training_settings.lane_settings[i] = - link_settings.lane_settings[i]; - link_training_settings.link_settings = link_settings.link; - link_training_settings.allow_invalid_msa_timing_param = false; + dp_hw_to_dpcd_lane_settings(&link_training_settings, + link_training_settings.hw_lane_settings, + link_training_settings.dpcd_lane_settings); /*Usage: Measure DP physical lane signal * by DP SI test equipment automatically. * PHY test pattern request is generated by equipment via HPD interrupt. 
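 *
 * Editor's note: with this rework the PHY test path fills
 * hw_lane_settings straight from the DPCD adjust request and derives
 * the DPCD image in one call, replacing the old per-lane copy loop:
 *
 *	dp_hw_to_dpcd_lane_settings(&link_training_settings,
 *			link_training_settings.hw_lane_settings,
 *			link_training_settings.dpcd_lane_settings);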
@@ -4065,8 +3975,8 @@ void dc_link_dp_handle_link_loss(struct dc_link *link) bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss, bool defer_handling, bool *has_left_work) { - union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } }; - union device_service_irq device_service_clear = { { 0 } }; + union hpd_irq_data hpd_irq_dpcd_data = {0}; + union device_service_irq device_service_clear = {0}; enum dc_status result; bool status = false; @@ -5419,6 +5329,14 @@ bool dc_link_dp_set_test_pattern( return false; if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) + core_link_write_dpcd(link, + DP_LINK_SQUARE_PATTERN, + p_custom_pattern, + 1); + +#endif /* tell receiver that we are sending qualification * pattern DP 1.2 or later - DP receiver's link quality * pattern is set using DPCD LINK_QUAL_LANEx_SET @@ -5939,7 +5857,7 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin uint8_t link_bw_set; uint8_t link_rate_set; uint32_t req_bw; - union lane_count_set lane_count_set = { {0} }; + union lane_count_set lane_count_set = {0}; ASSERT(link || crtc_timing); // invalid input @@ -5993,6 +5911,25 @@ enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings } #if defined(CONFIG_DRM_AMD_DC_DCN) +enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link) +{ + struct dc_link_settings link_settings = {0}; + + if (!dc_is_dp_signal(link->connector_signal)) + return DP_UNKNOWN_ENCODING; + + if (link->preferred_link_setting.lane_count != + LANE_COUNT_UNKNOWN && + link->preferred_link_setting.link_rate != + LINK_RATE_UNKNOWN) { + link_settings = link->preferred_link_setting; + } else { + decide_mst_link_settings(link, &link_settings); + } + + return dp_get_link_encoding_format(&link_settings); +} + // TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST) static void get_lane_status( struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c index e9006d099393..b1c9f77d6bf4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c @@ -263,10 +263,10 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link uint32_t retry_count = 0; /* From DP spec, CR read interval is always 100us. */ uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; - struct link_training_settings req_settings; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; union lane_align_status_updated dpcd_lane_status_updated = { {0} }; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; uint8_t set_cfg_data; enum dpia_set_config_ts ts; @@ -345,11 +345,12 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); /* Read status and adjustment requests from DPCD. 
*/ - status = dp_get_lane_status_and_drive_settings(link, + status = dp_get_lane_status_and_lane_adjust( + link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings, + dpcd_lane_adjust, hop); if (status != DC_OK) { result = LINK_TRAINING_ABORT; @@ -371,16 +372,18 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link * Note: settings are the same for all lanes, * so comparing first lane is sufficient. */ - if (lt_settings->lane_settings[0].VOLTAGE_SWING == - req_settings.lane_settings[0].VOLTAGE_SWING && - lt_settings->lane_settings[0].PRE_EMPHASIS == - req_settings.lane_settings[0].PRE_EMPHASIS) + if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + && (lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET == + dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE)) retries_cr++; else retries_cr = 0; /* Update VS/PE. */ - dp_update_drive_settings(lt_settings, req_settings); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->lane_settings, + lt_settings->dpcd_lane_settings); retry_count++; } @@ -416,10 +419,10 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */ uint32_t retry_count = 0; uint32_t wait_time_microsec = lt_settings->cr_pattern_time; - struct link_training_settings req_settings; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; union lane_align_status_updated dpcd_lane_status_updated = { {0} }; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; /* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery. * Fix inherited from perform_clock_recovery_sequence() - @@ -445,11 +448,12 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); /* Read status and adjustment requests from DPCD. */ - status = dp_get_lane_status_and_drive_settings(link, + status = dp_get_lane_status_and_lane_adjust( + link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings, + dpcd_lane_adjust, DPRX); if (status != DC_OK) { result = LINK_TRAINING_ABORT; @@ -471,16 +475,17 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li * Note: settings are the same for all lanes, * so comparing first lane is sufficient. */ - if (lt_settings->lane_settings[0].VOLTAGE_SWING == - req_settings.lane_settings[0].VOLTAGE_SWING && - lt_settings->lane_settings[0].PRE_EMPHASIS == - req_settings.lane_settings[0].PRE_EMPHASIS) + if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + && (lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET == + dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE)) retries_cr++; else retries_cr = 0; /* Update VS/PE. 
*/ - dp_update_drive_settings(lt_settings, req_settings); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); retry_count++; } @@ -566,10 +571,10 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link enum dc_status status; enum dc_dp_training_pattern tr_pattern; uint32_t wait_time_microsec; - struct link_training_settings req_settings; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = { {0} }; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; uint8_t set_cfg_data; enum dpia_set_config_ts ts; @@ -639,11 +644,12 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); /* Read status and adjustment requests from DPCD. */ - status = dp_get_lane_status_and_drive_settings(link, + status = dp_get_lane_status_and_lane_adjust( + link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings, + dpcd_lane_adjust, hop); if (status != DC_OK) { result = LINK_TRAINING_ABORT; @@ -664,7 +670,8 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link } /* Update VS/PE. */ - dp_update_drive_settings(lt_settings, req_settings); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } /* Abort link training if equalization failed due to HPD unplug. */ @@ -701,10 +708,10 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li enum dc_status status; enum dc_dp_training_pattern tr_pattern = lt_settings->pattern_for_eq; uint32_t wait_time_microsec; - struct link_training_settings req_settings; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = { {0} }; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, DPRX); @@ -720,11 +727,12 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); /* Read status and adjustment requests from DPCD. */ - status = dp_get_lane_status_and_drive_settings(link, + status = dp_get_lane_status_and_lane_adjust( + link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings, + dpcd_lane_adjust, DPRX); if (status != DC_OK) { result = LINK_TRAINING_ABORT; @@ -745,7 +753,8 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li } /* Update VS/PE. */ - dp_update_drive_settings(lt_settings, req_settings); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } /* Abort link training if equalization failed due to HPD unplug. */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 1cab4bf06abe..25e48a8cbb78 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -236,6 +236,23 @@ static struct link_encoder *get_link_enc_used_by_link( return link_enc; } +/* Clear all link encoder assignments. 
*/ +static void clear_enc_assignments(struct dc_state *state) +{ + int i; + enum engine_id eng_id; + struct dc_stream_state *stream; + + for (i = 0; i < MAX_PIPES; i++) { + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false; + eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id; + stream = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream; + if (eng_id != ENGINE_ID_UNKNOWN) + state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_id - ENGINE_ID_DIGA] = eng_id; + if (stream) + stream->link_enc = NULL; + } +} void link_enc_cfg_init( struct dc *dc, @@ -250,6 +267,8 @@ void link_enc_cfg_init( state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN; } + clear_enc_assignments(state); + state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; } @@ -265,6 +284,9 @@ void link_enc_cfg_link_encs_assign( ASSERT(state->stream_count == stream_count); + if (stream_count == 0) + clear_enc_assignments(state); + /* Release DIG link encoder resources before running assignment algorithm. */ for (i = 0; i < stream_count; i++) dc->res_pool->funcs->link_enc_unassign(state, streams[i]); @@ -488,16 +510,19 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream( return link_enc; } -bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id) +bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link) { bool is_avail = true; int i; - /* Add assigned encoders to list. */ + /* An encoder is not available if it has already been assigned to a different endpoint. */ for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = get_assignment(dc, i); + struct display_endpoint_id ep_id = (struct display_endpoint_id) { + .link_id = link->link_id, + .ep_type = link->ep_type}; - if (assignment.valid && assignment.eng_id == eng_id) { + if (assignment.valid && assignment.eng_id == eng_id && !are_ep_ids_equal(&ep_id, &assignment.ep_id)) { is_avail = false; break; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 561c10a92bb5..c32fdccd4d92 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1155,9 +1155,17 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width; } - if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE || - pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) - res = false; + if (!pipe_ctx->stream->ctx->dc->config.enable_windowed_mpo_odm) { + if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE || + pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) + res = false; + } else { + /* Clamp minimum viewport size */ + if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE) + pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE; + if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) + pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE; + } DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n" "src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n", @@ -3009,6 +3017,11 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla { enum dc_status res = DC_OK; + /* check if surface has 
invalid dimensions */ + if (plane_state->src_rect.width == 0 || plane_state->src_rect.height == 0 || + plane_state->dst_rect.width == 0 || plane_state->dst_rect.height == 0) + return DC_FAIL_SURFACE_VALIDATE; + /* TODO For now validates pixel format only */ if (dc->res_pool->funcs->validate_plane) return dc->res_pool->funcs->validate_plane(plane_state, &dc->caps); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index f0f54f4d3d9b..57cf4cb82370 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -202,6 +202,10 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) new_stream->stream_id = new_stream->ctx->dc_stream_id_count; new_stream->ctx->dc_stream_id_count++; + /* If using dynamic encoder assignment, wait till stream committed to assign encoder. */ + if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign) + new_stream->link_enc = NULL; + kref_init(&new_stream->refcount); return new_stream; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index dd995905b0cb..3aac3f4a2852 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.156" +#define DC_VER "3.2.160" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -211,12 +211,12 @@ struct dc_dcc_setting { unsigned int max_uncompressed_blk_size; bool independent_64b_blks; #if defined(CONFIG_DRM_AMD_DC_DCN) - //These bitfields to be used starting with DCN 3.0 + //These bitfields to be used starting with DCN struct { - uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case) - uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0 - uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0 - uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case) + uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case) + uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN + uint32_t dcc_256_128_128 : 1; //available starting with DCN + uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case) } dcc_controls; #endif }; @@ -323,6 +323,7 @@ struct dc_config { bool multi_mon_pp_mclk_switch; bool disable_dmcu; bool enable_4to1MPC; + bool enable_windowed_mpo_odm; bool allow_edp_hotplug_detection; #if defined(CONFIG_DRM_AMD_DC_DCN) bool clamp_min_dcfclk; @@ -342,6 +343,12 @@ enum visual_confirm { VISUAL_CONFIRM_SWIZZLE = 9, }; +enum dc_psr_power_opts { + psr_power_opt_invalid = 0x0, + psr_power_opt_smu_opt_static_screen = 0x1, + psr_power_opt_z10_static_screen = 0x10, +}; + enum dcc_option { DCC_ENABLE = 0, DCC_DISABLE = 1, @@ -664,9 +671,11 @@ struct dc_debug_options { #if defined(CONFIG_DRM_AMD_DC_DCN) /* TODO - remove once tested */ bool legacy_dp2_lt; + bool set_mst_en_for_sst; #endif union mem_low_power_enable_options enable_mem_low_power; union root_clock_optimization_options root_clock_optimization; + bool hpo_optimization; bool force_vblank_alignment; /* Enable dmub aux for legacy ddc */ @@ -724,6 +733,9 @@ struct dc { #if defined(CONFIG_DRM_AMD_DC_DCN) bool idle_optimizations_allowed; #endif +#if defined(CONFIG_DRM_AMD_DC_DCN) + bool enable_c20_dtm_b0; +#endif /* Require to maintain clocks and bandwidth for UEFI enabled HW */ @@ 
-930,6 +942,7 @@ union surface_update_flags { uint32_t bandwidth_change:1; uint32_t clock_change:1; uint32_t stereo_format_change:1; + uint32_t lut_3d:1; uint32_t full_update:1; } bits; @@ -1416,6 +1429,12 @@ bool dc_process_dmub_set_config_async(struct dc *dc, uint32_t link_index, struct set_config_cmd_payload *payload, struct dmub_notification *notify); + +enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, + uint32_t link_index, + uint8_t mst_alloc_slots, + uint8_t *mst_slots_in_use); + /******************************************************************************* * DSC Interfaces ******************************************************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index a23937e1dc5c..e68e9a86a4d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -174,11 +174,6 @@ struct dc_lane_settings { #endif }; -struct dc_link_training_settings { - struct dc_link_settings link; - struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]; -}; - struct dc_link_training_overrides { enum dc_voltage_swing *voltage_swing; enum dc_pre_emphasis *pre_emphasis; @@ -903,6 +898,9 @@ struct dpcd_usb4_dp_tunneling_info { #ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT #define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3 #endif +#ifndef DP_LINK_SQUARE_PATTERN +#define DP_LINK_SQUARE_PATTERN 0x10F +#endif #ifndef DP_DSC_CONFIGURATION #define DP_DSC_CONFIGURATION 0x161 #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 69b008bafbbc..180ecd860296 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -85,6 +85,7 @@ struct psr_settings { */ bool psr_frame_capture_indication_req; unsigned int psr_sdp_transmit_line_num_deadline; + unsigned int psr_power_opt; }; /* @@ -123,6 +124,10 @@ struct dc_link { struct dc_link_settings cur_link_settings; struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX]; struct dc_link_settings preferred_link_setting; + /* preferred_training_settings are override values that + * come from DM. DM is responsible for the memory + * management of the override pointers. 
+ */ struct dc_link_training_overrides preferred_training_settings; struct dp_audio_test_data audio_test_data; @@ -177,11 +182,15 @@ struct dc_link { struct psr_settings psr_settings; + /* Drive settings read from integrated info table */ + struct dc_lane_settings bios_forced_drive_settings; + /* MST record stream using this link */ struct link_flags { bool dp_keep_receiver_powered; bool dp_skip_DID2; bool dp_skip_reset_segment; + bool dp_mot_reset_segment; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -267,8 +276,8 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link); int dc_link_get_target_backlight_pwm(const struct dc_link *link); -bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, - bool wait, bool force_static); +bool dc_link_set_psr_allow_active(struct dc_link *dc_link, const bool *enable, + bool wait, bool force_static, const unsigned int *power_opts); bool dc_link_get_psr_state(const struct dc_link *dc_link, enum dc_psr_state *state); @@ -277,7 +286,6 @@ bool dc_link_setup_psr(struct dc_link *dc_link, struct psr_context *psr_context); void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency); -void blank_all_dp_displays(struct dc *dc, bool hw_init); /* Request DC to detect if there is a Panel connected. * boot - If this call is during initial boot. @@ -296,6 +304,10 @@ enum dc_detect_reason { bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); bool dc_link_get_hpd_state(struct dc_link *dc_link); enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); +enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); +#endif /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). 
* Return: @@ -425,4 +437,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing( bool dc_link_is_fec_supported(const struct dc_link *link); bool dc_link_should_enable_fec(const struct dc_link *link); +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link); +#endif #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index b8ebc1f09538..e37c4a10bfd5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -115,6 +115,13 @@ struct periodic_interrupt_config { int lines_offset; }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +struct dc_mst_stream_bw_update { + bool is_increase; // is bandwidth reduced or increased + uint32_t mst_stream_bw; // new mst bandwidth in kbps +}; +#endif + union stream_update_flags { struct { uint32_t scaling:1; @@ -125,6 +132,9 @@ union stream_update_flags { uint32_t gamut_remap:1; uint32_t wb_update:1; uint32_t dsc_changed : 1; +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint32_t mst_bw : 1; +#endif } bits; uint32_t raw; @@ -278,6 +288,9 @@ struct dc_stream_update { struct dc_writeback_update *wb_update; struct dc_dsc_config *dsc_config; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct dc_mst_stream_bw_update *mst_bw_update; +#endif struct dc_transfer_func *func_shaper; struct dc_3dlut *lut3d_func; diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 15c353c389d8..388457ffc0a8 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -653,6 +653,7 @@ enum dc_psr_state { PSR_STATE1a, PSR_STATE2, PSR_STATE2a, + PSR_STATE2b, PSR_STATE3, PSR_STATE3Init, PSR_STATE4, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index 7866cf2a668f..27218ede150a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -514,13 +514,15 @@ void dce_aud_az_configure( union audio_sample_rates sample_rates = audio_mode->sample_rates; uint8_t byte2 = audio_mode->max_bit_rate; + uint8_t channel_count = audio_mode->channel_count; /* adjust specific properties */ switch (audio_format_code) { case AUDIO_FORMAT_CODE_LINEARPCM: { + check_audio_bandwidth( crtc_info, - audio_mode->channel_count, + channel_count, signal, &sample_rates); @@ -548,7 +550,7 @@ void dce_aud_az_configure( /* fill audio format data */ set_reg_field_value(value, - audio_mode->channel_count - 1, + channel_count - 1, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, MAX_CHANNELS); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 95cb4d7cc76a..6d42a9cc9916 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -534,17 +534,26 @@ struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payload *payload) { if (payload->i2c_over_aux) { + if (payload->write_status_update) { + if (payload->mot) + return I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT; + else + return I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST; + } if (payload->write) { if (payload->mot) return I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT; - return I2CAUX_TRANSACTION_ACTION_I2C_WRITE; + else + return I2CAUX_TRANSACTION_ACTION_I2C_WRITE; } if (payload->mot) return 
I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT; + return I2CAUX_TRANSACTION_ACTION_I2C_READ; } if (payload->write) return I2CAUX_TRANSACTION_ACTION_DP_WRITE; + return I2CAUX_TRANSACTION_ACTION_DP_READ; } @@ -698,7 +707,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, aux_defer_retries = 0, aux_i2c_defer_retries = 0, aux_timeout_retries = 0, - aux_invalid_reply_retries = 0; + aux_invalid_reply_retries = 0, + aux_ack_m_retries = 0; if (ddc_pin) { aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; @@ -758,9 +768,27 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, aux_defer_retries, AUX_MAX_RETRIES); goto fail; - } else { + } else udelay(300); + } else if (payload->write && ret > 0) { + /* sink requested more time to complete the write via AUX_ACKM */ + if (++aux_ack_m_retries >= AUX_MAX_RETRIES) { + DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, + LOG_FLAG_Error_I2cAux, + "dce_aux_transfer_with_retries: FAILURE: aux_ack_m_retries=%d >= AUX_MAX_RETRIES=%d", + aux_ack_m_retries, + AUX_MAX_RETRIES); + goto fail; } + + /* retry reading the write status until complete + * NOTE: payload is modified here + */ + payload->write = false; + payload->write_status_update = true; + payload->length = 0; + udelay(300); + } else return true; break; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index 296b2f80a1ec..a3fee929cd12 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -671,6 +671,7 @@ struct dce_hwseq_registers { uint32_t MC_VM_FB_LOCATION_BASE; uint32_t MC_VM_FB_LOCATION_TOP; uint32_t MC_VM_FB_OFFSET; + uint32_t HPO_TOP_HW_CONTROL; }; /* set field name */ #define HWS_SF(blk_name, reg_name, field_name, post_fix)\ @@ -1151,7 +1152,9 @@ struct dce_hwseq_registers { type DOMAIN_POWER_GATE;\ type DOMAIN_PGFSM_PWR_STATUS;\ type HPO_HDMISTREAMCLK_G_GATE_DIS;\ - type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE; + type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;\ + type I2C_LIGHT_SLEEP_FORCE;\ + type HPO_IO_EN; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index aa8403bc4c83..90eb8eedacf2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -50,6 +50,8 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state) state = PSR_STATE2; else if (raw_state == 0x21) state = PSR_STATE2a; + else if (raw_state == 0x22) + state = PSR_STATE2b; else if (raw_state == 0x30) state = PSR_STATE3; else if (raw_state == 0x31) @@ -225,6 +227,25 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_ dc_dmub_srv_wait_idle(dc->dmub_srv); } +/** + * Set PSR power optimization flags. + */ +static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.psr_set_power_opt.header.type = DMUB_CMD__PSR; + cmd.psr_set_power_opt.header.sub_type = DMUB_CMD__SET_PSR_POWER_OPT; + cmd.psr_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_power_opt_data); + cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt; + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); +} + /* * Setup PSR by programming phy registers and sending psr hw context values to firmware. 
*/ @@ -356,6 +377,7 @@ static const struct dmub_psr_funcs psr_funcs = { .psr_set_level = dmub_psr_set_level, .psr_force_static = dmub_psr_force_static, .psr_get_residency = dmub_psr_get_residency, + .psr_set_power_opt = dmub_psr_set_power_opt, }; /* diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h index 9675c269e649..5dbd479660f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h @@ -46,6 +46,7 @@ struct dmub_psr_funcs { void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst); void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency, uint8_t panel_inst); + void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt); }; struct dmub_psr *dmub_psr_create(struct dc_context *ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 8108f9ae2638..24e47df526f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1244,6 +1244,12 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) #endif if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE); + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dc->hwseq->funcs.setup_hpo_hw_control && is_dp_128b_132b_signal(pipe_ctx)) + dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, false); +#endif + } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, @@ -1649,13 +1655,31 @@ static enum dc_status apply_single_controller_ctx_to_hw( static void power_down_encoders(struct dc *dc) { - int i; - - blank_all_dp_displays(dc, false); + int i, j; for (i = 0; i < dc->link_count; i++) { enum signal_type signal = dc->links[i]->connector_signal; + if ((signal == SIGNAL_TYPE_EDP) || + (signal == SIGNAL_TYPE_DISPLAY_PORT)) { + if (dc->links[i]->link_enc->funcs->get_dig_frontend && + dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) { + unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend( + dc->links[i]->link_enc); + + for (j = 0; j < dc->res_pool->stream_enc_count; j++) { + if (fe == dc->res_pool->stream_enc[j]->id) { + dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], + dc->res_pool->stream_enc[j]); + break; + } + } + } + + if (!dc->links[i]->wa_flags.dp_keep_receiver_powered) + dp_receiver_power_ctrl(dc->links[i], false); + } + if (signal != SIGNAL_TYPE_EDP) signal = SIGNAL_TYPE_NONE; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index cb9767ddf93d..44293d66b46b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -205,9 +205,17 @@ static void dpp1_power_on_dscl( struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) { - REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, power_on ? 
0 : 3); - if (power_on) + if (power_on) { + REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0); REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5); + } else { + if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) { + dpp->base.ctx->dc->optimized_required = true; + dpp->base.deferred_reg_writes.bits.disable_dscl = true; + } else { + REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); + } + } } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index bc3ec05bf34b..0b788d794fb3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -231,7 +231,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) if (!s->blank_en) DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" - "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" + " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start, dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler, @@ -1366,7 +1366,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) void dcn10_init_hw(struct dc *dc) { - int i; + int i, j; struct abm *abm = dc->res_pool->abm; struct dmcu *dmcu = dc->res_pool->dmcu; struct dce_hwseq *hws = dc->hwseq; @@ -1378,6 +1378,12 @@ void dcn10_init_hw(struct dc *dc) if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); + /* Align bw context with hw config when system resume. */ + if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) { + dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz; + dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz; + } + // Initialize the dccg if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init) dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg); @@ -1462,8 +1468,43 @@ void dcn10_init_hw(struct dc *dc) dmub_enable_outbox_notification(dc); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) - blank_all_dp_displays(dc, true); + if (dc->config.power_down_display_on_boot) { + uint8_t dpcd_power_state = '\0'; + enum dc_status status = DC_ERROR_UNEXPECTED; + + for (i = 0; i < dc->link_count; i++) { + if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) + continue; + + /* DP 2.0 requires that LTTPR Caps be read first */ + dp_retrieve_lttpr_cap(dc->links[i]); + + /* + * If any of the displays are lit up turn them off. + * The reason is that some MST hubs cannot be turned off + * completely until we tell them to do so. + * If not turned off, then displays connected to MST hub + * won't light up. 
+ */ + status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { + /* blank dp stream before power off receiver*/ + if (dc->links[i]->link_enc->funcs->get_dig_frontend) { + unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc); + + for (j = 0; j < dc->res_pool->stream_enc_count; j++) { + if (fe == dc->res_pool->stream_enc[j]->id) { + dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], + dc->res_pool->stream_enc[j]); + break; + } + } + } + dp_receiver_power_ctrl(dc->links[i], false); + } + } + } /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -2304,8 +2345,8 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1, void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); - struct vm_system_aperture_param apt = { {{ 0 } } }; - struct vm_context0_param vm0 = { { { 0 } } }; + struct vm_system_aperture_param apt = {0}; + struct vm_context0_param vm0 = {0}; mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws); mmhub_read_vm_context0_settings(hubp1, &vm0, hws); @@ -2478,7 +2519,7 @@ void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; - struct mpcc_blnd_cfg blnd_cfg = {{0}}; + struct mpcc_blnd_cfg blnd_cfg = {0}; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; int mpcc_id; struct mpcc *new_mpcc; @@ -3635,7 +3676,7 @@ void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings) { - struct encoder_unblank_param params = { { 0 } }; + struct encoder_unblank_param params = {0}; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct dce_hwseq *hws = link->dc->hwseq; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index ede65100a050..f98aba308028 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -169,7 +169,29 @@ type DTBCLK_DTO_DIV[MAX_PIPES];\ type DCCG_AUDIO_DTO_SEL;\ type DCCG_AUDIO_DTO0_SOURCE_SEL;\ - type DENTIST_DISPCLK_CHG_MODE; + type DENTIST_DISPCLK_CHG_MODE;\ + type DSCCLK0_DTO_PHASE;\ + type DSCCLK0_DTO_MODULO;\ + type DSCCLK1_DTO_PHASE;\ + type DSCCLK1_DTO_MODULO;\ + type DSCCLK2_DTO_PHASE;\ + type DSCCLK2_DTO_MODULO;\ + type DSCCLK0_DTO_ENABLE;\ + type DSCCLK1_DTO_ENABLE;\ + type DSCCLK2_DTO_ENABLE;\ + type SYMCLK32_ROOT_SE0_GATE_DISABLE;\ + type SYMCLK32_ROOT_SE1_GATE_DISABLE;\ + type SYMCLK32_ROOT_SE2_GATE_DISABLE;\ + type SYMCLK32_ROOT_SE3_GATE_DISABLE;\ + type SYMCLK32_ROOT_LE0_GATE_DISABLE;\ + type SYMCLK32_ROOT_LE1_GATE_DISABLE;\ + type DPSTREAMCLK_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK_GATE_DISABLE;\ + type HDMISTREAMCLK0_DTO_PHASE;\ + type HDMISTREAMCLK0_DTO_MODULO;\ + type HDMICHARCLK0_GATE_DISABLE;\ + type HDMICHARCLK0_ROOT_GATE_DISABLE; + struct dccg_shift { DCCG_REG_FIELD_LIST(uint8_t) @@ -205,6 +227,16 @@ struct dccg_registers { uint32_t SYMCLK32_SE_CNTL; uint32_t SYMCLK32_LE_CNTL; uint32_t DENTIST_DISPCLK_CNTL; + uint32_t DSCCLK_DTO_CTRL; + uint32_t DSCCLK0_DTO_PARAM; + uint32_t 
DSCCLK1_DTO_PARAM; + uint32_t DSCCLK2_DTO_PARAM; + uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE; + uint32_t DPSTREAMCLK_GATE_DISABLE; + uint32_t DCCG_GATE_DISABLE_CNTL3; + uint32_t HDMISTREAMCLK0_DTO_PARAM; + uint32_t DCCG_GATE_DISABLE_CNTL4; + }; struct dcn_dccg { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index fc83744149d9..4f88376a118f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2123,7 +2123,7 @@ void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings) { - struct encoder_unblank_param params = { { 0 } }; + struct encoder_unblank_param params = {0}; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct dce_hwseq *hws = link->dc->hwseq; @@ -2298,7 +2298,7 @@ void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; - struct mpcc_blnd_cfg blnd_cfg = { {0} }; + struct mpcc_blnd_cfg blnd_cfg = {0}; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; int mpcc_id; struct mpcc *new_mpcc; @@ -2397,6 +2397,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) * BY this, it is logic clean to separate stream and link */ if (is_dp_128b_132b_signal(pipe_ctx)) { + if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control) + pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control( + pipe_ctx->stream->ctx->dc->hwseq, true); setup_dp_hpo_stream(pipe_ctx, true); pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream( pipe_ctx->stream_res.hpo_dp_stream_enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 756f5d411d9a..3883f918b3bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -3660,9 +3660,6 @@ static enum dml_project get_dml_project_version(uint32_t hw_internal_rev) return DML_PROJECT_NAVI10v2; } -#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16))) -#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x)) - static bool init_soc_bounding_box(struct dc *dc, struct dcn20_resource_pool *pool) { @@ -3698,16 +3695,22 @@ static bool init_soc_bounding_box(struct dc *dc, clock_limits_available = (status == PP_SMU_RESULT_OK); } - if (clock_limits_available && uclk_states_available && num_states) + if (clock_limits_available && uclk_states_available && num_states) { + DC_FP_START(); dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); - else if (clock_limits_available) + DC_FP_END(); + } else if (clock_limits_available) { + DC_FP_START(); dcn20_cap_soc_clocks(loaded_bb, max_clocks); + DC_FP_END(); + } } loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; loaded_ip->max_num_dpp = pool->base.pipe_count; + DC_FP_START(); dcn20_patch_bounding_box(dc, loaded_bb); - + DC_FP_END(); return true; } @@ -3727,8 +3730,6 @@ static bool dcn20_resource_construct( enum dml_project dml_project_version = get_dml_project_version(ctx->asic_id.hw_internal_rev); - DC_FP_START(); - ctx->dc_bios->regs = &bios_regs; pool->base.funcs = &dcn20_res_pool_funcs; @@ -4077,12 +4078,10 @@ static bool dcn20_resource_construct( pool->base.oem_device = 
NULL; } - DC_FP_END(); return true; create_fail: - DC_FP_END(); dcn20_resource_destruct(pool); return false; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index ef5f6da5248a..c1d967ed6551 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -494,6 +494,20 @@ void dpp3_deferred_update( int bypass_state; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + if (dpp_base->deferred_reg_writes.bits.disable_dscl) { + REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); + dpp_base->deferred_reg_writes.bits.disable_dscl = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_gamcor) { + REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_gamcor = false; + } + if (dpp_base->deferred_reg_writes.bits.disable_blnd_lut) { REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &bypass_state); if (bypass_state == 0) { // only program if bypass was latched diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c index 72c5687adc68..387eec616162 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c @@ -136,9 +136,13 @@ static void dpp3_power_on_gamcor_lut( struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, power_on ? 0 : 3); - if (power_on) + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0); REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_gamcor = true; + } } else REG_SET(CM_MEM_PWR_CTRL, 0, GAMCOR_MEM_PWR_DIS, power_on == true ? 
0:1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 01a90badd173..df2717116604 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -437,7 +437,7 @@ void dcn30_init_hw(struct dc *dc) struct dce_hwseq *hws = dc->hwseq; struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; - int i; + int i, j; int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; @@ -534,8 +534,41 @@ void dcn30_init_hw(struct dc *dc) hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) - blank_all_dp_displays(dc, true); + if (dc->config.power_down_display_on_boot) { + uint8_t dpcd_power_state = '\0'; + enum dc_status status = DC_ERROR_UNEXPECTED; + + for (i = 0; i < dc->link_count; i++) { + if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) + continue; + /* DP 2.0 states that LTTPR regs must be read first */ + dp_retrieve_lttpr_cap(dc->links[i]); + + /* if any of the displays are lit up turn them off */ + status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { + /* blank dp stream before power off receiver*/ + if (dc->links[i]->link_enc->funcs->get_dig_frontend) { + unsigned int fe; + + fe = dc->links[i]->link_enc->funcs->get_dig_frontend( + dc->links[i]->link_enc); + if (fe == ENGINE_ID_UNKNOWN) + continue; + + for (j = 0; j < dc->res_pool->stream_enc_count; j++) { + if (fe == dc->res_pool->stream_enc[j]->id) { + dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], + dc->res_pool->stream_enc[j]); + break; + } + } + } + dp_receiver_power_ctrl(dc->links[i], false); + } + } + } /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -969,7 +1002,8 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, /* turning off DPG */ pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, false); for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) - mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false); + if (mpcc_pipe->plane_res.hubp) + mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false); stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space, color_depth, solid_color, width, height, offset); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index a82319f4d081..95149734378b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -1381,13 +1381,11 @@ int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id) } -static void mpc3_mpc_init(struct mpc *mpc) +static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc) { struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); int mpcc_id; - mpc1_mpc_init(mpc); - if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { if (mpc30->mpc_mask->MPC_RMU0_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPC_RMU1_MEM_LOW_PWR_MODE) { REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, 3); @@ -1405,7 +1403,7 @@ const struct mpc_funcs dcn30_mpc_funcs = { .read_mpcc_state = mpc1_read_mpcc_state, .insert_plane = mpc1_insert_plane, .remove_mpcc = 
mpc1_remove_mpcc, - .mpc_init = mpc3_mpc_init, + .mpc_init = mpc1_mpc_init, .mpc_init_single_inst = mpc1_mpc_init_single_inst, .update_blending = mpc2_update_blending, .cursor_lock = mpc1_cursor_lock, @@ -1432,6 +1430,7 @@ const struct mpc_funcs dcn30_mpc_funcs = { .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, .set_bg_color = mpc1_set_bg_color, + .set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode, }; void dcn30_mpc_construct(struct dcn30_mpc *mpc30, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 3a8a3214f770..79a66e0c4303 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -1707,9 +1707,6 @@ bool dcn30_release_post_bldn_3dlut( return ret; } -#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16))) -#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x)) - static bool is_soc_bounding_box_valid(struct dc *dc) { uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; @@ -1929,23 +1926,25 @@ noinline bool dcn30_internal_validate_bw( if (vlevel == context->bw_ctx.dml.soc.num_states) goto validate_fail; - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; + if (!dc->config.enable_windowed_mpo_odm) { + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; - if (!pipe->stream) - continue; + if (!pipe->stream) + continue; - /* We only support full screen mpo with ODM */ - if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled - && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, - sizeof(struct rect)) != 0) { - ASSERT(mpo_pipe->plane_state != pipe->plane_state); - goto validate_fail; + /* We only support full screen mpo with ODM */ + if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled + && pipe->plane_state && mpo_pipe + && memcmp(&mpo_pipe->plane_res.scl_data.recout, + &pipe->plane_res.scl_data.recout, + sizeof(struct rect)) != 0) { + ASSERT(mpo_pipe->plane_state != pipe->plane_state); + goto validate_fail; + } + pipe_idx++; } - pipe_idx++; } /* merge pipes if necessary */ @@ -2129,10 +2128,10 @@ static noinline void dcn30_calculate_wm_and_dlg_fp( int pipe_cnt, int vlevel) { + int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; int i, pipe_idx; - double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; - bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != - dm_dram_clock_change_unsupported; + double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb]; + bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported; if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk) dcfclk = context->bw_ctx.dml.soc.min_dcfclk; @@ -2208,6 +2207,7 @@ static noinline void dcn30_calculate_wm_and_dlg_fp( context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us; context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us; } + 
context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; @@ -2323,7 +2323,9 @@ bool dcn30_validate_bandwidth(struct dc *dc, goto validate_out; } + DC_FP_START(); dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + DC_FP_END(); BW_VAL_TRACE_END_WATERMARKS(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile index 09264716d1dc..7aa628c21973 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile @@ -13,32 +13,6 @@ DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \ dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o -ifdef CONFIG_X86 -CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse -endif - -ifdef CONFIG_PPC64 -CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec -endif - -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float -endif - -ifdef CONFIG_X86 -ifdef IS_OLD_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). -CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mpreferred-stack-boundary=4 -else -CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -msse2 -endif -endif - AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301)) AMD_DISPLAY_FILES += $(AMD_DAL_DCN301) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 5350c93d7772..fbaa03f26d8b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -82,6 +82,7 @@ #include "dce/dce_i2c.h" #include "dml/dcn30/display_mode_vba_30.h" +#include "dml/dcn301/dcn301_fpu.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" #include "amdgpu_socbb.h" @@ -91,184 +92,6 @@ #define DC_LOGGER_INIT(logger) -struct _vcs_dpi_ip_params_st dcn3_01_ip = { - .odm_capable = 1, - .gpuvm_enable = 1, - .hostvm_enable = 1, - .gpuvm_max_page_table_levels = 1, - .hostvm_max_page_table_levels = 2, - .hostvm_cached_page_table_levels = 0, - .pte_group_size_bytes = 2048, - .num_dsc = 3, - .rob_buffer_size_kbytes = 184, - .det_buffer_size_kbytes = 184, - .dpte_buffer_size_in_pte_reqs_luma = 64, - .dpte_buffer_size_in_pte_reqs_chroma = 32, - .pde_proc_buffer_size_64k_reqs = 48, - .dpp_output_buffer_pixels = 2560, - .opp_output_buffer_lines = 1, - .pixel_chunk_size_kbytes = 8, - .meta_chunk_size_kbytes = 2, - .writeback_chunk_size_kbytes = 8, - .line_buffer_size_bits = 789504, - .is_line_buffer_bpp_fixed = 0, // ? - .line_buffer_fixed_bpp = 48, // ? 
- .dcc_supported = true, - .writeback_interface_buffer_size_kbytes = 90, - .writeback_line_buffer_buffer_size = 656640, - .max_line_buffer_lines = 12, - .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 - .writeback_chroma_buffer_size_kbytes = 8, - .writeback_chroma_line_buffer_width_pixels = 4, - .writeback_max_hscl_ratio = 1, - .writeback_max_vscl_ratio = 1, - .writeback_min_hscl_ratio = 1, - .writeback_min_vscl_ratio = 1, - .writeback_max_hscl_taps = 1, - .writeback_max_vscl_taps = 1, - .writeback_line_buffer_luma_buffer_size = 0, - .writeback_line_buffer_chroma_buffer_size = 14643, - .cursor_buffer_size = 8, - .cursor_chunk_size = 2, - .max_num_otg = 4, - .max_num_dpp = 4, - .max_num_wb = 1, - .max_dchub_pscl_bw_pix_per_clk = 4, - .max_pscl_lb_bw_pix_per_clk = 2, - .max_lb_vscl_bw_pix_per_clk = 4, - .max_vscl_hscl_bw_pix_per_clk = 4, - .max_hscl_ratio = 6, - .max_vscl_ratio = 6, - .hscl_mults = 4, - .vscl_mults = 4, - .max_hscl_taps = 8, - .max_vscl_taps = 8, - .dispclk_ramp_margin_percent = 1, - .underscan_factor = 1.11, - .min_vblank_lines = 32, - .dppclk_delay_subtotal = 46, - .dynamic_metadata_vm_enabled = true, - .dppclk_delay_scl_lb_only = 16, - .dppclk_delay_scl = 50, - .dppclk_delay_cnvc_formatter = 27, - .dppclk_delay_cnvc_cursor = 6, - .dispclk_delay_subtotal = 119, - .dcfclk_cstate_latency = 5.2, // SRExitTime - .max_inter_dcn_tile_repeaters = 8, - .max_num_hdmi_frl_outputs = 0, - .odm_combine_4to1_supported = true, - - .xfc_supported = false, - .xfc_fill_bw_overhead_percent = 10.0, - .xfc_fill_constant_bytes = 0, - .gfx7_compat_tiling_supported = 0, - .number_of_cursors = 1, -}; - -struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc = { - .clock_limits = { - { - .state = 0, - .dram_speed_mts = 2400.0, - .fabricclk_mhz = 600, - .socclk_mhz = 278.0, - .dcfclk_mhz = 400.0, - .dscclk_mhz = 206.0, - .dppclk_mhz = 1015.0, - .dispclk_mhz = 1015.0, - .phyclk_mhz = 600.0, - }, - { - .state = 1, - .dram_speed_mts = 2400.0, - .fabricclk_mhz = 688, - .socclk_mhz = 278.0, - .dcfclk_mhz = 400.0, - .dscclk_mhz = 206.0, - .dppclk_mhz = 1015.0, - .dispclk_mhz = 1015.0, - .phyclk_mhz = 600.0, - }, - { - .state = 2, - .dram_speed_mts = 4267.0, - .fabricclk_mhz = 1067, - .socclk_mhz = 278.0, - .dcfclk_mhz = 608.0, - .dscclk_mhz = 296.0, - .dppclk_mhz = 1015.0, - .dispclk_mhz = 1015.0, - .phyclk_mhz = 810.0, - }, - - { - .state = 3, - .dram_speed_mts = 4267.0, - .fabricclk_mhz = 1067, - .socclk_mhz = 715.0, - .dcfclk_mhz = 676.0, - .dscclk_mhz = 338.0, - .dppclk_mhz = 1015.0, - .dispclk_mhz = 1015.0, - .phyclk_mhz = 810.0, - }, - - { - .state = 4, - .dram_speed_mts = 4267.0, - .fabricclk_mhz = 1067, - .socclk_mhz = 953.0, - .dcfclk_mhz = 810.0, - .dscclk_mhz = 338.0, - .dppclk_mhz = 1015.0, - .dispclk_mhz = 1015.0, - .phyclk_mhz = 810.0, - }, - }, - - .sr_exit_time_us = 9.0, - .sr_enter_plus_exit_time_us = 11.0, - .urgent_latency_us = 4.0, - .urgent_latency_pixel_data_only_us = 4.0, - .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, - .urgent_latency_vm_data_only_us = 4.0, - .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, - .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, - .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0, - .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, - .max_avg_sdp_bw_use_normal_percent = 60.0, - .max_avg_dram_bw_use_normal_percent = 60.0, - .writeback_latency_us = 12.0, - 
.max_request_size_bytes = 256, - .dram_channel_width_bytes = 4, - .fabric_datapath_to_dcn_data_return_bytes = 32, - .dcn_downspread_percent = 0.5, - .downspread_percent = 0.38, - .dram_page_open_time_ns = 50.0, - .dram_rw_turnaround_time_ns = 17.5, - .dram_return_buffer_per_channel_bytes = 8192, - .round_trip_ping_latency_dcfclk_cycles = 191, - .urgent_out_of_order_return_per_channel_bytes = 4096, - .channel_interleave_bytes = 256, - .num_banks = 8, - .num_chans = 4, - .gpuvm_min_page_size_bytes = 4096, - .hostvm_min_page_size_bytes = 4096, - .dram_clock_change_latency_us = 23.84, - .writeback_dram_clock_change_latency_us = 23.0, - .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3550, - .xfc_bus_transport_time_us = 20, // ? - .xfc_xbuf_latency_tolerance_us = 4, // ? - .use_urgent_burst_bw = 1, // ? - .num_states = 5, - .do_urgent_latency_adjustment = false, - .urgent_latency_adjustment_fabric_clock_component_us = 0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, -}; - enum dcn301_clk_src_array_id { DCN301_CLK_SRC_PLL0, DCN301_CLK_SRC_PLL1, @@ -1480,8 +1303,6 @@ static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; -#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16))) -#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x)) static bool is_soc_bounding_box_valid(struct dc *dc) { @@ -1508,26 +1329,24 @@ static bool init_soc_bounding_box(struct dc *dc, loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; loaded_ip->max_num_dpp = pool->base.pipe_count; + DC_FP_START(); dcn20_patch_bounding_box(dc, loaded_bb); + DC_FP_END(); if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { struct bp_soc_bb_info bb_info = {0}; if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10; - - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10; - - if (bb_info.dram_sr_exit_latency_100ns > 0) - dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10; + DC_FP_START(); + dcn301_fpu_init_soc_bounding_box(bb_info); + DC_FP_END(); } } return true; } + static void set_wm_ranges( struct pp_smu_funcs *pp_smu, struct _vcs_dpi_soc_bounding_box_st *loaded_bb) @@ -1550,9 +1369,9 @@ static void set_wm_ranges( ranges.reader_wm_sets[i].wm_inst = i; ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? 
(loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0; - ranges.reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16; - + DC_FP_START(); + dcn301_fpu_set_wm_ranges(i, &ranges, loaded_bb); + DC_FP_END(); ranges.num_reader_wm_sets = i + 1; } @@ -1572,154 +1391,6 @@ static void set_wm_ranges( pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges); } -static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) -{ - struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool); - struct clk_limit_table *clk_table = &bw_params->clk_table; - struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; - unsigned int i, closest_clk_lvl; - int j; - - // Default clock levels are used for diags, which may lead to overclocking. - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator; - dcn3_01_ip.max_num_dpp = pool->base.pipe_count; - dcn3_01_soc.num_chans = bw_params->num_channels; - - ASSERT(clk_table->num_entries); - for (i = 0; i < clk_table->num_entries; i++) { - /* loop backwards*/ - for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) { - if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { - closest_clk_lvl = j; - break; - } - } - - clock_limits[i].state = i; - clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; - - clock_limits[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - clock_limits[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - clock_limits[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - clock_limits[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - clock_limits[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - clock_limits[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - clock_limits[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz; - } - for (i = 0; i < clk_table->num_entries; i++) - dcn3_01_soc.clock_limits[i] = clock_limits[i]; - if (clk_table->num_entries) { - dcn3_01_soc.num_states = clk_table->num_entries; - /* duplicate last level */ - dcn3_01_soc.clock_limits[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1]; - dcn3_01_soc.clock_limits[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states; - } - } - - dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - - dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); -} - -static void calculate_wm_set_for_vlevel( - int vlevel, - struct wm_range_table_entry *table_entry, - struct dcn_watermarks *wm_set, - struct display_mode_lib *dml, - display_e2e_pipe_params_st *pipes, - int pipe_cnt) -{ - double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; - - ASSERT(vlevel < dml->soc.num_states); - /* only pipe 0 is read for voltage and dcf/soc clocks */ - pipes[0].clks_cfg.voltage = vlevel; - pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; 
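The helper being deleted here, calculate_wm_set_for_vlevel(), is reintroduced unchanged in dcn301_fpu.c later in this patch. It follows a save/override/restore pattern on the shared DML soc struct, and converts DML's microsecond results into the nanosecond watermark fields. A condensed sketch of that pattern, using only names from the removed code:

	double cached_us = dml->soc.dram_clock_change_latency_us;	/* save */

	dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;	/* override per WM set */
	wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;	/* us -> ns */
	/* ... remaining get_wm_*() queries run against the overridden latency ... */
	dml->soc.dram_clock_change_latency_us = cached_us;	/* restore for the next set */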
- - dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; - dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; - dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; - - wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; - wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; - wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; - wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; - wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; - dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; - -} - -static void dcn301_calculate_wm_and_dlg( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt, - int vlevel_req) -{ - int i, pipe_idx; - int vlevel, vlevel_max; - struct wm_range_table_entry *table_entry; - struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; - - ASSERT(bw_params); - - vlevel_max = bw_params->clk_table.num_entries - 1; - - /* WM Set D */ - table_entry = &bw_params->wm_table.entries[WM_D]; - if (table_entry->wm_type == WM_TYPE_RETRAINING) - vlevel = 0; - else - vlevel = vlevel_max; - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, - &context->bw_ctx.dml, pipes, pipe_cnt); - /* WM Set C */ - table_entry = &bw_params->wm_table.entries[WM_C]; - vlevel = min(max(vlevel_req, 2), vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, - &context->bw_ctx.dml, pipes, pipe_cnt); - /* WM Set B */ - table_entry = &bw_params->wm_table.entries[WM_B]; - vlevel = min(max(vlevel_req, 1), vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, - &context->bw_ctx.dml, pipes, pipe_cnt); - - /* WM Set A */ - table_entry = &bw_params->wm_table.entries[WM_A]; - vlevel = min(vlevel_req, vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, - &context->bw_ctx.dml, pipes, pipe_cnt); - - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - - pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); - pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - - if (dc->config.forced_clocks) { - pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; - pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; - } - if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) - pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; - if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) - pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; - - pipe_idx++; - } - - dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); -} - static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, 
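The vlevel clamping in the removed dcn301_calculate_wm_and_dlg() (kept as-is in its new dcn301_fpu.c home) pins each watermark set to a voltage-level floor: set A tracks the requested level, B is raised to at least level 1, C to at least level 2, and D uses the top level unless the table entry is WM_TYPE_RETRAINING. A worked example with a hypothetical four-entry clock table and vlevel_req = 0:

	int vlevel_req = 0, vlevel_max = 3;	/* hypothetical inputs */
	int wm_a = min(vlevel_req, vlevel_max);	/* 0 */
	int wm_b = min(max(vlevel_req, 1), vlevel_max);	/* 1 */
	int wm_c = min(max(vlevel_req, 2), vlevel_max);	/* 2 */
	int wm_d = vlevel_max;	/* 3 (0 for WM_TYPE_RETRAINING) */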
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h index 17e4e91ff4b8..ae8672680cdd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h @@ -32,6 +32,9 @@ struct dc; struct resource_pool; struct _vcs_dpi_display_pipe_params_st; +extern struct _vcs_dpi_ip_params_st dcn3_01_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc; + struct dcn301_resource_pool { struct resource_pool base; }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 2ce6eae7535d..4a9b64023675 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -1344,6 +1344,20 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz; dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz; } + + // WA: patch strobe modes to compensate for DCN303 BW issue + if (dcn3_03_soc.num_chans <= 4) { + for (i = 0; i < dcn3_03_soc.num_states; i++) { + if (dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700) + break; + + if (dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) { + dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100; + dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100; + } + } + } + /* re-init DML with updated bb */ dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); if (dc->current_state) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c index 6bd7a0626665..de5e18c2a3ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c @@ -129,7 +129,7 @@ static void apg31_se_audio_setup( /* When running in "pair mode", pairs of audio channels have their own enable * this is for really old audio drivers */ - REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xF); + REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xFF); // REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, channels); /* Disable forced mem power off */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 9896adf67425..815481a3ef54 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -26,6 +26,7 @@ #include "reg_helper.h" #include "core_types.h" #include "dcn31_dccg.h" +#include "dal_asic_id.h" #define TO_DCN_DCCG(dccg)\ container_of(dccg, struct dcn_dccg, base) @@ -42,10 +43,58 @@ #define DC_LOGGER \ dccg->ctx->logger -void dccg31_set_dpstreamclk( - struct dccg *dccg, - enum hdmistreamclk_source src, - int otg_inst) +static void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (dccg->ref_dppclk && req_dppclk) { + int ref_dppclk = dccg->ref_dppclk; + int modulo, phase; + + // phase / modulo = dpp pipe clk / dpp global clk + modulo = 0xff; // use FF at the end + phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk; + + if (phase > 0xff) { + ASSERT(false); + phase = 0xff; + } + + REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, + DPPCLK0_DTO_PHASE, phase, + DPPCLK0_DTO_MODULO, modulo); + REG_UPDATE(DPPCLK_DTO_CTRL, + 
DPPCLK_DTO_ENABLE[dpp_inst], 1); + } else { + //DTO must be enabled to generate a 0Hz clock output + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) { + REG_UPDATE(DPPCLK_DTO_CTRL, + DPPCLK_DTO_ENABLE[dpp_inst], 1); + REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, + DPPCLK0_DTO_PHASE, 0, + DPPCLK0_DTO_MODULO, 1); + } else { + REG_UPDATE(DPPCLK_DTO_CTRL, + DPPCLK_DTO_ENABLE[dpp_inst], 0); + } + } + dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; +} + +static enum phyd32clk_clock_source get_phy_mux_symclk( + struct dcn_dccg *dccg_dcn, + enum phyd32clk_clock_source src) +{ + if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + if (src == PHYD32CLKC) + src = PHYD32CLKF; + if (src == PHYD32CLKD) + src = PHYD32CLKG; + } + return src; +} + +static void dccg31_enable_dpstreamclk(struct dccg *dccg, int otg_inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); @@ -53,19 +102,53 @@ void dccg31_set_dpstreamclk( switch (otg_inst) { case 0: REG_UPDATE(DPSTREAMCLK_CNTL, - DPSTREAMCLK_PIPE0_EN, (src == REFCLK) ? 0 : 1); + DPSTREAMCLK_PIPE0_EN, 1); + break; + case 1: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE1_EN, 1); + break; + case 2: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE2_EN, 1); + break; + case 3: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE3_EN, 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + DPSTREAMCLK_ROOT_GATE_DISABLE, 1); +} + +static void dccg31_disable_dpstreamclk(struct dccg *dccg, int otg_inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + DPSTREAMCLK_ROOT_GATE_DISABLE, 0); + + switch (otg_inst) { + case 0: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE0_EN, 0); break; case 1: REG_UPDATE(DPSTREAMCLK_CNTL, - DPSTREAMCLK_PIPE1_EN, (src == REFCLK) ? 0 : 1); + DPSTREAMCLK_PIPE1_EN, 0); break; case 2: REG_UPDATE(DPSTREAMCLK_CNTL, - DPSTREAMCLK_PIPE2_EN, (src == REFCLK) ? 0 : 1); + DPSTREAMCLK_PIPE2_EN, 0); break; case 3: REG_UPDATE(DPSTREAMCLK_CNTL, - DPSTREAMCLK_PIPE3_EN, (src == REFCLK) ? 
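The new dccg31_update_dpp_dto() above programs the DPP clock as a phase/modulo DTO, output = ref_dppclk * phase / modulo, with modulo fixed at 0xff and phase computed by ceiling division so the generated clock never undershoots the request; the same mechanism parks a gated pipe at 0 Hz by programming phase = 0 with modulo = 1 while the DTO stays enabled. A worked example with hypothetical clocks:

	int ref_dppclk = 1015;	/* MHz, hypothetical */
	int req_dppclk = 507;	/* MHz, hypothetical */
	int modulo = 0xff;	/* 255 */
	int phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
	/* phase = (255 * 507 + 1014) / 1015 = 128 */
	/* output = 1015 * 128 / 255 ~= 509.5 MHz >= the 507 MHz requested */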
0 : 1); + DPSTREAMCLK_PIPE3_EN, 0); break; default: BREAK_TO_DEBUGGER(); @@ -73,6 +156,17 @@ void dccg31_set_dpstreamclk( } } +void dccg31_set_dpstreamclk( + struct dccg *dccg, + enum hdmistreamclk_source src, + int otg_inst) +{ + if (src == REFCLK) + dccg31_disable_dpstreamclk(dccg, otg_inst); + else + dccg31_enable_dpstreamclk(dccg, otg_inst); +} + void dccg31_enable_symclk32_se( struct dccg *dccg, int hpo_se_inst, @@ -80,24 +174,38 @@ void dccg31_enable_symclk32_se( { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk); + /* select one of the PHYD32CLKs as the source for symclk32_se */ switch (hpo_se_inst) { case 0: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE0_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, phyd32clk, SYMCLK32_SE0_EN, 1); break; case 1: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE1_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, phyd32clk, SYMCLK32_SE1_EN, 1); break; case 2: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE2_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, phyd32clk, SYMCLK32_SE2_EN, 1); break; case 3: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE3_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE3_SRC_SEL, phyd32clk, SYMCLK32_SE3_EN, 1); @@ -120,21 +228,33 @@ void dccg31_disable_symclk32_se( REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, 0, SYMCLK32_SE0_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE0_GATE_DISABLE, 0); break; case 1: REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, 0, SYMCLK32_SE1_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE1_GATE_DISABLE, 0); break; case 2: REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, 0, SYMCLK32_SE2_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE2_GATE_DISABLE, 0); break; case 3: REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE3_SRC_SEL, 0, SYMCLK32_SE3_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE3_GATE_DISABLE, 0); break; default: BREAK_TO_DEBUGGER(); @@ -149,14 +269,22 @@ void dccg31_enable_symclk32_le( { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk); + /* select one of the PHYD32CLKs as the source for symclk32_le */ switch (hpo_le_inst) { case 0: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_LE0_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, phyd32clk, SYMCLK32_LE0_EN, 1); break; case 1: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_LE1_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, phyd32clk, SYMCLK32_LE1_EN, 1); @@ -179,11 +307,87 @@ void dccg31_disable_symclk32_le( REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, 0, SYMCLK32_LE0_EN, 0); + if 
(dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_LE0_GATE_DISABLE, 0); break; case 1: REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, 0, SYMCLK32_LE1_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_LE1_GATE_DISABLE, 0); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg31_disable_dscclk(struct dccg *dccg, int inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + return; + //DTO must be enabled to generate a 0 Hz clock output + switch (inst) { + case 0: + REG_UPDATE(DSCCLK_DTO_CTRL, + DSCCLK0_DTO_ENABLE, 1); + REG_UPDATE_2(DSCCLK0_DTO_PARAM, + DSCCLK0_DTO_PHASE, 0, + DSCCLK0_DTO_MODULO, 1); + break; + case 1: + REG_UPDATE(DSCCLK_DTO_CTRL, + DSCCLK1_DTO_ENABLE, 1); + REG_UPDATE_2(DSCCLK1_DTO_PARAM, + DSCCLK1_DTO_PHASE, 0, + DSCCLK1_DTO_MODULO, 1); + break; + case 2: + REG_UPDATE(DSCCLK_DTO_CTRL, + DSCCLK2_DTO_ENABLE, 1); + REG_UPDATE_2(DSCCLK2_DTO_PARAM, + DSCCLK2_DTO_PHASE, 0, + DSCCLK2_DTO_MODULO, 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg31_enable_dscclk(struct dccg *dccg, int inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + return; + //Disable DTO + switch (inst) { + case 0: + REG_UPDATE_2(DSCCLK0_DTO_PARAM, + DSCCLK0_DTO_PHASE, 0, + DSCCLK0_DTO_MODULO, 0); + REG_UPDATE(DSCCLK_DTO_CTRL, + DSCCLK0_DTO_ENABLE, 0); + break; + case 1: + REG_UPDATE_2(DSCCLK1_DTO_PARAM, + DSCCLK1_DTO_PHASE, 0, + DSCCLK1_DTO_MODULO, 0); + REG_UPDATE(DSCCLK_DTO_CTRL, + DSCCLK1_DTO_ENABLE, 0); + break; + case 2: + REG_UPDATE_2(DSCCLK2_DTO_PARAM, + DSCCLK2_DTO_PHASE, 0, + DSCCLK2_DTO_MODULO, 0); + REG_UPDATE(DSCCLK_DTO_CTRL, + DSCCLK2_DTO_ENABLE, 0); break; default: BREAK_TO_DEBUGGER(); @@ -398,10 +602,23 @@ void dccg31_init(struct dccg *dccg) dccg31_disable_symclk32_se(dccg, 1); dccg31_disable_symclk32_se(dccg, 2); dccg31_disable_symclk32_se(dccg, 3); + + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) { + dccg31_disable_symclk32_le(dccg, 0); + dccg31_disable_symclk32_le(dccg, 1); + } + + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + dccg31_disable_dpstreamclk(dccg, 0); + dccg31_disable_dpstreamclk(dccg, 1); + dccg31_disable_dpstreamclk(dccg, 2); + dccg31_disable_dpstreamclk(dccg, 3); + } + } static const struct dccg_funcs dccg31_funcs = { - .update_dpp_dto = dccg2_update_dpp_dto, + .update_dpp_dto = dccg31_update_dpp_dto, .get_dccg_ref_freq = dccg31_get_dccg_ref_freq, .dccg_init = dccg31_init, .set_dpstreamclk = dccg31_set_dpstreamclk, @@ -413,6 +630,8 @@ static const struct dccg_funcs dccg31_funcs = { .set_dtbclk_dto = dccg31_set_dtbclk_dto, .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto, .set_dispclk_change_mode = dccg31_set_dispclk_change_mode, + .disable_dsc = dccg31_disable_dscclk, + .enable_dsc = dccg31_enable_dscclk, }; struct dccg *dccg31_create( diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h index 1e5aabcb7799..a013a32bbaf7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h @@ -61,7 +61,13 @@ SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\ SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\ SR(DCCG_AUDIO_DTO_SOURCE),\ - SR(DENTIST_DISPCLK_CNTL) + SR(DENTIST_DISPCLK_CNTL),\ + 
SR(DSCCLK0_DTO_PARAM),\ + SR(DSCCLK1_DTO_PARAM),\ + SR(DSCCLK2_DTO_PARAM),\ + SR(DSCCLK_DTO_CTRL),\ + SR(DCCG_GATE_DISABLE_CNTL3),\ + SR(HDMISTREAMCLK0_DTO_PARAM) #define DCCG_MASK_SH_LIST_DCN31(mask_sh) \ @@ -119,7 +125,26 @@ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, DIV, 3, mask_sh),\ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\ - DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh) + DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh), \ + DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\ + DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\ + DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\ + DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_MODULO, mask_sh),\ + DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, mask_sh),\ + DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_MODULO, mask_sh),\ + DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\ + DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\ + DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\ + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh) struct dccg *dccg31_create( @@ -130,6 +155,11 @@ struct dccg *dccg31_create( void dccg31_init(struct dccg *dccg); +void dccg31_set_dpstreamclk( + struct dccg *dccg, + enum hdmistreamclk_source src, + int otg_inst); + void dccg31_enable_symclk32_se( struct dccg *dccg, int hpo_se_inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index 8f8eee475144..ee6f13bef377 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -431,7 +431,7 @@ void dcn31_link_encoder_enable_dp_output( if (link) { dpia_control.dpia_id = link->ddc_hw_inst; - dpia_control.fec_rdy = link->fec_state == dc_link_fec_ready ? 1 : 0; + dpia_control.fec_rdy = dc_link_should_enable_fec(link); } else { DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__); BREAK_TO_DEBUGGER(); @@ -476,7 +476,7 @@ void dcn31_link_encoder_enable_dp_mst_output( if (link) { dpia_control.dpia_id = link->ddc_hw_inst; - dpia_control.fec_rdy = link->fec_state == dc_link_fec_ready ? 
1 : 0; + dpia_control.fec_rdy = dc_link_should_enable_fec(link); } else { DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__); BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 18e33ef3d217..5dd1ce9ddb53 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -49,6 +49,8 @@ #include "inc/link_dpcd.h" #include "dcn10/dcn10_hw_sequencer.h" #include "inc/link_enc_cfg.h" +#include "dcn30/dcn30_vpg.h" +#include "dce/dce_i2c_hw.h" #define DC_LOGGER_INIT(logger) @@ -64,6 +66,45 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name +static void enable_memory_low_power(struct dc *dc) +{ + struct dce_hwseq *hws = dc->hwseq; + int i; + + if (dc->debug.enable_mem_low_power.bits.dmcu) { + // Force ERAM to shutdown if DMCU is not enabled + if (dc->debug.disable_dmcu || dc->config.disable_dmcu) { + REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3); + } + } + + // Set default OPTC memory power states + if (dc->debug.enable_mem_low_power.bits.optc) { + // Shutdown when unassigned and light sleep in VBLANK + REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); + } + + if (dc->debug.enable_mem_low_power.bits.vga) { + // Power down VGA memory + REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); + } + + if (dc->debug.enable_mem_low_power.bits.mpc) + dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc); + + + if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) { + // Power down VPGs + for (i = 0; i < dc->res_pool->stream_enc_count; i++) + dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); +#if defined(CONFIG_DRM_AMD_DC_DCN) + for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) + dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); +#endif + } + +} + void dcn31_init_hw(struct dc *dc) { struct abm **abms = dc->res_pool->multiple_abms; @@ -71,16 +112,11 @@ void dcn31_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; - int i; - int edp_num; + int i, j; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); - // Initialize the dccg - if (res_pool->dccg->funcs->dccg_init) - res_pool->dccg->funcs->dccg_init(res_pool->dccg); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { REG_WRITE(REFCLK_CNTL, 0); @@ -107,24 +143,11 @@ void dcn31_init_hw(struct dc *dc) hws->funcs.bios_golden_init(dc); hws->funcs.disable_vga(dc->hwseq); } + // Initialize the dccg + if (res_pool->dccg->funcs->dccg_init) + res_pool->dccg->funcs->dccg_init(res_pool->dccg); - if (dc->debug.enable_mem_low_power.bits.dmcu) { - // Force ERAM to shutdown if DMCU is not enabled - if (dc->debug.disable_dmcu || dc->config.disable_dmcu) { - REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3); - } - } - - // Set default OPTC memory power states - if (dc->debug.enable_mem_low_power.bits.optc) { - // Shutdown when unassigned and light sleep in VBLANK - REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); - } - - if (dc->debug.enable_mem_low_power.bits.vga) { - // Power down VGA memory - REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); - } + 
enable_memory_low_power(dc); if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = @@ -179,9 +202,40 @@ void dcn31_init_hw(struct dc *dc) dmub_enable_outbox_notification(dc); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) - blank_all_dp_displays(dc, true); - + if (dc->config.power_down_display_on_boot) { + uint8_t dpcd_power_state = '\0'; + enum dc_status status = DC_ERROR_UNEXPECTED; + + for (i = 0; i < dc->link_count; i++) { + if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) + continue; + + /* if any of the displays are lit up turn them off */ + status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { + /* blank dp stream before power off receiver*/ + if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY && + dc->links[i]->link_enc->funcs->get_dig_frontend) { + unsigned int fe; + + fe = dc->links[i]->link_enc->funcs->get_dig_frontend( + dc->links[i]->link_enc); + if (fe == ENGINE_ID_UNKNOWN) + continue; + + for (j = 0; j < dc->res_pool->stream_enc_count; j++) { + if (fe == dc->res_pool->stream_enc[j]->id) { + dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], + dc->res_pool->stream_enc[j]); + break; + } + } + } + dp_receiver_power_ctrl(dc->links[i], false); + } + } + } /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -196,48 +250,6 @@ void dcn31_init_hw(struct dc *dc) !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); } - /* In headless boot cases, DIG may be turned - * on which causes HW/SW discrepancies. - * To avoid this, power down hardware on boot - * if DIG is turned on and seamless boot not enabled - */ - if (dc->config.power_down_display_on_boot) { - struct dc_link *edp_links[MAX_NUM_EDP]; - struct dc_link *edp_link; - bool power_down = false; - - get_edp_links(dc, edp_links, &edp_num); - if (edp_num) { - for (i = 0; i < edp_num; i++) { - edp_link = edp_links[i]; - if (edp_link->link_enc->funcs->is_dig_enabled && - edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && - dc->hwss.edp_backlight_control && - dc->hwss.power_down && - dc->hwss.edp_power_control) { - dc->hwss.edp_backlight_control(edp_link, false); - dc->hwss.power_down(dc); - dc->hwss.edp_power_control(edp_link, false); - power_down = true; - } - } - } - if (!power_down) { - for (i = 0; i < dc->link_count; i++) { - struct dc_link *link = dc->links[i]; - - if (link->ep_type == DISPLAY_ENDPOINT_PHY && - link->link_enc->funcs->is_dig_enabled && - link->link_enc->funcs->is_dig_enabled(link->link_enc) && - dc->hwss.power_down) { - dc->hwss.power_down(dc); - break; - } - - } - } - } - for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i]; @@ -259,6 +271,13 @@ void dcn31_init_hw(struct dc *dc) /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ REG_WRITE(DIO_MEM_PWR_CTRL, 0); + // Set i2c to light sleep until engine is setup + if (dc->debug.enable_mem_low_power.bits.i2c) + REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1); + + if (hws->funcs.setup_hpo_hw_control) + hws->funcs.setup_hpo_hw_control(hws, false); + if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); @@ -300,6 +319,12 @@ void dcn31_dsc_pg_control( if (hws->ctx->dc->debug.disable_dsc_power_gate) return; + if 
(hws->ctx->dc->debug.root_clock_optimization.bits.dsc && + hws->ctx->dc->res_pool->dccg->funcs->enable_dsc && + power_on) + hws->ctx->dc->res_pool->dccg->funcs->enable_dsc( + hws->ctx->dc->res_pool->dccg, dsc_inst); + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); @@ -336,6 +361,13 @@ void dcn31_dsc_pg_control( if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); + + if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc) { + if (hws->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on) + hws->ctx->dc->res_pool->dccg->funcs->disable_dsc( + hws->ctx->dc->res_pool->dccg, dsc_inst); + } + } @@ -579,3 +611,9 @@ void dcn31_reset_hw_ctx_wrap( /* New dc_state in the process of being applied to hardware. */ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_TRANSIENT; } + +void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable) +{ + if (hws->ctx->dc->debug.hpo_optimization) + REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h index 7ae45dd202d9..edfc01d6ad73 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h @@ -54,5 +54,6 @@ void dcn31_reset_hw_ctx_wrap( bool dcn31_is_abm_supported(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); void dcn31_init_pipes(struct dc *dc, struct dc_state *context); +void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); #endif /* __DC_HWSS_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index c6a737781ad1..05335a8c3c2d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -137,6 +137,7 @@ static const struct hwseq_private_funcs dcn31_private_funcs = { .dccg_init = dcn20_dccg_init, .set_blend_lut = dcn30_set_blend_lut, .set_shaper_3dlut = dcn20_set_shaper_3dlut, + .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, }; void dcn31_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 7cb7604a35eb..18896294ae12 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -101,8 +101,6 @@ #include "link_enc_cfg.h" #define DC_LOGGER_INIT(logger) -#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16))) -#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x)) #define DCN3_1_DEFAULT_DET_SIZE 384 @@ -222,8 +220,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = { .num_states = 5, .sr_exit_time_us = 9.0, .sr_enter_plus_exit_time_us = 11.0, - .sr_exit_z8_time_us = 402.0, - .sr_enter_plus_exit_z8_time_us = 520.0, + .sr_exit_z8_time_us = 442.0, + .sr_enter_plus_exit_z8_time_us = 560.0, .writeback_latency_us = 12.0, .dram_channel_width_bytes = 4, .round_trip_ping_latency_dcfclk_cycles = 106, @@ -862,7 +860,8 @@ static const struct dccg_mask dccg_mask = { SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING) + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_HW_CONTROL) static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN31_REG_LIST() @@ -899,7 +898,9 @@ static const struct 
dce_hwseq_registers hwseq_reg = { HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ - HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh) + HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ + HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) @@ -998,7 +999,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, - .max_downscale_src_width = 3840,/*upto 4K*/ + .max_downscale_src_width = 4096,/*upto true 4K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = false, @@ -1312,10 +1313,6 @@ static struct vpg *dcn31_vpg_create( &vpg_shift, &vpg_mask); - // Will re-enable hw block when we enable stream - // Check for enabled stream before powering down? - vpg31_powerdown(&vpg31->base); - return &vpg31->base; } @@ -1383,6 +1380,12 @@ static struct stream_encoder *dcn31_stream_encoder_create( return NULL; } + if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && + ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + if ((eng_id == ENGINE_ID_DIGC) || (eng_id == ENGINE_ID_DIGD)) + eng_id = eng_id + 3; // For B0 only. C->F, D->G. + } + dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], @@ -1782,6 +1785,13 @@ static int dcn31_populate_dml_pipes_from_context( pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; + /* + * Immediate flip can be set dynamically after enabling the plane. + * We need to require support for immediate flip or underflow can be + * intermittently experienced depending on peak b/w requirements. 
+ */ + pipes[pipe_cnt].pipe.src.immediate_flip = true; + pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; pipes[pipe_cnt].pipe.src.gpuvm = true; pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; @@ -1824,7 +1834,7 @@ static int dcn31_populate_dml_pipes_from_context( return pipe_cnt; } -static void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context) +void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context) { if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) { context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us; @@ -1968,7 +1978,7 @@ static void dcn31_calculate_wm_and_dlg_fp( dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); } -static void dcn31_calculate_wm_and_dlg( +void dcn31_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt, @@ -2450,6 +2460,8 @@ static bool dcn31_resource_construct( dc->cap_funcs = cap_funcs; + dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp; + DC_FP_END(); return true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h index 93571c976996..416fe7a721d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h @@ -35,6 +35,16 @@ struct dcn31_resource_pool { struct resource_pool base; }; +bool dcn31_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate); +void dcn31_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel); +void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context); + struct resource_pool *dcn31_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h index 43f33e186088..511f9e1159c7 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -35,6 +35,8 @@ struct cp_psp_stream_config { uint8_t link_enc_idx; uint8_t stream_enc_idx; uint8_t phy_idx; + uint8_t dio_output_idx; + uint8_t dio_output_type; uint8_t assr_enabled; uint8_t mst_enabled; uint8_t dp2_enabled; diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 169a4e68f86e..eee6672bd32d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -70,6 +70,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags) @@ -83,7 +85,9 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_rcflags) 
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags) endif CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags) @@ -99,6 +103,8 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o +DML += dcn301/dcn301_fpu.o +DML += dsc/rc_calc_fpu.o endif AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML)) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index e3d9f1decdfc..f47d82da115c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -3576,16 +3576,9 @@ static double TruncToValidBPP( MinDSCBPP = 8; MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16; } else { - if (Output == dm_hdmi) { - NonDSCBPP0 = 24; - NonDSCBPP1 = 24; - NonDSCBPP2 = 24; - } - else { - NonDSCBPP0 = 16; - NonDSCBPP1 = 20; - NonDSCBPP2 = 24; - } + NonDSCBPP0 = 16; + NonDSCBPP1 = 20; + NonDSCBPP2 = 24; if (Format == dm_n422) { MinDSCBPP = 7; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c new file mode 100644 index 000000000000..94c32832a0e7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c @@ -0,0 +1,390 @@ +/* + * Copyright 2019-2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "resource.h" +#include "clk_mgr.h" +#include "dcn20/dcn20_resource.h" +#include "dcn301/dcn301_resource.h" + +#include "dml/dcn20/dcn20_fpu.h" +#include "dcn301_fpu.h" + +#define TO_DCN301_RES_POOL(pool)\ + container_of(pool, struct dcn301_resource_pool, base) + +/* Based on: //vidip/dc/dcn3/doc/architecture/DCN3x_Display_Mode.xlsm#83 */ +struct _vcs_dpi_ip_params_st dcn3_01_ip = { + .odm_capable = 1, + .gpuvm_enable = 1, + .hostvm_enable = 1, + .gpuvm_max_page_table_levels = 1, + .hostvm_max_page_table_levels = 2, + .hostvm_cached_page_table_levels = 0, + .pte_group_size_bytes = 2048, + .num_dsc = 3, + .rob_buffer_size_kbytes = 184, + .det_buffer_size_kbytes = 184, + .dpte_buffer_size_in_pte_reqs_luma = 64, + .dpte_buffer_size_in_pte_reqs_chroma = 32, + .pde_proc_buffer_size_64k_reqs = 48, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .meta_chunk_size_kbytes = 2, + .writeback_chunk_size_kbytes = 8, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, // ? + .line_buffer_fixed_bpp = 48, // ? + .dcc_supported = true, + .writeback_interface_buffer_size_kbytes = 90, + .writeback_line_buffer_buffer_size = 656640, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 1, + .writeback_max_vscl_taps = 1, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 14643, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 4, + .max_num_dpp = 4, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 6, + .max_vscl_ratio = 6, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.11, + .min_vblank_lines = 32, + .dppclk_delay_subtotal = 46, + .dynamic_metadata_vm_enabled = true, + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 27, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 119, + .dcfclk_cstate_latency = 5.2, // SRExitTime + .max_inter_dcn_tile_repeaters = 8, + .max_num_hdmi_frl_outputs = 0, + .odm_combine_4to1_supported = true, + + .xfc_supported = false, + .xfc_fill_bw_overhead_percent = 10.0, + .xfc_fill_constant_bytes = 0, + .gfx7_compat_tiling_supported = 0, + .number_of_cursors = 1, +}; + +struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc = { + .clock_limits = { + { + .state = 0, + .dram_speed_mts = 2400.0, + .fabricclk_mhz = 600, + .socclk_mhz = 278.0, + .dcfclk_mhz = 400.0, + .dscclk_mhz = 206.0, + .dppclk_mhz = 1015.0, + .dispclk_mhz = 1015.0, + .phyclk_mhz = 600.0, + }, + + { + .state = 1, + .dram_speed_mts = 2400.0, + .fabricclk_mhz = 688, + .socclk_mhz = 278.0, + .dcfclk_mhz = 400.0, + .dscclk_mhz = 206.0, + .dppclk_mhz = 1015.0, + .dispclk_mhz = 1015.0, + .phyclk_mhz = 600.0, + }, + + { + .state = 2, + .dram_speed_mts = 4267.0, + .fabricclk_mhz = 1067, + .socclk_mhz = 278.0, + .dcfclk_mhz = 608.0, + .dscclk_mhz = 296.0, + .dppclk_mhz = 1015.0, + .dispclk_mhz = 1015.0, + .phyclk_mhz = 810.0, + }, + + { + .state = 3, + .dram_speed_mts = 4267.0, + 
.fabricclk_mhz = 1067, + .socclk_mhz = 715.0, + .dcfclk_mhz = 676.0, + .dscclk_mhz = 338.0, + .dppclk_mhz = 1015.0, + .dispclk_mhz = 1015.0, + .phyclk_mhz = 810.0, + }, + + { + .state = 4, + .dram_speed_mts = 4267.0, + .fabricclk_mhz = 1067, + .socclk_mhz = 953.0, + .dcfclk_mhz = 810.0, + .dscclk_mhz = 338.0, + .dppclk_mhz = 1015.0, + .dispclk_mhz = 1015.0, + .phyclk_mhz = 810.0, + }, + }, + + .sr_exit_time_us = 9.0, + .sr_enter_plus_exit_time_us = 11.0, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 60.0, + .max_avg_dram_bw_use_normal_percent = 60.0, + .writeback_latency_us = 12.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 4, + .fabric_datapath_to_dcn_data_return_bytes = 32, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 191, + .urgent_out_of_order_return_per_channel_bytes = 4096, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 4, + .gpuvm_min_page_size_bytes = 4096, + .hostvm_min_page_size_bytes = 4096, + .dram_clock_change_latency_us = 23.84, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3550, + .xfc_bus_transport_time_us = 20, // ? + .xfc_xbuf_latency_tolerance_us = 4, // ? + .use_urgent_burst_bw = 1, // ? 
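This bounding box is only a baseline: dcn301_update_bw_bounding_box() below rewrites the clock_limits entries from the clock table supplied through bw_params and then duplicates the final level, so indexing one state past num_states - 1 stays valid. A hedged sketch of that duplication step, using the names from the function below (n stands for clk_table->num_entries):

	dcn3_01_soc.num_states = n;
	dcn3_01_soc.clock_limits[n] = dcn3_01_soc.clock_limits[n - 1];	/* duplicate last level */
	dcn3_01_soc.clock_limits[n].state = n;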
+ .num_states = 5, + .do_urgent_latency_adjustment = false, + .urgent_latency_adjustment_fabric_clock_component_us = 0, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, +}; + +static void calculate_wm_set_for_vlevel(int vlevel, + struct wm_range_table_entry *table_entry, + struct dcn_watermarks *wm_set, + struct display_mode_lib *dml, + display_e2e_pipe_params_st *pipes, + int pipe_cnt) +{ + double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; + + ASSERT(vlevel < dml->soc.num_states); + /* only pipe 0 is read for voltage and dcf/soc clocks */ + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; + + dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; + dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; + dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; + + wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; + wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; + dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; + +} + +void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool); + struct clk_limit_table *clk_table = &bw_params->clk_table; + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; + unsigned int i, closest_clk_lvl; + int j; + + dc_assert_fp_enabled(); + + /* Default clock levels are used for diags, which may lead to overclocking. 
*/ + if (!IS_DIAG_DC(dc->ctx->dce_environment)) { + dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator; + dcn3_01_ip.max_num_dpp = pool->base.pipe_count; + dcn3_01_soc.num_chans = bw_params->num_channels; + + ASSERT(clk_table->num_entries); + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; + } + } + + clock_limits[i].state = i; + clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; + + clock_limits[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + clock_limits[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + clock_limits[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + clock_limits[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + clock_limits[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + clock_limits[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + clock_limits[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + + for (i = 0; i < clk_table->num_entries; i++) + dcn3_01_soc.clock_limits[i] = clock_limits[i]; + + if (clk_table->num_entries) { + dcn3_01_soc.num_states = clk_table->num_entries; + /* duplicate last level */ + dcn3_01_soc.clock_limits[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1]; + dcn3_01_soc.clock_limits[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states; + } + } + + dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + + dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); +} + +void dcn301_fpu_set_wm_ranges(int i, + struct pp_smu_wm_range_sets *ranges, + struct _vcs_dpi_soc_bounding_box_st *loaded_bb) +{ + dc_assert_fp_enabled(); + + ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? 
(loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0; + ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16; +} + +void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info) +{ + dc_assert_fp_enabled(); + + if (bb_info.dram_clock_change_latency_100ns > 0) + dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10; + + if (bb_info.dram_sr_enter_exit_latency_100ns > 0) + dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10; + + if (bb_info.dram_sr_exit_latency_100ns > 0) + dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10; +} + +void dcn301_calculate_wm_and_dlg(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel_req) +{ + int i, pipe_idx; + int vlevel, vlevel_max; + struct wm_range_table_entry *table_entry; + struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; + + ASSERT(bw_params); + dc_assert_fp_enabled(); + + vlevel_max = bw_params->clk_table.num_entries - 1; + + /* WM Set D */ + table_entry = &bw_params->wm_table.entries[WM_D]; + if (table_entry->wm_type == WM_TYPE_RETRAINING) + vlevel = 0; + else + vlevel = vlevel_max; + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set C */ + table_entry = &bw_params->wm_table.entries[WM_C]; + vlevel = min(max(vlevel_req, 2), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set B */ + table_entry = &bw_params->wm_table.entries[WM_B]; + vlevel = min(max(vlevel_req, 1), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, + &context->bw_ctx.dml, pipes, pipe_cnt); + + /* WM Set A */ + table_entry = &bw_params->wm_table.entries[WM_A]; + vlevel = min(vlevel_req, vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, + &context->bw_ctx.dml, pipes, pipe_cnt); + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); + pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + if (dc->config.forced_clocks) { + pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + pipe_idx++; + } + + dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h new file mode 100644 index 000000000000..fc7065d17842 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h @@ -0,0 +1,42 @@ +/* + * Copyright 2019-2021 Advanced Micro Devices, Inc. 
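The dcn301_fpu_set_wm_ranges() helper above bands the SMU watermark sets by DRAM speed: each set's ceiling is its level's dram_speed_mts / 16, and each subsequent set starts 1 MHz above the previous ceiling. Worked numbers for a hypothetical three-level table, assuming integer truncation on assignment:

	/* dram_speed_mts per level: 1600, 2400, 4267 (hypothetical) */
	/* set 0: min_fill_clk_mhz = 0,   max_fill_clk_mhz = 1600 / 16 = 100 */
	/* set 1: min_fill_clk_mhz = 101, max_fill_clk_mhz = 2400 / 16 = 150 */
	/* set 2: min_fill_clk_mhz = 151, max_fill_clk_mhz = 4267 / 16 = 266 */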
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCN301_FPU_H__ +#define __DCN301_FPU_H__ + +void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); + +void dcn301_fpu_set_wm_ranges(int i, + struct pp_smu_wm_range_sets *ranges, + struct _vcs_dpi_soc_bounding_box_st *loaded_bb); + +void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info); + +void dcn301_calculate_wm_and_dlg(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel_req); +#endif /* __DCN301_FPU_H__*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index ce55c9caf9a2..7e937bdcea00 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -3892,15 +3892,11 @@ static double TruncToValidBPP( MinDSCBPP = 8; MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16; } else { - if (Output == dm_hdmi) { - NonDSCBPP0 = 24; - NonDSCBPP1 = 24; - NonDSCBPP2 = 24; - } else { - NonDSCBPP0 = 16; - NonDSCBPP1 = 20; - NonDSCBPP2 = 24; - } + + NonDSCBPP0 = 16; + NonDSCBPP1 = 20; + NonDSCBPP2 = 24; + if (Format == dm_n422) { MinDSCBPP = 7; MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0; @@ -5398,9 +5394,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->MaximumReadBandwidthWithPrefetch = v->MaximumReadBandwidthWithPrefetch - + dml_max4( - v->VActivePixelBandwidth[i][j][k], - v->VActiveCursorBandwidth[i][j][k] + + dml_max3( + v->VActivePixelBandwidth[i][j][k] + + v->VActiveCursorBandwidth[i][j][k] + v->NoOfDPP[i][j][k] * (v->meta_row_bandwidth[i][j][k] + v->dpte_row_bandwidth[i][j][k]), diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h index e5fac9f4181d..e5fac9f4181d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c new file mode 100644 index 000000000000..3ee858f311d1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c @@ -0,0 +1,291 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "rc_calc_fpu.h" + +#include "qp_tables.h" +#include "amdgpu_dm/dc_fpu.h" + +#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min) + +#define MODE_SELECT(val444, val422, val420) \ + (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? (val422) : (val420)) + + +#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \ + table = qp_table_##mode##_##bpc##bpc_##max; \ + table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \ + break + +static int median3(int a, int b, int c) +{ + if (a > b) + swap(a, b); + if (b > c) + swap(b, c); + if (a > b) + swap(b, c); + + return b; +} + +static double dsc_roundf(double num) +{ + if (num < 0.0) + num = num - 0.5; + else + num = num + 0.5; + + return (int)(num); +} + +static double dsc_ceil(double num) +{ + double retval = (int)num; + + if (retval != num && num > 0) + retval = num + 1; + + return (int)retval; +} + +static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, + enum max_min max_min, float bpp) +{ + int mode = MODE_SELECT(444, 422, 420); + int sel = table_hash(mode, bpc, max_min); + int table_size = 0; + int index; + const struct qp_entry *table = 0L; + + // alias enum + enum { min = DAL_MM_MIN, max = DAL_MM_MAX }; + switch (sel) { + TABLE_CASE(444, 8, max); + TABLE_CASE(444, 8, min); + TABLE_CASE(444, 10, max); + TABLE_CASE(444, 10, min); + TABLE_CASE(444, 12, max); + TABLE_CASE(444, 12, min); + TABLE_CASE(422, 8, max); + TABLE_CASE(422, 8, min); + TABLE_CASE(422, 10, max); + TABLE_CASE(422, 10, min); + TABLE_CASE(422, 12, max); + TABLE_CASE(422, 12, min); + TABLE_CASE(420, 8, max); + TABLE_CASE(420, 8, min); + TABLE_CASE(420, 10, max); + TABLE_CASE(420, 10, min); + TABLE_CASE(420, 12, max); + TABLE_CASE(420, 12, min); + } + + if (table == 0) + return; + + index = (bpp - table[0].bpp) * 2; + + /* requested size is bigger than the table */ + if (index >= table_size) { + dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n"); + return; + } + + memcpy(qps, table[index].qps, sizeof(qp_set)); +} + +static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) +{ + int *p = ofs; + + if (mode == CM_444 || mode == CM_RGB) { + *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? 
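/* Each nested ternary in get_ofs_set() encodes a piecewise-linear ramp:
 * fixed offsets at the bpp breakpoints (6, 8, 12 and 15 for 444/RGB) with
 * dsc_roundf() interpolating linearly between them, one offset per
 * rate-control range.
 */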
(0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); + *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); + *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); + *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); + *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); + *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0)))); + *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0)))); + *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0)))); + *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0)))); + *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0)))); + *p++ = -10; + *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0)))); + *p++ = -12; + *p++ = -12; + *p++ = -12; + } else if (mode == CM_422) { + *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0)))); + *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0)))); + *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0)))); + *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0)))); + *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0)))); + *p++ = -10; + *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1)))); + *p++ = -12; + *p++ = -12; + *p++ = -12; + } else { + *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0)))); + *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0)))); + *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0)))); + *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0)))); + *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? 
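/* This native-420 branch reuses the 422 offset pattern with its
 * breakpoints moved two bpp lower (6/8 instead of 8/10, and 4/5 for the
 * final interpolated entry).
 */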
(-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0)))); + *p++ = -10; + *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0)))); + *p++ = -12; + *p++ = -12; + *p++ = -12; + } +} + +void _do_calc_rc_params(struct rc_params *rc, + enum colour_mode cm, + enum bits_per_comp bpc, + u16 drm_bpp, + bool is_navite_422_or_420, + int slice_width, + int slice_height, + int minor_version) +{ + float bpp; + float bpp_group; + float initial_xmit_delay_factor; + int padding_pixels; + int i; + + dc_assert_fp_enabled(); + + bpp = ((float)drm_bpp / 16.0); + /* in native_422 or native_420 modes, the bits_per_pixel is double the + * target bpp (the latter is what calc_rc_params expects) + */ + if (is_navite_422_or_420) + bpp /= 2.0; + + rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + + bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0); + + switch (cm) { + case CM_420: + rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? (6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584))))); + rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group))); + rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group))); + break; + case CM_422: + rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584)))); + rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group))); + rc->second_line_bpg_offset = 0; + break; + case CM_444: + case CM_RGB: + rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2))))); + rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group))); + rc->second_line_bpg_offset = 0; + break; + } + + initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0; + rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor); + + if (cm == CM_422 || cm == CM_420) + slice_width /= 2; + + padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0; + if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) { + if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1) + rc->initial_xmit_delay++; + } + + rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + rc->flatness_det_thresh = 2 << (bpc - 8); + + get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp); + get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp); + if (cm == CM_444 && minor_version == 1) { + for (i = 0; i < QP_SET_SIZE; ++i) { + rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0; + rc->qp_max[i] = rc->qp_max[i] > 0 ? 
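/* DSC minor version 1 in 444 modes runs one QP step lower than the table
 * defaults, clamped at zero; the same -1 bias is applied above to the
 * flatness QPs and the rc_quant_incr limits.
 */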
rc->qp_max[i] - 1 : 0; + } + } + get_ofs_set(rc->ofs, cm, bpp); + + /* fixed parameters */ + rc->rc_model_size = 8192; + rc->rc_edge_factor = 6; + rc->rc_tgt_offset_hi = 3; + rc->rc_tgt_offset_lo = 3; + + rc->rc_buf_thresh[0] = 896; + rc->rc_buf_thresh[1] = 1792; + rc->rc_buf_thresh[2] = 2688; + rc->rc_buf_thresh[3] = 3584; + rc->rc_buf_thresh[4] = 4480; + rc->rc_buf_thresh[5] = 5376; + rc->rc_buf_thresh[6] = 6272; + rc->rc_buf_thresh[7] = 6720; + rc->rc_buf_thresh[8] = 7168; + rc->rc_buf_thresh[9] = 7616; + rc->rc_buf_thresh[10] = 7744; + rc->rc_buf_thresh[11] = 7872; + rc->rc_buf_thresh[12] = 8000; + rc->rc_buf_thresh[13] = 8064; +} + +u32 _do_bytes_per_pixel_calc(int slice_width, + u16 drm_bpp, + bool is_navite_422_or_420) +{ + float bpp; + u32 bytes_per_pixel; + double d_bytes_per_pixel; + + dc_assert_fp_enabled(); + + bpp = ((float)drm_bpp / 16.0); + d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; + // TODO: Make sure the formula for calculating this is precise (ceiling + // vs. floor, and at what point they should be applied) + if (is_navite_422_or_420) + d_bytes_per_pixel /= 2; + + bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); + + return bytes_per_pixel; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h new file mode 100644 index 000000000000..b93b95409fbe --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h @@ -0,0 +1,94 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __RC_CALC_FPU_H__ +#define __RC_CALC_FPU_H__ + +#include "os_types.h" +#include <drm/drm_dsc.h> + +#define QP_SET_SIZE 15 + +typedef int qp_set[QP_SET_SIZE]; + +struct rc_params { + int rc_quant_incr_limit0; + int rc_quant_incr_limit1; + int initial_fullness_offset; + int initial_xmit_delay; + int first_line_bpg_offset; + int second_line_bpg_offset; + int flatness_min_qp; + int flatness_max_qp; + int flatness_det_thresh; + qp_set qp_min; + qp_set qp_max; + qp_set ofs; + int rc_model_size; + int rc_edge_factor; + int rc_tgt_offset_hi; + int rc_tgt_offset_lo; + int rc_buf_thresh[QP_SET_SIZE - 1]; +}; + +enum colour_mode { + CM_RGB, /* 444 RGB */ + CM_444, /* 444 YUV or simple 422 */ + CM_422, /* native 422 */ + CM_420 /* native 420 */ +}; + +enum bits_per_comp { + BPC_8 = 8, + BPC_10 = 10, + BPC_12 = 12 +}; + +enum max_min { + DAL_MM_MIN = 0, + DAL_MM_MAX = 1 +}; + +struct qp_entry { + float bpp; + const qp_set qps; +}; + +typedef struct qp_entry qp_table[]; + +u32 _do_bytes_per_pixel_calc(int slice_width, + u16 drm_bpp, + bool is_navite_422_or_420); + +void _do_calc_rc_params(struct rc_params *rc, + enum colour_mode cm, + enum bits_per_comp bpc, + u16 drm_bpp, + bool is_navite_422_or_420, + int slice_width, + int slice_height, + int minor_version); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index 8d31eb75c6a6..a2537229ee88 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -1,35 +1,6 @@ # SPDX-License-Identifier: MIT # # Makefile for the 'dsc' sub-component of DAL. - -ifdef CONFIG_X86 -dsc_ccflags := -mhard-float -msse -endif - -ifdef CONFIG_PPC64 -dsc_ccflags := -mhard-float -maltivec -endif - -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -endif - -ifdef CONFIG_X86 -ifdef IS_OLD_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). -dsc_ccflags += -mpreferred-stack-boundary=4 -else -dsc_ccflags += -msse2 -endif -endif - -CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_rcflags) - DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC)) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index 7b294f637881..b19d3aeb5962 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -23,266 +23,7 @@ * Authors: AMD * */ -#include <drm/drm_dsc.h> - -#include "os_types.h" #include "rc_calc.h" -#include "qp_tables.h" - -#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min) - -#define MODE_SELECT(val444, val422, val420) \ - (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? 
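/* Everything removed from rc_calc.c below duplicates what now lives in
 * dml/dsc/rc_calc_fpu.c under dc_assert_fp_enabled() protection; only the
 * calc_rc_params()/calc_dsc_bytes_per_pixel() wrappers remain, which is
 * also why the dsc Makefile above no longer needs its hard-float cflags.
 */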
(val422) : (val420)) - - -#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \ - table = qp_table_##mode##_##bpc##bpc_##max; \ - table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \ - break - - -static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, - enum max_min max_min, float bpp) -{ - int mode = MODE_SELECT(444, 422, 420); - int sel = table_hash(mode, bpc, max_min); - int table_size = 0; - int index; - const struct qp_entry *table = 0L; - - // alias enum - enum { min = DAL_MM_MIN, max = DAL_MM_MAX }; - switch (sel) { - TABLE_CASE(444, 8, max); - TABLE_CASE(444, 8, min); - TABLE_CASE(444, 10, max); - TABLE_CASE(444, 10, min); - TABLE_CASE(444, 12, max); - TABLE_CASE(444, 12, min); - TABLE_CASE(422, 8, max); - TABLE_CASE(422, 8, min); - TABLE_CASE(422, 10, max); - TABLE_CASE(422, 10, min); - TABLE_CASE(422, 12, max); - TABLE_CASE(422, 12, min); - TABLE_CASE(420, 8, max); - TABLE_CASE(420, 8, min); - TABLE_CASE(420, 10, max); - TABLE_CASE(420, 10, min); - TABLE_CASE(420, 12, max); - TABLE_CASE(420, 12, min); - } - - if (table == 0) - return; - - index = (bpp - table[0].bpp) * 2; - - /* requested size is bigger than the table */ - if (index >= table_size) { - dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n"); - return; - } - - memcpy(qps, table[index].qps, sizeof(qp_set)); -} - -static double dsc_roundf(double num) -{ - if (num < 0.0) - num = num - 0.5; - else - num = num + 0.5; - - return (int)(num); -} - -static double dsc_ceil(double num) -{ - double retval = (int)num; - - if (retval != num && num > 0) - retval = num + 1; - - return (int)retval; -} - -static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) -{ - int *p = ofs; - - if (mode == CM_444 || mode == CM_RGB) { - *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); - *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); - *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); - *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); - *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); - *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0)))); - *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0)))); - *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0)))); - *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0)))); - *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0)))); - *p++ = -10; - *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? 
(-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0)))); - *p++ = -12; - *p++ = -12; - *p++ = -12; - } else if (mode == CM_422) { - *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0)))); - *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0)))); - *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0)))); - *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0)))); - *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0)))); - *p++ = -10; - *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1)))); - *p++ = -12; - *p++ = -12; - *p++ = -12; - } else { - *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0)))); - *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0)))); - *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0)))); - *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0)))); - *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0)))); - *p++ = -10; - *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0)))); - *p++ = -12; - *p++ = -12; - *p++ = -12; - } -} - -static int median3(int a, int b, int c) -{ - if (a > b) - swap(a, b); - if (b > c) - swap(b, c); - if (a > b) - swap(b, c); - - return b; -} - -static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm, - enum bits_per_comp bpc, u16 drm_bpp, - bool is_navite_422_or_420, - int slice_width, int slice_height, - int minor_version) -{ - float bpp; - float bpp_group; - float initial_xmit_delay_factor; - int padding_pixels; - int i; - - bpp = ((float)drm_bpp / 16.0); - /* in native_422 or native_420 modes, the bits_per_pixel is double the - * target bpp (the latter is what calc_rc_params expects) - */ - if (is_navite_422_or_420) - bpp /= 2.0; - - rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - - bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0); - - switch (cm) { - case CM_420: - rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? 
(6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584))))); - rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group))); - rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group))); - break; - case CM_422: - rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584)))); - rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group))); - rc->second_line_bpg_offset = 0; - break; - case CM_444: - case CM_RGB: - rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2))))); - rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group))); - rc->second_line_bpg_offset = 0; - break; - } - - initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0; - rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor); - - if (cm == CM_422 || cm == CM_420) - slice_width /= 2; - - padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0; - if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) { - if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1) - rc->initial_xmit_delay++; - } - - rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - rc->flatness_det_thresh = 2 << (bpc - 8); - - get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp); - get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp); - if (cm == CM_444 && minor_version == 1) { - for (i = 0; i < QP_SET_SIZE; ++i) { - rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0; - rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0; - } - } - get_ofs_set(rc->ofs, cm, bpp); - - /* fixed parameters */ - rc->rc_model_size = 8192; - rc->rc_edge_factor = 6; - rc->rc_tgt_offset_hi = 3; - rc->rc_tgt_offset_lo = 3; - - rc->rc_buf_thresh[0] = 896; - rc->rc_buf_thresh[1] = 1792; - rc->rc_buf_thresh[2] = 2688; - rc->rc_buf_thresh[3] = 3584; - rc->rc_buf_thresh[4] = 4480; - rc->rc_buf_thresh[5] = 5376; - rc->rc_buf_thresh[6] = 6272; - rc->rc_buf_thresh[7] = 6720; - rc->rc_buf_thresh[8] = 7168; - rc->rc_buf_thresh[9] = 7616; - rc->rc_buf_thresh[10] = 7744; - rc->rc_buf_thresh[11] = 7872; - rc->rc_buf_thresh[12] = 8000; - rc->rc_buf_thresh[13] = 8064; -} - -static u32 _do_bytes_per_pixel_calc(int slice_width, u16 drm_bpp, - bool is_navite_422_or_420) -{ - float bpp; - u32 bytes_per_pixel; - double d_bytes_per_pixel; - - bpp = ((float)drm_bpp / 16.0); - d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; - // TODO: Make sure the formula for calculating this is precise (ceiling - // vs. 
floor, and at what point they should be applied) - if (is_navite_422_or_420) - d_bytes_per_pixel /= 2; - - bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); - - return bytes_per_pixel; -} /** * calc_rc_params - reads the user's cmdline mode diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index 262f06afcbf9..c2340e001b57 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -27,55 +27,7 @@ #ifndef __RC_CALC_H__ #define __RC_CALC_H__ - -#define QP_SET_SIZE 15 - -typedef int qp_set[QP_SET_SIZE]; - -struct rc_params { - int rc_quant_incr_limit0; - int rc_quant_incr_limit1; - int initial_fullness_offset; - int initial_xmit_delay; - int first_line_bpg_offset; - int second_line_bpg_offset; - int flatness_min_qp; - int flatness_max_qp; - int flatness_det_thresh; - qp_set qp_min; - qp_set qp_max; - qp_set ofs; - int rc_model_size; - int rc_edge_factor; - int rc_tgt_offset_hi; - int rc_tgt_offset_lo; - int rc_buf_thresh[QP_SET_SIZE - 1]; -}; - -enum colour_mode { - CM_RGB, /* 444 RGB */ - CM_444, /* 444 YUV or simple 422 */ - CM_422, /* native 422 */ - CM_420 /* native 420 */ -}; - -enum bits_per_comp { - BPC_8 = 8, - BPC_10 = 10, - BPC_12 = 12 -}; - -enum max_min { - DAL_MM_MIN = 0, - DAL_MM_MAX = 1 -}; - -struct qp_entry { - float bpp; - const qp_set qps; -}; - -typedef struct qp_entry qp_table[]; +#include "dml/dsc/rc_calc_fpu.h" void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps); u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index ef830aded5b1..1e19dd674e5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -22,7 +22,6 @@ * Authors: AMD * */ -#include "os_types.h" #include <drm/drm_dsc.h> #include "dscc_types.h" #include "rc_calc.h" diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index c70375117519..a6d3d859754a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -121,12 +121,12 @@ enum dc_status dpcd_set_lane_settings( const struct link_training_settings *link_training_setting, uint32_t offset); /* Read training status and adjustment requests from DPCD. 
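 * The rename below reflects that the function now hands back the raw
 * per-lane adjust requests (union lane_adjust) instead of pre-computed
 * drive settings.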
*/ -enum dc_status dp_get_lane_status_and_drive_settings( +enum dc_status dp_get_lane_status_and_lane_adjust( struct dc_link *link, const struct link_training_settings *link_training_setting, - union lane_status *ln_status, - union lane_align_status_updated *ln_status_updated, - struct link_training_settings *req_settings, + union lane_status ln_status[LANE_COUNT_DP_MAX], + union lane_align_status_updated *ln_align, + union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], uint32_t offset); void dp_wait_for_training_aux_rd_interval( @@ -151,9 +151,11 @@ void dp_hw_to_dpcd_lane_settings( const struct link_training_settings *lt_settings, const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]); -void dp_update_drive_settings( - struct link_training_settings *dest, - struct link_training_settings src); +void dp_decide_lane_settings( + const struct link_training_settings *lt_settings, + const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]); uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 09237d5819f4..c940fdfda144 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -123,6 +123,15 @@ struct dccg_funcs { void (*set_dispclk_change_mode)( struct dccg *dccg, enum dentist_dispclk_change_mode change_mode); + + void (*disable_dsc)( + struct dccg *dccg, + int inst); + + void (*enable_dsc)( + struct dccg *dccg, + int inst); + }; #endif //__DAL_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 9f12792b7e59..3ef7faa92052 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -34,6 +34,8 @@ union defer_reg_writes { bool disable_blnd_lut:1; bool disable_3dlut:1; bool disable_shaper:1; + bool disable_gamcor:1; + bool disable_dscl:1; } bits; uint32_t raw; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 04d6ec3f021f..f5fd2a067323 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -367,6 +367,7 @@ struct mpc_funcs { void (*set_bg_color)(struct mpc *mpc, struct tg_color *bg_color, int mpcc_id); + void (*set_mpc_mem_lp_mode)(struct mpc *mpc); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index f324285394be..c2008258c50a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -143,6 +143,7 @@ struct hwseq_private_funcs { const struct dc_plane_state *plane_state); void (*PLAT_58856_wa)(struct dc_state *context, struct pipe_ctx *pipe_ctx); + void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h index 83b2199b2c83..10dcf6a5e9b1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h @@ -97,7 +97,7 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream( const struct dc_stream_state 
*stream); /* Return true if encoder available to use. */ -bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id); +bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link); /* Returns true if encoder assignments in supplied state pass validity checks. */ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state); diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 365fbc4758e1..cd204eef073b 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -97,6 +97,7 @@ enum dmub_asic { DMUB_ASIC_DCN302, DMUB_ASIC_DCN303, DMUB_ASIC_DCN31, + DMUB_ASIC_DCN31B, DMUB_ASIC_MAX, }; @@ -237,6 +238,8 @@ struct dmub_srv_hw_params { bool load_inst_const; bool skip_panel_power_sequence; bool disable_z10; + bool power_optimization; + bool dpia_supported; bool disable_dpia; }; diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 0b9d6bf4886d..c29a67ccef17 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -46,10 +46,10 @@ /* Firmware versioning. */ #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0xf0c64c97 +#define DMUB_FW_VERSION_GIT_HASH 0x1d82d23e #define DMUB_FW_VERSION_MAJOR 0 #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 87 +#define DMUB_FW_VERSION_REVISION 91 #define DMUB_FW_VERSION_TEST 0 #define DMUB_FW_VERSION_VBIOS 0 #define DMUB_FW_VERSION_HOTFIX 0 @@ -370,8 +370,13 @@ union dmub_fw_boot_options { uint32_t z10_disable: 1; /**< 1 to disable z10 */ uint32_t enable_dpia: 1; /**< 1 if DPIA should be enabled */ uint32_t invalid_vbios_data: 1; /**< 1 if VBIOS data table is invalid */ - uint32_t reserved_unreleased2: 1; /**< reserved for an unreleased feature */ - uint32_t reserved : 22; /**< reserved */ + uint32_t dpia_supported: 1; /**< 1 if DPIA is supported on this platform */ + uint32_t sel_mux_phy_c_d_phy_f_g: 1; /**< 1 if PHYF/PHYG should be enabled */ + /**< 1 if all root clock gating is enabled and low power memory is enabled*/ + uint32_t power_optimization: 1; + uint32_t diag_env: 1; /* 1 if diagnostic environment */ + + uint32_t reserved : 19; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -692,6 +697,7 @@ enum dmub_out_cmd_type { enum dmub_cmd_dpia_type { DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0, DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1, + DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2, }; #pragma pack(push, 1) @@ -1067,6 +1073,24 @@ struct dmub_rb_cmd_set_config_access { }; /** + * Data passed from driver to FW in a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command. + */ +struct dmub_cmd_mst_alloc_slots_control_data { + uint8_t mst_alloc_slots; /* mst slots to be allotted */ + uint8_t instance; /* DPIA instance */ + uint8_t immed_status; /* Immediate status returned as there is no outbox msg posted */ + uint8_t mst_slots_in_use; /* returns slots in use for error cases */ +}; + +/** + * DMUB command structure for SET_ command. + */ +struct dmub_rb_cmd_set_mst_alloc_slots { + struct dmub_cmd_header header; /* header */ + struct dmub_cmd_mst_alloc_slots_control_data mst_slots_control; /* mst slots control */ +}; + +/** * struct dmub_rb_cmd_dpphy_init - DPPHY init. */ struct dmub_rb_cmd_dpphy_init { @@ -1378,6 +1402,10 @@ enum dmub_cmd_psr_type { * Forces PSR enabled until an explicit PSR disable call. 
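 *
 * (The new DMUB_CMD__SET_PSR_POWER_OPT below takes value 7, leaving 6
 * unused; presumably reserved for an unreleased command.)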
*/ DMUB_CMD__PSR_FORCE_STATIC = 5, + /** + * Set PSR power option + */ + DMUB_CMD__SET_PSR_POWER_OPT = 7, }; /** @@ -1676,6 +1704,44 @@ struct dmub_rb_cmd_psr_force_static { }; /** + * Data passed from driver to FW in a DMUB_CMD__SET_PSR_POWER_OPT command. + */ +struct dmub_cmd_psr_set_power_opt_data { + /** + * PSR control version. + */ + uint8_t cmd_version; + /** + * Panel Instance. + * Panel isntance to identify which psr_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad[2]; + /** + * PSR power option + */ + uint32_t power_opt; +}; + +/** + * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command. + */ +struct dmub_rb_cmd_psr_set_power_opt { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command. + */ + struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data; +}; + +/** * Set of HW components that can be locked. * * Note: If updating with more HW components, fields @@ -2459,6 +2525,10 @@ union dmub_rb_cmd { */ struct dmub_rb_cmd_psr_force_static psr_force_static; /** + * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command. + */ + struct dmub_rb_cmd_psr_set_power_opt psr_set_power_opt; + /** * Definition of a DMUB_CMD__PLAT_54186_WA command. */ struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa; @@ -2543,6 +2613,10 @@ union dmub_rb_cmd { */ struct dmub_rb_cmd_set_config_access set_config_access; /** + * Definition of a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command. + */ + struct dmub_rb_cmd_set_mst_alloc_slots set_mst_alloc_slots; + /** * Definition of a DMUB_CMD__EDID_CEA command. */ struct dmub_rb_cmd_edid_cea edid_cea; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 3988f65f1ea4..fa0569174aec 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -338,7 +338,11 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu union dmub_fw_boot_options boot_options = {0}; boot_options.bits.z10_disable = params->disable_z10; + boot_options.bits.dpia_supported = params->dpia_supported; boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1; + boot_options.bits.power_optimization = params->power_optimization; + + boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 
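/* Per the boot-option definition above, sel_mux_phy_c_d_phy_f_g routes
 * PHYF/PHYG through the C/D mux; only the DCN 3.1B variant asserts it.
 */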
1 : 0; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 77c67222cabd..56d400ffa7ac 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -208,6 +208,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) break; case DMUB_ASIC_DCN31: + case DMUB_ASIC_DCN31B: dmub->regs_dcn31 = &dmub_srv_dcn31_regs; funcs->reset = dmub_dcn31_reset; funcs->reset_release = dmub_dcn31_reset_release; diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index a9974f12f7fb..e4a2dfacab4c 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -228,7 +228,7 @@ enum { #define FAMILY_YELLOW_CARP 146 #define YELLOW_CARP_A0 0x01 -#define YELLOW_CARP_B0 0x1A +#define YELLOW_CARP_B0 0x20 #define YELLOW_CARP_UNKNOWN 0xFF #ifndef ASICREV_IS_YELLOW_CARP diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index 792652236c61..dd974c428d23 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -328,6 +328,7 @@ struct integrated_info { uint8_t gu_id[NUMBER_OF_UCHAR_FOR_GUID]; uint8_t checksum; + uint8_t fixdpvoltageswing; } ext_disp_conn_info; /* exiting long long time */ struct available_s_clk_list { diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h index c7fbb9c3ad6b..418fbf8c5c3a 100644 --- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h +++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h @@ -41,6 +41,8 @@ struct aux_payload { * reset it to read data */ bool write; bool mot; + bool write_status_update; + uint32_t address; uint32_t length; uint8_t *data; @@ -53,6 +55,7 @@ struct aux_payload { * zero means "use default value" */ uint32_t defer_delay; + }; struct aux_command { diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 9ffea7b40545..424bccd36434 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -90,8 +90,11 @@ enum lttpr_mode { struct link_training_settings { struct dc_link_settings link_settings; - struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]; + /* TODO: turn lane settings below into mandatory fields + * as initial lane configuration + */ + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]; enum dc_voltage_swing *voltage_swing; enum dc_pre_emphasis *pre_emphasis; enum dc_post_cursor2 *post_cursor2; @@ -115,8 +118,30 @@ struct link_training_settings { #endif bool enhanced_framing; - bool allow_invalid_msa_timing_param; enum lttpr_mode lttpr_mode; + + /* disallow different lanes to have different lane settings */ + bool disallow_per_lane_settings; + /* dpcd lane settings will always use the same hw lane settings + * even if it doesn't match requested lane adjust */ + bool always_match_dpcd_with_hw_lane_settings; + + /***************************************************************** + * training states - parameters that can change in link training + *****************************************************************/ + /* TODO: Move hw_lane_settings and 
dpcd_lane_settings + * along with lane adjust, lane align, offset and all + * other training states into a new structure called + * training states, so link_training_settings becomes + * a constant input pre-decided prior to link training. + * + * The goal is to strictly decouple link training settings + * decision making process from link training states to + * prevent it from messy code practice of changing training + * decision on the fly. + */ + struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX]; + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]; }; /*TODO: Move this enum test harness*/ diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index b99aa232bd8b..bd1d1dc93629 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -155,9 +155,18 @@ static unsigned int calc_v_total_from_duration( if (duration_in_us > vrr->max_duration_in_us) duration_in_us = vrr->max_duration_in_us; - v_total = div64_u64(div64_u64(((unsigned long long)( - duration_in_us) * (stream->timing.pix_clk_100hz / 10)), - stream->timing.h_total), 1000); + if (dc_is_hdmi_signal(stream->signal)) { + uint32_t h_total_up_scaled; + + h_total_up_scaled = stream->timing.h_total * 10000; + v_total = div_u64((unsigned long long)duration_in_us + * stream->timing.pix_clk_100hz + (h_total_up_scaled - 1), + h_total_up_scaled); + } else { + v_total = div64_u64(div64_u64(((unsigned long long)( + duration_in_us) * (stream->timing.pix_clk_100hz / 10)), + stream->timing.h_total), 1000); + } /* v_total cannot be less than nominal */ if (v_total < stream->timing.v_total) { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index e9bd84ec027d..be61975f1470 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -105,6 +105,7 @@ static enum mod_hdcp_status remove_display_from_topology_v3( dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; psp_dtm_invoke(psp, dtm_cmd->cmd_id); + mutex_unlock(&psp->dtm_context.mutex); if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { status = remove_display_from_topology_v2(hdcp, index); @@ -115,8 +116,6 @@ static enum mod_hdcp_status remove_display_from_topology_v3( HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index); } - mutex_unlock(&psp->dtm_context.mutex); - return status; } @@ -205,6 +204,7 @@ static enum mod_hdcp_status add_display_to_topology_v3( dtm_cmd->dtm_in_message.topology_update_v3.link_hdcp_cap = link->hdcp_supported_informational; psp_dtm_invoke(psp, dtm_cmd->cmd_id); + mutex_unlock(&psp->dtm_context.mutex); if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { status = add_display_to_topology_v2(hdcp, display); @@ -214,8 +214,6 @@ static enum mod_hdcp_status add_display_to_topology_v3( HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index); } - mutex_unlock(&psp->dtm_context.mutex); - return status; } diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index f37101f5a777..6d648c889866 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -249,6 +249,8 @@ struct mod_hdcp_link { uint8_t ddc_line; uint8_t link_enc_idx; uint8_t phy_idx; + uint8_t dio_output_type; + uint8_t dio_output_id; uint8_t hdcp_supported_informational; union { struct 
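/* The freesync hunk above rounds the HDMI v_total up rather than
 * truncating: v_total = ceil(duration_us * pix_clk_100hz / (h_total *
 * 10000)). E.g. a 16.667 ms frame at 148.5 MHz with h_total 2200 now
 * yields 1126 lines where the truncating DP path yields 1125, so the
 * realized frame duration can only err on the long side.
 */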
mod_hdcp_displayport dp; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 257f280d3d53..f1a46d16f7ea 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -228,7 +228,7 @@ enum DC_FEATURE_MASK { DC_FBC_MASK = (1 << 0), //0x1, disabled by default DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default - DC_PSR_MASK = (1 << 3), //0x8, disabled by default + DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1 DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default }; @@ -236,7 +236,8 @@ enum DC_DEBUG_MASK { DC_DISABLE_PIPE_SPLIT = 0x1, DC_DISABLE_STUTTER = 0x2, DC_DISABLE_DSC = 0x4, - DC_DISABLE_CLOCK_GATING = 0x8 + DC_DISABLE_CLOCK_GATING = 0x8, + DC_DISABLE_PSR = 0x10, }; enum amd_dpm_forced_level; diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h index 312c50ea30f3..f268d33c4744 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h @@ -436,6 +436,8 @@ #define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2 #define regDCCG_GATE_DISABLE_CNTL3 0x005a #define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2 +#define regHDMISTREAMCLK0_DTO_PARAM 0x005b +#define regHDMISTREAMCLK0_DTO_PARAM_BASE_IDX 2 #define regDCCG_AUDIO_DTBCLK_DTO_PHASE 0x0061 #define regDCCG_AUDIO_DTBCLK_DTO_PHASE_BASE_IDX 2 #define regDCCG_AUDIO_DTBCLK_DTO_MODULO 0x0062 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h index a9d553ef26c0..1f21f313bd1d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h @@ -1438,6 +1438,14 @@ #define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE0_GATE_DISABLE_MASK 0x00200000L #define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE1_GATE_DISABLE_MASK 0x00400000L #define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE1_GATE_DISABLE_MASK 0x00800000L +//HDMISTREAMCLK0_DTO_PARAM +#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE__SHIFT 0x0 +#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO__SHIFT 0x8 +#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN__SHIFT 0x10 +#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE_MASK 0x000000FFL +#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO_MASK 0x0000FF00L +#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN_MASK 0x00010000L + //DCCG_AUDIO_DTBCLK_DTO_PHASE #define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE__SHIFT 0x0 #define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE_MASK 0xFFFFFFFFL diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index c255b4b8e685..41472ed99253 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -310,7 +310,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, struct amdgpu_device *adev = drm_to_adev(ddev); const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; enum amd_dpm_forced_level level; - enum amd_dpm_forced_level current_level = 0xff; + enum amd_dpm_forced_level current_level; int ret = 0; if (amdgpu_in_reset(adev)) @@ -350,6 +350,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, if 
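/* current_level is no longer seeded with the 0xff sentinel: it is now
 * assigned on both paths below (the driver callback when present,
 * otherwise the cached adev->pm.dpm.forced_level) before the equality
 * check against the requested level.
 */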
(pp_funcs->get_performance_level) current_level = amdgpu_dpm_get_performance_level(adev); + else + current_level = adev->pm.dpm.forced_level; if (current_level == level) { pm_runtime_mark_last_busy(ddev->dev); @@ -2019,15 +2021,15 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC, .attr_update = ss_power_attr_update), AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC, @@ -2092,6 +2094,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID)) *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { + if (!adev->powerplay.pp_funcs->get_power_profile_mode || + amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) + *states = ATTR_STATE_UNSUPPORTED; } switch (asic_type) { diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index 8156729c370b..3557f4e7fc30 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -1008,7 +1008,9 @@ struct pptable_funcs { /** * @set_power_limit: Set power limit in watts. 
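 * The new limit_type argument selects which SMU limit is being
 * programmed (enum smu_ppt_limit_type, e.g. the default sustained
 * limit versus a fast PPT limit).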
*/ - int (*set_power_limit)(struct smu_context *smu, uint32_t n); + int (*set_power_limit)(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit); /** * @init_max_sustainable_clocks: Populate max sustainable clock speed diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h index cbdae8a2c698..2d422e6a9feb 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h @@ -197,7 +197,9 @@ int smu_v11_0_notify_display_change(struct smu_context *smu); int smu_v11_0_get_current_power_limit(struct smu_context *smu, uint32_t *power_limit); -int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n); +int smu_v11_0_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit); int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h index dc91eb608791..e5d3b0d1a032 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h @@ -163,7 +163,9 @@ int smu_v13_0_notify_display_change(struct smu_context *smu); int smu_v13_0_get_current_power_limit(struct smu_context *smu, uint32_t *power_limit); -int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n); +int smu_v13_0_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit); int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h index 1d3447991d0c..fc9198846e70 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h @@ -51,7 +51,7 @@ #define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default #define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display #define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz -#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Set active work load type +#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Deprecated (Not to be used) #define PPSMC_MSG_ForcePowerDownGfx 0x0B ///< Force power down GFX, i.e. 
enter GFXOFF #define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload #define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer @@ -63,7 +63,7 @@ #define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK #define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK #define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK) -#define PPSMC_MSG_SPARE0 0x16 ///< Spared +#define PPSMC_MSG_SPARE 0x16 ///< Spare #define PPSMC_MSG_GetGfxclkFrequency 0x17 ///< Get GFX clock frequency #define PPSMC_MSG_GetFclkFrequency 0x18 ///< Get FCLK frequency #define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 321215003643..8d796ed3b7d1 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -875,34 +875,30 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx) static int pp_get_power_profile_mode(void *handle, char *buf) { struct pp_hwmgr *hwmgr = handle; + int ret; - if (!hwmgr || !hwmgr->pm_en || !buf) + if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode) + return -EOPNOTSUPP; + if (!buf) return -EINVAL; - if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); - return snprintf(buf, PAGE_SIZE, "\n"); - } - - return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf); + mutex_lock(&hwmgr->smu_lock); + ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf); + mutex_unlock(&hwmgr->smu_lock); + return ret; } static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size) { struct pp_hwmgr *hwmgr = handle; - int ret = -EINVAL; + int ret = -EOPNOTSUPP; - if (!hwmgr || !hwmgr->pm_en) - return ret; - - if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); + if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode) return ret; - } if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { pr_debug("power profile setting is for manual dpm mode only.\n"); - return ret; + return -EINVAL; } mutex_lock(&hwmgr->smu_lock); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 1de3ae77e03e..258c573acc97 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1024,6 +1024,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, uint32_t min_freq, max_freq = 0; uint32_t ret = 0; + phm_get_sysfs_buf(&buf, &size); + switch (type) { case PP_SCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now); @@ -1065,7 +1067,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (data->gfx_actual_soft_min_freq > 0) ? 
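/* These hwmgr hunks all switch "size = sysfs_emit(...)" to
 * "size += sysfs_emit_at(buf, size, ...)" so each clock-level section
 * appends to the buffer instead of resetting it; phm_get_sysfs_buf(),
 * added in smu_helper.h below, makes the offsets line up.
 */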
data->gfx_actual_soft_min_freq : min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -1081,7 +1083,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", min_freq, max_freq); } @@ -1456,6 +1458,8 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0], title[1], title[2], title[3], title[4], title[5]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index e7803ce8f67a..aceebf584225 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -4914,6 +4914,8 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, int size = 0; uint32_t i, now, clock, pcie_speed; + phm_get_sysfs_buf(&buf, &size); + switch (type) { case PP_SCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock); @@ -4963,7 +4965,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_SCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); for (i = 0; i < odn_sclk_table->num_of_pl; i++) size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", i, odn_sclk_table->entries[i].clock/100, @@ -4972,7 +4974,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_MCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); for (i = 0; i < odn_mclk_table->num_of_pl; i++) size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", i, odn_mclk_table->entries[i].clock/100, @@ -4981,7 +4983,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_RANGE: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); @@ -5518,6 +5520,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n", title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index b94a77e4e714..8e28a8eecefc 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -1550,6 +1550,8 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, uint32_t i, now; int size = 0; + phm_get_sysfs_buf(&buf, &size); + switch (type) { case PP_SCLK: now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h index ad33983a8064..2a75da1e9f03 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h @@ -109,6 +109,19 @@ int phm_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src 
*source, struct amdgpu_iv_entry *entry); +/* + * Helper function to make sysfs_emit_at() happy. Align buf to + * the current page boundary and record the offset. + */ +static inline void phm_get_sysfs_buf(char **buf, int *offset) +{ + if (!*buf || !offset) + return; + + *offset = offset_in_page(*buf); + *buf -= *offset; +} + int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr); void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index c152a61ddd2c..c981fc2882f0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -4548,6 +4548,8 @@ static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -4637,6 +4639,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, int i, now, size = 0, count = 0; + phm_get_sysfs_buf(&buf, &size); + switch (type) { case PP_SCLK: if (data->registry_data.sclk_dpm_key_disabled) @@ -4717,7 +4721,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; for (i = 0; i < podn_vdd_dep->count; i++) size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", @@ -4727,7 +4731,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_MCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; for (i = 0; i < podn_vdd_dep->count; i++) size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", @@ -4737,7 +4741,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_RANGE: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.gfx_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); @@ -5112,6 +5116,8 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0], title[1], title[2], title[3], title[4], title[5]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index 8558718e15a8..f7e783e1c888 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -2141,6 +2141,8 @@ static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -2244,6 +2246,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, int i, now, size = 0; struct pp_clock_levels_with_latency clocks; + phm_get_sysfs_buf(&buf, &size); + switch (type) { case PP_SCLK: 
PP_ASSERT_WITH_CODE( diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 0cf39c1244b1..03e63be4ee27 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -3238,6 +3238,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -3364,6 +3366,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, int ret = 0; uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; + phm_get_sysfs_buf(&buf, &size); + switch (type) { case PP_SCLK: ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now); @@ -3479,7 +3483,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", od_table->GfxclkFmin); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -3489,7 +3493,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_MCLK: if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", od_table->UclkFmax); } @@ -3503,7 +3507,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_VDDC_CURVE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE"); size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1 / VOLTAGE_SCALE); @@ -3518,7 +3522,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_RANGE: - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { @@ -4003,6 +4007,8 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n", title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7], title[8], title[9], title[10]); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 4ea7e90ef60d..01168b8955bf 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -455,6 +455,10 @@ static int smu_get_power_num_states(void *handle, bool is_support_sw_smu(struct amdgpu_device *adev) { + /* vega20 is 11.0.2, but it's supported via the powerplay code */ + if (adev->asic_type == CHIP_VEGA20) + return false; + if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0)) return true; @@ -1464,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu) dev_err(adev->dev, "Failed to disable smu features.\n"); } - if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 
0, 0) && + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) && adev->gfx.rlc.funcs->stop) adev->gfx.rlc.funcs->stop(adev); @@ -2344,9 +2348,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit) mutex_lock(&smu->mutex); + limit &= (1<<24)-1; if (limit_type != SMU_DEFAULT_PPT_LIMIT) if (smu->ppt_funcs->set_power_limit) { - ret = smu->ppt_funcs->set_power_limit(smu, limit); + ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); goto out; } @@ -2362,7 +2367,7 @@ static int smu_set_power_limit(void *handle, uint32_t limit) limit = smu->current_power_limit; if (smu->ppt_funcs->set_power_limit) { - ret = smu->ppt_funcs->set_power_limit(smu, limit); + ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) smu->user_dpm_profile.power_limit = limit; } @@ -2529,13 +2534,15 @@ static int smu_get_power_profile_mode(void *handle, char *buf) struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || + !smu->ppt_funcs->get_power_profile_mode) return -EOPNOTSUPP; + if (!buf) + return -EINVAL; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_power_profile_mode) - ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); + ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); mutex_unlock(&smu->mutex); @@ -2549,7 +2556,8 @@ static int smu_set_power_profile_mode(void *handle, struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || + !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; mutex_lock(&smu->mutex); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 082f01893f3d..fd1d30a93db5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -436,6 +436,19 @@ static void arcturus_check_bxco_support(struct smu_context *smu) } } +static void arcturus_check_fan_support(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + + /* No sort of fan control possible if PPTable has it disabled */ + smu->adev->pm.no_fan = + !(pptable->FeaturesToRun[0] & FEATURE_FAN_CONTROL_MASK); + if (smu->adev->pm.no_fan) + dev_info_once(smu->adev->dev, + "PMFW based fan control disabled"); +} + static int arcturus_check_powerplay_table(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -443,6 +456,7 @@ static int arcturus_check_powerplay_table(struct smu_context *smu) table_context->power_play_table; arcturus_check_bxco_support(smu); + arcturus_check_fan_support(smu); table_context->thermal_controller_type = powerplay_table->thermal_controller_type; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index 3d4c65bc29dc..cbc3f99e8573 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -47,7 +47,6 @@ /* unit: MHz */ #define CYAN_SKILLFISH_SCLK_MIN 1000 #define CYAN_SKILLFISH_SCLK_MAX 2000 -#define CYAN_SKILLFISH_SCLK_DEFAULT 1800 /* unit: mV */ #define CYAN_SKILLFISH_VDDC_MIN 700 @@ -59,6 +58,8 @@ static struct gfx_user_settings { uint32_t vddc; } cyan_skillfish_user_settings; +static uint32_t 
cyan_skillfish_sclk_default; + #define FEATURE_MASK(feature) (1ULL << feature) #define SMC_DPM_FEATURE ( \ FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ @@ -365,13 +366,19 @@ static bool cyan_skillfish_is_dpm_running(struct smu_context *smu) return false; ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2); - if (ret) return false; feature_enabled = (uint64_t)feature_mask[0] | ((uint64_t)feature_mask[1] << 32); + /* + * cyan_skillfish specific, query the default sclk instead of hard-coding it. + */ + if (!cyan_skillfish_sclk_default) + cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, + &cyan_skillfish_sclk_default); + return !!(feature_enabled & SMC_DPM_FEATURE); } @@ -444,14 +451,14 @@ static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu, return -EINVAL; } - if (input[1] <= CYAN_SKILLFISH_SCLK_MIN || + if (input[1] < CYAN_SKILLFISH_SCLK_MIN || input[1] > CYAN_SKILLFISH_SCLK_MAX) { dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n", CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); return -EINVAL; } - if (input[2] <= CYAN_SKILLFISH_VDDC_MIN || + if (input[2] < CYAN_SKILLFISH_VDDC_MIN || input[2] > CYAN_SKILLFISH_VDDC_MAX) { dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n", CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); @@ -468,7 +475,7 @@ static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu, return -EINVAL; } - cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT; + cyan_skillfish_user_settings.sclk = cyan_skillfish_sclk_default; cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC; break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 15e66e1912de..a4108025fe29 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -670,7 +670,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_11_0_dpm_table *dpm_table; struct amdgpu_device *adev = smu->adev; - int ret = 0; + int i, ret = 0; DpmDescriptor_t *table_member; /* socclk dpm table setup */ @@ -746,78 +746,45 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* vclk0 dpm table setup */ - dpm_table = &dpm_context->dpm_tables.vclk_table; - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_v11_0_set_single_dpm_table(smu, - SMU_VCLK, - dpm_table); - if (ret) - return ret; - dpm_table->is_fine_grained = - !table_member[PPCLK_VCLK_0].SnapToDiscrete; - } else { - dpm_table->count = 1; - dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; - dpm_table->dpm_levels[0].enabled = true; - dpm_table->min = dpm_table->dpm_levels[0].value; - dpm_table->max = dpm_table->dpm_levels[0].value; - } + /* vclk0/1 dpm table setup */ + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; - /* vclk1 dpm table setup */ - if (adev->vcn.num_vcn_inst > 1) { - dpm_table = &dpm_context->dpm_tables.vclk1_table; + dpm_table = &dpm_context->dpm_tables.vclk_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { ret = smu_v11_0_set_single_dpm_table(smu, - SMU_VCLK1, + i ? SMU_VCLK1 : SMU_VCLK, dpm_table); if (ret) return ret; dpm_table->is_fine_grained = - !table_member[PPCLK_VCLK_1].SnapToDiscrete; + !table_member[i ?
PPCLK_VCLK_1 : PPCLK_VCLK_0].SnapToDiscrete; } else { dpm_table->count = 1; - dpm_table->dpm_levels[0].value = - smu->smu_table.boot_values.vclk / 100; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; dpm_table->dpm_levels[0].enabled = true; dpm_table->min = dpm_table->dpm_levels[0].value; dpm_table->max = dpm_table->dpm_levels[0].value; } } - /* dclk0 dpm table setup */ - dpm_table = &dpm_context->dpm_tables.dclk_table; - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_v11_0_set_single_dpm_table(smu, - SMU_DCLK, - dpm_table); - if (ret) - return ret; - dpm_table->is_fine_grained = - !table_member[PPCLK_DCLK_0].SnapToDiscrete; - } else { - dpm_table->count = 1; - dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; - dpm_table->dpm_levels[0].enabled = true; - dpm_table->min = dpm_table->dpm_levels[0].value; - dpm_table->max = dpm_table->dpm_levels[0].value; - } - - /* dclk1 dpm table setup */ - if (adev->vcn.num_vcn_inst > 1) { - dpm_table = &dpm_context->dpm_tables.dclk1_table; + /* dclk0/1 dpm table setup */ + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + dpm_table = &dpm_context->dpm_tables.dclk_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { ret = smu_v11_0_set_single_dpm_table(smu, - SMU_DCLK1, + i ? SMU_DCLK1 : SMU_DCLK, dpm_table); if (ret) return ret; dpm_table->is_fine_grained = - !table_member[PPCLK_DCLK_1].SnapToDiscrete; + !table_member[i ? PPCLK_DCLK_1 : PPCLK_DCLK_0].SnapToDiscrete; } else { dpm_table->count = 1; - dpm_table->dpm_levels[0].value = - smu->smu_table.boot_values.dclk / 100; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; dpm_table->dpm_levels[0].enabled = true; dpm_table->min = dpm_table->dpm_levels[0].value; dpm_table->max = dpm_table->dpm_levels[0].value; @@ -902,32 +869,18 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable) { struct amdgpu_device *adev = smu->adev; - int ret = 0; + int i, ret = 0; - if (enable) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; /* vcn dpm on is a prerequisite for vcn power gate messages */ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? 
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, + 0x10000 * i, NULL); if (ret) return ret; - if (adev->vcn.num_vcn_inst > 1) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, - 0x10000, NULL); - if (ret) - return ret; - } - } - } else { - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); - if (ret) - return ret; - if (adev->vcn.num_vcn_inst > 1) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, - 0x10000, NULL); - if (ret) - return ret; - } } } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 3470c33ee09d..28b7c0562b99 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -255,7 +255,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu) case IP_VERSION(11, 0, 11): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder; break; - case CHIP_VANGOGH: + case IP_VERSION(11, 5, 0): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH; break; case IP_VERSION(11, 0, 12): @@ -755,6 +755,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) */ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11) || adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 12) || adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) return 0; @@ -978,10 +979,16 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu, return ret; } -int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) +int smu_v11_0_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit) { int power_src; int ret = 0; + uint32_t limit_param; + + if (limit_type != SMU_DEFAULT_PPT_LIMIT) + return -EINVAL; if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); @@ -1001,16 +1008,16 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) * BIT 16-23: PowerSource * BIT 0-15: PowerLimit */ - n &= 0xFFFF; - n |= 0 << 24; - n |= (power_src) << 16; - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL); + limit_param = (limit & 0xFFFF); + limit_param |= 0 << 24; + limit_param |= (power_src) << 16; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit_param, NULL); if (ret) { dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); return ret; } - smu->current_power_limit = n; + smu->current_power_limit = limit; return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index f6ef0ce6e9e2..421f38e8dada 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -1386,52 +1386,38 @@ static int vangogh_set_performance_level(struct smu_context *smu, uint32_t soc_mask, mclk_mask, fclk_mask; uint32_t vclk_mask = 0, dclk_mask = 0; + smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; + smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; + switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: - smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; + smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq; smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - 
smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; ret = vangogh_force_dpm_limit_value(smu, true); + if (ret) + return ret; break; case AMD_DPM_FORCED_LEVEL_LOW: smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; + smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; ret = vangogh_force_dpm_limit_value(smu, false); + if (ret) + return ret; break; case AMD_DPM_FORCED_LEVEL_AUTO: smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - ret = vangogh_unforce_dpm_levels(smu); - break; - case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: - smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetHardMinGfxClk, - VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL); - if (ret) - return ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetSoftMaxGfxClk, - VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL); if (ret) return ret; + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; + smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; ret = vangogh_get_profiling_clk_mask(smu, level, &vclk_mask, @@ -1446,32 +1432,15 @@ static int vangogh_set_performance_level(struct smu_context *smu, vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask); - break; case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, - VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL); - if (ret) - return ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, - VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL); - if (ret) - return ret; + smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; break; case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - ret = vangogh_get_profiling_clk_mask(smu, level, NULL, NULL, @@ -1484,29 +1453,29 @@ static int vangogh_set_performance_level(struct smu_context *smu, vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); break; case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - - ret = 
smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, - VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL); - if (ret) - return ret; + smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; + smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, - VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL); + ret = vangogh_set_peak_clock_by_device(smu); if (ret) return ret; - - ret = vangogh_set_peak_clock_by_device(smu); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: - break; + return 0; } + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, + smu->gfx_actual_hard_min_freq, NULL); + if (ret) + return ret; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, + smu->gfx_actual_soft_max_freq, NULL); + if (ret) + return ret; + return ret; } @@ -2144,11 +2113,12 @@ static int vangogh_get_ppt_limit(struct smu_context *smu, return 0; } -static int vangogh_set_power_limit(struct smu_context *smu, uint32_t ppt_limit) +static int vangogh_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t ppt_limit) { struct smu_11_5_power_context *power_context = - smu->smu_power.power_context; - uint32_t limit_type = ppt_limit >> 24; + smu->smu_power.power_context; int ret = 0; if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 5019903db492..59a7d276541d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1241,11 +1241,13 @@ static int aldebaran_get_power_limit(struct smu_context *smu, return 0; } -static int aldebaran_set_power_limit(struct smu_context *smu, uint32_t n) +static int aldebaran_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit) { /* Power limit can be set only through primary die */ if (aldebaran_is_primary(smu)) - return smu_v13_0_set_power_limit(smu, n); + return smu_v13_0_set_power_limit(smu, limit_type, limit); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 05c5e61f3506..35145db6eedf 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -945,22 +945,27 @@ int smu_v13_0_get_current_power_limit(struct smu_context *smu, return ret; } -int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n) +int smu_v13_0_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit) { int ret = 0; + if (limit_type != SMU_DEFAULT_PPT_LIMIT) + return -EINVAL; + if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); return -EOPNOTSUPP; } - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL); if (ret) { dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); return ret; } - smu->current_power_limit = n; + smu->current_power_limit = limit; return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index a403657151ba..8215bbf5ed7c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -64,7 +64,6 @@ static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1), MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1), MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1), - MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 1), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1), MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), @@ -135,14 +134,6 @@ static struct cmn2asic_mapping yellow_carp_table_map[SMU_TABLE_COUNT] = { TAB_MAP_VALID(CUSTOM_DPM), TAB_MAP_VALID(DPMCLOCKS), }; - -static struct cmn2asic_mapping yellow_carp_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), -}; static int yellow_carp_init_smc_tables(struct smu_context *smu) { @@ -543,81 +534,6 @@ static int yellow_carp_set_watermarks_table(struct smu_context *smu, return 0; } -static int yellow_carp_get_power_profile_mode(struct smu_context *smu, - char *buf) -{ - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; - uint32_t i, size = 0; - int16_t workload_type = 0; - - if (!buf) - return -EINVAL; - - for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { - /* - * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT. - * Not all profile modes are supported on yellow carp. - */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - i); - - if (workload_type < 0) - continue; - - size += sysfs_emit_at(buf, size, "%2d %14s%s\n", - i, profile_name[i], (i == smu->power_profile_mode) ? 
"*" : " "); - } - - return size; -} - -static int yellow_carp_set_power_profile_mode(struct smu_context *smu, - long *input, uint32_t size) -{ - int workload_type, ret; - uint32_t profile_mode = input[size]; - - if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); - return -EINVAL; - } - - if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || - profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) - return 0; - - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - profile_mode); - if (workload_type < 0) { - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on YELLOWCARP\n", - profile_mode); - return -EINVAL; - } - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, - NULL); - if (ret) { - dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", - workload_type); - return ret; - } - - smu->power_profile_mode = profile_mode; - - return 0; -} - static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu, void **table) { @@ -1238,8 +1154,6 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = { .read_sensor = yellow_carp_read_sensor, .is_dpm_running = yellow_carp_is_dpm_running, .set_watermarks_table = yellow_carp_set_watermarks_table, - .get_power_profile_mode = yellow_carp_get_power_profile_mode, - .set_power_profile_mode = yellow_carp_set_power_profile_mode, .get_gpu_metrics = yellow_carp_get_gpu_metrics, .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, @@ -1261,6 +1175,5 @@ void yellow_carp_set_ppt_funcs(struct smu_context *smu) smu->message_map = yellow_carp_message_map; smu->feature_map = yellow_carp_feature_mask_map; smu->table_map = yellow_carp_table_map; - smu->workload_map = yellow_carp_workload_map; smu->is_apu = true; } diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 21909642ee4c..147abf1a3968 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -15,6 +15,8 @@ #include "armada_gem.h" #include "armada_ioctlP.h" +MODULE_IMPORT_NS(DMA_BUF); + static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf) { struct drm_gem_object *gobj = vmf->vma->vm_private_data; @@ -336,7 +338,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_armada_gem_pwrite *args = data; struct armada_gem_object *dobj; char __user *ptr; - int ret; + int ret = 0; DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n", args->handle, args->offset, args->size, args->ptr); @@ -349,9 +351,8 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (!access_ok(ptr, args->size)) return -EFAULT; - ret = fault_in_pages_readable(ptr, args->size); - if (ret) - return ret; + if (fault_in_readable(ptr, args->size)) + return -EFAULT; dobj = armada_gem_object_lookup(file, args->handle); if (dobj == NULL) diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 6bfaefa01818..1e30eaeb0e1b 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -1300,18 +1300,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector, return flags; } -static enum drm_connector_status ast_connector_detect(struct drm_connector - *connector, bool force) -{ - int r; - - r = ast_get_modes(connector); - if (r <= 0) - return connector_status_disconnected; - - return 
connector_status_connected; -} - static void ast_connector_destroy(struct drm_connector *connector) { struct ast_connector *ast_connector = to_ast_connector(connector); @@ -1327,7 +1315,6 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = { static const struct drm_connector_funcs ast_connector_funcs = { .reset = drm_atomic_helper_connector_reset, - .detect = ast_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = ast_connector_destroy, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -1355,8 +1342,7 @@ static int ast_connector_init(struct drm_device *dev) connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - connector->polled = DRM_CONNECTOR_POLL_CONNECT | - DRM_CONNECTOR_POLL_DISCONNECT; + connector->polled = DRM_CONNECTOR_POLL_CONNECT; drm_connector_attach_encoder(connector, encoder); @@ -1425,8 +1411,6 @@ int ast_mode_config_init(struct ast_private *ast) drm_mode_config_reset(dev); - drm_kms_helper_poll_init(dev); - return 0; } diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 30cc59fe6ef7..f19d9acbe959 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -31,7 +31,7 @@ #include <linux/dma-buf-map.h> #include <linux/export.h> #include <linux/highmem.h> -#include <linux/mem_encrypt.h> +#include <linux/cc_platform.h> #include <xen/xen.h> #include <drm/drm_cache.h> @@ -204,7 +204,7 @@ bool drm_need_swiotlb(int dma_bits) * Enforce dma_alloc_coherent when memory encryption is active as well * for the same reasons as for Xen paravirtual hosts. */ - if (mem_encrypt_active()) + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) return true; for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 86d13d6bc463..f3d79eda94bb 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1668,13 +1668,10 @@ __dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history, for (i = 0; i < history->len; i++) { const struct drm_dp_mst_topology_ref_entry *entry = &history->entries[i]; - ulong *entries; - uint nr_entries; u64 ts_nsec = entry->ts_nsec; u32 rem_nsec = do_div(ts_nsec, 1000000000); - nr_entries = stack_depot_fetch(entry->backtrace, &entries); - stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4); + stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4); drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s", entry->count, @@ -3355,6 +3352,10 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, /** * drm_dp_update_payload_part1() - Execute payload update part 1 * @mgr: manager to use. + * @start_slot: the current starting slot for new payload allocations + * + * NOTE: @start_slot is a temporary workaround for non-atomic drivers; + * it will be removed once the non-atomic MST helpers are moved out of the helper code * * This iterates over all proposed virtual channels, and tries to * allocate space in the link for them. For 0->slots transitions, @@ -3365,12 +3366,12 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, * after calling this the driver should generate ACT and payload * packets.
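+ * + * With 8b/10b link encoding, time slot 0 carries the MTP header, so + * callers pass start_slot = 1 and at most 63 slots are usable; with + * 128b/132b encoding all 64 slots are available and start_slot is 0 + * (see drm_dp_mst_update_slots() below for where these values are set).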
*/ -int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) +int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot) { struct drm_dp_payload req_payload; struct drm_dp_mst_port *port; int i, j; - int cur_slots = 1; + int cur_slots = start_slot; bool skip; mutex_lock(&mgr->payload_lock); @@ -4334,10 +4335,6 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, { int ret; - /* max. time slots - one slot for MTP header */ - if (slots > 63) - return -ENOSPC; - vcpi->pbn = pbn; vcpi->aligned_pbn = slots * mgr->pbn_div; vcpi->num_slots = slots; @@ -4510,6 +4507,27 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots); /** + * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format + * @mst_state: mst_state to update + * @link_encoding_cap: the encoding format used on the link + */ +void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap) +{ + if (link_encoding_cap == DP_CAP_ANSI_128B132B) { + mst_state->total_avail_slots = 64; + mst_state->start_slot = 0; + } else { + mst_state->total_avail_slots = 63; + mst_state->start_slot = 1; + } + + DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n", + (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b", + mst_state); +} +EXPORT_SYMBOL(drm_dp_mst_update_slots); + +/** * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel * @mgr: manager for this port * @port: port to allocate a virtual channel for. @@ -4540,7 +4558,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots); if (ret) { - drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d max=63 ret=%d\n", + drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d ret=%d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), ret); drm_dp_mst_topology_put_port(port); goto out; @@ -5228,7 +5246,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_topology_state *mst_state) { struct drm_dp_vcpi_allocation *vcpi; - int avail_slots = 63, payload_count = 0; + int avail_slots = mst_state->total_avail_slots, payload_count = 0; list_for_each_entry(vcpi, &mst_state->vcpis, next) { /* Releasing VCPI is always OK-even if the port is gone */ @@ -5257,7 +5275,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr, } } drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n", - mgr, mst_state, avail_slots, 63 - avail_slots); + mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots); return 0; } @@ -5534,6 +5552,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, if (mst_state == NULL) return -ENOMEM; + mst_state->total_avail_slots = 63; + mst_state->start_slot = 1; + mst_state->mgr = mgr; INIT_LIST_HEAD(&mst_state->vcpis); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 7aa2a56a71c8..12893e7be89b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1846,11 +1846,20 @@ static void connector_bad_edid(struct drm_connector *connector, u8 *edid, int num_blocks) { int i; - u8 num_of_ext = edid[0x7e]; + u8 last_block; + + /* + * 0x7e in the EDID is the number of extension blocks. The EDID + * is 1 (base block) + num_ext_blocks blocks big. That means we can think + * of 0x7e in the EDID as the _index_ of the last block in the + * combined chunk of memory.
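+ * + * For example, if edid[0x7e] is 1 there is one extension block, the + * combined EDID spans 2 * EDID_LENGTH bytes, and that last block + * starts at byte offset 1 * EDID_LENGTH.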
+ */ + last_block = edid[0x7e]; /* Calculate real checksum for the last edid extension block data */ - connector->real_edid_checksum = - drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH); + if (last_block < num_blocks) + connector->real_edid_checksum = + drm_edid_block_checksum(edid + last_block * EDID_LENGTH); if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS)) return; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 442db735416f..9727a59d35fd 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1506,6 +1506,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; + struct drm_mode_config *config = &dev->mode_config; int ret = 0; int crtc_count = 0; struct drm_connector_list_iter conn_iter; @@ -1663,6 +1664,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, /* Handle our overallocation */ sizes.surface_height *= drm_fbdev_overalloc; sizes.surface_height /= 100; + if (sizes.surface_height > config->max_height) { + drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n", + config->max_height); + sizes.surface_height = config->max_height; + } /* push down into drivers */ ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 783844bfecc1..25837b1d6639 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -134,6 +134,8 @@ const struct drm_format_info *__drm_format_info(u32 format) static const struct drm_format_info formats[] = { { .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_R8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_R10, .depth = 10, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_R12, .depth = 12, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 3c75d79dbb65..746fd8c73845 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -6,6 +6,7 @@ */ #include <linux/slab.h> +#include <linux/module.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fb_helper.h> @@ -17,6 +18,8 @@ #include "drm_internal.h" +MODULE_IMPORT_NS(DMA_BUF); + #define AFBC_HEADER_SIZE 16 #define AFBC_TH_LAYOUT_ALIGNMENT 8 #define AFBC_HDR_ALIGN 64 diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index dfff073bf1e8..0eeda1012364 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -22,6 +22,8 @@ #include <drm/drm_prime.h> #include <drm/drm_print.h> +MODULE_IMPORT_NS(DMA_BUF); + /** * DOC: overview * diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 93d48a6f04ab..7d1c578388d3 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -118,8 +118,6 @@ static noinline void 
save_stack(struct drm_mm_node *node) static void show_leaks(struct drm_mm *mm) { struct drm_mm_node *node; - unsigned long *entries; - unsigned int nr_entries; char *buf; buf = kmalloc(BUFSZ, GFP_KERNEL); @@ -133,8 +131,7 @@ static void show_leaks(struct drm_mm *mm) continue; } - nr_entries = stack_depot_fetch(node->stack, &entries); - stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0); + stack_depot_snprint(node->stack, buf, BUFSZ, 0); DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s", node->start, node->size, buf); } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 62e8ccc7ab9c..a9359878f4ed 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -140,6 +140,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO 2021 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* Chuwi HiBook (CWI514) */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"), @@ -205,6 +211,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), }, .driver_data = (void *)&gpd_win2, + }, { /* GPD Win 3 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03") + }, + .driver_data = (void *)&lcd720x1280_rightside_up, }, { /* I.T.Works TW891 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."), diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index deb23dbec8b5..c773d3dfb1ab 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -29,6 +29,7 @@ #include <linux/export.h> #include <linux/dma-buf.h> #include <linux/rbtree.h> +#include <linux/module.h> #include <drm/drm.h> #include <drm/drm_drv.h> @@ -39,6 +40,8 @@ #include "drm_internal.h" +MODULE_IMPORT_NS(DMA_BUF); + /** * DOC: overview and lifetime rules * @@ -719,11 +722,13 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) if (obj->funcs && obj->funcs->mmap) { vma->vm_ops = obj->funcs->vm_ops; + drm_gem_object_get(obj); ret = obj->funcs->mmap(obj, vma); - if (ret) + if (ret) { + drm_gem_object_put(obj); return ret; + } vma->vm_private_data = obj; - drm_gem_object_get(obj); return 0; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 6d8bed9c739d..6788ea8490d1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -5,10 +5,13 @@ #include <drm/drm_prime.h> #include <linux/dma-buf.h> +#include <linux/module.h> #include "etnaviv_drv.h" #include "etnaviv_gem.h" +MODULE_IMPORT_NS(DMA_BUF); + static struct lock_class_key etnaviv_prime_lock_class; struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index cc5b07f86346..242a5fd8b932 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1733,7 +1733,6 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master, DBG("%s", dev_name(gpu->dev)); - flush_workqueue(gpu->wq); destroy_workqueue(gpu->wq); etnaviv_sched_fini(gpu); diff --git 
a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 9870c4e6af36..b5001db7a95c 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -793,7 +793,6 @@ static int exynos5433_decon_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct decon_context *ctx; - struct resource *res; int ret; int i; @@ -818,8 +817,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev) ctx->clks[i] = clk; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->addr = devm_ioremap_resource(dev, res); + ctx->addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->addr)) return PTR_ERR(ctx->addr); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index e39fac889edc..8d137857818c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1738,7 +1738,6 @@ static const struct component_ops exynos_dsi_component_ops = { static int exynos_dsi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *res; struct exynos_dsi *dsi; int ret, i; @@ -1789,8 +1788,7 @@ static int exynos_dsi_probe(struct platform_device *pdev) } } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dsi->reg_base = devm_ioremap_resource(dev, res); + dsi->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dsi->reg_base)) return PTR_ERR(dsi->reg_base); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index a3c718148c45..ecfd82d0afb7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -85,7 +85,6 @@ struct fimc_scaler { /* * A structure of fimc context. * - * @regs_res: register resources. * @regs: memory mapped io registers. * @lock: locking of operations. * @clocks: fimc clocks. 
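The exynos probe conversions in the hunks around here all follow one pattern: the two-step MEM-resource lookup and mapping collapses into a single helper call. A minimal before/after sketch, assuming a typical probe function with pdev in scope:

	/* Before: fetch the first MEM resource, then map it. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);

	/* After: devm_platform_ioremap_resource() performs both steps. */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

Both forms hand back an ERR_PTR() on failure, so the surrounding IS_ERR()/PTR_ERR() error handling stays unchanged, and dropping the struct resource local is what allows the regs_res members below to be deleted.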
@@ -103,7 +102,6 @@ struct fimc_context { struct exynos_drm_ipp_formats *formats; unsigned int num_formats; - struct resource *regs_res; void __iomem *regs; spinlock_t lock; struct clk *clocks[FIMC_CLKS_MAX]; @@ -1327,8 +1325,7 @@ static int fimc_probe(struct platform_device *pdev) ctx->num_formats = num_formats; /* resource memory */ - ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); + ctx->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->regs)) return PTR_ERR(ctx->regs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 700ca4fa6665..c735e53939d8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -1202,9 +1202,7 @@ static int fimd_probe(struct platform_device *pdev) return PTR_ERR(ctx->lcd_clk); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - ctx->regs = devm_ioremap_resource(dev, res); + ctx->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->regs)) return PTR_ERR(ctx->regs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index b00230626c6a..471fd6c8135f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1449,7 +1449,6 @@ static const struct component_ops g2d_component_ops = { static int g2d_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *res; struct g2d_data *g2d; int ret; @@ -1491,9 +1490,7 @@ static int g2d_probe(struct platform_device *pdev) clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - g2d->regs = devm_ioremap_resource(dev, res); + g2d->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(g2d->regs)) { ret = PTR_ERR(g2d->regs); goto err_put_clk; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 4396224227d1..0a0c042a3155 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -9,6 +9,7 @@ #include <linux/dma-buf.h> #include <linux/pfn_t.h> #include <linux/shmem_fs.h> +#include <linux/module.h> #include <drm/drm_prime.h> #include <drm/drm_vma_manager.h> @@ -17,6 +18,8 @@ #include "exynos_drm_drv.h" #include "exynos_drm_gem.h" +MODULE_IMPORT_NS(DMA_BUF); + static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap) { struct drm_device *dev = exynos_gem->base.dev; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 90d7bf906885..166a80262896 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -86,7 +86,6 @@ struct gsc_scaler { /* * A structure of gsc context. * - * @regs_res: register resources. * @regs: memory mapped io registers. * @gsc_clk: gsc gate clock. * @sc: scaler information.
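A note on the MODULE_IMPORT_NS(DMA_BUF) line added to exynos_drm_gem.c above (the same line lands in armada, etnaviv, drm_prime and the GEM helpers elsewhere in this merge): the dma-buf exports now live in a named symbol namespace, so any module calling them has to declare the import, roughly:

	#include <linux/module.h>
	#include <linux/dma-buf.h>

	/* Once per module that uses dma-buf symbols; without it, modpost
	 * warns at build time and the module can be refused at load time. */
	MODULE_IMPORT_NS(DMA_BUF);

This is also why several of these files gain a <linux/module.h> include alongside the import.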
@@ -103,7 +102,6 @@ struct gsc_context { struct exynos_drm_ipp_formats *formats; unsigned int num_formats; - struct resource *regs_res; void __iomem *regs; const char **clk_names; struct clk *clocks[GSC_MAX_CLOCKS]; @@ -1272,9 +1270,7 @@ static int gsc_probe(struct platform_device *pdev) } } - /* resource memory */ - ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); + ctx->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->regs)) return PTR_ERR(ctx->regs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index ee61be4cf152..dec7df35baa9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -278,7 +278,6 @@ static const struct component_ops rotator_component_ops = { static int rotator_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *regs_res; struct rot_context *rot; const struct rot_variant *variant; int irq; @@ -292,8 +291,7 @@ static int rotator_probe(struct platform_device *pdev) rot->formats = variant->formats; rot->num_formats = variant->num_formats; rot->dev = dev; - regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rot->regs = devm_ioremap_resource(dev, regs_res); + rot->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rot->regs)) return PTR_ERR(rot->regs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index f9ae5b038d59..3a7851b7dc66 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -485,7 +485,6 @@ static const struct component_ops scaler_component_ops = { static int scaler_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *regs_res; struct scaler_context *scaler; int irq; int ret, i; @@ -498,8 +497,7 @@ static int scaler_probe(struct platform_device *pdev) (struct scaler_data *)of_device_get_match_data(dev); scaler->dev = dev; - regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - scaler->regs = devm_ioremap_resource(dev, regs_res); + scaler->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(scaler->regs)) return PTR_ERR(scaler->regs); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index c769dec576de..7655142a4651 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1957,7 +1957,6 @@ static int hdmi_probe(struct platform_device *pdev) struct hdmi_audio_infoframe *audio_infoframe; struct device *dev = &pdev->dev; struct hdmi_context *hdata; - struct resource *res; int ret; hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); @@ -1979,8 +1978,7 @@ static int hdmi_probe(struct platform_device *pdev) return ret; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hdata->regs = devm_ioremap_resource(dev, res); + hdata->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hdata->regs)) { ret = PTR_ERR(hdata->regs); return ret; diff --git a/drivers/gpu/drm/hyperv/hyperv_drm.h b/drivers/gpu/drm/hyperv/hyperv_drm.h index 886add4f9cd0..d2d8582b36df 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm.h +++ b/drivers/gpu/drm/hyperv/hyperv_drm.h @@ -46,6 +46,7 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv); int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp); int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp, 
u32 w, u32 h, u32 pitch); +int hyperv_hide_hw_ptr(struct hv_device *hdev); int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect); int hyperv_connect_vsp(struct hv_device *hdev); diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c index b490c3d2286e..93f51e70a951 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c @@ -104,6 +104,7 @@ static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe, struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev); struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + hyperv_hide_hw_ptr(hv->hdev); hyperv_update_situation(hv->hdev, 1, hv->screen_depth, crtc_state->mode.hdisplay, crtc_state->mode.vdisplay, diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c index 6d4bdccfbd1a..c0155c6271bf 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c @@ -299,6 +299,55 @@ int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp, return 0; } +/* + * Hyper-V supports a hardware cursor feature. It's not used by Linux VM, + * but the Hyper-V host still draws a point as an extra mouse pointer, + * which is unwanted, especially when Xorg is running. + * + * The hyperv_fb driver uses synthvid_send_ptr() to hide the unwanted + * pointer, by setting msg.ptr_pos.is_visible = 1 and setting the + * msg.ptr_shape.data. Note: setting msg.ptr_pos.is_visible to 0 doesn't + * work in tests. + * + * Copy synthvid_send_ptr() to hyperv_drm and rename it to + * hyperv_hide_hw_ptr(). Note: hyperv_hide_hw_ptr() is also called in the + * handler of the SYNTHVID_FEATURE_CHANGE event, otherwise the host still + * draws an extra unwanted mouse pointer after the VM Connection window is + * closed and reopened. 
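+ * (Rather than toggling visibility off, the code below keeps the
+ * pointer "visible" but gives it a 1x1 ARGB shape, which is what
+ * actually makes the host-drawn cursor disappear.)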
+ */ +int hyperv_hide_hw_ptr(struct hv_device *hdev) +{ + struct synthvid_msg msg; + + memset(&msg, 0, sizeof(struct synthvid_msg)); + msg.vid_hdr.type = SYNTHVID_POINTER_POSITION; + msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) + + sizeof(struct synthvid_pointer_position); + msg.ptr_pos.is_visible = 1; + msg.ptr_pos.video_output = 0; + msg.ptr_pos.image_x = 0; + msg.ptr_pos.image_y = 0; + hyperv_sendpacket(hdev, &msg); + + memset(&msg, 0, sizeof(struct synthvid_msg)); + msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE; + msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) + + sizeof(struct synthvid_pointer_shape); + msg.ptr_shape.part_idx = SYNTHVID_CURSOR_COMPLETE; + msg.ptr_shape.is_argb = 1; + msg.ptr_shape.width = 1; + msg.ptr_shape.height = 1; + msg.ptr_shape.hot_x = 0; + msg.ptr_shape.hot_y = 0; + msg.ptr_shape.data[0] = 0; + msg.ptr_shape.data[1] = 1; + msg.ptr_shape.data[2] = 1; + msg.ptr_shape.data[3] = 1; + hyperv_sendpacket(hdev, &msg); + + return 0; +} + int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect) { struct hyperv_drm_device *hv = hv_get_drvdata(hdev); @@ -392,8 +441,11 @@ static void hyperv_receive_sub(struct hv_device *hdev) return; } - if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) + if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) { hv->dirt_needed = msg->feature_chg.is_dirt_needed; + if (hv->dirt_needed) + hyperv_hide_hw_ptr(hv->hdev); + } } static void hyperv_receive(void *ctx) diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index bf041b26ffec..84b6fc70cbf5 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -126,6 +126,7 @@ config DRM_I915_GVT_KVMGT depends on DRM_I915_GVT depends on KVM depends on VFIO_MDEV + select KVM_EXTERNAL_WRITE_TRACKING default n help Choose this option if you want to enable KVMGT support for diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index 88c427f3c346..f5b4dd5b4275 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -584,6 +584,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, else intel_encoder->enable = g4x_enable_hdmi; } + intel_encoder->shutdown = intel_hdmi_encoder_shutdown; intel_encoder->type = INTEL_OUTPUT_HDMI; intel_encoder->power_domain = intel_port_to_power_domain(port); diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index b99907c656bb..2b1423a43437 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1707,6 +1707,39 @@ static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata, child->aux_channel = 0; } +static u8 dvo_port_type(u8 dvo_port) +{ + switch (dvo_port) { + case DVO_PORT_HDMIA: + case DVO_PORT_HDMIB: + case DVO_PORT_HDMIC: + case DVO_PORT_HDMID: + case DVO_PORT_HDMIE: + case DVO_PORT_HDMIF: + case DVO_PORT_HDMIG: + case DVO_PORT_HDMIH: + case DVO_PORT_HDMII: + return DVO_PORT_HDMIA; + case DVO_PORT_DPA: + case DVO_PORT_DPB: + case DVO_PORT_DPC: + case DVO_PORT_DPD: + case DVO_PORT_DPE: + case DVO_PORT_DPF: + case DVO_PORT_DPG: + case DVO_PORT_DPH: + case DVO_PORT_DPI: + return DVO_PORT_DPA; + case DVO_PORT_MIPIA: + case DVO_PORT_MIPIB: + case DVO_PORT_MIPIC: + case DVO_PORT_MIPID: + return DVO_PORT_MIPIA; + default: + return dvo_port; + } +} + static enum port __dvo_port_to_port(int n_ports, int n_dvo, const int port_mapping[][3], u8 dvo_port) { @@ -1930,50 +1963,6 @@ static int _intel_bios_max_tmds_clock(const struct 
intel_bios_encoder_data *devd } } -static enum port get_edp_port(struct drm_i915_private *i915) -{ - const struct intel_bios_encoder_data *devdata; - enum port port; - - for_each_port(port) { - devdata = i915->vbt.ports[port]; - - if (devdata && intel_bios_encoder_supports_edp(devdata)) - return port; - } - - return PORT_NONE; -} - -/* - * FIXME: The power sequencer and backlight code currently do not support more - * than one set registers, at least not on anything other than VLV/CHV. It will - * clobber the registers. As a temporary workaround, gracefully prevent more - * than one eDP from being registered. - */ -static void sanitize_dual_edp(struct intel_bios_encoder_data *devdata, - enum port port) -{ - struct drm_i915_private *i915 = devdata->i915; - struct child_device_config *child = &devdata->child; - enum port p; - - /* CHV might not clobber PPS registers. */ - if (IS_CHERRYVIEW(i915)) - return; - - p = get_edp_port(i915); - if (p == PORT_NONE) - return; - - drm_dbg_kms(&i915->drm, "both ports %c and %c configured as eDP, " - "disabling port %c eDP\n", port_name(p), port_name(port), - port_name(port)); - - child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT; - child->device_type &= ~DEVICE_TYPE_INTERNAL_CONNECTOR; -} - static bool is_port_valid(struct drm_i915_private *i915, enum port port) { /* @@ -2031,9 +2020,6 @@ static void parse_ddi_port(struct drm_i915_private *i915, supports_typec_usb, supports_tbt, devdata->dsc != NULL); - if (is_edp) - sanitize_dual_edp(devdata, port); - if (is_dvi) sanitize_ddc_pin(devdata, port); @@ -2670,35 +2656,17 @@ bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port) return false; } -static bool child_dev_is_dp_dual_mode(const struct child_device_config *child, - enum port port) +static bool child_dev_is_dp_dual_mode(const struct child_device_config *child) { - static const struct { - u16 dp, hdmi; - } port_mapping[] = { - /* - * Buggy VBTs may declare DP ports as having - * HDMI type dvo_port :( So let's check both. - */ - [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, - [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, - [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, - [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, - [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, - }; - - if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) - return false; - if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) != (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) return false; - if (child->dvo_port == port_mapping[port].dp) + if (dvo_port_type(child->dvo_port) == DVO_PORT_DPA) return true; /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */ - if (child->dvo_port == port_mapping[port].hdmi && + if (dvo_port_type(child->dvo_port) == DVO_PORT_HDMIA && child->aux_channel != 0) return true; @@ -2708,10 +2676,36 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child, bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915, enum port port) { + static const struct { + u16 dp, hdmi; + } port_mapping[] = { + /* + * Buggy VBTs may declare DP ports as having + * HDMI type dvo_port :( So let's check both. 
+ */ + [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, + [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, + [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, + [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, + [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, + }; const struct intel_bios_encoder_data *devdata; + if (HAS_DDI(i915)) { + const struct intel_bios_encoder_data *devdata; + + devdata = intel_bios_encoder_data_lookup(i915, port); + + return devdata && child_dev_is_dp_dual_mode(&devdata->child); + } + + if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) + return false; + list_for_each_entry(devdata, &i915->vbt.display_devices, node) { - if (child_dev_is_dp_dual_mode(&devdata->child, port)) + if ((devdata->child.dvo_port == port_mapping[port].dp || + devdata->child.dvo_port == port_mapping[port].hdmi) && + child_dev_is_dp_dual_mode(&devdata->child)) return true; } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 9e466d829019..868dd43a7542 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2885,7 +2885,7 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) return freq; } -static struct intel_cdclk_funcs tgl_cdclk_funcs = { +static const struct intel_cdclk_funcs tgl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, @@ -2893,7 +2893,7 @@ static struct intel_cdclk_funcs tgl_cdclk_funcs = { .calc_voltage_level = tgl_calc_voltage_level, }; -static struct intel_cdclk_funcs ehl_cdclk_funcs = { +static const struct intel_cdclk_funcs ehl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, @@ -2901,7 +2901,7 @@ static struct intel_cdclk_funcs ehl_cdclk_funcs = { .calc_voltage_level = ehl_calc_voltage_level, }; -static struct intel_cdclk_funcs icl_cdclk_funcs = { +static const struct intel_cdclk_funcs icl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, @@ -2909,7 +2909,7 @@ static struct intel_cdclk_funcs icl_cdclk_funcs = { .calc_voltage_level = icl_calc_voltage_level, }; -static struct intel_cdclk_funcs bxt_cdclk_funcs = { +static const struct intel_cdclk_funcs bxt_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, @@ -2917,54 +2917,54 @@ static struct intel_cdclk_funcs bxt_cdclk_funcs = { .calc_voltage_level = bxt_calc_voltage_level, }; -static struct intel_cdclk_funcs skl_cdclk_funcs = { +static const struct intel_cdclk_funcs skl_cdclk_funcs = { .get_cdclk = skl_get_cdclk, .set_cdclk = skl_set_cdclk, .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, .modeset_calc_cdclk = skl_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs bdw_cdclk_funcs = { +static const struct intel_cdclk_funcs bdw_cdclk_funcs = { .get_cdclk = bdw_get_cdclk, .set_cdclk = bdw_set_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = bdw_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs chv_cdclk_funcs = { +static const struct intel_cdclk_funcs chv_cdclk_funcs = { .get_cdclk = vlv_get_cdclk, .set_cdclk = chv_set_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = vlv_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs vlv_cdclk_funcs = { +static const struct intel_cdclk_funcs vlv_cdclk_funcs = { .get_cdclk = vlv_get_cdclk, .set_cdclk = vlv_set_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 
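/* Every cdclk vtable in this block gains const, letting these
 * function tables live in rodata rather than writable data. */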
.modeset_calc_cdclk = vlv_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs hsw_cdclk_funcs = { +static const struct intel_cdclk_funcs hsw_cdclk_funcs = { .get_cdclk = hsw_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; /* SNB, IVB, 965G, 945G */ -static struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = { +static const struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = { .get_cdclk = fixed_400mhz_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs ilk_cdclk_funcs = { +static const struct intel_cdclk_funcs ilk_cdclk_funcs = { .get_cdclk = fixed_450mhz_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs gm45_cdclk_funcs = { +static const struct intel_cdclk_funcs gm45_cdclk_funcs = { .get_cdclk = gm45_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, @@ -2972,7 +2972,7 @@ static struct intel_cdclk_funcs gm45_cdclk_funcs = { /* G45 uses G33 */ -static struct intel_cdclk_funcs i965gm_cdclk_funcs = { +static const struct intel_cdclk_funcs i965gm_cdclk_funcs = { .get_cdclk = i965gm_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, @@ -2980,19 +2980,19 @@ static struct intel_cdclk_funcs i965gm_cdclk_funcs = { /* i965G uses fixed 400 */ -static struct intel_cdclk_funcs pnv_cdclk_funcs = { +static const struct intel_cdclk_funcs pnv_cdclk_funcs = { .get_cdclk = pnv_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs g33_cdclk_funcs = { +static const struct intel_cdclk_funcs g33_cdclk_funcs = { .get_cdclk = g33_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs i945gm_cdclk_funcs = { +static const struct intel_cdclk_funcs i945gm_cdclk_funcs = { .get_cdclk = i945gm_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, @@ -3000,37 +3000,37 @@ static struct intel_cdclk_funcs i945gm_cdclk_funcs = { /* i945G uses fixed 400 */ -static struct intel_cdclk_funcs i915gm_cdclk_funcs = { +static const struct intel_cdclk_funcs i915gm_cdclk_funcs = { .get_cdclk = i915gm_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs i915g_cdclk_funcs = { +static const struct intel_cdclk_funcs i915g_cdclk_funcs = { .get_cdclk = fixed_333mhz_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs i865g_cdclk_funcs = { +static const struct intel_cdclk_funcs i865g_cdclk_funcs = { .get_cdclk = fixed_266mhz_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs i85x_cdclk_funcs = { +static const struct intel_cdclk_funcs i85x_cdclk_funcs = { .get_cdclk = i85x_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs i845g_cdclk_funcs = { +static const struct intel_cdclk_funcs i845g_cdclk_funcs = { .get_cdclk = fixed_200mhz_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = 
fixed_modeset_calc_cdclk, }; -static struct intel_cdclk_funcs i830_cdclk_funcs = { +static const struct intel_cdclk_funcs i830_cdclk_funcs = { .get_cdclk = fixed_133mhz_get_cdclk, .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 1dcfe31e6c6f..cfb567df71b3 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4361,6 +4361,7 @@ static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder) enum phy phy = intel_port_to_phy(i915, encoder->port); intel_dp_encoder_shutdown(encoder); + intel_hdmi_encoder_shutdown(encoder); if (!intel_phy_is_tc(i915, phy)) return; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ff598b6cd953..ec403e46a328 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -848,9 +848,16 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info int i; for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { + unsigned int plane_size; + + plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height; + if (plane_size == 0) + continue; + if (rem_info->plane_alignment) size = ALIGN(size, rem_info->plane_alignment); - size += rem_info->plane[i].dst_stride * rem_info->plane[i].height; + + size += plane_size; } return size; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 7732573e3258..8195452b2d4c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -120,6 +120,12 @@ bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state) return crtc_state->port_clock >= 1000000; } +static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) +{ + intel_dp->sink_rates[0] = 162000; + intel_dp->num_sink_rates = 1; +} + /* update sink rates from dpcd */ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) { @@ -281,7 +287,7 @@ intel_dp_max_data_rate(int max_link_rate, int max_lanes) */ int max_link_rate_kbps = max_link_rate * 10; - max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 9671, 10000); + max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000); max_link_rate = max_link_rate_kbps / 8; } @@ -1858,6 +1864,12 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, intel_dp->lane_count = lane_count; } +static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp) +{ + intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); + intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); +} + /* Enable backlight PWM and backlight PP control. */ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) @@ -2007,6 +2019,9 @@ void intel_dp_sync_state(struct intel_encoder *encoder, { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + if (!crtc_state) + return; + /* * Don't clobber DPCD if it's been already read out during output * setup (eDP) or detect. 
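A side note on the mul_u32_u32() change in the intel_dp_max_data_rate() hunk above: for UHBR links (port_clock >= 1000000, per intel_dp_is_uhbr() earlier in this diff), max_link_rate_kbps is at least 10,000,000, so the plain 32-bit product with 9671 wraps before DIV_ROUND_CLOSEST_ULL() ever sees it. A minimal standalone sketch of the difference, assuming only kernel headers (mul_u32_u32() comes from <linux/math64.h>):

	u32 kbps = 10000000;                  /* UHBR10 link rate in kbps */
	u32 wrapped = kbps * 9671;            /* ~9.7e10, truncated to 32 bits */
	u64 exact = mul_u32_u32(kbps, 9671);  /* widened to u64 before multiplying */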
@@ -2014,8 +2029,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder, if (intel_dp->dpcd[DP_DPCD_REV] == 0) intel_dp_get_dpcd(intel_dp); - intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); - intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); + intel_dp_reset_max_link_params(intel_dp); } bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, @@ -2553,6 +2567,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) */ intel_psr_init_dpcd(intel_dp); + /* Clear the default sink rates */ + intel_dp->num_sink_rates = 0; + /* Read the eDP 1.4+ supported link rates. */ if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; @@ -2588,6 +2605,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) intel_dp_set_sink_rates(intel_dp); intel_dp_set_common_rates(intel_dp); + intel_dp_reset_max_link_params(intel_dp); /* Read the eDP DSC DPCD registers */ if (DISPLAY_VER(dev_priv) >= 10) @@ -4329,12 +4347,7 @@ intel_dp_detect(struct drm_connector *connector, * supports link training fallback params. */ if (intel_dp->reset_link_params || intel_dp->is_mst) { - /* Initial max link lane count */ - intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); - - /* Initial max link rate */ - intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); - + intel_dp_reset_max_link_params(intel_dp); intel_dp->reset_link_params = false; } @@ -5000,6 +5013,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, } intel_dp_set_source_rates(intel_dp); + intel_dp_set_default_sink_rates(intel_dp); + intel_dp_set_common_rates(intel_dp); + intel_dp_reset_max_link_params(intel_dp); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 0de0b4ff4d73..89d701e8ae9d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -378,7 +378,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state, drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port); - ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1); if (ret) { drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret); } @@ -518,7 +518,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, intel_dp->active_mst_links++; - ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1); /* * Before Gen 12 this is not done as part of diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index fa1f375e696b..cb511b2b7069 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -378,8 +378,8 @@ static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_pl intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane); intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane); - *w = main_width / main_hsub / hsub; - *h = main_height / main_vsub / vsub; + *w = DIV_ROUND_UP(main_width, main_hsub * hsub); + *h = DIV_ROUND_UP(main_height, main_vsub * vsub); } static u32 intel_adjust_tile_offset(int *x, int *y, diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index d2e61f6c6e08..371736bdc01f 100644 --- 
a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1246,12 +1246,13 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) { struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); - struct i2c_adapter *adapter = - intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); + struct i2c_adapter *adapter; if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI) return; + adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); + drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n", enable ? "Enabling" : "Disabling"); @@ -2258,6 +2259,17 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, return 0; } +void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + + /* + * Give a hand to buggy BIOSen which forget to turn + * the TMDS output buffers back on after a reboot. + */ + intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); +} + static void intel_hdmi_unset_edid(struct drm_connector *connector) { diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index b43a180d007e..2bf440eb400a 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -28,6 +28,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); +void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder); bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, struct drm_connector *connector, bool high_tmds_clock_ratio, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 1adcd8e02d29..e8a58c997170 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -7,11 +7,16 @@ #include <linux/dma-buf.h> #include <linux/highmem.h> #include <linux/dma-resv.h> +#include <linux/module.h> + +#include <asm/smp.h> #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" +MODULE_IMPORT_NS(DMA_BUF); + I915_SELFTEST_DECLARE(static bool force_different_devices;) static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 488acd39ff67..5634d14052bc 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -365,7 +365,8 @@ static int __intel_context_active(struct i915_active *active) } static int __i915_sw_fence_call -sw_fence_dummy_notify(struct i915_sw_fence *sf, enum i915_sw_fence_notify state) +sw_fence_dummy_notify(struct i915_sw_fence *sf, + enum i915_sw_fence_notify state) { return NOTIFY_DONE; } diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index f17383e76eb7..57c97554393b 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -1396,6 +1396,9 @@ remap_pages(struct drm_i915_gem_object *obj, { unsigned int row; + if (!width || !height) + return sg; + if (alignment_pad) { st->nents++; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index d7710debcd47..38b47e73e35d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -2373,6 +2373,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) unsigned long flags; bool disabled; + lockdep_assert_held(&guc->submission_state.lock); GEM_BUG_ON(!intel_gt_pm_is_awake(gt)); GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id)); GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id)); @@ -2388,7 +2389,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) } spin_unlock_irqrestore(&ce->guc_state.lock, flags); if (unlikely(disabled)) { - release_guc_id(guc, ce); + __release_guc_id(guc, ce); __guc_context_destroy(ce); return; } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 0d18e13e3468..6c804102528b 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -576,7 +576,7 @@ retry: /* No one is going to touch shadow bb from now on. */ i915_gem_object_flush_map(bb->obj); - i915_gem_object_unlock(bb->obj); + i915_gem_ww_ctx_fini(&ww); } } return 0; @@ -630,7 +630,7 @@ retry: return ret; } - i915_gem_object_unlock(wa_ctx->indirect_ctx.obj); + i915_gem_ww_ctx_fini(&ww); /* FIXME: we are not tracking our pinned VMA leaving it * up to the core to fix up the stray pin_count upon diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 9795f456cccf..8104981a6604 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -794,7 +794,6 @@ DECLARE_EVENT_CLASS(i915_request, TP_STRUCT__entry( __field(u32, dev) __field(u64, ctx) - __field(u32, guc_id) __field(u16, class) __field(u16, instance) __field(u32, seqno) @@ -805,16 +804,14 @@ DECLARE_EVENT_CLASS(i915_request, __entry->dev = rq->engine->i915->drm.primary->index; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; - __entry->guc_id = rq->context->guc_id.id; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; __entry->tail = rq->tail; ), - TP_printk("dev=%u, engine=%u:%u, guc_id=%u, ctx=%llu, seqno=%u, tail=%u", + TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, tail=%u", __entry->dev, __entry->class, __entry->instance, - __entry->guc_id, __entry->ctx, __entry->seqno, - __entry->tail) + __entry->ctx, __entry->seqno, __entry->tail) ); DEFINE_EVENT(i915_request, i915_request_add, diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 62f189e064a9..7a5925072466 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -30,6 +30,7 @@ #include <linux/sched.h> #include <linux/types.h> #include <linux/workqueue.h> +#include <linux/sched/clock.h> struct drm_i915_private; struct timer_list; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 90546fa58fc1..bef795e265a6 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -56,8 +56,6 @@ void i915_vma_free(struct i915_vma *vma) static void vma_print_allocator(struct i915_vma *vma, const char *reason) { - unsigned long *entries; - unsigned int nr_entries; char buf[512]; if (!vma->node.stack) { @@ -66,8 +64,7 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason) return; } - nr_entries = stack_depot_fetch(vma->node.stack, &entries); - stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0); + stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0); DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n", vma->node.start, vma->node.size, 
reason, buf); } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f90fe39cf8ca..ecbb3d141632 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -77,6 +77,8 @@ struct intel_wm_config { static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) { + enum pipe pipe; + if (HAS_LLC(dev_priv)) { /* * WaCompressedResourceDisplayNewHashMode:skl,kbl @@ -90,6 +92,16 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) SKL_DE_COMPRESSED_HASH_MODE); } + for_each_pipe(dev_priv, pipe) { + /* + * "Plane N strech max must be programmed to 11b (x1) + * when Async flips are enabled on that plane." + */ + if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active()) + intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), + SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1); + } + /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index eaf7688f517d..0d85f3c5c526 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -65,16 +65,6 @@ static noinline depot_stack_handle_t __save_depot_stack(void) return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN); } -static void __print_depot_stack(depot_stack_handle_t stack, - char *buf, int sz, int indent) -{ - unsigned long *entries; - unsigned int nr_entries; - - nr_entries = stack_depot_fetch(stack, &entries); - stack_trace_snprint(buf, sz, entries, nr_entries, indent); -} - static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { spin_lock_init(&rpm->debug.lock); @@ -146,12 +136,12 @@ static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, if (!buf) return; - __print_depot_stack(stack, buf, PAGE_SIZE, 2); + stack_depot_snprint(stack, buf, PAGE_SIZE, 2); DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf); stack = READ_ONCE(rpm->debug.last_release); if (stack) { - __print_depot_stack(stack, buf, PAGE_SIZE, 2); + stack_depot_snprint(stack, buf, PAGE_SIZE, 2); DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf); } @@ -183,12 +173,12 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p, return; if (dbg->last_acquire) { - __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2); + stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2); drm_printf(p, "Wakeref last acquired:\n%s", buf); } if (dbg->last_release) { - __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2); + stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2); drm_printf(p, "Wakeref last released:\n%s", buf); } @@ -203,7 +193,7 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p, rep = 1; while (i + 1 < dbg->count && dbg->owners[i + 1] == stack) rep++, i++; - __print_depot_stack(stack, buf, PAGE_SIZE, 2); + stack_depot_snprint(stack, buf, PAGE_SIZE, 2); drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf); } diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 9558e9e1b431..cb685fe2039b 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -81,7 +81,6 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) struct drm_plane_state *old_plane_state, *new_plane_state; bool plane_disabling = false; int i; - bool fence_cookie = dma_fence_begin_signalling(); 
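/* The matching dma_fence_end_signalling(fence_cookie) call goes away in
 * the second hunk of this file just below, so the pair of lockdep
 * fence-signalling annotations bracketing this commit_tail is removed
 * together. */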
drm_atomic_helper_commit_modeset_disables(dev, state); @@ -112,7 +111,6 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) } drm_atomic_helper_commit_hw_done(state); - dma_fence_end_signalling(fence_cookie); } static const struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = { diff --git a/drivers/gpu/drm/kmb/kmb_crtc.c b/drivers/gpu/drm/kmb/kmb_crtc.c index 44327bc629ca..06613ffeaaf8 100644 --- a/drivers/gpu/drm/kmb/kmb_crtc.c +++ b/drivers/gpu/drm/kmb/kmb_crtc.c @@ -66,7 +66,8 @@ static const struct drm_crtc_funcs kmb_crtc_funcs = { .disable_vblank = kmb_crtc_disable_vblank, }; -static void kmb_crtc_set_mode(struct drm_crtc *crtc) +static void kmb_crtc_set_mode(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) { struct drm_device *dev = crtc->dev; struct drm_display_mode *m = &crtc->state->adjusted_mode; @@ -75,7 +76,7 @@ static void kmb_crtc_set_mode(struct drm_crtc *crtc) unsigned int val = 0; /* Initialize mipi */ - kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz); + kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz, old_state); drm_info(dev, "vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n", m->crtc_vsync_start - m->crtc_vdisplay, @@ -138,7 +139,7 @@ static void kmb_crtc_atomic_enable(struct drm_crtc *crtc, struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc); clk_prepare_enable(kmb->kmb_clk.clk_lcd); - kmb_crtc_set_mode(crtc); + kmb_crtc_set_mode(crtc, state); drm_crtc_vblank_on(crtc); } @@ -185,11 +186,45 @@ static void kmb_crtc_atomic_flush(struct drm_crtc *crtc, spin_unlock_irq(&crtc->dev->event_lock); } +static enum drm_mode_status + kmb_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + int refresh; + struct drm_device *dev = crtc->dev; + int vfp = mode->vsync_start - mode->vdisplay; + + if (mode->vdisplay < KMB_CRTC_MAX_HEIGHT) { + drm_dbg(dev, "height = %d less than %d", + mode->vdisplay, KMB_CRTC_MAX_HEIGHT); + return MODE_BAD_VVALUE; + } + if (mode->hdisplay < KMB_CRTC_MAX_WIDTH) { + drm_dbg(dev, "width = %d less than %d", + mode->hdisplay, KMB_CRTC_MAX_WIDTH); + return MODE_BAD_HVALUE; + } + refresh = drm_mode_vrefresh(mode); + if (refresh < KMB_MIN_VREFRESH || refresh > KMB_MAX_VREFRESH) { + drm_dbg(dev, "refresh = %d less than %d or greater than %d", + refresh, KMB_MIN_VREFRESH, KMB_MAX_VREFRESH); + return MODE_BAD; + } + + if (vfp < KMB_CRTC_MIN_VFP) { + drm_dbg(dev, "vfp = %d less than %d", vfp, KMB_CRTC_MIN_VFP); + return MODE_BAD; + } + + return MODE_OK; +} + static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = { .atomic_begin = kmb_crtc_atomic_begin, .atomic_enable = kmb_crtc_atomic_enable, .atomic_disable = kmb_crtc_atomic_disable, .atomic_flush = kmb_crtc_atomic_flush, + .mode_valid = kmb_crtc_mode_valid, }; int kmb_setup_crtc(struct drm_device *drm) diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index 7e1fda9f9a3d..ed2424350773 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -173,10 +173,10 @@ static int kmb_setup_mode_config(struct drm_device *drm) ret = drmm_mode_config_init(drm); if (ret) return ret; - drm->mode_config.min_width = KMB_MIN_WIDTH; - drm->mode_config.min_height = KMB_MIN_HEIGHT; - drm->mode_config.max_width = KMB_MAX_WIDTH; - drm->mode_config.max_height = KMB_MAX_HEIGHT; + drm->mode_config.min_width = KMB_FB_MIN_WIDTH; + drm->mode_config.min_height = KMB_FB_MIN_HEIGHT; + drm->mode_config.max_width = KMB_FB_MAX_WIDTH; + drm->mode_config.max_height = KMB_FB_MAX_HEIGHT; 
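/* This series splits the old shared KMB_MIN_/KMB_MAX_ limits in two:
 * the KMB_FB_* bounds (1x1 up to 1920x1080) advertised to the core
 * here via mode_config, and the stricter KMB_CRTC_* timing bounds
 * enforced in kmb_crtc_mode_valid() above. */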
drm->mode_config.preferred_depth = 24; drm->mode_config.funcs = &kmb_mode_config_funcs; @@ -382,7 +382,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev) if (val & LAYER3_DMA_FIFO_UNDERFLOW) drm_dbg(&kmb->drm, "LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val); - if (val & LAYER3_DMA_FIFO_UNDERFLOW) + if (val & LAYER3_DMA_FIFO_OVERFLOW) drm_dbg(&kmb->drm, "LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val); } diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h index ebbaa5f422d5..bf085e95b28f 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.h +++ b/drivers/gpu/drm/kmb/kmb_drv.h @@ -20,6 +20,18 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 1 +/* Platform definitions */ +#define KMB_CRTC_MIN_VFP 4 +#define KMB_CRTC_MAX_WIDTH 1920 /* max width in pixels */ +#define KMB_CRTC_MAX_HEIGHT 1080 /* max height in pixels */ +#define KMB_CRTC_MIN_WIDTH 1920 +#define KMB_CRTC_MIN_HEIGHT 1080 +#define KMB_FB_MAX_WIDTH 1920 +#define KMB_FB_MAX_HEIGHT 1080 +#define KMB_FB_MIN_WIDTH 1 +#define KMB_FB_MIN_HEIGHT 1 +#define KMB_MIN_VREFRESH 59 /*vertical refresh in Hz */ +#define KMB_MAX_VREFRESH 60 /*vertical refresh in Hz */ #define KMB_LCD_DEFAULT_CLK 200000000 #define KMB_SYS_CLK_MHZ 500 @@ -45,6 +57,7 @@ struct kmb_drm_private { spinlock_t irq_lock; int irq_lcd; int sys_clk_mhz; + struct disp_cfg init_disp_cfg[KMB_MAX_PLANES]; struct layer_status plane_status[KMB_MAX_PLANES]; int kmb_under_flow; int kmb_flush_done; diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c index 1793cd31b117..f6071882054c 100644 --- a/drivers/gpu/drm/kmb/kmb_dsi.c +++ b/drivers/gpu/drm/kmb/kmb_dsi.c @@ -482,6 +482,10 @@ static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi, return 0; } +#define CLK_DIFF_LOW 50 +#define CLK_DIFF_HI 60 +#define SYSCLK_500 500 + static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen, struct mipi_tx_frame_timing_cfg *fg_cfg) { @@ -492,7 +496,12 @@ static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen, /* 500 Mhz system clock minus 50 to account for the difference in * MIPI clock speed in RTL tests */ - sysclk = kmb_dsi->sys_clk_mhz - 50; + if (kmb_dsi->sys_clk_mhz == SYSCLK_500) { + sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_LOW; + } else { + /* 700 Mhz clk*/ + sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_HI; + } /* PPL-Pixel Packing Layer, LLP-Low Level Protocol * Frame genartor timing parameters are clocked on the system clock, @@ -1322,7 +1331,8 @@ static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi, return 0; } -static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi) +static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi, + struct drm_atomic_state *old_state) { struct regmap *msscam; @@ -1331,7 +1341,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi) dev_dbg(kmb_dsi->dev, "failed to get msscam syscon"); return; } - + drm_atomic_bridge_chain_enable(adv_bridge, old_state); /* DISABLE MIPI->CIF CONNECTION */ regmap_write(msscam, MSS_MIPI_CIF_CFG, 0); @@ -1342,7 +1352,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi) } int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode, - int sys_clk_mhz) + int sys_clk_mhz, struct drm_atomic_state *old_state) { u64 data_rate; @@ -1384,18 +1394,13 @@ int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode, mipi_tx_init_cfg.lane_rate_mbps = data_rate; } - kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0); - kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0); - kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0); - kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 
0); - /* Initialize mipi controller */ mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg); /* Dphy initialization */ mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg); - connect_lcd_to_mipi(kmb_dsi); + connect_lcd_to_mipi(kmb_dsi, old_state); dev_info(kmb_dsi->dev, "mipi hw initialized"); return 0; diff --git a/drivers/gpu/drm/kmb/kmb_dsi.h b/drivers/gpu/drm/kmb/kmb_dsi.h index 66b7c500d9bc..09dc88743d77 100644 --- a/drivers/gpu/drm/kmb/kmb_dsi.h +++ b/drivers/gpu/drm/kmb/kmb_dsi.h @@ -380,7 +380,7 @@ int kmb_dsi_host_bridge_init(struct device *dev); struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev); void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi); int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode, - int sys_clk_mhz); + int sys_clk_mhz, struct drm_atomic_state *old_state); int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi); int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi); int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi); diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c index ecee6782612d..00404ba4126d 100644 --- a/drivers/gpu/drm/kmb/kmb_plane.c +++ b/drivers/gpu/drm/kmb/kmb_plane.c @@ -67,8 +67,21 @@ static const u32 kmb_formats_v[] = { static unsigned int check_pixel_format(struct drm_plane *plane, u32 format) { + struct kmb_drm_private *kmb; + struct kmb_plane *kmb_plane = to_kmb_plane(plane); int i; + int plane_id = kmb_plane->id; + struct disp_cfg init_disp_cfg; + kmb = to_kmb(plane->dev); + init_disp_cfg = kmb->init_disp_cfg[plane_id]; + /* Due to HW limitations, changing pixel format after initial + * plane configuration is not supported. + */ + if (init_disp_cfg.format && init_disp_cfg.format != format) { + drm_dbg(&kmb->drm, "Cannot change format after initial plane configuration"); + return -EINVAL; + } for (i = 0; i < plane->format_count; i++) { if (plane->format_types[i] == format) return 0; @@ -81,11 +94,17 @@ static int kmb_plane_atomic_check(struct drm_plane *plane, { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct kmb_drm_private *kmb; + struct kmb_plane *kmb_plane = to_kmb_plane(plane); + int plane_id = kmb_plane->id; + struct disp_cfg init_disp_cfg; struct drm_framebuffer *fb; int ret; struct drm_crtc_state *crtc_state; bool can_position; + kmb = to_kmb(plane->dev); + init_disp_cfg = kmb->init_disp_cfg[plane_id]; fb = new_plane_state->fb; if (!fb || !new_plane_state->crtc) return 0; @@ -94,10 +113,21 @@ static int kmb_plane_atomic_check(struct drm_plane *plane, if (ret) return ret; - if (new_plane_state->crtc_w > KMB_MAX_WIDTH || new_plane_state->crtc_h > KMB_MAX_HEIGHT) + if (new_plane_state->crtc_w > KMB_FB_MAX_WIDTH || + new_plane_state->crtc_h > KMB_FB_MAX_HEIGHT || + new_plane_state->crtc_w < KMB_FB_MIN_WIDTH || + new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT) return -EINVAL; - if (new_plane_state->crtc_w < KMB_MIN_WIDTH || new_plane_state->crtc_h < KMB_MIN_HEIGHT) + + /* Due to HW limitations, changing plane height or width after + * initial plane configuration is not supported. 
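+ * (The reference width/height/format are captured in
+ * kmb->init_disp_cfg[plane_id] at the end of the first
+ * kmb_plane_atomic_update() call, later in this file.)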
+ */ + if ((init_disp_cfg.width && init_disp_cfg.height) && + (init_disp_cfg.width != fb->width || + init_disp_cfg.height != fb->height)) { + drm_dbg(&kmb->drm, "Cannot change plane height or width after initial configuration"); return -EINVAL; + } can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY); crtc_state = drm_atomic_get_existing_crtc_state(state, @@ -277,6 +307,44 @@ static void config_csc(struct kmb_drm_private *kmb, int plane_id) kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF3(plane_id), csc_coef_lcd[11]); } +static void kmb_plane_set_alpha(struct kmb_drm_private *kmb, + const struct drm_plane_state *state, + unsigned char plane_id, + unsigned int *val) +{ + u16 plane_alpha = state->alpha; + u16 pixel_blend_mode = state->pixel_blend_mode; + int has_alpha = state->fb->format->has_alpha; + + if (plane_alpha != DRM_BLEND_ALPHA_OPAQUE) + *val |= LCD_LAYER_ALPHA_STATIC; + + if (has_alpha) { + switch (pixel_blend_mode) { + case DRM_MODE_BLEND_PIXEL_NONE: + break; + case DRM_MODE_BLEND_PREMULTI: + *val |= LCD_LAYER_ALPHA_EMBED | LCD_LAYER_ALPHA_PREMULT; + break; + case DRM_MODE_BLEND_COVERAGE: + *val |= LCD_LAYER_ALPHA_EMBED; + break; + default: + DRM_DEBUG("Missing pixel blend mode case (%s == %ld)\n", + __stringify(pixel_blend_mode), + (long)pixel_blend_mode); + break; + } + } + + if (plane_alpha == DRM_BLEND_ALPHA_OPAQUE && !has_alpha) { + *val &= LCD_LAYER_ALPHA_DISABLED; + return; + } + + kmb_write_lcd(kmb, LCD_LAYERn_ALPHA(plane_id), plane_alpha); +} + static void kmb_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -296,6 +364,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, unsigned char plane_id; int num_planes; static dma_addr_t addr[MAX_SUB_PLANES]; + struct disp_cfg *init_disp_cfg; if (!plane || !new_plane_state || !old_plane_state) return; @@ -303,11 +372,12 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, fb = new_plane_state->fb; if (!fb) return; + num_planes = fb->format->num_planes; kmb_plane = to_kmb_plane(plane); - plane_id = kmb_plane->id; kmb = to_kmb(plane->dev); + plane_id = kmb_plane->id; spin_lock_irq(&kmb->irq_lock); if (kmb->kmb_under_flow || kmb->kmb_flush_done) { @@ -317,7 +387,8 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, } spin_unlock_irq(&kmb->irq_lock); - src_w = (new_plane_state->src_w >> 16); + init_disp_cfg = &kmb->init_disp_cfg[plane_id]; + src_w = new_plane_state->src_w >> 16; src_h = new_plane_state->src_h >> 16; crtc_x = new_plane_state->crtc_x; crtc_y = new_plane_state->crtc_y; @@ -400,20 +471,32 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, config_csc(kmb, plane_id); } + kmb_plane_set_alpha(kmb, plane->state, plane_id, &val); + kmb_write_lcd(kmb, LCD_LAYERn_CFG(plane_id), val); + /* Configure LCD_CONTROL */ + ctrl = kmb_read_lcd(kmb, LCD_CONTROL); + + /* Set layer blending config */ + ctrl &= ~LCD_CTRL_ALPHA_ALL; + ctrl |= LCD_CTRL_ALPHA_BOTTOM_VL1 | + LCD_CTRL_ALPHA_BLEND_VL2; + + ctrl &= ~LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE; + switch (plane_id) { case LAYER_0: - ctrl = LCD_CTRL_VL1_ENABLE; + ctrl |= LCD_CTRL_VL1_ENABLE; break; case LAYER_1: - ctrl = LCD_CTRL_VL2_ENABLE; + ctrl |= LCD_CTRL_VL2_ENABLE; break; case LAYER_2: - ctrl = LCD_CTRL_GL1_ENABLE; + ctrl |= LCD_CTRL_GL1_ENABLE; break; case LAYER_3: - ctrl = LCD_CTRL_GL2_ENABLE; + ctrl |= LCD_CTRL_GL2_ENABLE; break; } @@ -425,7 +508,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, */ ctrl |= LCD_CTRL_VHSYNC_IDLE_LVL; - kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl); + 
kmb_write_lcd(kmb, LCD_CONTROL, ctrl); /* Enable pipeline AXI read transactions for the DMA * after setting graphics layers. This must be done @@ -448,6 +531,16 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, /* Enable DMA */ kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg); + + /* Save initial display config */ + if (!init_disp_cfg->width || + !init_disp_cfg->height || + !init_disp_cfg->format) { + init_disp_cfg->width = width; + init_disp_cfg->height = height; + init_disp_cfg->format = fb->format->format; + } + drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg, kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id))); @@ -490,6 +583,9 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm) enum drm_plane_type plane_type; const u32 *plane_formats; int num_plane_formats; + unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE); for (i = 0; i < KMB_MAX_PLANES; i++) { plane = drmm_kzalloc(drm, sizeof(*plane), GFP_KERNEL); @@ -521,8 +617,16 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm) drm_dbg(drm, "%s : %d i=%d type=%d", __func__, __LINE__, i, plane_type); + drm_plane_create_alpha_property(&plane->base_plane); + + drm_plane_create_blend_mode_property(&plane->base_plane, + blend_caps); + + drm_plane_create_zpos_immutable_property(&plane->base_plane, i); + drm_plane_helper_add(&plane->base_plane, &kmb_plane_helper_funcs); + if (plane_type == DRM_PLANE_TYPE_PRIMARY) { primary = plane; kmb->plane = plane; diff --git a/drivers/gpu/drm/kmb/kmb_plane.h b/drivers/gpu/drm/kmb/kmb_plane.h index 486490f7a3ec..b51144044fe8 100644 --- a/drivers/gpu/drm/kmb/kmb_plane.h +++ b/drivers/gpu/drm/kmb/kmb_plane.h @@ -35,6 +35,9 @@ #define POSSIBLE_CRTCS 1 #define to_kmb_plane(x) container_of(x, struct kmb_plane, base_plane) +#define POSSIBLE_CRTCS 1 +#define KMB_MAX_PLANES 2 + enum layer_id { LAYER_0, LAYER_1, @@ -43,8 +46,6 @@ enum layer_id { /* KMB_MAX_PLANES */ }; -#define KMB_MAX_PLANES 1 - enum sub_plane_id { Y_PLANE, U_PLANE, @@ -62,6 +63,12 @@ struct layer_status { u32 ctrl; }; +struct disp_cfg { + unsigned int width; + unsigned int height; + unsigned int format; +}; + struct kmb_plane *kmb_plane_init(struct drm_device *drm); void kmb_plane_destroy(struct drm_plane *plane); #endif /* __KMB_PLANE_H__ */ diff --git a/drivers/gpu/drm/kmb/kmb_regs.h b/drivers/gpu/drm/kmb/kmb_regs.h index 48150569f702..9756101b0d32 100644 --- a/drivers/gpu/drm/kmb/kmb_regs.h +++ b/drivers/gpu/drm/kmb/kmb_regs.h @@ -43,8 +43,10 @@ #define LCD_CTRL_OUTPUT_ENABLED BIT(19) #define LCD_CTRL_BPORCH_ENABLE BIT(21) #define LCD_CTRL_FPORCH_ENABLE BIT(22) +#define LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE BIT(23) #define LCD_CTRL_PIPELINE_DMA BIT(28) #define LCD_CTRL_VHSYNC_IDLE_LVL BIT(31) +#define LCD_CTRL_ALPHA_ALL (0xff << 6) /* interrupts */ #define LCD_INT_STATUS (0x4 * 0x001) @@ -115,6 +117,7 @@ #define LCD_LAYER_ALPHA_EMBED BIT(5) #define LCD_LAYER_ALPHA_COMBI (LCD_LAYER_ALPHA_STATIC | \ LCD_LAYER_ALPHA_EMBED) +#define LCD_LAYER_ALPHA_DISABLED ~(LCD_LAYER_ALPHA_COMBI) /* RGB multiplied with alpha */ #define LCD_LAYER_ALPHA_PREMULT BIT(6) #define LCD_LAYER_INVERT_COL BIT(7) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 5f81489fc60c..a4e80e499674 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -4,8 +4,6 @@ */ #include <linux/clk.h> -#include <linux/dma-mapping.h> -#include <linux/mailbox_controller.h> #include 
<linux/pm_runtime.h> #include <linux/soc/mediatek/mtk-cmdq.h> #include <linux/soc/mediatek/mtk-mmsys.h> @@ -52,11 +50,8 @@ struct mtk_drm_crtc { bool pending_async_planes; #if IS_REACHABLE(CONFIG_MTK_CMDQ) - struct mbox_client cmdq_cl; - struct mbox_chan *cmdq_chan; - struct cmdq_pkt cmdq_handle; + struct cmdq_client *cmdq_client; u32 cmdq_event; - u32 cmdq_vblank_cnt; #endif struct device *mmsys_dev; @@ -227,79 +222,9 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, } #if IS_REACHABLE(CONFIG_MTK_CMDQ) -static int mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, struct cmdq_pkt *pkt, - size_t size) +static void ddp_cmdq_cb(struct cmdq_cb_data data) { - struct device *dev; - dma_addr_t dma_addr; - - pkt->va_base = kzalloc(size, GFP_KERNEL); - if (!pkt->va_base) { - kfree(pkt); - return -ENOMEM; - } - pkt->buf_size = size; - - dev = chan->mbox->dev; - dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_addr)) { - dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size); - kfree(pkt->va_base); - kfree(pkt); - return -ENOMEM; - } - - pkt->pa_base = dma_addr; - - return 0; -} - -static void mtk_drm_cmdq_pkt_destroy(struct mbox_chan *chan, struct cmdq_pkt *pkt) -{ - dma_unmap_single(chan->mbox->dev, pkt->pa_base, pkt->buf_size, - DMA_TO_DEVICE); - kfree(pkt->va_base); - kfree(pkt); -} - -static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) -{ - struct mtk_drm_crtc *mtk_crtc = container_of(cl, struct mtk_drm_crtc, cmdq_cl); - struct cmdq_cb_data *data = mssg; - struct mtk_crtc_state *state; - unsigned int i; - - state = to_mtk_crtc_state(mtk_crtc->base.state); - - state->pending_config = false; - - if (mtk_crtc->pending_planes) { - for (i = 0; i < mtk_crtc->layer_nr; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - struct mtk_plane_state *plane_state; - - plane_state = to_mtk_plane_state(plane->state); - - plane_state->pending.config = false; - } - mtk_crtc->pending_planes = false; - } - - if (mtk_crtc->pending_async_planes) { - for (i = 0; i < mtk_crtc->layer_nr; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - struct mtk_plane_state *plane_state; - - plane_state = to_mtk_plane_state(plane->state); - - plane_state->pending.async_config = false; - } - mtk_crtc->pending_async_planes = false; - } - - mtk_crtc->cmdq_vblank_cnt = 0; - mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt); + cmdq_pkt_destroy(data.data); } #endif @@ -453,8 +378,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc, state->pending_vrefresh, 0, cmdq_handle); - if (!cmdq_handle) - state->pending_config = false; + state->pending_config = false; } if (mtk_crtc->pending_planes) { @@ -474,12 +398,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc, mtk_ddp_comp_layer_config(comp, local_layer, plane_state, cmdq_handle); - if (!cmdq_handle) - plane_state->pending.config = false; + plane_state->pending.config = false; } - - if (!cmdq_handle) - mtk_crtc->pending_planes = false; + mtk_crtc->pending_planes = false; } if (mtk_crtc->pending_async_planes) { @@ -499,12 +420,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc, mtk_ddp_comp_layer_config(comp, local_layer, plane_state, cmdq_handle); - if (!cmdq_handle) - plane_state->pending.async_config = false; + plane_state->pending.async_config = false; } - - if (!cmdq_handle) - mtk_crtc->pending_async_planes = false; + mtk_crtc->pending_async_planes = false; } } @@ -512,7 +430,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc, 
bool needs_vblank) { #if IS_REACHABLE(CONFIG_MTK_CMDQ) - struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle; + struct cmdq_pkt *cmdq_handle; #endif struct drm_crtc *crtc = &mtk_crtc->base; struct mtk_drm_private *priv = crtc->dev->dev_private; @@ -550,24 +468,14 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc, mtk_mutex_release(mtk_crtc->mutex); } #if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (mtk_crtc->cmdq_chan) { - mbox_flush(mtk_crtc->cmdq_chan, 2000); - cmdq_handle->cmd_buf_size = 0; + if (mtk_crtc->cmdq_client) { + mbox_flush(mtk_crtc->cmdq_client->chan, 2000); + cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE); cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false); mtk_crtc_ddp_config(crtc, cmdq_handle); cmdq_pkt_finalize(cmdq_handle); - dma_sync_single_for_device(mtk_crtc->cmdq_chan->mbox->dev, - cmdq_handle->pa_base, - cmdq_handle->cmd_buf_size, - DMA_TO_DEVICE); - /* - * CMDQ command should execute in next vblank, - * If it fail to execute in next 2 vblank, timeout happen. - */ - mtk_crtc->cmdq_vblank_cnt = 2; - mbox_send_message(mtk_crtc->cmdq_chan, cmdq_handle); - mbox_client_txdone(mtk_crtc->cmdq_chan, 0); + cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle); } #endif mtk_crtc->config_updating = false; @@ -581,15 +489,12 @@ static void mtk_crtc_ddp_irq(void *data) struct mtk_drm_private *priv = crtc->dev->dev_private; #if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (!priv->data->shadow_register && !mtk_crtc->cmdq_chan) - mtk_crtc_ddp_config(crtc, NULL); - else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0) - DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n", - drm_crtc_index(&mtk_crtc->base)); + if (!priv->data->shadow_register && !mtk_crtc->cmdq_client) #else if (!priv->data->shadow_register) - mtk_crtc_ddp_config(crtc, NULL); #endif + mtk_crtc_ddp_config(crtc, NULL); + mtk_drm_finish_page_flip(mtk_crtc); } @@ -924,20 +829,16 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mutex_init(&mtk_crtc->hw_lock); #if IS_REACHABLE(CONFIG_MTK_CMDQ) - mtk_crtc->cmdq_cl.dev = mtk_crtc->mmsys_dev; - mtk_crtc->cmdq_cl.tx_block = false; - mtk_crtc->cmdq_cl.knows_txdone = true; - mtk_crtc->cmdq_cl.rx_callback = ddp_cmdq_cb; - mtk_crtc->cmdq_chan = - mbox_request_channel(&mtk_crtc->cmdq_cl, - drm_crtc_index(&mtk_crtc->base)); - if (IS_ERR(mtk_crtc->cmdq_chan)) { + mtk_crtc->cmdq_client = + cmdq_mbox_create(mtk_crtc->mmsys_dev, + drm_crtc_index(&mtk_crtc->base)); + if (IS_ERR(mtk_crtc->cmdq_client)) { dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", drm_crtc_index(&mtk_crtc->base)); - mtk_crtc->cmdq_chan = NULL; + mtk_crtc->cmdq_client = NULL; } - if (mtk_crtc->cmdq_chan) { + if (mtk_crtc->cmdq_client) { ret = of_property_read_u32_index(priv->mutex_node, "mediatek,gce-events", drm_crtc_index(&mtk_crtc->base), @@ -945,18 +846,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, if (ret) { dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", drm_crtc_index(&mtk_crtc->base)); - mbox_free_channel(mtk_crtc->cmdq_chan); - mtk_crtc->cmdq_chan = NULL; - } else { - ret = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan, - &mtk_crtc->cmdq_handle, - PAGE_SIZE); - if (ret) { - dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n", - drm_crtc_index(&mtk_crtc->base)); - mbox_free_channel(mtk_crtc->cmdq_chan); - mtk_crtc->cmdq_chan = NULL; - } + cmdq_mbox_destroy(mtk_crtc->cmdq_client); + mtk_crtc->cmdq_client = NULL; 
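/* With cmdq_client left NULL here, mtk_crtc_ddp_irq() and
 * mtk_drm_crtc_update_config() fall back to writing registers from the
 * CPU; otherwise packets are built with cmdq_pkt_create() and submitted
 * via cmdq_pkt_flush_async(), with ddp_cmdq_cb() freeing each one
 * through cmdq_pkt_destroy(), replacing the hand-rolled mbox client and
 * preallocated packet these hunks delete. */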
} } #endif diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 93b40c245f00..5d90d2eb0019 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -11,6 +11,7 @@ #include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/reset.h> #include <video/mipi_display.h> #include <video/videomode.h> @@ -980,8 +981,10 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) struct mtk_dsi *dsi = dev_get_drvdata(dev); ret = mtk_dsi_encoder_init(drm, dsi); + if (ret) + return ret; - return ret; + return device_reset_optional(dev); } static void mtk_dsi_unbind(struct device *dev, struct device *master, diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c index 4fd4de16cd32..894472921c30 100644 --- a/drivers/gpu/drm/mga/mga_ioc32.c +++ b/drivers/gpu/drm/mga/mga_ioc32.c @@ -38,16 +38,18 @@ typedef struct drm32_mga_init { int func; u32 sarea_priv_offset; - int chipset; - int sgram; - unsigned int maccess; - unsigned int fb_cpp; - unsigned int front_offset, front_pitch; - unsigned int back_offset, back_pitch; - unsigned int depth_cpp; - unsigned int depth_offset, depth_pitch; - unsigned int texture_offset[MGA_NR_TEX_HEAPS]; - unsigned int texture_size[MGA_NR_TEX_HEAPS]; + struct_group(always32bit, + int chipset; + int sgram; + unsigned int maccess; + unsigned int fb_cpp; + unsigned int front_offset, front_pitch; + unsigned int back_offset, back_pitch; + unsigned int depth_cpp; + unsigned int depth_offset, depth_pitch; + unsigned int texture_offset[MGA_NR_TEX_HEAPS]; + unsigned int texture_size[MGA_NR_TEX_HEAPS]; + ); u32 fb_offset; u32 mmio_offset; u32 status_offset; @@ -67,9 +69,8 @@ static int compat_mga_init(struct file *file, unsigned int cmd, init.func = init32.func; init.sarea_priv_offset = init32.sarea_priv_offset; - memcpy(&init.chipset, &init32.chipset, - offsetof(drm_mga_init_t, fb_offset) - - offsetof(drm_mga_init_t, chipset)); + memcpy(&init.always32bit, &init32.always32bit, + sizeof(init32.always32bit)); init.fb_offset = init32.fb_offset; init.mmio_offset = init32.mmio_offset; init.status_offset = init32.status_offset; diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 5879f67bc88c..ae11061727ff 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -14,10 +14,12 @@ config DRM_MSM select REGULATOR select DRM_KMS_HELPER select DRM_PANEL + select DRM_BRIDGE + select DRM_PANEL_BRIDGE select DRM_SCHED select SHMEM select TMPFS - select QCOM_SCM if ARCH_QCOM + select QCOM_SCM select WANT_DEV_COREDUMP select SND_SOC_HDMI_CODEC if SND_SOC select SYNC_FILE @@ -55,7 +57,7 @@ config DRM_MSM_GPU_SUDO config DRM_MSM_HDMI_HDCP bool "Enable HDMI HDCP support in MSM DRM driver" - depends on DRM_MSM && QCOM_SCM + depends on DRM_MSM default y help Choose this option to enable HDCP state machine diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 904535eda0c4..40577f8856d8 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -51,7 +51,6 @@ msm-y := \ disp/mdp5/mdp5_mixer.o \ disp/mdp5/mdp5_plane.o \ disp/mdp5/mdp5_smp.o \ - disp/dpu1/dpu_core_irq.o \ disp/dpu1/dpu_core_perf.o \ disp/dpu1/dpu_crtc.o \ disp/dpu1/dpu_encoder.o \ diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 4534633fe7cd..8fb847c174ff 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ 
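The mga_ioc32 hunk above is worth pausing on: struct_group() (from <linux/stddef.h>) gives a run of adjacent members a name, so the compat-ioctl copy becomes a memcpy() whose bound FORTIFY_SOURCE can verify, instead of offsetof() arithmetic it cannot. A sketch with a hypothetical structure:

        #include <linux/stddef.h>       /* struct_group() */
        #include <linux/string.h>
        #include <linux/types.h>

        struct pkt {                    /* hypothetical example structure */
                u32 hdr;
                struct_group(payload,   /* members copyable as a single unit */
                        u16 a;
                        u16 b;
                        u32 c;
                );
                u32 crc;
        };

        static void copy_payload(struct pkt *dst, const struct pkt *src)
        {
                /* sizeof() the named group; FORTIFY can check this bound. */
                memcpy(&dst->payload, &src->payload, sizeof(src->payload));
        }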
b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -571,13 +571,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) } icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); - ret = IS_ERR(icc_path); - if (ret) + if (IS_ERR(icc_path)) { + ret = PTR_ERR(icc_path); goto fail; + } ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem"); - ret = IS_ERR(ocmem_icc_path); - if (ret) { + if (IS_ERR(ocmem_icc_path)) { + ret = PTR_ERR(ocmem_icc_path); /* allow -ENODATA, ocmem icc is optional */ if (ret != -ENODATA) goto fail; diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 82bebb40234d..a96ee79cc5e0 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -699,13 +699,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) } icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); - ret = IS_ERR(icc_path); - if (ret) + if (IS_ERR(icc_path)) { + ret = PTR_ERR(icc_path); goto fail; + } ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem"); - ret = IS_ERR(ocmem_icc_path); - if (ret) { + if (IS_ERR(ocmem_icc_path)) { + ret = PTR_ERR(ocmem_icc_path); /* allow -ENODATA, ocmem icc is optional */ if (ret != -ENODATA) goto fail; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c index c9d11d57aed6..dd593ec2bc56 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -138,7 +138,7 @@ reset_set(void *data, u64 val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n"); void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) @@ -154,6 +154,6 @@ void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) ARRAY_SIZE(a5xx_debugfs_list), minor->debugfs_root, minor); - debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev, - &reset_fops); + debugfs_create_file_unsafe("reset", S_IWUGO, minor->debugfs_root, dev, + &reset_fops); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index a7c58018959f..71e52b2b2025 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -296,6 +296,8 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) u32 val; int request, ack; + WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) return -EINVAL; @@ -337,6 +339,8 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) { int bit; + WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) return; @@ -512,11 +516,11 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct platform_device *pdev = to_platform_device(gmu->dev); void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); - void __iomem *seqptr; + void __iomem *seqptr = NULL; uint32_t pdc_address_offset; bool pdc_in_aop = false; - if (!pdcptr) + if (IS_ERR(pdcptr)) goto err; if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu)) @@ -528,7 +532,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) if (!pdc_in_aop) { seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq"); - if (!seqptr) + if (IS_ERR(seqptr)) goto err; } @@ -887,7 +891,7 @@ static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; gpu_opp = 
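The a3xx/a4xx fixes above correct a classic ERR_PTR slip: "ret = IS_ERR(ptr)" stores 0 or 1, so callers saw "error 1" instead of the -errno encoded in the pointer. The corrected shape, sketched against the same interconnect API:

        #include <linux/err.h>
        #include <linux/interconnect.h>

        static int get_gfx_path(struct device *dev, struct icc_path **out)
        {
                struct icc_path *p = devm_of_icc_get(dev, "gfx-mem");

                /* Wrong: "ret = IS_ERR(p)" would store 0 or 1, discarding
                 * the -errno that is encoded inside the error pointer. */
                if (IS_ERR(p))
                        return PTR_ERR(p);      /* propagate the real -errno */

                *out = p;
                return 0;
        }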
dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); - if (IS_ERR_OR_NULL(gpu_opp)) + if (IS_ERR(gpu_opp)) return; gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ @@ -901,7 +905,7 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); - if (IS_ERR_OR_NULL(gpu_opp)) + if (IS_ERR(gpu_opp)) return; dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp); @@ -1482,6 +1486,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) if (!pdev) return -ENODEV; + mutex_init(&gmu->lock); + gmu->dev = &pdev->dev; of_dma_configure(gmu->dev, node, true); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 3c74f64e3126..84bd516f01e8 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -44,6 +44,9 @@ struct a6xx_gmu_bo { struct a6xx_gmu { struct device *dev; + /* For serializing communication with the GMU: */ + struct mutex lock; + struct msm_gem_address_space *aspace; void * __iomem mmio; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 40c9fef457a4..267a880811d6 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, u32 asid; u64 memptr = rbmemptr(ring, ttbr0); - if (ctx == a6xx_gpu->cur_ctx) + if (ctx->seqno == a6xx_gpu->cur_ctx_seqno) return; if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) @@ -139,7 +139,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, OUT_PKT7(ring, CP_EVENT_WRITE, 1); OUT_RING(ring, 0x31); - a6xx_gpu->cur_ctx = ctx; + a6xx_gpu->cur_ctx_seqno = ctx->seqno; } static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) @@ -881,7 +881,7 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu) A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR) -static int a6xx_hw_init(struct msm_gpu *gpu) +static int hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); @@ -1081,7 +1081,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu) /* Always come up on rb 0 */ a6xx_gpu->cur_ring = gpu->rb[0]; - a6xx_gpu->cur_ctx = NULL; + a6xx_gpu->cur_ctx_seqno = 0; /* Enable the SQE_to start the CP engine */ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); @@ -1135,6 +1135,19 @@ out: return ret; } +static int a6xx_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int ret; + + mutex_lock(&a6xx_gpu->gmu.lock); + ret = hw_init(gpu); + mutex_unlock(&a6xx_gpu->gmu.lock); + + return ret; +} + static void a6xx_dump(struct msm_gpu *gpu) { DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n", @@ -1509,7 +1522,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu) trace_msm_gpu_resume(0); + mutex_lock(&a6xx_gpu->gmu.lock); ret = a6xx_gmu_resume(a6xx_gpu); + mutex_unlock(&a6xx_gpu->gmu.lock); if (ret) return ret; @@ -1532,7 +1547,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) msm_devfreq_suspend(gpu); + mutex_lock(&a6xx_gpu->gmu.lock); ret = a6xx_gmu_stop(a6xx_gpu); + mutex_unlock(&a6xx_gpu->gmu.lock); if (ret) return ret; @@ -1547,18 +1564,19 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) { struct adreno_gpu *adreno_gpu = 
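The a6xx_gpu change above replaces a cached "struct msm_file_private *cur_ctx" pointer with an integer cur_ctx_seqno: a freed context's address can be recycled by a new allocation, so a pointer compare can false-match, while a never-reused sequence number cannot. The pattern in isolation (names hypothetical):

        /* Sketch of seqno-based "same context?" tracking. */
        struct ctx {
                int seqno;              /* assigned once per context, never reused */
        };

        static int cur_ctx_seqno;       /* 0 == "no pagetables installed" */

        static bool pagetable_switch_needed(const struct ctx *ctx)
        {
                /* A cached pointer could false-match when a freed context's
                 * address is recycled; a monotonic seqno cannot, and it
                 * never dangles. */
                if (ctx->seqno == cur_ctx_seqno)
                        return false;

                cur_ctx_seqno = ctx->seqno;
                return true;
        }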
to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); - static DEFINE_MUTEX(perfcounter_oob); - mutex_lock(&perfcounter_oob); + mutex_lock(&a6xx_gpu->gmu.lock); /* Force the GPU power on so we can read this register */ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO, - REG_A6XX_CP_ALWAYS_ON_COUNTER_HI); + REG_A6XX_CP_ALWAYS_ON_COUNTER_HI); a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); - mutex_unlock(&perfcounter_oob); + + mutex_unlock(&a6xx_gpu->gmu.lock); + return 0; } @@ -1622,6 +1640,16 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) return (unsigned long)busy_time; } +void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + mutex_lock(&a6xx_gpu->gmu.lock); + a6xx_gmu_set_freq(gpu, opp); + mutex_unlock(&a6xx_gpu->gmu.lock); +} + static struct msm_gem_address_space * a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) { @@ -1766,7 +1794,7 @@ static const struct adreno_gpu_funcs funcs = { #endif .gpu_busy = a6xx_gpu_busy, .gpu_get_freq = a6xx_gmu_get_freq, - .gpu_set_freq = a6xx_gmu_set_freq, + .gpu_set_freq = a6xx_gpu_set_freq, #if defined(CONFIG_DRM_MSM_GPU_STATE) .gpu_state_get = a6xx_gpu_state_get, .gpu_state_put = a6xx_gpu_state_put, @@ -1810,6 +1838,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev))) adreno_gpu->base.hw_apriv = true; + /* + * For now only clamp to idle freq for devices where this is known not + * to cause power supply issues: + */ + if (info && (info->revn == 618)) + gpu->clamp_to_idle = true; + a6xx_llc_slices_init(pdev, a6xx_gpu); ret = a6xx_set_supported_hw(&pdev->dev, config->rev); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 0bc2d062f54a..8e5527c881b1 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -19,7 +19,16 @@ struct a6xx_gpu { uint64_t sqe_iova; struct msm_ringbuffer *cur_ring; - struct msm_file_private *cur_ctx; + + /** + * cur_ctx_seqno: + * + * The ctx->seqno value of the context with current pgtables + * installed. Tracked by seqno rather than pointer value to + * avoid dangling pointers, and cases where a ctx can be freed + * and a new one created with the same address. + */ + int cur_ctx_seqno; struct a6xx_gmu gmu; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index e8f65cd8eca6..7501849ed15d 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -180,7 +180,7 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset, msm_readl((ptr) + ((offset) << 2)) /* read a value from the CX debug bus */ -static int cx_debugbus_read(void *__iomem cxdbg, u32 block, u32 offset, +static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset, u32 *data) { u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) | diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c deleted file mode 100644 index d2457490930b..000000000000 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c +++ /dev/null @@ -1,256 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
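The remaining a6xx hunks all serve one locking scheme: the ad-hoc static DEFINE_MUTEX(perfcounter_oob) goes away, every public entry point that talks to the GMU (hw_init, pm_resume/suspend, set_freq, get_timestamp) takes the new per-instance gmu->lock, and inner helpers merely assert it with WARN_ON_ONCE(!mutex_is_locked(...)). A condensed sketch of that lock-at-the-boundary convention (mutex_init() at probe time elided):

        #include <linux/bug.h>
        #include <linux/mutex.h>

        struct gmu {
                struct mutex lock;      /* serializes all traffic to the GMU */
        };

        static void gmu_helper(struct gmu *gmu)
        {
                /* Helpers assert instead of locking, so an entry point can
                 * batch several helper calls under one critical section. */
                WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
                /* ... poke GMU registers ... */
        }

        static void gpu_entry_point(struct gmu *gmu)
        {
                mutex_lock(&gmu->lock);         /* lock only at the public boundary */
                gmu_helper(gmu);
                mutex_unlock(&gmu->lock);
        }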
- */ - -#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ - -#include <linux/debugfs.h> -#include <linux/irqdomain.h> -#include <linux/irq.h> -#include <linux/kthread.h> - -#include "dpu_core_irq.h" -#include "dpu_trace.h" - -/** - * dpu_core_irq_callback_handler - dispatch core interrupts - * @arg: private data of callback handler - * @irq_idx: interrupt index - */ -static void dpu_core_irq_callback_handler(void *arg, int irq_idx) -{ - struct dpu_kms *dpu_kms = arg; - struct dpu_irq *irq_obj = &dpu_kms->irq_obj; - struct dpu_irq_callback *cb; - - VERB("irq_idx=%d\n", irq_idx); - - if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) - DRM_ERROR("no registered cb, idx:%d\n", irq_idx); - - atomic_inc(&irq_obj->irq_counts[irq_idx]); - - /* - * Perform registered function callback - */ - list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list) - if (cb->func) - cb->func(cb->arg, irq_idx); -} - -u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear) -{ - if (!dpu_kms->hw_intr || - !dpu_kms->hw_intr->ops.get_interrupt_status) - return 0; - - if (irq_idx < 0) { - DPU_ERROR("[%pS] invalid irq_idx=%d\n", - __builtin_return_address(0), irq_idx); - return 0; - } - - return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr, - irq_idx, clear); -} - -int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx, - struct dpu_irq_callback *register_irq_cb) -{ - unsigned long irq_flags; - - if (!dpu_kms->irq_obj.irq_cb_tbl) { - DPU_ERROR("invalid params\n"); - return -EINVAL; - } - - if (!register_irq_cb || !register_irq_cb->func) { - DPU_ERROR("invalid irq_cb:%d func:%d\n", - register_irq_cb != NULL, - register_irq_cb ? - register_irq_cb->func != NULL : -1); - return -EINVAL; - } - - if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) { - DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx); - return -EINVAL; - } - - VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx); - - irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr); - trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb); - list_del_init(®ister_irq_cb->list); - list_add_tail(®ister_irq_cb->list, - &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]); - if (list_is_first(®ister_irq_cb->list, - &dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) { - int ret = dpu_kms->hw_intr->ops.enable_irq_locked( - dpu_kms->hw_intr, - irq_idx); - if (ret) - DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n", - irq_idx); - } - dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags); - - return 0; -} - -int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx, - struct dpu_irq_callback *register_irq_cb) -{ - unsigned long irq_flags; - - if (!dpu_kms->irq_obj.irq_cb_tbl) { - DPU_ERROR("invalid params\n"); - return -EINVAL; - } - - if (!register_irq_cb || !register_irq_cb->func) { - DPU_ERROR("invalid irq_cb:%d func:%d\n", - register_irq_cb != NULL, - register_irq_cb ? 
- register_irq_cb->func != NULL : -1); - return -EINVAL; - } - - if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) { - DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx); - return -EINVAL; - } - - VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx); - - irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr); - trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb); - list_del_init(®ister_irq_cb->list); - /* empty callback list but interrupt is still enabled */ - if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) { - int ret = dpu_kms->hw_intr->ops.disable_irq_locked( - dpu_kms->hw_intr, - irq_idx); - if (ret) - DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n", - irq_idx); - VERB("irq_idx=%d ret=%d\n", irq_idx, ret); - } - dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags); - - return 0; -} - -static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms) -{ - if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs) - return; - - dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr); -} - -static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) -{ - if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs) - return; - - dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr); -} - -#ifdef CONFIG_DEBUG_FS -static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) -{ - struct dpu_kms *dpu_kms = s->private; - struct dpu_irq *irq_obj = &dpu_kms->irq_obj; - struct dpu_irq_callback *cb; - unsigned long irq_flags; - int i, irq_count, cb_count; - - if (WARN_ON(!irq_obj->irq_cb_tbl)) - return 0; - - for (i = 0; i < irq_obj->total_irqs; i++) { - irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr); - cb_count = 0; - irq_count = atomic_read(&irq_obj->irq_counts[i]); - list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list) - cb_count++; - dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags); - - if (irq_count || cb_count) - seq_printf(s, "idx:%d irq:%d cb:%d\n", - i, irq_count, cb_count); - } - - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq); - -void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, - struct dentry *parent) -{ - debugfs_create_file("core_irq", 0600, parent, dpu_kms, - &dpu_debugfs_core_irq_fops); -} -#endif - -void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms) -{ - int i; - - pm_runtime_get_sync(&dpu_kms->pdev->dev); - dpu_clear_all_irqs(dpu_kms); - dpu_disable_all_irqs(dpu_kms); - pm_runtime_put_sync(&dpu_kms->pdev->dev); - - /* Create irq callbacks for all possible irq_idx */ - dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->total_irqs; - dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs, - sizeof(struct list_head), GFP_KERNEL); - dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs, - sizeof(atomic_t), GFP_KERNEL); - for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) { - INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]); - atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0); - } -} - -void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms) -{ - int i; - - pm_runtime_get_sync(&dpu_kms->pdev->dev); - for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) - if (!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i])) - DPU_ERROR("irq_idx=%d still enabled/registered\n", i); - - dpu_clear_all_irqs(dpu_kms); - dpu_disable_all_irqs(dpu_kms); - pm_runtime_put_sync(&dpu_kms->pdev->dev); - - kfree(dpu_kms->irq_obj.irq_cb_tbl); - kfree(dpu_kms->irq_obj.irq_counts); - dpu_kms->irq_obj.irq_cb_tbl = NULL; - dpu_kms->irq_obj.irq_counts = NULL; - dpu_kms->irq_obj.total_irqs = 0; -} - -irqreturn_t 
dpu_core_irq(struct dpu_kms *dpu_kms) -{ - /* - * Dispatch to HW driver to handle interrupt lookup that is being - * fired. When matching interrupt is located, HW driver will call to - * dpu_core_irq_callback_handler with the irq_idx from the lookup table. - * dpu_core_irq_callback_handler will perform the registered function - * callback, and do the interrupt status clearing once the registered - * callback is finished. - * Function will also clear the interrupt status after reading. - */ - dpu_kms->hw_intr->ops.dispatch_irqs( - dpu_kms->hw_intr, - dpu_core_irq_callback_handler, - dpu_kms); - - return IRQ_HANDLED; -} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 768012243b44..967245b8cc02 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> */ @@ -70,17 +70,147 @@ static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) return NULL; } -static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc) +static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name) { - struct drm_encoder *encoder; + if (!src_name || + !strcmp(src_name, "none")) + return DPU_CRTC_CRC_SOURCE_NONE; + if (!strcmp(src_name, "auto") || + !strcmp(src_name, "lm")) + return DPU_CRTC_CRC_SOURCE_LAYER_MIXER; + + return DPU_CRTC_CRC_SOURCE_INVALID; +} - encoder = get_encoder_from_crtc(crtc); +static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc, + const char *src_name, size_t *values_cnt) +{ + enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name); + struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state); + + if (source < 0) { + DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index); + return -EINVAL; + } + + if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) + *values_cnt = crtc_state->num_mixers; + + return 0; +} + +static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) +{ + enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name); + enum dpu_crtc_crc_source current_source; + struct dpu_crtc_state *crtc_state; + struct drm_device *drm_dev = crtc->dev; + struct dpu_crtc_mixer *m; + + bool was_enabled; + bool enable = false; + int i, ret = 0; + + if (source < 0) { + DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index); + return -EINVAL; + } + + ret = drm_modeset_lock(&crtc->mutex, NULL); + + if (ret) + return ret; + + enable = (source != DPU_CRTC_CRC_SOURCE_NONE); + crtc_state = to_dpu_crtc_state(crtc->state); + + spin_lock_irq(&drm_dev->event_lock); + current_source = crtc_state->crc_source; + spin_unlock_irq(&drm_dev->event_lock); + + was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE); + + if (!was_enabled && enable) { + ret = drm_crtc_vblank_get(crtc); + + if (ret) + goto cleanup; + + } else if (was_enabled && !enable) { + drm_crtc_vblank_put(crtc); + } + + spin_lock_irq(&drm_dev->event_lock); + crtc_state->crc_source = source; + spin_unlock_irq(&drm_dev->event_lock); + + crtc_state->crc_frame_skip_count = 0; + + for (i = 0; i < crtc_state->num_mixers; ++i) { + m = &crtc_state->mixers[i]; + + if (!m->hw_lm || !m->hw_lm->ops.setup_misr) + continue; + + /* Calculate MISR over 1 frame */ + 
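dpu_crtc_verify_crc_source()/dpu_crtc_set_crc_source() above plug into DRM's generic CRC debugfs interface (crtc-N/crc/control). Stripped of the dpu-specific mixer bookkeeping, the callback pair reduces to roughly the following sketch; note it skips the was_enabled tracking the real code does to keep drm_crtc_vblank_get()/put() balanced:

        #include <linux/string.h>
        #include <drm/drm_crtc.h>
        #include <drm/drm_vblank.h>

        static int my_verify_crc_source(struct drm_crtc *crtc,
                                        const char *src_name, size_t *values_cnt)
        {
                if (src_name && strcmp(src_name, "none") && strcmp(src_name, "auto"))
                        return -EINVAL;

                *values_cnt = 1;        /* CRC values reported per frame */
                return 0;
        }

        static int my_set_crc_source(struct drm_crtc *crtc, const char *src_name)
        {
                bool enable = src_name && strcmp(src_name, "none");

                if (enable)             /* keep vblanks running while collecting */
                        return drm_crtc_vblank_get(crtc);

                drm_crtc_vblank_put(crtc);
                return 0;
        }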
m->hw_lm->ops.setup_misr(m->hw_lm, true, 1); + } + + +cleanup: + drm_modeset_unlock(&crtc->mutex); + + return ret; +} + +static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc) +{ + struct drm_encoder *encoder = get_encoder_from_crtc(crtc); if (!encoder) { DRM_ERROR("no encoder found for crtc %d\n", crtc->index); - return false; + return 0; + } + + return dpu_encoder_get_vsync_count(encoder); +} + + +static int dpu_crtc_get_crc(struct drm_crtc *crtc) +{ + struct dpu_crtc_state *crtc_state; + struct dpu_crtc_mixer *m; + u32 crcs[CRTC_DUAL_MIXERS]; + + int i = 0; + int rc = 0; + + crtc_state = to_dpu_crtc_state(crtc->state); + + BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers)); + + /* Skip first 2 frames in case of "uncooked" CRCs */ + if (crtc_state->crc_frame_skip_count < 2) { + crtc_state->crc_frame_skip_count++; + return 0; } - return dpu_encoder_get_frame_count(encoder); + for (i = 0; i < crtc_state->num_mixers; ++i) { + + m = &crtc_state->mixers[i]; + + if (!m->hw_lm || !m->hw_lm->ops.collect_misr) + continue; + + rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]); + + if (rc) { + DRM_DEBUG_DRIVER("MISR read failed\n"); + return rc; + } + } + + return drm_crtc_add_crc_entry(crtc, true, + drm_crtc_accurate_vblank_count(crtc), crcs); } static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc, @@ -389,6 +519,9 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc) dpu_crtc->vblank_cb_time = ktime_get(); else dpu_crtc->vblank_cb_count++; + + dpu_crtc_get_crc(crtc); + drm_crtc_handle_vblank(crtc); trace_dpu_crtc_vblank_cb(DRMID(crtc)); } @@ -1332,6 +1465,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = { .atomic_destroy_state = dpu_crtc_destroy_state, .late_register = dpu_crtc_late_register, .early_unregister = dpu_crtc_early_unregister, + .verify_crc_source = dpu_crtc_verify_crc_source, + .set_crc_source = dpu_crtc_set_crc_source, .enable_vblank = msm_crtc_enable_vblank, .disable_vblank = msm_crtc_disable_vblank, .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index cec3474340e8..ae9546ca1359 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> */ @@ -70,6 +70,19 @@ struct dpu_crtc_smmu_state_data { }; /** + * enum dpu_crtc_crc_source: CRC source + * @DPU_CRTC_CRC_SOURCE_NONE: no source set + * @DPU_CRTC_CRC_SOURCE_LAYER_MIXER: CRC in layer mixer + * @DPU_CRTC_CRC_SOURCE_INVALID: Invalid source + */ +enum dpu_crtc_crc_source { + DPU_CRTC_CRC_SOURCE_NONE = 0, + DPU_CRTC_CRC_SOURCE_LAYER_MIXER, + DPU_CRTC_CRC_SOURCE_MAX, + DPU_CRTC_CRC_SOURCE_INVALID = -1 +}; + +/** * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC * @hw_lm: LM HW Driver context * @lm_ctl: CTL Path HW driver context @@ -139,6 +152,7 @@ struct dpu_crtc_frame_event { * @event_lock : Spinlock around event handling code * @phandle: Pointer to power handler * @cur_perf : current performance committed to clock/bandwidth driver + * @crc_source : CRC source */ struct dpu_crtc { struct drm_crtc base; @@ -210,6 +224,9 @@ struct dpu_crtc_state { u32 num_ctls; struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS]; + + enum dpu_crtc_crc_source crc_source; + int crc_frame_skip_count; }; #define to_dpu_crtc_state(x) \ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 0e9d3fa1544b..e7ee4cfb8461 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -168,6 +168,7 @@ enum dpu_enc_rc_states { * @vsync_event_work: worker to handle vsync event for autorefresh * @topology: topology of the display * @idle_timeout: idle timeout duration in milliseconds + * @dp: msm_dp pointer, for DP encoders */ struct dpu_encoder_virt { struct drm_encoder base; @@ -206,6 +207,8 @@ struct dpu_encoder_virt { struct msm_display_topology topology; u32 idle_timeout; + + struct msm_dp *dp; }; #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base) @@ -395,19 +398,11 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc, return 0; } -int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc) +int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc) { - struct dpu_encoder_virt *dpu_enc; - struct dpu_encoder_phys *phys; - int framecount = 0; - - dpu_enc = to_dpu_encoder_virt(drm_enc); - phys = dpu_enc ? dpu_enc->cur_master : NULL; - - if (phys && phys->ops.get_frame_count) - framecount = phys->ops.get_frame_count(phys); - - return framecount; + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL; + return phys ? 
atomic_read(&phys->vsync_cnt) : 0; } int dpu_encoder_get_linecount(struct drm_encoder *drm_enc) @@ -1000,8 +995,8 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, trace_dpu_enc_mode_set(DRMID(drm_enc)); - if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) - msm_dp_display_mode_set(priv->dp, drm_enc, mode, adj_mode); + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) + msm_dp_display_mode_set(dpu_enc->dp, drm_enc, mode, adj_mode); list_for_each_entry(conn_iter, connector_list, head) if (conn_iter->encoder == drm_enc) @@ -1182,9 +1177,8 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) _dpu_encoder_virt_enable_helper(drm_enc); - if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) { - ret = msm_dp_display_enable(priv->dp, - drm_enc); + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) { + ret = msm_dp_display_enable(dpu_enc->dp, drm_enc); if (ret) { DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n", ret); @@ -1224,8 +1218,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) /* wait for idle */ dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE); - if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) { - if (msm_dp_display_pre_disable(priv->dp, drm_enc)) + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) { + if (msm_dp_display_pre_disable(dpu_enc->dp, drm_enc)) DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n"); } @@ -1253,8 +1247,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); - if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) { - if (msm_dp_display_disable(priv->dp, drm_enc)) + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) { + if (msm_dp_display_disable(dpu_enc->dp, drm_enc)) DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n"); } @@ -2170,7 +2164,8 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, timer_setup(&dpu_enc->vsync_event_timer, dpu_encoder_vsync_event_handler, 0); - + else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS) + dpu_enc->dp = priv->dp[disp_info->h_tile_instance[0]]; INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, dpu_encoder_off_work); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h index 99a5d73c9b88..e241914a9677 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h @@ -163,9 +163,9 @@ void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc, int dpu_encoder_get_linecount(struct drm_encoder *drm_enc); /** - * dpu_encoder_get_frame_count - get interface frame count for the encoder. + * dpu_encoder_get_vsync_count - get vsync count for the encoder. * @drm_enc: Pointer to previously created drm encoder structure */ -int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc); +int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc); #endif /* __DPU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index aa01698d6b25..34a6940d12c5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -42,7 +42,7 @@ static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc) { - return (phys_enc->split_role != ENC_ROLE_SLAVE) ? 
true : false; + return (phys_enc->split_role != ENC_ROLE_SLAVE); } static bool dpu_encoder_phys_cmd_mode_fixup( diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index b131fd376192..ce6f32a919e5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -794,7 +794,7 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = { DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1), PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1), }; @@ -844,7 +844,7 @@ static const struct dpu_intf_cfg sdm845_intf[] = { }; static const struct dpu_intf_cfg sc7180_intf[] = { - INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25), + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25), INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27), }; @@ -958,12 +958,6 @@ static const struct dpu_perf_cfg sdm845_perf_data = { .min_core_ib = 2400000, .min_llcc_ib = 800000, .min_dram_ib = 800000, - .core_ib_ff = "6.0", - .core_clk_ff = "1.0", - .comp_ratio_rt = - "NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23", - .comp_ratio_nrt = - "NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25", .undersized_prefill_lines = 2, .xtra_prefill_lines = 2, .dest_scale_prefill_lines = 3, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index d2a945a27cfa..4ade44bbd37e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -676,10 +676,6 @@ struct dpu_perf_cdp_cfg { * @min_core_ib minimum mnoc ib vote in kbps * @min_llcc_ib minimum llcc ib vote in kbps * @min_dram_ib minimum dram ib vote in kbps - * @core_ib_ff core instantaneous bandwidth fudge factor - * @core_clk_ff core clock fudge factor - * @comp_ratio_rt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio> - * @comp_ratio_nrt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio> * @undersized_prefill_lines undersized prefill in lines * @xtra_prefill_lines extra prefill latency in lines * @dest_scale_prefill_lines destination scaler latency in lines @@ -702,10 +698,6 @@ struct dpu_perf_cfg { u32 min_core_ib; u32 min_llcc_ib; u32 min_dram_ib; - const char *core_ib_ff; - const char *core_clk_ff; - const char *comp_ratio_rt; - const char *comp_ratio_nrt; u32 undersized_prefill_lines; u32 xtra_prefill_lines; u32 dest_scale_prefill_lines; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index 2e816f232e85..d2b6dca487e3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -3,12 +3,15 @@ */ #include <linux/bitops.h> +#include <linux/debugfs.h> #include <linux/slab.h> +#include "dpu_core_irq.h" #include "dpu_kms.h" #include "dpu_hw_interrupts.h" #include "dpu_hw_util.h" #include "dpu_hw_mdss.h" +#include "dpu_trace.h" /** * Register offsets in MDSS register file for the interrupt registers @@ -117,25 +120,33 @@ static const struct dpu_intr_reg dpu_intr_set[] = { #define DPU_IRQ_REG(irq_idx) (irq_idx / 32) #define DPU_IRQ_MASK(irq_idx) (BIT(irq_idx % 32)) -static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr, - int irq_idx) +/** + * dpu_core_irq_callback_handler - dispatch 
core interrupts + * @arg: private data of callback handler + * @irq_idx: interrupt index + */ +static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx) { - int reg_idx; + struct dpu_irq_callback *cb; - if (!intr) - return; + VERB("irq_idx=%d\n", irq_idx); - reg_idx = DPU_IRQ_REG(irq_idx); - DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off, DPU_IRQ_MASK(irq_idx)); + if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) + DRM_ERROR("no registered cb, idx:%d\n", irq_idx); - /* ensure register writes go through */ - wmb(); + atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]); + + /* + * Perform registered function callback + */ + list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list) + if (cb->func) + cb->func(cb->arg, irq_idx); } -static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr, - void (*cbfunc)(void *, int), - void *arg) +irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms) { + struct dpu_hw_intr *intr = dpu_kms->hw_intr; int reg_idx; int irq_idx; u32 irq_status; @@ -144,13 +155,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr, unsigned long irq_flags; if (!intr) - return; + return IRQ_NONE; - /* - * The dispatcher will save the IRQ status before calling here. - * Now need to go through each IRQ status and find matching - * irq lookup index. - */ spin_lock_irqsave(&intr->irq_lock, irq_flags); for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) { if (!test_bit(reg_idx, &intr->irq_mask)) @@ -178,17 +184,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr, */ while ((bit = ffs(irq_status)) != 0) { irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1); - /* - * Once a match on irq mask, perform a callback - * to the given cbfunc. cbfunc will take care - * the interrupt status clearing. If cbfunc is - * not provided, then the interrupt clearing - * is here. 
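With the dispatcher indirection removed, dpu_core_irq() above walks each status register directly and fans out per-bit callbacks with ffs(). The core loop in isolation:

        #include <linux/bitops.h>
        #include <linux/types.h>

        static void dispatch_status(u32 irq_status, int reg_idx,
                                    void (*handler)(int irq_idx))
        {
                int bit;

                /* ffs() is 1-based and returns 0 when no bit is set. */
                while ((bit = ffs(irq_status)) != 0) {
                        handler(reg_idx * 32 + (bit - 1));
                        irq_status &= ~BIT(bit - 1);    /* on to the next bit */
                }
        }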
- */ - if (cbfunc) - cbfunc(arg, irq_idx); - dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx); + dpu_core_irq_callback_handler(dpu_kms, irq_idx); /* * When callback finish, clear the irq_status @@ -203,6 +200,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr, wmb(); spin_unlock_irqrestore(&intr->irq_lock, irq_flags); + + return IRQ_HANDLED; } static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx) @@ -303,12 +302,13 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx) return 0; } -static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr) +static void dpu_clear_irqs(struct dpu_kms *dpu_kms) { + struct dpu_hw_intr *intr = dpu_kms->hw_intr; int i; if (!intr) - return -EINVAL; + return; for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) { if (test_bit(i, &intr->irq_mask)) @@ -318,16 +318,15 @@ static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr) /* ensure register writes go through */ wmb(); - - return 0; } -static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr) +static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) { + struct dpu_hw_intr *intr = dpu_kms->hw_intr; int i; if (!intr) - return -EINVAL; + return; for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) { if (test_bit(i, &intr->irq_mask)) @@ -337,13 +336,11 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr) /* ensure register writes go through */ wmb(); - - return 0; } -static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr, - int irq_idx, bool clear) +u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear) { + struct dpu_hw_intr *intr = dpu_kms->hw_intr; int reg_idx; unsigned long irq_flags; u32 intr_status; @@ -351,6 +348,12 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr, if (!intr) return 0; + if (irq_idx < 0) { + DPU_ERROR("[%pS] invalid irq_idx=%d\n", + __builtin_return_address(0), irq_idx); + return 0; + } + if (irq_idx < 0 || irq_idx >= intr->total_irqs) { pr_err("invalid IRQ index: [%d]\n", irq_idx); return 0; @@ -374,32 +377,6 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr, return intr_status; } -static unsigned long dpu_hw_intr_lock(struct dpu_hw_intr *intr) -{ - unsigned long irq_flags; - - spin_lock_irqsave(&intr->irq_lock, irq_flags); - - return irq_flags; -} - -static void dpu_hw_intr_unlock(struct dpu_hw_intr *intr, unsigned long irq_flags) -{ - spin_unlock_irqrestore(&intr->irq_lock, irq_flags); -} - -static void __setup_intr_ops(struct dpu_hw_intr_ops *ops) -{ - ops->enable_irq_locked = dpu_hw_intr_enable_irq_locked; - ops->disable_irq_locked = dpu_hw_intr_disable_irq_locked; - ops->dispatch_irqs = dpu_hw_intr_dispatch_irq; - ops->clear_all_irqs = dpu_hw_intr_clear_irqs; - ops->disable_all_irqs = dpu_hw_intr_disable_irqs; - ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status; - ops->lock = dpu_hw_intr_lock; - ops->unlock = dpu_hw_intr_unlock; -} - static void __intr_offset(struct dpu_mdss_cfg *m, void __iomem *addr, struct dpu_hw_blk_reg_map *hw) { @@ -421,7 +398,6 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr, return ERR_PTR(-ENOMEM); __intr_offset(m, addr, &intr->hw); - __setup_intr_ops(&intr->ops); intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32; @@ -443,7 +419,168 @@ void dpu_hw_intr_destroy(struct dpu_hw_intr *intr) { if (intr) { kfree(intr->cache_irq_mask); + + kfree(intr->irq_cb_tbl); + kfree(intr->irq_counts); + kfree(intr); } } +int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx, + struct 
dpu_irq_callback *register_irq_cb) +{ + unsigned long irq_flags; + + if (!dpu_kms->hw_intr->irq_cb_tbl) { + DPU_ERROR("invalid params\n"); + return -EINVAL; + } + + if (!register_irq_cb || !register_irq_cb->func) { + DPU_ERROR("invalid irq_cb:%d func:%d\n", + register_irq_cb != NULL, + register_irq_cb ? + register_irq_cb->func != NULL : -1); + return -EINVAL; + } + + if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) { + DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx); + return -EINVAL; + } + + VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx); + + spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags); + trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb); + list_del_init(®ister_irq_cb->list); + list_add_tail(®ister_irq_cb->list, + &dpu_kms->hw_intr->irq_cb_tbl[irq_idx]); + if (list_is_first(®ister_irq_cb->list, + &dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) { + int ret = dpu_hw_intr_enable_irq_locked( + dpu_kms->hw_intr, + irq_idx); + if (ret) + DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n", + irq_idx); + } + spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags); + + return 0; +} + +int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx, + struct dpu_irq_callback *register_irq_cb) +{ + unsigned long irq_flags; + + if (!dpu_kms->hw_intr->irq_cb_tbl) { + DPU_ERROR("invalid params\n"); + return -EINVAL; + } + + if (!register_irq_cb || !register_irq_cb->func) { + DPU_ERROR("invalid irq_cb:%d func:%d\n", + register_irq_cb != NULL, + register_irq_cb ? + register_irq_cb->func != NULL : -1); + return -EINVAL; + } + + if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) { + DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx); + return -EINVAL; + } + + VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx); + + spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags); + trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb); + list_del_init(®ister_irq_cb->list); + /* empty callback list but interrupt is still enabled */ + if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) { + int ret = dpu_hw_intr_disable_irq_locked( + dpu_kms->hw_intr, + irq_idx); + if (ret) + DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n", + irq_idx); + VERB("irq_idx=%d ret=%d\n", irq_idx, ret); + } + spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) +{ + struct dpu_kms *dpu_kms = s->private; + struct dpu_irq_callback *cb; + unsigned long irq_flags; + int i, irq_count, cb_count; + + if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl)) + return 0; + + for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) { + spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags); + cb_count = 0; + irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]); + list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list) + cb_count++; + spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags); + + if (irq_count || cb_count) + seq_printf(s, "idx:%d irq:%d cb:%d\n", + i, irq_count, cb_count); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq); + +void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, + struct dentry *parent) +{ + debugfs_create_file("core_irq", 0600, parent, dpu_kms, + &dpu_debugfs_core_irq_fops); +} +#endif + +void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms) +{ + int i; + + pm_runtime_get_sync(&dpu_kms->pdev->dev); + dpu_clear_irqs(dpu_kms); + dpu_disable_all_irqs(dpu_kms); + 
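dpu_core_irq_register_callback()/unregister_callback() above implement enable-on-first/disable-on-last by keying off the callback list itself, entirely inside the irqsave spinlock. The skeleton of that idiom:

        #include <linux/list.h>
        #include <linux/spinlock.h>

        static void cb_register(spinlock_t *lock, struct list_head *cb_tbl,
                                struct list_head *cb, void (*hw_enable)(void))
        {
                unsigned long flags;

                spin_lock_irqsave(lock, flags);
                list_add_tail(cb, cb_tbl);
                if (list_is_first(cb, cb_tbl))  /* first consumer: IRQ on */
                        hw_enable();
                spin_unlock_irqrestore(lock, flags);
        }

        static void cb_unregister(spinlock_t *lock, struct list_head *cb_tbl,
                                  struct list_head *cb, void (*hw_disable)(void))
        {
                unsigned long flags;

                spin_lock_irqsave(lock, flags);
                list_del_init(cb);
                if (list_empty(cb_tbl))         /* last consumer gone: IRQ off */
                        hw_disable();
                spin_unlock_irqrestore(lock, flags);
        }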
pm_runtime_put_sync(&dpu_kms->pdev->dev); + + /* Create irq callbacks for all possible irq_idx */ + dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs, + sizeof(struct list_head), GFP_KERNEL); + dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs, + sizeof(atomic_t), GFP_KERNEL); + for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) { + INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]); + atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0); + } +} + +void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms) +{ + int i; + + pm_runtime_get_sync(&dpu_kms->pdev->dev); + for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) + if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i])) + DPU_ERROR("irq_idx=%d still enabled/registered\n", i); + + dpu_clear_irqs(dpu_kms); + dpu_disable_all_irqs(dpu_kms); + pm_runtime_put_sync(&dpu_kms->pdev->dev); +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h index ac83c1159815..d50e78c9f148 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h @@ -32,92 +32,6 @@ enum dpu_hw_intr_reg { #define DPU_IRQ_IDX(reg_idx, offset) (reg_idx * 32 + offset) -struct dpu_hw_intr; - -/** - * Interrupt operations. - */ -struct dpu_hw_intr_ops { - - /** - * enable_irq - Enable IRQ based on lookup IRQ index - * @intr: HW interrupt handle - * @irq_idx: Lookup irq index return from irq_idx_lookup - * @return: 0 for success, otherwise failure - */ - int (*enable_irq_locked)( - struct dpu_hw_intr *intr, - int irq_idx); - - /** - * disable_irq - Disable IRQ based on lookup IRQ index - * @intr: HW interrupt handle - * @irq_idx: Lookup irq index return from irq_idx_lookup - * @return: 0 for success, otherwise failure - */ - int (*disable_irq_locked)( - struct dpu_hw_intr *intr, - int irq_idx); - - /** - * clear_all_irqs - Clears all the interrupts (i.e. acknowledges - * any asserted IRQs). Useful during reset. - * @intr: HW interrupt handle - * @return: 0 for success, otherwise failure - */ - int (*clear_all_irqs)( - struct dpu_hw_intr *intr); - - /** - * disable_all_irqs - Disables all the interrupts. Useful during reset. - * @intr: HW interrupt handle - * @return: 0 for success, otherwise failure - */ - int (*disable_all_irqs)( - struct dpu_hw_intr *intr); - - /** - * dispatch_irqs - IRQ dispatcher will call the given callback - * function when a matching interrupt status bit is - * found in the irq mapping table. - * @intr: HW interrupt handle - * @cbfunc: Callback function pointer - * @arg: Argument to pass back during callback - */ - void (*dispatch_irqs)( - struct dpu_hw_intr *intr, - void (*cbfunc)(void *arg, int irq_idx), - void *arg); - - /** - * get_interrupt_status - Gets HW interrupt status, and clear if set, - * based on given lookup IRQ index. 
- * @intr: HW interrupt handle - * @irq_idx: Lookup irq index return from irq_idx_lookup - * @clear: True to clear irq after read - */ - u32 (*get_interrupt_status)( - struct dpu_hw_intr *intr, - int irq_idx, - bool clear); - - /** - * lock - take the IRQ lock - * @intr: HW interrupt handle - * @return: irq_flags for the taken spinlock - */ - unsigned long (*lock)( - struct dpu_hw_intr *intr); - - /** - * unlock - take the IRQ lock - * @intr: HW interrupt handle - * @irq_flags: the irq_flags returned from lock - */ - void (*unlock)( - struct dpu_hw_intr *intr, unsigned long irq_flags); -}; - /** * struct dpu_hw_intr: hw interrupts handling data structure * @hw: virtual address mapping @@ -126,15 +40,19 @@ struct dpu_hw_intr_ops { * @save_irq_status: array of IRQ status reg storage created during init * @total_irqs: total number of irq_idx mapped in the hw_interrupts * @irq_lock: spinlock for accessing IRQ resources + * @irq_cb_tbl: array of IRQ callbacks lists + * @irq_counts: array of IRQ counts */ struct dpu_hw_intr { struct dpu_hw_blk_reg_map hw; - struct dpu_hw_intr_ops ops; u32 *cache_irq_mask; u32 *save_irq_status; u32 total_irqs; spinlock_t irq_lock; unsigned long irq_mask; + + struct list_head *irq_cb_tbl; + atomic_t *irq_counts; }; /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c index cb6bb7a22c15..86363c0ec834 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. +/* + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */ #include "dpu_kms.h" @@ -24,6 +25,15 @@ #define LM_BLEND0_FG_ALPHA 0x04 #define LM_BLEND0_BG_ALPHA 0x08 +#define LM_MISR_CTRL 0x310 +#define LM_MISR_SIGNATURE 0x314 +#define LM_MISR_FRAME_COUNT_MASK 0xFF +#define LM_MISR_CTRL_ENABLE BIT(8) +#define LM_MISR_CTRL_STATUS BIT(9) +#define LM_MISR_CTRL_STATUS_CLEAR BIT(10) +#define LM_MISR_CTRL_FREE_RUN_MASK BIT(31) + + static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer, const struct dpu_mdss_cfg *m, void __iomem *addr, @@ -96,6 +106,48 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx, } } +static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 config = 0; + + DPU_REG_WRITE(c, LM_MISR_CTRL, LM_MISR_CTRL_STATUS_CLEAR); + + /* Clear old MISR value (in case it's read before a new value is calculated)*/ + wmb(); + + if (enable) { + config = (frame_count & LM_MISR_FRAME_COUNT_MASK) | + LM_MISR_CTRL_ENABLE | LM_MISR_CTRL_FREE_RUN_MASK; + + DPU_REG_WRITE(c, LM_MISR_CTRL, config); + } else { + DPU_REG_WRITE(c, LM_MISR_CTRL, 0); + } + +} + +static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 ctrl = 0; + + if (!misr_value) + return -EINVAL; + + ctrl = DPU_REG_READ(c, LM_MISR_CTRL); + + if (!(ctrl & LM_MISR_CTRL_ENABLE)) + return -EINVAL; + + if (!(ctrl & LM_MISR_CTRL_STATUS)) + return -EINVAL; + + *misr_value = DPU_REG_READ(c, LM_MISR_SIGNATURE); + + return 0; +} + static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx, u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op) { @@ -158,6 +210,8 @@ static void _setup_mixer_ops(const struct dpu_mdss_cfg *m, ops->setup_blend_config = dpu_hw_lm_setup_blend_config; ops->setup_alpha_out = dpu_hw_lm_setup_color3; 
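The new setup_misr()/collect_misr() mixer ops above expose the layer mixer's MISR block, which latches a CRC-like signature over a programmable number of frames. A sketch of the caller's side within the dpu driver (collect_misr() returns -EINVAL until the signature is latched, so the vblank path simply retries next frame):

        /* Arm once, then read one CRC per vblank. */
        static void crc_one_frame(struct dpu_hw_mixer *lm)
        {
                u32 crc;

                /* Accumulate the signature over a single frame. */
                lm->ops.setup_misr(lm, true, 1);

                /* Later, from the vblank path: read it back once latched. */
                if (!lm->ops.collect_misr(lm, &crc))
                        pr_debug("frame CRC: %08x\n", crc);
        }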
ops->setup_border_color = dpu_hw_lm_setup_border_color; + ops->setup_misr = dpu_hw_lm_setup_misr; + ops->collect_misr = dpu_hw_lm_collect_misr; } struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h index 4a6b2de19ef6..d8052fb2d5da 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. +/* + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */ #ifndef _DPU_HW_LM_H @@ -53,6 +54,16 @@ struct dpu_hw_lm_ops { void (*setup_border_color)(struct dpu_hw_mixer *ctx, struct dpu_mdss_color *color, u8 border_en); + + /** + * setup_misr: Enable/disable MISR + */ + void (*setup_misr)(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count); + + /** + * collect_misr: Read MISR signature + */ + int (*collect_misr)(struct dpu_hw_mixer *ctx, u32 *misr_value); }; struct dpu_hw_mixer { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index 69eed7932486..f9460672176a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -138,11 +138,13 @@ static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx, u32 *idx) { int rc = 0; - const struct dpu_sspp_sub_blks *sblk = ctx->cap->sblk; + const struct dpu_sspp_sub_blks *sblk; - if (!ctx) + if (!ctx || !ctx->cap || !ctx->cap->sblk) return -EINVAL; + sblk = ctx->cap->sblk; + switch (s_id) { case DPU_SSPP_SRC: *idx = sblk->src_blk.base; @@ -419,7 +421,7 @@ static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx, (void)pe; if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp - || !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk) + || !scaler3_cfg) return; dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h index ff3cffde84cd..6d4911957e33 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. +/* + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 
*/ #ifndef _DPU_HW_UTIL_H diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index ae48f41821cf..a15b26428280 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -188,6 +188,7 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) struct dentry *entry; struct drm_device *dev; struct msm_drm_private *priv; + int i; if (!p) return -EINVAL; @@ -203,8 +204,10 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) dpu_debugfs_vbif_init(dpu_kms, entry); dpu_debugfs_core_irq_init(dpu_kms, entry); - if (priv->dp) - msm_dp_debugfs_init(priv->dp, minor); + for (i = 0; i < ARRAY_SIZE(priv->dp); i++) { + if (priv->dp[i]) + msm_dp_debugfs_init(priv->dp[i], minor); + } return dpu_core_perf_debugfs_init(dpu_kms, entry); } @@ -544,35 +547,42 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev, { struct drm_encoder *encoder = NULL; struct msm_display_info info; - int rc = 0; + int rc; + int i; - if (!priv->dp) - return rc; + for (i = 0; i < ARRAY_SIZE(priv->dp); i++) { + if (!priv->dp[i]) + continue; - encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS); - if (IS_ERR(encoder)) { - DPU_ERROR("encoder init failed for dsi display\n"); - return PTR_ERR(encoder); - } + encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS); + if (IS_ERR(encoder)) { + DPU_ERROR("encoder init failed for dsi display\n"); + return PTR_ERR(encoder); + } - memset(&info, 0, sizeof(info)); - rc = msm_dp_modeset_init(priv->dp, dev, encoder); - if (rc) { - DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); - drm_encoder_cleanup(encoder); - return rc; - } + memset(&info, 0, sizeof(info)); + rc = msm_dp_modeset_init(priv->dp[i], dev, encoder); + if (rc) { + DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); + drm_encoder_cleanup(encoder); + return rc; + } - priv->encoders[priv->num_encoders++] = encoder; + priv->encoders[priv->num_encoders++] = encoder; - info.num_of_h_tiles = 1; - info.capabilities = MSM_DISPLAY_CAP_VID_MODE; - info.intf_type = encoder->encoder_type; - rc = dpu_encoder_setup(dev, encoder, &info); - if (rc) - DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n", - encoder->base.id, rc); - return rc; + info.num_of_h_tiles = 1; + info.h_tile_instance[0] = i; + info.capabilities = MSM_DISPLAY_CAP_VID_MODE; + info.intf_type = encoder->encoder_type; + rc = dpu_encoder_setup(dev, encoder, &info); + if (rc) { + DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n", + encoder->base.id, rc); + return rc; + } + } + + return 0; } /** @@ -792,6 +802,7 @@ static int dpu_irq_postinstall(struct msm_kms *kms) { struct msm_drm_private *priv; struct dpu_kms *dpu_kms = to_dpu_kms(kms); + int i; if (!dpu_kms || !dpu_kms->dev) return -EINVAL; @@ -800,7 +811,8 @@ static int dpu_irq_postinstall(struct msm_kms *kms) if (!priv) return -EINVAL; - msm_dp_irq_postinstall(priv->dp); + for (i = 0; i < ARRAY_SIZE(priv->dp); i++) + msm_dp_irq_postinstall(priv->dp[i]); return 0; } @@ -908,6 +920,10 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) return 0; mmu = msm_iommu_new(dpu_kms->dev->dev, domain); + if (IS_ERR(mmu)) { + iommu_domain_free(domain); + return PTR_ERR(mmu); + } aspace = msm_gem_address_space_create(mmu, "dpu1", 0x1000, 0x100000000 - 0x1000); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 323a6bce9e64..775bcbda860f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -78,18 +78,6 @@ struct dpu_irq_callback { void *arg; }; -/** - * struct dpu_irq: IRQ structure contains callback registration info - * @total_irq: total number of irq_idx obtained from HW interrupts mapping - * @irq_cb_tbl: array of IRQ callbacks setting - * @debugfs_file: debugfs file for irq statistics - */ -struct dpu_irq { - u32 total_irqs; - struct list_head *irq_cb_tbl; - atomic_t *irq_counts; -}; - struct dpu_kms { struct msm_kms base; struct drm_device *dev; @@ -104,7 +92,6 @@ struct dpu_kms { struct regulator *venus; struct dpu_hw_intr *hw_intr; - struct dpu_irq irq_obj; struct dpu_core_perf perf; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index c989621209aa..a3e3b9d1b82e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -1193,7 +1193,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) if (DPU_FORMAT_IS_YUV(fmt)) _dpu_plane_setup_csc(pdpu); else - pdpu->csc_ptr = 0; + pdpu->csc_ptr = NULL; } _dpu_plane_set_qos_lut(plane, fb); @@ -1330,7 +1330,7 @@ static void dpu_plane_reset(struct drm_plane *plane) /* remove previous state, if present */ if (plane->state) { dpu_plane_destroy_state(plane, plane->state); - plane->state = 0; + plane->state = NULL; } pstate = kzalloc(sizeof(*pstate), GFP_KERNEL); diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index cdcaf470f148..5a33bb148e9e 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -173,12 +173,9 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms) DBG(""); clk_disable_unprepare(mdp4_kms->clk); - if (mdp4_kms->pclk) - clk_disable_unprepare(mdp4_kms->pclk); - if (mdp4_kms->lut_clk) - clk_disable_unprepare(mdp4_kms->lut_clk); - if (mdp4_kms->axi_clk) - clk_disable_unprepare(mdp4_kms->axi_clk); + clk_disable_unprepare(mdp4_kms->pclk); + clk_disable_unprepare(mdp4_kms->lut_clk); + clk_disable_unprepare(mdp4_kms->axi_clk); return 0; } @@ -188,12 +185,9 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms) DBG(""); clk_prepare_enable(mdp4_kms->clk); - if (mdp4_kms->pclk) - clk_prepare_enable(mdp4_kms->pclk); - if (mdp4_kms->lut_clk) - clk_prepare_enable(mdp4_kms->lut_clk); - if (mdp4_kms->axi_clk) - clk_prepare_enable(mdp4_kms->axi_clk); + clk_prepare_enable(mdp4_kms->pclk); + clk_prepare_enable(mdp4_kms->lut_clk); + clk_prepare_enable(mdp4_kms->axi_clk); return 0; } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c index 9741544ffc35..1bf9ff5dbabc 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c @@ -752,6 +752,94 @@ const struct mdp5_cfg_hw msm8x76_config = { .max_clk = 360000000, }; +static const struct mdp5_cfg_hw msm8x53_config = { + .name = "msm8x53", + .mdp = { + .count = 1, + .caps = MDP_CAP_CDM | + MDP_CAP_SRC_SPLIT, + }, + .ctl = { + .count = 3, + .base = { 0x01000, 0x01200, 0x01400 }, + .flush_hw_mask = 0xffffffff, + }, + .pipe_vig = { + .count = 1, + .base = { 0x04000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 2, + .base = { 0x14000, 0x16000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 1, + .base = { 0x24000 }, + .caps = 
MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_cursor = { + .count = 1, + .base = { 0x34000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + + .lm = { + .count = 3, + .base = { 0x44000, 0x45000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR }, + { .id = 1, .pp = 1, .dspp = -1, + .caps = MDP_LM_CAP_DISPLAY }, + }, + .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 1, + .base = { 0x54000 }, + + }, + .pp = { + .count = 2, + .base = { 0x70000, 0x70800 }, + }, + .cdm = { + .count = 1, + .base = { 0x79200 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800, 0x6b000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + }, + }, + .max_clk = 400000000, +}; + static const struct mdp5_cfg_hw msm8917_config = { .name = "msm8917", .mdp = { @@ -1151,6 +1239,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = { { .revision = 7, .config = { .hw = &msm8x96_config } }, { .revision = 11, .config = { .hw = &msm8x76_config } }, { .revision = 15, .config = { .hw = &msm8917_config } }, + { .revision = 16, .config = { .hw = &msm8x53_config } }, }; static const struct mdp5_cfg_handler cfg_handlers_v3[] = { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index f482e0911d03..bb7d066618e6 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -1125,6 +1125,20 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc) __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base); } +static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = mdp5_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .reset = mdp5_crtc_reset, + .atomic_duplicate_state = mdp5_crtc_duplicate_state, + .atomic_destroy_state = mdp5_crtc_destroy_state, + .atomic_print_state = mdp5_crtc_atomic_print_state, + .get_vblank_counter = mdp5_crtc_get_vblank_counter, + .enable_vblank = msm_crtc_enable_vblank, + .disable_vblank = msm_crtc_disable_vblank, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, +}; + static const struct drm_crtc_funcs mdp5_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = mdp5_crtc_destroy, @@ -1313,6 +1327,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true; drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane, + cursor_plane ? 
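
A note on the clock cleanups in the surrounding mdp4/mdp5 hunks: they rely on the common clock framework treating a NULL struct clk pointer as a no-op, so clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) does nothing. That makes the per-clock "if (clk)" guards redundant, provided optional clocks are stored as NULL when absent (e.g. obtained via devm_clk_get_optional()) rather than as error pointers. A minimal sketch with a hypothetical my_kms type:

    #include <linux/clk.h>

    struct my_kms {
            struct clk *core_clk;  /* required */
            struct clk *lut_clk;   /* optional, NULL when absent */
    };

    static int my_kms_enable(struct my_kms *kms)
    {
            int ret;

            ret = clk_prepare_enable(kms->core_clk);
            if (ret)
                    return ret;

            /* No NULL guard needed: a NULL clk is accepted and ignored. */
            ret = clk_prepare_enable(kms->lut_clk);
            if (ret)
                    clk_disable_unprepare(kms->core_clk);

            return ret;
    }

    static void my_kms_disable(struct my_kms *kms)
    {
            clk_disable_unprepare(kms->lut_clk);
            clk_disable_unprepare(kms->core_clk);
    }
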
+ &mdp5_crtc_no_lm_cursor_funcs : &mdp5_crtc_funcs, NULL); drm_flip_work_init(&mdp5_crtc->unref_cursor_work, diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index b3b42672b2d4..7b242246d4e7 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -295,15 +295,12 @@ static int mdp5_disable(struct mdp5_kms *mdp5_kms) mdp5_kms->enable_count--; WARN_ON(mdp5_kms->enable_count < 0); - if (mdp5_kms->tbu_rt_clk) - clk_disable_unprepare(mdp5_kms->tbu_rt_clk); - if (mdp5_kms->tbu_clk) - clk_disable_unprepare(mdp5_kms->tbu_clk); + clk_disable_unprepare(mdp5_kms->tbu_rt_clk); + clk_disable_unprepare(mdp5_kms->tbu_clk); clk_disable_unprepare(mdp5_kms->ahb_clk); clk_disable_unprepare(mdp5_kms->axi_clk); clk_disable_unprepare(mdp5_kms->core_clk); - if (mdp5_kms->lut_clk) - clk_disable_unprepare(mdp5_kms->lut_clk); + clk_disable_unprepare(mdp5_kms->lut_clk); return 0; } @@ -317,12 +314,9 @@ static int mdp5_enable(struct mdp5_kms *mdp5_kms) clk_prepare_enable(mdp5_kms->ahb_clk); clk_prepare_enable(mdp5_kms->axi_clk); clk_prepare_enable(mdp5_kms->core_clk); - if (mdp5_kms->lut_clk) - clk_prepare_enable(mdp5_kms->lut_clk); - if (mdp5_kms->tbu_clk) - clk_prepare_enable(mdp5_kms->tbu_clk); - if (mdp5_kms->tbu_rt_clk) - clk_prepare_enable(mdp5_kms->tbu_rt_clk); + clk_prepare_enable(mdp5_kms->lut_clk); + clk_prepare_enable(mdp5_kms->tbu_clk); + clk_prepare_enable(mdp5_kms->tbu_rt_clk); return 0; } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c index 2f4895bcb0b0..0ea53420bc40 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c @@ -136,10 +136,8 @@ static int mdp5_mdss_enable(struct msm_mdss *mdss) DBG(""); clk_prepare_enable(mdp5_mdss->ahb_clk); - if (mdp5_mdss->axi_clk) - clk_prepare_enable(mdp5_mdss->axi_clk); - if (mdp5_mdss->vsync_clk) - clk_prepare_enable(mdp5_mdss->vsync_clk); + clk_prepare_enable(mdp5_mdss->axi_clk); + clk_prepare_enable(mdp5_mdss->vsync_clk); return 0; } @@ -149,10 +147,8 @@ static int mdp5_mdss_disable(struct msm_mdss *mdss) struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss); DBG(""); - if (mdp5_mdss->vsync_clk) - clk_disable_unprepare(mdp5_mdss->vsync_clk); - if (mdp5_mdss->axi_clk) - clk_disable_unprepare(mdp5_mdss->axi_clk); + clk_disable_unprepare(mdp5_mdss->vsync_clk); + clk_disable_unprepare(mdp5_mdss->axi_clk); clk_disable_unprepare(mdp5_mdss->ahb_clk); return 0; diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c index cabe15190ec1..2e1acb1bc390 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -126,8 +126,12 @@ void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state) priv = drm_dev->dev_private; kms = priv->kms; - if (priv->dp) - msm_dp_snapshot(disp_state, priv->dp); + for (i = 0; i < ARRAY_SIZE(priv->dp); i++) { + if (!priv->dp[i]) + continue; + + msm_dp_snapshot(disp_state, priv->dp[i]); + } for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { if (!priv->dsi[i]) diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index cc2bb8295329..6ae9b29044b6 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -24,15 +24,6 @@ #define DP_INTERRUPT_STATUS_ACK_SHIFT 1 #define DP_INTERRUPT_STATUS_MASK_SHIFT 2 -#define MSM_DP_CONTROLLER_AHB_OFFSET 0x0000 -#define 
MSM_DP_CONTROLLER_AHB_SIZE 0x0200 -#define MSM_DP_CONTROLLER_AUX_OFFSET 0x0200 -#define MSM_DP_CONTROLLER_AUX_SIZE 0x0200 -#define MSM_DP_CONTROLLER_LINK_OFFSET 0x0400 -#define MSM_DP_CONTROLLER_LINK_SIZE 0x0C00 -#define MSM_DP_CONTROLLER_P0_OFFSET 0x1000 -#define MSM_DP_CONTROLLER_P0_SIZE 0x0400 - #define DP_INTERRUPT_STATUS1 \ (DP_INTR_AUX_I2C_DONE| \ DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \ @@ -66,82 +57,77 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d { struct dp_catalog_private *catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + struct dss_io_data *dss = &catalog->io->dp_controller; - msm_disp_snapshot_add_block(disp_state, catalog->io->dp_controller.len, - catalog->io->dp_controller.base, "dp_ctrl"); + msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb"); + msm_disp_snapshot_add_block(disp_state, dss->aux.len, dss->aux.base, "dp_aux"); + msm_disp_snapshot_add_block(disp_state, dss->link.len, dss->link.base, "dp_link"); + msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0"); } static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset) { - offset += MSM_DP_CONTROLLER_AUX_OFFSET; - return readl_relaxed(catalog->io->dp_controller.base + offset); + return readl_relaxed(catalog->io->dp_controller.aux.base + offset); } static inline void dp_write_aux(struct dp_catalog_private *catalog, u32 offset, u32 data) { - offset += MSM_DP_CONTROLLER_AUX_OFFSET; /* * To make sure aux reg writes happens before any other operation, * this function uses writel() instread of writel_relaxed() */ - writel(data, catalog->io->dp_controller.base + offset); + writel(data, catalog->io->dp_controller.aux.base + offset); } static inline u32 dp_read_ahb(struct dp_catalog_private *catalog, u32 offset) { - offset += MSM_DP_CONTROLLER_AHB_OFFSET; - return readl_relaxed(catalog->io->dp_controller.base + offset); + return readl_relaxed(catalog->io->dp_controller.ahb.base + offset); } static inline void dp_write_ahb(struct dp_catalog_private *catalog, u32 offset, u32 data) { - offset += MSM_DP_CONTROLLER_AHB_OFFSET; /* * To make sure phy reg writes happens before any other operation, * this function uses writel() instread of writel_relaxed() */ - writel(data, catalog->io->dp_controller.base + offset); + writel(data, catalog->io->dp_controller.ahb.base + offset); } static inline void dp_write_p0(struct dp_catalog_private *catalog, u32 offset, u32 data) { - offset += MSM_DP_CONTROLLER_P0_OFFSET; /* * To make sure interface reg writes happens before any other operation, * this function uses writel() instread of writel_relaxed() */ - writel(data, catalog->io->dp_controller.base + offset); + writel(data, catalog->io->dp_controller.p0.base + offset); } static inline u32 dp_read_p0(struct dp_catalog_private *catalog, u32 offset) { - offset += MSM_DP_CONTROLLER_P0_OFFSET; /* * To make sure interface reg writes happens before any other operation, * this function uses writel() instread of writel_relaxed() */ - return readl_relaxed(catalog->io->dp_controller.base + offset); + return readl_relaxed(catalog->io->dp_controller.p0.base + offset); } static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset) { - offset += MSM_DP_CONTROLLER_LINK_OFFSET; - return readl_relaxed(catalog->io->dp_controller.base + offset); + return readl_relaxed(catalog->io->dp_controller.link.base + offset); } static inline void dp_write_link(struct dp_catalog_private *catalog, u32 offset, u32 data) { - offset 
+= MSM_DP_CONTROLLER_LINK_OFFSET; /* * To make sure link reg writes happens before any other operation, * this function uses writel() instread of writel_relaxed() */ - writel(data, catalog->io->dp_controller.base + offset); + writel(data, catalog->io->dp_controller.link.base + offset); } /* aux related catalog functions */ @@ -276,29 +262,21 @@ static void dump_regs(void __iomem *base, int len) void dp_catalog_dump_regs(struct dp_catalog *dp_catalog) { - u32 offset, len; struct dp_catalog_private *catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + struct dss_io_data *io = &catalog->io->dp_controller; pr_info("AHB regs\n"); - offset = MSM_DP_CONTROLLER_AHB_OFFSET; - len = MSM_DP_CONTROLLER_AHB_SIZE; - dump_regs(catalog->io->dp_controller.base + offset, len); + dump_regs(io->ahb.base, io->ahb.len); pr_info("AUXCLK regs\n"); - offset = MSM_DP_CONTROLLER_AUX_OFFSET; - len = MSM_DP_CONTROLLER_AUX_SIZE; - dump_regs(catalog->io->dp_controller.base + offset, len); + dump_regs(io->aux.base, io->aux.len); pr_info("LCLK regs\n"); - offset = MSM_DP_CONTROLLER_LINK_OFFSET; - len = MSM_DP_CONTROLLER_LINK_SIZE; - dump_regs(catalog->io->dp_controller.base + offset, len); + dump_regs(io->link.base, io->link.len); pr_info("P0CLK regs\n"); - offset = MSM_DP_CONTROLLER_P0_OFFSET; - len = MSM_DP_CONTROLLER_P0_SIZE; - dump_regs(catalog->io->dp_controller.base + offset, len); + dump_regs(io->p0.base, io->p0.len); } u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog) @@ -493,8 +471,7 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, bit = BIT(pattern - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT; /* Poll for mainlink ready status */ - ret = readx_poll_timeout(readl, catalog->io->dp_controller.base + - MSM_DP_CONTROLLER_LINK_OFFSET + + ret = readx_poll_timeout(readl, catalog->io->dp_controller.link.base + REG_DP_MAINLINK_READY, data, data & bit, POLLING_SLEEP_US, POLLING_TIMEOUT_US); @@ -541,8 +518,7 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog) struct dp_catalog_private, dp_catalog); /* Poll for mainlink ready status */ - ret = readl_poll_timeout(catalog->io->dp_controller.base + - MSM_DP_CONTROLLER_LINK_OFFSET + + ret = readl_poll_timeout(catalog->io->dp_controller.link.base + REG_DP_MAINLINK_READY, data, data & DP_MAINLINK_READY_FOR_VIDEO, POLLING_SLEEP_US, POLLING_TIMEOUT_US); diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c index 2f6247e80e9d..da4323556ef3 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.c +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -24,240 +24,108 @@ struct dp_debug_private { struct dp_usbpd *usbpd; struct dp_link *link; struct dp_panel *panel; - struct drm_connector **connector; + struct drm_connector *connector; struct device *dev; struct drm_device *drm_dev; struct dp_debug dp_debug; }; -static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len) -{ - if (rc >= *max_size) { - DRM_ERROR("buffer overflow\n"); - return -EINVAL; - } - *len += rc; - *max_size = SZ_4K - *len; - - return 0; -} - -static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff, - size_t count, loff_t *ppos) +static int dp_debug_show(struct seq_file *seq, void *p) { - struct dp_debug_private *debug = file->private_data; - char *buf; - u32 len = 0, rc = 0; + struct dp_debug_private *debug = seq->private; u64 lclk = 0; - u32 max_size = SZ_4K; u32 link_params_rate; - struct drm_display_mode *drm_mode; + const struct drm_display_mode *drm_mode; if (!debug) return -ENODEV; - if (*ppos) - 
return 0; - - buf = kzalloc(SZ_4K, GFP_KERNEL); - if (!buf) - return -ENOMEM; - drm_mode = &debug->panel->dp_mode.drm_mode; - rc = snprintf(buf + len, max_size, "\tname = %s\n", DEBUG_NAME); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\tdp_panel\n\t\tmax_pclk_khz = %d\n", + seq_printf(seq, "\tname = %s\n", DEBUG_NAME); + seq_printf(seq, "\tdp_panel\n\t\tmax_pclk_khz = %d\n", debug->panel->max_pclk_khz); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\tdrm_dp_link\n\t\trate = %u\n", + seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n", debug->panel->link_info.rate); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tnum_lanes = %u\n", + seq_printf(seq, "\t\tnum_lanes = %u\n", debug->panel->link_info.num_lanes); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tcapabilities = %lu\n", + seq_printf(seq, "\t\tcapabilities = %lu\n", debug->panel->link_info.capabilities); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\tdp_panel_info:\n\t\tactive = %dx%d\n", + seq_printf(seq, "\tdp_panel_info:\n\t\tactive = %dx%d\n", drm_mode->hdisplay, drm_mode->vdisplay); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tback_porch = %dx%d\n", + seq_printf(seq, "\t\tback_porch = %dx%d\n", drm_mode->htotal - drm_mode->hsync_end, drm_mode->vtotal - drm_mode->vsync_end); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tfront_porch = %dx%d\n", + seq_printf(seq, "\t\tfront_porch = %dx%d\n", drm_mode->hsync_start - drm_mode->hdisplay, drm_mode->vsync_start - drm_mode->vdisplay); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tsync_width = %dx%d\n", + seq_printf(seq, "\t\tsync_width = %dx%d\n", drm_mode->hsync_end - drm_mode->hsync_start, drm_mode->vsync_end - drm_mode->vsync_start); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tactive_low = %dx%d\n", + seq_printf(seq, "\t\tactive_low = %dx%d\n", debug->panel->dp_mode.h_active_low, debug->panel->dp_mode.v_active_low); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\th_skew = %d\n", + seq_printf(seq, "\t\th_skew = %d\n", drm_mode->hskew); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\trefresh rate = %d\n", + seq_printf(seq, "\t\trefresh rate = %d\n", drm_mode_vrefresh(drm_mode)); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tpixel clock khz = %d\n", + seq_printf(seq, "\t\tpixel clock khz = %d\n", drm_mode->clock); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tbpp = %d\n", + seq_printf(seq, "\t\tbpp = %d\n", debug->panel->dp_mode.bpp); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; /* Link Information */ - rc = snprintf(buf + len, max_size, - "\tdp_link:\n\t\ttest_requested = %d\n", + seq_printf(seq, "\tdp_link:\n\t\ttest_requested = %d\n", 
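
The dp_debug rework running through this hunk replaces a hand-rolled snprintf()/copy_to_user() buffer, complete with manual overflow accounting, with the seq_file API; DEFINE_SHOW_ATTRIBUTE() then generates the open handler and file_operations from a single show() callback. A generic sketch of the pattern, with a hypothetical my_state type:

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    struct my_state {
            int value;
    };

    static int my_state_show(struct seq_file *m, void *unused)
    {
            struct my_state *st = m->private; /* from debugfs_create_file() */

            /* seq_printf() grows the buffer as needed: no length bookkeeping. */
            seq_printf(m, "value = %d\n", st->value);
            return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(my_state); /* emits my_state_open() and my_state_fops */

    /* registration, e.g. at init time:
     * debugfs_create_file("state", 0444, parent, st, &my_state_fops);
     */
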
debug->link->sink_request); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tnum_lanes = %d\n", + seq_printf(seq, "\t\tnum_lanes = %d\n", debug->link->link_params.num_lanes); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - link_params_rate = debug->link->link_params.rate; - rc = snprintf(buf + len, max_size, - "\t\tbw_code = %d\n", + seq_printf(seq, "\t\tbw_code = %d\n", drm_dp_link_rate_to_bw_code(link_params_rate)); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - lclk = debug->link->link_params.rate * 1000; - rc = snprintf(buf + len, max_size, - "\t\tlclk = %lld\n", lclk); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tv_level = %d\n", + seq_printf(seq, "\t\tlclk = %lld\n", lclk); + seq_printf(seq, "\t\tv_level = %d\n", debug->link->phy_params.v_level); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tp_level = %d\n", + seq_printf(seq, "\t\tp_level = %d\n", debug->link->phy_params.p_level); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - if (copy_to_user(user_buff, buf, len)) - goto error; - - *ppos += len; - kfree(buf); - return len; - error: - kfree(buf); - return -EINVAL; + return 0; } +DEFINE_SHOW_ATTRIBUTE(dp_debug); static int dp_test_data_show(struct seq_file *m, void *data) { - struct drm_device *dev; - struct dp_debug_private *debug; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + const struct dp_debug_private *debug = m->private; + const struct drm_connector *connector = debug->connector; u32 bpc; - debug = m->private; - dev = debug->drm_dev; - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - if (connector->status == connector_status_connected) { - bpc = debug->link->test_video.test_bit_depth; - seq_printf(m, "hdisplay: %d\n", - debug->link->test_video.test_h_width); - seq_printf(m, "vdisplay: %d\n", - debug->link->test_video.test_v_height); - seq_printf(m, "bpc: %u\n", - dp_link_bit_depth_to_bpc(bpc)); - } else - seq_puts(m, "0"); + if (connector->status == connector_status_connected) { + bpc = debug->link->test_video.test_bit_depth; + seq_printf(m, "hdisplay: %d\n", + debug->link->test_video.test_h_width); + seq_printf(m, "vdisplay: %d\n", + debug->link->test_video.test_v_height); + seq_printf(m, "bpc: %u\n", + dp_link_bit_depth_to_bpc(bpc)); + } else { + seq_puts(m, "0"); } - drm_connector_list_iter_end(&conn_iter); - return 0; } DEFINE_SHOW_ATTRIBUTE(dp_test_data); static int dp_test_type_show(struct seq_file *m, void *data) { - struct dp_debug_private *debug = m->private; - struct drm_device *dev = debug->drm_dev; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + const struct dp_debug_private *debug = m->private; + const struct drm_connector *connector = debug->connector; - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - if (connector->status == connector_status_connected) - seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN); - else - seq_puts(m, "0"); - } - drm_connector_list_iter_end(&conn_iter); + if (connector->status == 
connector_status_connected) + seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN); + else + seq_puts(m, "0"); return 0; } @@ -269,14 +137,12 @@ static ssize_t dp_test_active_write(struct file *file, { char *input_buffer; int status = 0; - struct dp_debug_private *debug; - struct drm_device *dev; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + const struct dp_debug_private *debug; + const struct drm_connector *connector; int val = 0; debug = ((struct seq_file *)file->private_data)->private; - dev = debug->drm_dev; + connector = debug->connector; if (len == 0) return 0; @@ -287,30 +153,22 @@ static ssize_t dp_test_active_write(struct file *file, DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - if (connector->status == connector_status_connected) { - status = kstrtoint(input_buffer, 10, &val); - if (status < 0) - break; - DRM_DEBUG_DRIVER("Got %d for test active\n", val); - /* To prevent erroneous activation of the compliance - * testing code, only accept an actual value of 1 here - */ - if (val == 1) - debug->panel->video_test = true; - else - debug->panel->video_test = false; + if (connector->status == connector_status_connected) { + status = kstrtoint(input_buffer, 10, &val); + if (status < 0) { + kfree(input_buffer); + return status; } + DRM_DEBUG_DRIVER("Got %d for test active\n", val); + /* To prevent erroneous activation of the compliance + * testing code, only accept an actual value of 1 here + */ + if (val == 1) + debug->panel->video_test = true; + else + debug->panel->video_test = false; } - drm_connector_list_iter_end(&conn_iter); kfree(input_buffer); - if (status < 0) - return status; *offp += len; return len; @@ -319,25 +177,16 @@ static ssize_t dp_test_active_write(struct file *file, static int dp_test_active_show(struct seq_file *m, void *data) { struct dp_debug_private *debug = m->private; - struct drm_device *dev = debug->drm_dev; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - if (connector->status == connector_status_connected) { - if (debug->panel->video_test) - seq_puts(m, "1"); - else - seq_puts(m, "0"); - } else + struct drm_connector *connector = debug->connector; + + if (connector->status == connector_status_connected) { + if (debug->panel->video_test) + seq_puts(m, "1"); + else seq_puts(m, "0"); + } else { + seq_puts(m, "0"); } - drm_connector_list_iter_end(&conn_iter); return 0; } @@ -349,11 +198,6 @@ static int dp_test_active_open(struct inode *inode, inode->i_private); } -static const struct file_operations dp_debug_fops = { - .open = simple_open, - .read = dp_debug_read_info, -}; - static const struct file_operations test_active_fops = { .owner = THIS_MODULE, .open = dp_test_active_open, @@ -391,7 +235,7 @@ static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor) struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, struct dp_usbpd *usbpd, struct dp_link *link, - struct drm_connector **connector, struct drm_minor *minor) + struct drm_connector *connector, struct drm_minor *minor) { int rc = 0; struct dp_debug_private *debug; diff --git 
a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h index 7eaedfbb149c..8c0d0b5178fd 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.h +++ b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -43,7 +43,7 @@ struct dp_debug { */ struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, struct dp_usbpd *usbpd, struct dp_link *link, - struct drm_connector **connector, + struct drm_connector *connector, struct drm_minor *minor); /** @@ -60,7 +60,7 @@ void dp_debug_put(struct dp_debug *dp_debug); static inline struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, struct dp_usbpd *usbpd, struct dp_link *link, - struct drm_connector **connector, struct drm_minor *minor) + struct drm_connector *connector, struct drm_minor *minor) { return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index fbe4c2cd52a3..aba8aa47ed76 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -10,6 +10,7 @@ #include <linux/component.h> #include <linux/of_irq.h> #include <linux/delay.h> +#include <drm/drm_panel.h> #include "msm_drv.h" #include "msm_kms.h" @@ -27,7 +28,6 @@ #include "dp_audio.h" #include "dp_debug.h" -static struct msm_dp *g_dp_display; #define HPD_STRING_SIZE 30 enum { @@ -79,6 +79,8 @@ struct dp_display_private { char *name; int irq; + unsigned int id; + /* state variables */ bool core_initialized; bool hpd_irq_on; @@ -116,11 +118,35 @@ struct dp_display_private { struct dp_audio *audio; }; +struct msm_dp_desc { + phys_addr_t io_start; + unsigned int connector_type; +}; + +struct msm_dp_config { + const struct msm_dp_desc *descs; + size_t num_descs; +}; + +static const struct msm_dp_config sc7180_dp_cfg = { + .descs = (const struct msm_dp_desc[]) { + [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, + }, + .num_descs = 1, +}; + static const struct of_device_id dp_dt_match[] = { - {.compatible = "qcom,sc7180-dp"}, + { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_cfg }, {} }; +static struct dp_display_private *dev_get_dp_display_private(struct device *dev) +{ + struct msm_dp *dp = dev_get_drvdata(dev); + + return container_of(dp, struct dp_display_private, dp_display); +} + static int dp_add_event(struct dp_display_private *dp_priv, u32 event, u32 data, u32 delay) { @@ -197,25 +223,24 @@ static int dp_display_bind(struct device *dev, struct device *master, void *data) { int rc = 0; - struct dp_display_private *dp; - struct drm_device *drm; + struct dp_display_private *dp = dev_get_dp_display_private(dev); struct msm_drm_private *priv; + struct drm_device *drm; drm = dev_get_drvdata(master); - dp = container_of(g_dp_display, - struct dp_display_private, dp_display); - dp->dp_display.drm_dev = drm; priv = drm->dev_private; - priv->dp = &(dp->dp_display); + priv->dp[dp->id] = &dp->dp_display; - rc = dp->parser->parse(dp->parser); + rc = dp->parser->parse(dp->parser, dp->dp_display.connector_type); if (rc) { DRM_ERROR("device tree parsing failed\n"); goto end; } + dp->dp_display.panel_bridge = dp->parser->panel_bridge; + dp->aux->drm_dev = drm; rc = dp_aux_register(dp->aux); if (rc) { @@ -240,16 +265,13 @@ end: static void dp_display_unbind(struct device *dev, struct device *master, void *data) { - struct dp_display_private *dp; + struct dp_display_private *dp = dev_get_dp_display_private(dev); struct drm_device *drm = dev_get_drvdata(master); struct msm_drm_private *priv = drm->dev_private; - dp = 
container_of(g_dp_display, - struct dp_display_private, dp_display); - dp_power_client_deinit(dp->power); dp_aux_unregister(dp->aux); - priv->dp = NULL; + priv->dp[dp->id] = NULL; } static const struct component_ops dp_display_comp_ops = { @@ -379,38 +401,17 @@ static void dp_display_host_deinit(struct dp_display_private *dp) static int dp_display_usbpd_configure_cb(struct device *dev) { - int rc = 0; - struct dp_display_private *dp; - - if (!dev) { - DRM_ERROR("invalid dev\n"); - rc = -EINVAL; - goto end; - } - - dp = container_of(g_dp_display, - struct dp_display_private, dp_display); + struct dp_display_private *dp = dev_get_dp_display_private(dev); dp_display_host_init(dp, false); - rc = dp_display_process_hpd_high(dp); -end: - return rc; + return dp_display_process_hpd_high(dp); } static int dp_display_usbpd_disconnect_cb(struct device *dev) { int rc = 0; - struct dp_display_private *dp; - - if (!dev) { - DRM_ERROR("invalid dev\n"); - rc = -EINVAL; - return rc; - } - - dp = container_of(g_dp_display, - struct dp_display_private, dp_display); + struct dp_display_private *dp = dev_get_dp_display_private(dev); dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); @@ -472,15 +473,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev) { int rc = 0; u32 sink_request; - struct dp_display_private *dp; - - if (!dev) { - DRM_ERROR("invalid dev\n"); - return -EINVAL; - } - - dp = container_of(g_dp_display, - struct dp_display_private, dp_display); + struct dp_display_private *dp = dev_get_dp_display_private(dev); /* check for any test request issued by sink */ rc = dp_link_process_request(dp->link); @@ -647,7 +640,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) DRM_DEBUG_DP("hpd_state=%d\n", state); /* signal the disconnect event early to ensure proper teardown */ - dp_display_handle_plugged_change(g_dp_display, false); + dp_display_handle_plugged_change(&dp->dp_display, false); /* enable HDP plug interrupt to prepare for next plugin */ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true); @@ -834,7 +827,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display, return 0; } -static int dp_display_prepare(struct msm_dp *dp) +static int dp_display_prepare(struct msm_dp *dp_display) { return 0; } @@ -842,9 +835,7 @@ static int dp_display_prepare(struct msm_dp *dp) static int dp_display_enable(struct dp_display_private *dp, u32 data) { int rc = 0; - struct msm_dp *dp_display; - - dp_display = g_dp_display; + struct msm_dp *dp_display = &dp->dp_display; DRM_DEBUG_DP("sink_count=%d\n", dp->link->sink_count); if (dp_display->power_on) { @@ -880,9 +871,7 @@ static int dp_display_post_enable(struct msm_dp *dp_display) static int dp_display_disable(struct dp_display_private *dp, u32 data) { - struct msm_dp *dp_display; - - dp_display = g_dp_display; + struct msm_dp *dp_display = &dp->dp_display; if (!dp_display->power_on) return 0; @@ -912,7 +901,7 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data) return 0; } -static int dp_display_unprepare(struct msm_dp *dp) +static int dp_display_unprepare(struct msm_dp *dp_display) { return 0; } @@ -1213,10 +1202,33 @@ int dp_display_request_irq(struct msm_dp *dp_display) return 0; } +static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev, + unsigned int *id) +{ + const struct msm_dp_config *cfg = of_device_get_match_data(&pdev->dev); + struct resource *res; + int i; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return NULL; + + for 
(i = 0; i < cfg->num_descs; i++) { + if (cfg->descs[i].io_start == res->start) { + *id = i; + return &cfg->descs[i]; + } + } + + dev_err(&pdev->dev, "unknown displayport instance\n"); + return NULL; +} + static int dp_display_probe(struct platform_device *pdev) { int rc = 0; struct dp_display_private *dp; + const struct msm_dp_desc *desc; if (!pdev || !pdev->dev.of_node) { DRM_ERROR("pdev not found\n"); @@ -1227,8 +1239,13 @@ static int dp_display_probe(struct platform_device *pdev) if (!dp) return -ENOMEM; + desc = dp_display_get_desc(pdev, &dp->id); + if (!desc) + return -EINVAL; + dp->pdev = pdev; dp->name = "drm_dp"; + dp->dp_display.connector_type = desc->connector_type; rc = dp_init_sub_modules(dp); if (rc) { @@ -1237,14 +1254,13 @@ static int dp_display_probe(struct platform_device *pdev) } mutex_init(&dp->event_mutex); - g_dp_display = &dp->dp_display; /* Store DP audio handle inside DP display */ - g_dp_display->dp_audio = dp->audio; + dp->dp_display.dp_audio = dp->audio; init_completion(&dp->audio_comp); - platform_set_drvdata(pdev, g_dp_display); + platform_set_drvdata(pdev, &dp->dp_display); rc = component_add(&pdev->dev, &dp_display_comp_ops); if (rc) { @@ -1257,10 +1273,7 @@ static int dp_display_probe(struct platform_device *pdev) static int dp_display_remove(struct platform_device *pdev) { - struct dp_display_private *dp; - - dp = container_of(g_dp_display, - struct dp_display_private, dp_display); + struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev); dp_display_deinit_sub_modules(dp); @@ -1309,14 +1322,14 @@ static int dp_pm_resume(struct device *dev) * can not declared display is connected unless * HDMI cable is plugged in and sink_count of * dongle become 1 + * also only signal audio when disconnected */ - if (dp->link->sink_count) + if (dp->link->sink_count) { dp->dp_display.is_connected = true; - else + } else { dp->dp_display.is_connected = false; - - dp_display_handle_plugged_change(g_dp_display, - dp->dp_display.is_connected); + dp_display_handle_plugged_change(dp_display, false); + } DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n", dp->link->sink_count, dp->dp_display.is_connected, @@ -1429,7 +1442,7 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor) dev = &dp->pdev->dev; dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd, - dp->link, &dp->dp_display.connector, + dp->link, dp->dp_display.connector, minor); if (IS_ERR(dp->debug)) { rc = PTR_ERR(dp->debug); diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index 8b47cdabb67e..8e80e3bac394 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -15,9 +15,11 @@ struct msm_dp { struct device *codec_dev; struct drm_connector *connector; struct drm_encoder *encoder; + struct drm_bridge *panel_bridge; bool is_connected; bool audio_enabled; bool power_on; + unsigned int connector_type; hdmi_codec_plugged_cb plugged_cb; diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 764f4b81017e..76856c4ee1d6 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -5,6 +5,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include "msm_drv.h" @@ -147,7 +148,7 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display) ret = drm_connector_init(dp_display->drm_dev, connector, &dp_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort); + 
dp_display->connector_type); if (ret) return ERR_PTR(ret); @@ -160,5 +161,15 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display) drm_connector_attach_encoder(connector, dp_display->encoder); + if (dp_display->panel_bridge) { + ret = drm_bridge_attach(dp_display->encoder, + dp_display->panel_bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret < 0) { + DRM_ERROR("failed to attach panel bridge: %d\n", ret); + return ERR_PTR(ret); + } + } + return connector; } diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 2181b60e1d1d..71db10c0f262 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -234,7 +234,7 @@ u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_edid_bpp, u32 mode_pclk_khz) { struct dp_panel_private *panel; - u32 bpp = mode_edid_bpp; + u32 bpp; if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) { DRM_ERROR("invalid input\n"); diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c index 0519dd3ac3c3..a7acc23f742b 100644 --- a/drivers/gpu/drm/msm/dp/dp_parser.c +++ b/drivers/gpu/drm/msm/dp/dp_parser.c @@ -6,11 +6,22 @@ #include <linux/of_gpio.h> #include <linux/phy/phy.h> +#include <drm/drm_of.h> #include <drm/drm_print.h> +#include <drm/drm_bridge.h> #include "dp_parser.h" #include "dp_reg.h" +#define DP_DEFAULT_AHB_OFFSET 0x0000 +#define DP_DEFAULT_AHB_SIZE 0x0200 +#define DP_DEFAULT_AUX_OFFSET 0x0200 +#define DP_DEFAULT_AUX_SIZE 0x0200 +#define DP_DEFAULT_LINK_OFFSET 0x0400 +#define DP_DEFAULT_LINK_SIZE 0x0C00 +#define DP_DEFAULT_P0_OFFSET 0x1000 +#define DP_DEFAULT_P0_SIZE 0x0400 + static const struct dp_regulator_cfg sdm845_dp_reg_cfg = { .num = 2, .regs = { @@ -19,67 +30,73 @@ static const struct dp_regulator_cfg sdm845_dp_reg_cfg = { }, }; -static int msm_dss_ioremap(struct platform_device *pdev, - struct dss_io_data *io_data) -{ - struct resource *res = NULL; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - DRM_ERROR("%pS->%s: msm_dss_get_res failed\n", - __builtin_return_address(0), __func__); - return -ENODEV; - } - - io_data->len = (u32)resource_size(res); - io_data->base = ioremap(res->start, io_data->len); - if (!io_data->base) { - DRM_ERROR("%pS->%s: ioremap failed\n", - __builtin_return_address(0), __func__); - return -EIO; - } - - return 0; -} - -static void msm_dss_iounmap(struct dss_io_data *io_data) +static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len) { - if (io_data->base) { - iounmap(io_data->base); - io_data->base = NULL; - } - io_data->len = 0; -} + struct resource *res; + void __iomem *base; -static void dp_parser_unmap_io_resources(struct dp_parser *parser) -{ - struct dp_io *io = &parser->io; + base = devm_platform_get_and_ioremap_resource(pdev, idx, &res); + if (!IS_ERR(base)) + *len = resource_size(res); - msm_dss_iounmap(&io->dp_controller); + return base; } static int dp_parser_ctrl_res(struct dp_parser *parser) { - int rc = 0; struct platform_device *pdev = parser->pdev; struct dp_io *io = &parser->io; + struct dss_io_data *dss = &io->dp_controller; + + dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len); + if (IS_ERR(dss->ahb.base)) + return PTR_ERR(dss->ahb.base); + + dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len); + if (IS_ERR(dss->aux.base)) { + /* + * The initial binding had a single reg, but in order to + * support variation in the sub-region sizes this was split. 
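
The fallback described in this comment leans on devm_platform_get_and_ioremap_resource() semantics: a reg index that is missing from the device tree comes back as ERR_PTR(-EINVAL), which is how the parser detects a legacy single-region binding. A condensed sketch of the scheme, with hypothetical region names and made-up offsets:

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    #define LEGACY_PHY_OFFSET 0x200 /* illustrative only */
    #define LEGACY_PHY_SIZE   0x200

    struct my_region { void __iomem *base; size_t len; };
    struct my_io { struct my_region ctrl, phy; };

    static void __iomem *map_idx(struct platform_device *pdev, int idx,
                                 size_t *len)
    {
            struct resource *res;
            void __iomem *base;

            /* A missing reg index yields ERR_PTR(-EINVAL). */
            base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
            if (!IS_ERR(base))
                    *len = resource_size(res);

            return base;
    }

    static int map_regions(struct platform_device *pdev, struct my_io *io)
    {
            io->ctrl.base = map_idx(pdev, 0, &io->ctrl.len);
            if (IS_ERR(io->ctrl.base))
                    return PTR_ERR(io->ctrl.base);

            io->phy.base = map_idx(pdev, 1, &io->phy.len);
            if (IS_ERR(io->phy.base)) {
                    if (PTR_ERR(io->phy.base) != -EINVAL)
                            return PTR_ERR(io->phy.base);
                    /* Legacy binding: one combined region, carve by offset. */
                    io->phy.base = io->ctrl.base + LEGACY_PHY_OFFSET;
                    io->phy.len = LEGACY_PHY_SIZE;
            }

            return 0;
    }
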
+ * dp_ioremap() will fail with -EINVAL here if only a single + * reg is specified, so fill in the sub-region offsets and + * lengths based on this single region. + */ + if (PTR_ERR(dss->aux.base) == -EINVAL) { + if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) { + DRM_ERROR("legacy memory region not large enough\n"); + return -EINVAL; + } + + dss->ahb.len = DP_DEFAULT_AHB_SIZE; + dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET; + dss->aux.len = DP_DEFAULT_AUX_SIZE; + dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET; + dss->link.len = DP_DEFAULT_LINK_SIZE; + dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET; + dss->p0.len = DP_DEFAULT_P0_SIZE; + } else { + DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base); + return PTR_ERR(dss->aux.base); + } + } else { + dss->link.base = dp_ioremap(pdev, 2, &dss->link.len); + if (IS_ERR(dss->link.base)) { + DRM_ERROR("unable to remap link region: %pe\n", dss->link.base); + return PTR_ERR(dss->link.base); + } - rc = msm_dss_ioremap(pdev, &io->dp_controller); - if (rc) { - DRM_ERROR("unable to remap dp io resources, rc=%d\n", rc); - goto err; + dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len); + if (IS_ERR(dss->p0.base)) { + DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base); + return PTR_ERR(dss->p0.base); + } } io->phy = devm_phy_get(&pdev->dev, "dp"); - if (IS_ERR(io->phy)) { - rc = PTR_ERR(io->phy); - goto err; - } + if (IS_ERR(io->phy)) + return PTR_ERR(io->phy); return 0; -err: - dp_parser_unmap_io_resources(parser); - return rc; } static int dp_parser_misc(struct dp_parser *parser) @@ -248,7 +265,28 @@ static int dp_parser_clock(struct dp_parser *parser) return 0; } -static int dp_parser_parse(struct dp_parser *parser) +static int dp_parser_find_panel(struct dp_parser *parser) +{ + struct device *dev = &parser->pdev->dev; + struct drm_panel *panel; + int rc; + + rc = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL); + if (rc) { + DRM_ERROR("failed to acquire DRM panel: %d\n", rc); + return rc; + } + + parser->panel_bridge = devm_drm_panel_bridge_add(dev, panel); + if (IS_ERR(parser->panel_bridge)) { + DRM_ERROR("failed to create panel bridge\n"); + return PTR_ERR(parser->panel_bridge); + } + + return 0; +} + +static int dp_parser_parse(struct dp_parser *parser, int connector_type) { int rc = 0; @@ -269,6 +307,12 @@ static int dp_parser_parse(struct dp_parser *parser) if (rc) return rc; + if (connector_type == DRM_MODE_CONNECTOR_eDP) { + rc = dp_parser_find_panel(parser); + if (rc) + return rc; + } + /* Map the corresponding regulator information according to * version. Currently, since we only have one supported platform, * mapping the regulator directly. 
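
dp_parser_find_panel() above is the eDP half of this change: a panel described in DT is looked up and wrapped in a drm_bridge, so the rest of the driver can attach it like any other bridge (compare the DRM_BRIDGE_ATTACH_NO_CONNECTOR attach added to dp_drm.c earlier in this diff). The two helpers compose like this; the sketch uses a hypothetical function name, while the port/endpoint numbers follow the patch:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <drm/drm_bridge.h>
    #include <drm/drm_of.h>
    #include <drm/drm_panel.h>

    static struct drm_bridge *edp_panel_bridge(struct device *dev)
    {
            struct drm_panel *panel;
            int ret;

            /* Port 1, endpoint 0 of this device's OF graph. */
            ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
            if (ret)
                    return ERR_PTR(ret);

            /* Device-managed panel-to-bridge adapter. */
            return devm_drm_panel_bridge_add(dev, panel);
    }
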
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h index 34b49628bbaf..3172da089421 100644 --- a/drivers/gpu/drm/msm/dp/dp_parser.h +++ b/drivers/gpu/drm/msm/dp/dp_parser.h @@ -25,11 +25,18 @@ enum dp_pm_type { DP_MAX_PM }; -struct dss_io_data { - u32 len; +struct dss_io_region { + size_t len; void __iomem *base; }; +struct dss_io_data { + struct dss_io_region ahb; + struct dss_io_region aux; + struct dss_io_region link; + struct dss_io_region p0; +}; + static inline const char *dp_parser_pm_name(enum dp_pm_type module) { switch (module) { @@ -116,8 +123,9 @@ struct dp_parser { struct dp_display_data disp_data; const struct dp_regulator_cfg *regulator_cfg; u32 max_dp_lanes; + struct drm_bridge *panel_bridge; - int (*parse)(struct dp_parser *parser); + int (*parse)(struct dp_parser *parser, int connector_type); }; /** diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index ad73ebb84b2d..5cd230a5d5d3 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -225,8 +225,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, goto fail; } - if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) + if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) { + ret = -EINVAL; goto fail; + } msm_dsi->encoder = encoder; diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 83787cbee419..bb39e7ca802d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -107,6 +107,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base, u32 len); int msm_dsi_host_enable(struct mipi_dsi_host *host); int msm_dsi_host_disable(struct mipi_dsi_host *host); +void msm_dsi_host_enable_irq(struct mipi_dsi_host *host); +void msm_dsi_host_disable_irq(struct mipi_dsi_host *host); int msm_dsi_host_power_on(struct mipi_dsi_host *host, struct msm_dsi_phy_shared_timings *phy_shared_timings, bool is_bonded_dsi, struct msm_dsi_phy *phy); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index f741494b1bf6..4c7b6944fc0d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -106,7 +106,8 @@ struct msm_dsi_host { phys_addr_t ctrl_size; struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX]; - struct clk *bus_clks[DSI_BUS_CLK_MAX]; + int num_bus_clks; + struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX]; struct clk *byte_clk; struct clk *esc_clk; @@ -115,16 +116,16 @@ struct msm_dsi_host { struct clk *pixel_clk_src; struct clk *byte_intf_clk; - u32 byte_clk_rate; - u32 pixel_clk_rate; - u32 esc_clk_rate; + unsigned long byte_clk_rate; + unsigned long pixel_clk_rate; + unsigned long esc_clk_rate; /* DSI v2 specific clocks */ struct clk *src_clk; struct clk *esc_clk_src; struct clk *dsi_clk_src; - u32 src_clk_rate; + unsigned long src_clk_rate; struct gpio_desc *disp_en_gpio; struct gpio_desc *te_gpio; @@ -374,15 +375,14 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host) int i, ret = 0; /* get bus clocks */ - for (i = 0; i < cfg->num_bus_clks; i++) { - msm_host->bus_clks[i] = msm_clk_get(pdev, - cfg->bus_clk_names[i]); - if (IS_ERR(msm_host->bus_clks[i])) { - ret = PTR_ERR(msm_host->bus_clks[i]); - pr_err("%s: Unable to get %s clock, ret = %d\n", - __func__, cfg->bus_clk_names[i], ret); - goto exit; - } + for (i = 0; i < cfg->num_bus_clks; i++) + msm_host->bus_clks[i].id = cfg->bus_clk_names[i]; + msm_host->num_bus_clks = cfg->num_bus_clks; + + ret = 
devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks); + if (ret < 0) { + dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret); + goto exit; } /* get link and source clocks */ @@ -433,41 +433,6 @@ exit: return ret; } -static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host) -{ - const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg; - int i, ret; - - DBG("id=%d", msm_host->id); - - for (i = 0; i < cfg->num_bus_clks; i++) { - ret = clk_prepare_enable(msm_host->bus_clks[i]); - if (ret) { - pr_err("%s: failed to enable bus clock %d ret %d\n", - __func__, i, ret); - goto err; - } - } - - return 0; -err: - for (; i > 0; i--) - clk_disable_unprepare(msm_host->bus_clks[i]); - - return ret; -} - -static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host) -{ - const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg; - int i; - - DBG(""); - - for (i = cfg->num_bus_clks - 1; i >= 0; i--) - clk_disable_unprepare(msm_host->bus_clks[i]); -} - int msm_dsi_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -478,7 +443,7 @@ int msm_dsi_runtime_suspend(struct device *dev) if (!msm_host->cfg_hnd) return 0; - dsi_bus_clk_disable(msm_host); + clk_bulk_disable_unprepare(msm_host->num_bus_clks, msm_host->bus_clks); return 0; } @@ -493,15 +458,15 @@ int msm_dsi_runtime_resume(struct device *dev) if (!msm_host->cfg_hnd) return 0; - return dsi_bus_clk_enable(msm_host); + return clk_bulk_prepare_enable(msm_host->num_bus_clks, msm_host->bus_clks); } int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host) { - u32 byte_intf_rate; + unsigned long byte_intf_rate; int ret; - DBG("Set clk rates: pclk=%d, byteclk=%d", + DBG("Set clk rates: pclk=%d, byteclk=%lu", msm_host->mode->clock, msm_host->byte_clk_rate); ret = dev_pm_opp_set_rate(&msm_host->pdev->dev, @@ -558,13 +523,11 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) goto pixel_clk_err; } - if (msm_host->byte_intf_clk) { - ret = clk_prepare_enable(msm_host->byte_intf_clk); - if (ret) { - pr_err("%s: Failed to enable byte intf clk\n", - __func__); - goto byte_intf_clk_err; - } + ret = clk_prepare_enable(msm_host->byte_intf_clk); + if (ret) { + pr_err("%s: Failed to enable byte intf clk\n", + __func__); + goto byte_intf_clk_err; } return 0; @@ -583,7 +546,7 @@ int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host) { int ret; - DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d", + DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu", msm_host->mode->clock, msm_host->byte_clk_rate, msm_host->esc_clk_rate, msm_host->src_clk_rate); @@ -660,8 +623,7 @@ void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host) dev_pm_opp_set_rate(&msm_host->pdev->dev, 0); clk_disable_unprepare(msm_host->esc_clk); clk_disable_unprepare(msm_host->pixel_clk); - if (msm_host->byte_intf_clk) - clk_disable_unprepare(msm_host->byte_intf_clk); + clk_disable_unprepare(msm_host->byte_intf_clk); clk_disable_unprepare(msm_host->byte_clk); } @@ -673,10 +635,10 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host) clk_disable_unprepare(msm_host->byte_clk); } -static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi) +static unsigned long dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi) { struct drm_display_mode *mode = msm_host->mode; - u32 pclk_rate; + unsigned long pclk_rate; pclk_rate = mode->clock * 1000; @@ -696,7 +658,7 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi) { u8 
lanes = msm_host->lanes; u32 bpp = dsi_get_bpp(msm_host->format); - u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi); + unsigned long pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi); u64 pclk_bpp = (u64)pclk_rate * bpp; if (lanes == 0) { @@ -713,7 +675,7 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi) msm_host->pixel_clk_rate = pclk_rate; msm_host->byte_clk_rate = pclk_bpp; - DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate, + DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate, msm_host->byte_clk_rate); } @@ -772,7 +734,7 @@ int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi) msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div; - DBG("esc=%d, src=%d", msm_host->esc_clk_rate, + DBG("esc=%lu, src=%lu", msm_host->esc_clk_rate, msm_host->src_clk_rate); return 0; @@ -1904,6 +1866,23 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) return ret; } + msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (msm_host->irq < 0) { + ret = msm_host->irq; + dev_err(&pdev->dev, "failed to get irq: %d\n", ret); + return ret; + } + + /* do not autoenable, will be enabled later */ + ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN, + "dsi_isr", msm_host); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request IRQ%u: %d\n", + msm_host->irq, ret); + return ret; + } + init_completion(&msm_host->dma_comp); init_completion(&msm_host->video_comp); mutex_init(&msm_host->dev_mutex); @@ -1931,7 +1910,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host) DBG(""); dsi_tx_buf_free(msm_host); if (msm_host->workqueue) { - flush_workqueue(msm_host->workqueue); destroy_workqueue(msm_host->workqueue); msm_host->workqueue = NULL; } @@ -1947,25 +1925,8 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; - struct platform_device *pdev = msm_host->pdev; int ret; - msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); - if (msm_host->irq < 0) { - ret = msm_host->irq; - DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret); - return ret; - } - - ret = devm_request_irq(&pdev->dev, msm_host->irq, - dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, - "dsi_isr", msm_host); - if (ret < 0) { - DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n", - msm_host->irq, ret); - return ret; - } - msm_host->dev = dev; ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K); if (ret) { @@ -2307,6 +2268,20 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, clk_req->escclk_rate = msm_host->esc_clk_rate; } +void msm_dsi_host_enable_irq(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + enable_irq(msm_host->irq); +} + +void msm_dsi_host_disable_irq(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + disable_irq(msm_host->irq); +} + int msm_dsi_host_enable(struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index fc949a84cef6..01bf8d907933 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -3,6 +3,8 @@ * Copyright (c) 2015, The Linux Foundation. All rights reserved. 
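
The IRQ change in dsi_host.c above is worth spelling out: requesting the interrupt with IRQF_NO_AUTOEN registers the handler but leaves the line masked, so the driver can defer delivery until the hardware is ready and drive it explicitly with enable_irq()/disable_irq() — exactly what the dsi_manager hunks below do around panel prepare/unprepare. A minimal sketch with a hypothetical handler and device context:

    #include <linux/interrupt.h>

    static irqreturn_t my_host_irq(int irq, void *data)
    {
            /* struct my_host *host = data; ... ack and handle ... */
            return IRQ_HANDLED;
    }

    static int my_host_init_irq(struct device *dev, int irq, void *host)
    {
            /* Handler registered, but the IRQ stays disabled for now. */
            return devm_request_irq(dev, irq, my_host_irq,
                                    IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
                                    IRQF_NO_AUTOEN, "my_host_isr", host);
    }

    /* Later, once the link is powered up:  enable_irq(irq);
     * and before powering it down again: disable_irq(irq);
     */
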
*/ +#include "drm/drm_bridge_connector.h" + #include "msm_kms.h" #include "dsi.h" @@ -377,6 +379,14 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) } } + /* + * Enable before preparing the panel, disable after unpreparing, so + * that the panel can communicate over the DSI link. + */ + msm_dsi_host_enable_irq(host); + if (is_bonded_dsi && msm_dsi1) + msm_dsi_host_enable_irq(msm_dsi1->host); + /* Always call panel functions once, because even for dual panels, * there is only one drm_panel instance. */ @@ -411,6 +421,10 @@ host_en_fail: if (panel) drm_panel_unprepare(panel); panel_prep_fail: + msm_dsi_host_disable_irq(host); + if (is_bonded_dsi && msm_dsi1) + msm_dsi_host_disable_irq(msm_dsi1->host); + if (is_bonded_dsi && msm_dsi1) msm_dsi_host_power_off(msm_dsi1->host); host1_on_fail: @@ -523,6 +537,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) id, ret); } + msm_dsi_host_disable_irq(host); + if (is_bonded_dsi && msm_dsi1) + msm_dsi_host_disable_irq(msm_dsi1->host); + /* Save PHY status if it is a clock source */ msm_dsi_phy_pll_save_state(msm_dsi->phy); @@ -688,10 +706,10 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct drm_device *dev = msm_dsi->dev; + struct drm_connector *connector; struct drm_encoder *encoder; struct drm_bridge *int_bridge, *ext_bridge; - struct drm_connector *connector; - struct list_head *connector_list; + int ret; int_bridge = msm_dsi->bridge; ext_bridge = msm_dsi->external_bridge = @@ -699,22 +717,44 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) encoder = msm_dsi->encoder; - /* link the internal dsi bridge to the external bridge */ - drm_bridge_attach(encoder, ext_bridge, int_bridge, 0); - /* - * we need the drm_connector created by the external bridge - * driver (or someone else) to feed it to our driver's - * priv->connector[] list, mainly for msm_fbdev_init() + * Try first to create the bridge without it creating its own + * connector.. 
currently some bridges support this, and others + * do not (and some support both modes) */ - connector_list = &dev->mode_config.connector_list; + ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret == -EINVAL) { + struct drm_connector *connector; + struct list_head *connector_list; + + /* link the internal dsi bridge to the external bridge */ + drm_bridge_attach(encoder, ext_bridge, int_bridge, 0); + + /* + * we need the drm_connector created by the external bridge + * driver (or someone else) to feed it to our driver's + * priv->connector[] list, mainly for msm_fbdev_init() + */ + connector_list = &dev->mode_config.connector_list; + + list_for_each_entry(connector, connector_list, head) { + if (drm_connector_has_possible_encoder(connector, encoder)) + return connector; + } + + return ERR_PTR(-ENODEV); + } - list_for_each_entry(connector, connector_list, head) { - if (drm_connector_has_possible_encoder(connector, encoder)) - return connector; + connector = drm_bridge_connector_init(dev, encoder); + if (IS_ERR(connector)) { + DRM_ERROR("Unable to create bridge connector\n"); + return ERR_CAST(connector); } - return ERR_PTR(-ENODEV); + drm_connector_attach_encoder(connector, encoder); + + return connector; } void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge) diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 8c65ef6968ca..9842e04b5858 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -627,6 +627,8 @@ static const struct of_device_id dsi_phy_dt_match[] = { .data = &dsi_phy_14nm_cfgs }, { .compatible = "qcom,dsi-phy-14nm-660", .data = &dsi_phy_14nm_660_cfgs }, + { .compatible = "qcom,dsi-phy-14nm-8953", + .data = &dsi_phy_14nm_8953_cfgs }, #endif #ifdef CONFIG_DRM_MSM_DSI_10NM_PHY { .compatible = "qcom,dsi-phy-10nm", diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index b91303ada74f..4c8257581bfc 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -48,6 +48,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c index d13552b2213b..7414966f198e 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c @@ -110,14 +110,13 @@ static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX]; static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm, u32 nb_tries, u32 timeout_us) { - bool pll_locked = false; + bool pll_locked = false, pll_ready = false; void __iomem *base = pll_14nm->phy->pll_base; u32 tries, val; tries = nb_tries; while (tries--) { - val = dsi_phy_read(base + - REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); + val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); pll_locked = !!(val & BIT(5)); if (pll_locked) @@ -126,23 +125,24 @@ static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm, udelay(timeout_us); } - if (!pll_locked) { - tries = nb_tries; - while (tries--) { - val = 
dsi_phy_read(base + - REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); - pll_locked = !!(val & BIT(0)); + if (!pll_locked) + goto out; - if (pll_locked) - break; + tries = nb_tries; + while (tries--) { + val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); + pll_ready = !!(val & BIT(0)); + + if (pll_ready) + break; - udelay(timeout_us); - } + udelay(timeout_us); } - DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* "); +out: + DBG("DSI PLL is %slocked, %sready", pll_locked ? "" : "*not* ", pll_ready ? "" : "*not* "); - return pll_locked; + return pll_locked && pll_ready; } static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf) @@ -213,9 +213,7 @@ static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll, struct dsi_pll_conf DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref); dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref); - div_u64_rem(dec_start_multiple, multiplier, &div_frac_start); - - dec_start = div_u64(dec_start_multiple, multiplier); + dec_start = div_u64_rem(dec_start_multiple, multiplier, &div_frac_start); pconf->dec_start = (u32)dec_start; pconf->div_frac_start = div_frac_start; @@ -1065,3 +1063,24 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = { .io_start = { 0xc994400, 0xc996000 }, .num_dsi_phy = 2, }; + +const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = { + .has_phy_lane = true, + .reg_cfg = { + .num = 1, + .regs = { + {"vcca", 17000, 32}, + }, + }, + .ops = { + .enable = dsi_14nm_phy_enable, + .disable = dsi_14nm_phy_disable, + .pll_init = dsi_pll_14nm_init, + .save_pll_state = dsi_14nm_pll_save_state, + .restore_pll_state = dsi_14nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0x1a94400, 0x1a96400 }, + .num_dsi_phy = 2, +}; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c index aaa37456f4ee..71ed4aa0dc67 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c @@ -428,7 +428,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9; snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id); - snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id); + snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1); bytediv_init.name = clk_name; bytediv_init.ops = &clk_bytediv_ops; @@ -442,7 +442,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov return ret; provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw; - snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id); + snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1); /* DIV3 */ hw = devm_clk_hw_register_divider(dev, clk_name, parent_name, 0, pll_28nm->phy->pll_base + diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index cb297b08458e..079613d2aaa9 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -114,9 +114,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config multiplier = 1 << FRAC_BITS; dec_multiple = div_u64(pll_freq * multiplier, divider); - div_u64_rem(dec_multiple, multiplier, &frac); - - dec = div_u64(dec_multiple, multiplier); + dec = div_u64_rem(dec_multiple, multiplier, &frac); if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1)) config->pll_clock_inverters = 0x28; diff --git 
a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c index 4fb397ee7c84..a68a4a1867c1 100644 --- a/drivers/gpu/drm/msm/edp/edp_ctrl.c +++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c @@ -1116,7 +1116,7 @@ void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on) int msm_edp_ctrl_init(struct msm_edp *edp) { struct edp_ctrl *ctrl = NULL; - struct device *dev = &edp->pdev->dev; + struct device *dev; int ret; if (!edp) { @@ -1124,6 +1124,7 @@ int msm_edp_ctrl_init(struct msm_edp *edp) return -EINVAL; } + dev = &edp->pdev->dev; ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); if (!ctrl) return -ENOMEM; @@ -1189,7 +1190,6 @@ void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl) return; if (ctrl->workqueue) { - flush_workqueue(ctrl->workqueue); destroy_workqueue(ctrl->workqueue); ctrl->workqueue = NULL; } @@ -1242,8 +1242,6 @@ bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl) int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl, struct drm_connector *connector, struct edid **edid) { - int ret = 0; - mutex_lock(&ctrl->dev_mutex); if (ctrl->edid) { @@ -1278,7 +1276,7 @@ disable_ret: } unlock_ret: mutex_unlock(&ctrl->dev_mutex); - return ret; + return 0; } int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 737453b6e596..75b64e6ae035 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -61,10 +61,8 @@ static void msm_hdmi_destroy(struct hdmi *hdmi) * at this point, hpd has been disabled, * after flush workq, it's safe to deinit hdcp */ - if (hdmi->workq) { - flush_workqueue(hdmi->workq); + if (hdmi->workq) destroy_workqueue(hdmi->workq); - } msm_hdmi_hdcp_destroy(hdmi); if (hdmi->phy_dev) { @@ -154,19 +152,13 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev) ret = -ENOMEM; goto fail; } - for (i = 0; i < config->hpd_reg_cnt; i++) { - struct regulator *reg; - - reg = devm_regulator_get(&pdev->dev, - config->hpd_reg_names[i]); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %s (%d)\n", - config->hpd_reg_names[i], ret); - goto fail; - } + for (i = 0; i < config->hpd_reg_cnt; i++) + hdmi->hpd_regs[i].supply = config->hpd_reg_names[i]; - hdmi->hpd_regs[i] = reg; + ret = devm_regulator_bulk_get(&pdev->dev, config->hpd_reg_cnt, hdmi->hpd_regs); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %d\n", ret); + goto fail; } hdmi->pwr_regs = devm_kcalloc(&pdev->dev, @@ -177,19 +169,11 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev) ret = -ENOMEM; goto fail; } - for (i = 0; i < config->pwr_reg_cnt; i++) { - struct regulator *reg; - - reg = devm_regulator_get(&pdev->dev, - config->pwr_reg_names[i]); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %s (%d)\n", - config->pwr_reg_names[i], ret); - goto fail; - } - hdmi->pwr_regs[i] = reg; + ret = devm_regulator_bulk_get(&pdev->dev, config->pwr_reg_cnt, hdmi->pwr_regs); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %d\n", ret); + goto fail; } hdmi->hpd_clks = devm_kcalloc(&pdev->dev, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index d0b84f0abee1..82261078c6b1 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -56,8 +56,8 @@ struct hdmi { void __iomem *qfprom_mmio; phys_addr_t mmio_phy_addr; - struct regulator **hpd_regs; - struct regulator **pwr_regs; + struct regulator_bulk_data 
*hpd_regs; + struct regulator_bulk_data *pwr_regs; struct clk **hpd_clks; struct clk **pwr_clks; @@ -163,7 +163,7 @@ struct hdmi_phy { void __iomem *mmio; struct hdmi_phy_cfg *cfg; const struct hdmi_phy_funcs *funcs; - struct regulator **regs; + struct regulator_bulk_data *regs; struct clk **clks; }; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 6e380db9287b..f04eb4a70f0d 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -28,13 +28,9 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge) pm_runtime_get_sync(&hdmi->pdev->dev); - for (i = 0; i < config->pwr_reg_cnt; i++) { - ret = regulator_enable(hdmi->pwr_regs[i]); - if (ret) { - DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %s (%d)\n", - config->pwr_reg_names[i], ret); - } - } + ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %d\n", ret); if (config->pwr_clk_cnt > 0) { DBG("pixclock: %lu", hdmi->pixclock); @@ -70,13 +66,9 @@ static void power_off(struct drm_bridge *bridge) for (i = 0; i < config->pwr_clk_cnt; i++) clk_disable_unprepare(hdmi->pwr_clks[i]); - for (i = 0; i < config->pwr_reg_cnt; i++) { - ret = regulator_disable(hdmi->pwr_regs[i]); - if (ret) { - DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %s (%d)\n", - config->pwr_reg_names[i], ret); - } - } + ret = regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret); pm_runtime_put_autosuspend(&hdmi->pdev->dev); } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index 58707a1f3878..a7f729cdec7b 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -146,16 +146,13 @@ int msm_hdmi_hpd_enable(struct drm_connector *connector) const struct hdmi_platform_config *config = hdmi->config; struct device *dev = &hdmi->pdev->dev; uint32_t hpd_ctrl; - int i, ret; + int ret; unsigned long flags; - for (i = 0; i < config->hpd_reg_cnt; i++) { - ret = regulator_enable(hdmi->hpd_regs[i]); - if (ret) { - DRM_DEV_ERROR(dev, "failed to enable hpd regulator: %s (%d)\n", - config->hpd_reg_names[i], ret); - goto fail; - } + ret = regulator_bulk_enable(config->hpd_reg_cnt, hdmi->hpd_regs); + if (ret) { + DRM_DEV_ERROR(dev, "failed to enable hpd regulators: %d\n", ret); + goto fail; } ret = pinctrl_pm_select_default_state(dev); @@ -207,7 +204,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) struct hdmi *hdmi = hdmi_connector->hdmi; const struct hdmi_platform_config *config = hdmi->config; struct device *dev = &hdmi->pdev->dev; - int i, ret = 0; + int ret; /* Disable HPD interrupt */ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0); @@ -225,12 +222,9 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) if (ret) dev_warn(dev, "pinctrl state chg failed: %d\n", ret); - for (i = 0; i < config->hpd_reg_cnt; i++) { - ret = regulator_disable(hdmi->hpd_regs[i]); - if (ret) - dev_warn(dev, "failed to disable hpd regulator: %s (%d)\n", - config->hpd_reg_names[i], ret); - } + ret = regulator_bulk_disable(config->hpd_reg_cnt, hdmi->hpd_regs); + if (ret) + dev_warn(dev, "failed to disable hpd regulator: %d\n", ret); } static void diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c index 8a38d4b95102..16b0e8836d27 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c 
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c @@ -23,22 +23,15 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy) if (!phy->clks) return -ENOMEM; - for (i = 0; i < cfg->num_regs; i++) { - struct regulator *reg; - - reg = devm_regulator_get(dev, cfg->reg_names[i]); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - if (ret != -EPROBE_DEFER) { - DRM_DEV_ERROR(dev, - "failed to get phy regulator: %s (%d)\n", - cfg->reg_names[i], ret); - } + for (i = 0; i < cfg->num_regs; i++) + phy->regs[i].supply = cfg->reg_names[i]; - return ret; - } + ret = devm_regulator_bulk_get(dev, cfg->num_regs, phy->regs); + if (ret) { + if (ret != -EPROBE_DEFER) + DRM_DEV_ERROR(dev, "failed to get phy regulators: %d\n", ret); - phy->regs[i] = reg; + return ret; } for (i = 0; i < cfg->num_clks; i++) { @@ -66,11 +59,10 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy) pm_runtime_get_sync(dev); - for (i = 0; i < cfg->num_regs; i++) { - ret = regulator_enable(phy->regs[i]); - if (ret) - DRM_DEV_ERROR(dev, "failed to enable regulator: %s (%d)\n", - cfg->reg_names[i], ret); + ret = regulator_bulk_enable(cfg->num_regs, phy->regs); + if (ret) { + DRM_DEV_ERROR(dev, "failed to enable regulators: (%d)\n", ret); + return ret; } for (i = 0; i < cfg->num_clks; i++) { @@ -92,8 +84,7 @@ void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy) for (i = cfg->num_clks - 1; i >= 0; i--) clk_disable_unprepare(phy->clks[i]); - for (i = cfg->num_regs - 1; i >= 0; i--) - regulator_disable(phy->regs[i]); + regulator_bulk_disable(cfg->num_regs, phy->regs); pm_runtime_put_sync(dev); } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c index a8f3b2cbfdc5..99c7853353fd 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c @@ -682,7 +682,7 @@ static int hdmi_8996_pll_is_enabled(struct clk_hw *hw) return pll_locked; } -static struct clk_ops hdmi_8996_pll_ops = { +static const struct clk_ops hdmi_8996_pll_ops = { .set_rate = hdmi_8996_pll_set_clk_rate, .round_rate = hdmi_8996_pll_round_rate, .recalc_rate = hdmi_8996_pll_recalc_rate, @@ -695,7 +695,7 @@ static const char * const hdmi_pll_parents[] = { "xo", }; -static struct clk_init_data pll_init = { +static const struct clk_init_data pll_init = { .name = "hdmipll", .ops = &hdmi_8996_pll_ops, .parent_names = hdmi_pll_parents, diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index fab09e7c6efc..27c9ae563f2f 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -116,20 +116,10 @@ out: trace_msm_atomic_async_commit_finish(crtc_mask); } -static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t) -{ - struct msm_pending_timer *timer = container_of(t, - struct msm_pending_timer, timer); - - kthread_queue_work(timer->worker, &timer->work); - - return HRTIMER_NORESTART; -} - static void msm_atomic_pending_work(struct kthread_work *work) { struct msm_pending_timer *timer = container_of(work, - struct msm_pending_timer, work); + struct msm_pending_timer, work.work); msm_atomic_async_commit(timer->kms, timer->crtc_idx); } @@ -139,8 +129,6 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer, { timer->kms = kms; timer->crtc_idx = crtc_idx; - hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - timer->timer.function = msm_atomic_pending_timer; timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx); if (IS_ERR(timer->worker)) { @@ -149,7 +137,10 @@ int 
msm_atomic_init_pending_timer(struct msm_pending_timer *timer, return ret; } sched_set_fifo(timer->worker->task); - kthread_init_work(&timer->work, msm_atomic_pending_work); + + msm_hrtimer_work_init(&timer->work, timer->worker, + msm_atomic_pending_work, + CLOCK_MONOTONIC, HRTIMER_MODE_ABS); return 0; } @@ -258,7 +249,7 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) vsync_time = kms->funcs->vsync_time(kms, async_crtc); wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1)); - hrtimer_start(&timer->timer, wakeup_time, + msm_hrtimer_queue_work(&timer->work, wakeup_time, HRTIMER_MODE_ABS); } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 2e6fc185e54d..7936e8d498dd 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -58,7 +58,7 @@ static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = { }; #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING -static bool reglog = false; +static bool reglog; MODULE_PARM_DESC(reglog, "Enable register read/write logging"); module_param(reglog, bool, 0600); #else @@ -75,7 +75,7 @@ static char *vram = "16m"; MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)"); module_param(vram, charp, 0); -bool dumpstate = false; +bool dumpstate; MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors"); module_param(dumpstate, bool, 0600); @@ -200,6 +200,35 @@ void msm_rmw(void __iomem *addr, u32 mask, u32 or) msm_writel(val | or, addr); } +static enum hrtimer_restart msm_hrtimer_worktimer(struct hrtimer *t) +{ + struct msm_hrtimer_work *work = container_of(t, + struct msm_hrtimer_work, timer); + + kthread_queue_work(work->worker, &work->work); + + return HRTIMER_NORESTART; +} + +void msm_hrtimer_queue_work(struct msm_hrtimer_work *work, + ktime_t wakeup_time, + enum hrtimer_mode mode) +{ + hrtimer_start(&work->timer, wakeup_time, mode); +} + +void msm_hrtimer_work_init(struct msm_hrtimer_work *work, + struct kthread_worker *worker, + kthread_work_func_t fn, + clockid_t clock_id, + enum hrtimer_mode mode) +{ + hrtimer_init(&work->timer, clock_id, mode); + work->timer.function = msm_hrtimer_worktimer; + work->worker = worker; + kthread_init_work(&work->work, fn); +} + static irqreturn_t msm_irq(int irq, void *arg) { struct drm_device *dev = arg; @@ -630,10 +659,11 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) if (ret) goto err_msm_uninit; - ret = msm_disp_snapshot_init(ddev); - if (ret) - DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret); - + if (kms) { + ret = msm_disp_snapshot_init(ddev); + if (ret) + DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret); + } drm_mode_config_reset(ddev); #ifdef CONFIG_DRM_FBDEV_EMULATION @@ -682,6 +712,7 @@ static void load_gpu(struct drm_device *dev) static int context_init(struct drm_device *dev, struct drm_file *file) { + static atomic_t ident = ATOMIC_INIT(0); struct msm_drm_private *priv = dev->dev_private; struct msm_file_private *ctx; @@ -689,12 +720,17 @@ static int context_init(struct drm_device *dev, struct drm_file *file) if (!ctx) return -ENOMEM; + INIT_LIST_HEAD(&ctx->submitqueues); + rwlock_init(&ctx->queuelock); + kref_init(&ctx->ref); msm_submitqueue_init(dev, ctx); ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current); file->driver_priv = ctx; + ctx->seqno = atomic_inc_return(&ident); + return 0; } diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 31d50e98a723..eb984d925f4d 100644 --- 
a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -53,14 +53,6 @@ struct msm_disp_state; #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) -struct msm_file_private { - rwlock_t queuelock; - struct list_head submitqueues; - int queueid; - struct msm_gem_address_space *aspace; - struct kref ref; -}; - enum msm_mdp_plane_property { PLANE_PROP_ZPOS, PLANE_PROP_ALPHA, @@ -68,6 +60,13 @@ enum msm_mdp_plane_property { PLANE_PROP_MAX_NUM }; +enum msm_dp_controller { + MSM_DP_CONTROLLER_0, + MSM_DP_CONTROLLER_1, + MSM_DP_CONTROLLER_2, + MSM_DP_CONTROLLER_COUNT, +}; + #define MSM_GPU_MAX_RINGS 4 #define MAX_H_TILES_PER_DISPLAY 2 @@ -161,7 +160,7 @@ struct msm_drm_private { /* DSI is shared by mdp4 and mdp5 */ struct msm_dsi *dsi[2]; - struct msm_dp *dp; + struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT]; /* when we have more than one 'msm_gpu' these need to be an array: */ struct msm_gpu *gpu; @@ -490,40 +489,27 @@ void msm_writel(u32 data, void __iomem *addr); u32 msm_readl(const void __iomem *addr); void msm_rmw(void __iomem *addr, u32 mask, u32 or); -struct msm_gpu_submitqueue; -int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx); -struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx, - u32 id); -int msm_submitqueue_create(struct drm_device *drm, - struct msm_file_private *ctx, - u32 prio, u32 flags, u32 *id); -int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx, - struct drm_msm_submitqueue_query *args); -int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id); -void msm_submitqueue_close(struct msm_file_private *ctx); - -void msm_submitqueue_destroy(struct kref *kref); - -static inline void __msm_file_private_destroy(struct kref *kref) -{ - struct msm_file_private *ctx = container_of(kref, - struct msm_file_private, ref); - - msm_gem_address_space_put(ctx->aspace); - kfree(ctx); -} - -static inline void msm_file_private_put(struct msm_file_private *ctx) -{ - kref_put(&ctx->ref, __msm_file_private_destroy); -} +/** + * struct msm_hrtimer_work - a helper to combine an hrtimer with kthread_work + * + * @timer: hrtimer to control when the kthread work is triggered + * @work: the kthread work + * @worker: the kthread worker the work will be scheduled on + */ +struct msm_hrtimer_work { + struct hrtimer timer; + struct kthread_work work; + struct kthread_worker *worker; +}; -static inline struct msm_file_private *msm_file_private_get( - struct msm_file_private *ctx) -{ - kref_get(&ctx->ref); - return ctx; -} +void msm_hrtimer_queue_work(struct msm_hrtimer_work *work, + ktime_t wakeup_time, + enum hrtimer_mode mode); +void msm_hrtimer_work_init(struct msm_hrtimer_work *work, + struct kthread_worker *worker, + kthread_work_func_t fn, + clockid_t clock_id, + enum hrtimer_mode mode); #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) #define VERB(fmt, ...) 
if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) @@ -549,7 +535,7 @@ static inline int align_pitch(int width, int bpp) static inline unsigned long timeout_to_jiffies(const ktime_t *timeout) { ktime_t now = ktime_get(); - unsigned long remaining_jiffies; + s64 remaining_jiffies; if (ktime_compare(*timeout, now) < 0) { remaining_jiffies = 0; @@ -558,7 +544,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout) remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ); } - return remaining_jiffies; + return clamp(remaining_jiffies, 0LL, (s64)INT_MAX); } #endif /* __MSM_DRV_H__ */ diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 3878b8dc2d59..2916480d9115 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -5,6 +5,7 @@ */ #include <linux/dma-map-ops.h> +#include <linux/vmalloc.h> #include <linux/spinlock.h> #include <linux/shmem_fs.h> #include <linux/dma-buf.h> @@ -1105,6 +1106,7 @@ static int msm_gem_new_impl(struct drm_device *dev, msm_obj->flags = flags; msm_obj->madv = MSM_MADV_WILLNEED; + INIT_LIST_HEAD(&msm_obj->node); INIT_LIST_HEAD(&msm_obj->vmas); *obj = &msm_obj->base; @@ -1139,7 +1141,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32 ret = msm_gem_new_impl(dev, size, flags, &obj); if (ret) - goto fail; + return ERR_PTR(ret); msm_obj = to_msm_bo(obj); @@ -1223,7 +1225,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); if (ret) - goto fail; + return ERR_PTR(ret); drm_gem_private_object_init(dev, obj, size); diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 0f1b29ee04a9..4a1420b05e97 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -4,6 +4,8 @@ * Author: Rob Clark <robdclark@gmail.com> */ +#include <linux/vmalloc.h> + #include "msm_drv.h" #include "msm_gem.h" #include "msm_gpu.h" diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 924b01b9c105..3cb029f10925 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -46,7 +46,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, if (!submit) return ERR_PTR(-ENOMEM); - ret = drm_sched_job_init(&submit->base, &queue->entity, queue); + ret = drm_sched_job_init(&submit->base, queue->entity, queue); if (ret) { kfree(submit); return ERR_PTR(ret); @@ -161,7 +161,8 @@ out: static int submit_lookup_cmds(struct msm_gem_submit *submit, struct drm_msm_gem_submit *args, struct drm_file *file) { - unsigned i, sz; + unsigned i; + size_t sz; int ret = 0; for (i = 0; i < args->nr_cmds; i++) { diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 8a3a592da3a4..2c46cd968ac4 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -296,7 +296,7 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, state->bos = kcalloc(nr, sizeof(struct msm_gpu_state_bo), GFP_KERNEL); - for (i = 0; i < submit->nr_bos; i++) { + for (i = 0; state->bos && i < submit->nr_bos; i++) { if (should_dump(submit, i)) { msm_gpu_crashstate_get_bo(state, submit->bos[i].obj, submit->bos[i].iova, submit->bos[i].flags); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 0e4b45bff2e6..59cdd00b69d0 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -112,6 +112,13 @@ struct msm_gpu_devfreq { * it 
is inactive. */ unsigned long idle_freq; + + /** + * idle_work: + * + * Used to delay clamping to idle freq on active->idle transition. + */ + struct msm_hrtimer_work idle_work; }; struct msm_gpu { @@ -203,6 +210,10 @@ struct msm_gpu { uint32_t suspend_count; struct msm_gpu_state *crashstate; + + /* Enable clamping to idle freq when inactive: */ + bool clamp_to_idle; + /* True if the hardware supports expanded apriv (a650 and newer) */ bool hw_apriv; @@ -258,6 +269,39 @@ struct msm_gpu_perfcntr { #define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN) /** + * struct msm_file_private - per-drm_file context + * + * @queuelock: synchronizes access to submitqueues list + * @submitqueues: list of &msm_gpu_submitqueue created by userspace + * @queueid: counter incremented each time a submitqueue is created, + * used to assign &msm_gpu_submitqueue.id + * @aspace: the per-process GPU address-space + * @ref: reference count + * @seqno: unique per process seqno + */ +struct msm_file_private { + rwlock_t queuelock; + struct list_head submitqueues; + int queueid; + struct msm_gem_address_space *aspace; + struct kref ref; + int seqno; + + /** + * entities: + * + * Table of per-priority-level sched entities used by submitqueues + * associated with this &drm_file. Because some userspace apps + * make assumptions about rendering from multiple gl contexts + * (of the same priority) within the process happening in FIFO + * order without requiring any fencing beyond MakeCurrent(), we + * create at most one &drm_sched_entity per-process per-priority- + * level. + */ + struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS]; +}; + +/** * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority * * @gpu: the gpu instance @@ -304,6 +348,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio, } /** + * struct msm_gpu_submitqueues - Userspace created context. + * * A submitqueue is associated with a gl context or vk queue (or equiv) * in userspace. 
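The idle_work member above is an instance of the msm_hrtimer_work helper introduced earlier in this diff (msm_drv.c/msm_drv.h): an hrtimer paired with a kthread_work, so timer expiry merely queues the work on a kthread worker instead of running driver code in hard-irq context. A minimal usage sketch against that API; my_work, my_cb and my_setup are illustrative names, not driver code:

static struct msm_hrtimer_work my_work;

static void my_cb(struct kthread_work *work)
{
        struct msm_hrtimer_work *w =
                container_of(work, struct msm_hrtimer_work, work);

        pr_debug("deferred work %p runs on the kthread worker\n", w);
}

static void my_setup(struct kthread_worker *worker)
{
        msm_hrtimer_work_init(&my_work, worker, my_cb,
                              CLOCK_MONOTONIC, HRTIMER_MODE_REL);

        /* arm the timer; my_cb() is queued roughly 1 ms later */
        msm_hrtimer_queue_work(&my_work, ms_to_ktime(1), HRTIMER_MODE_REL);
}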
* @@ -321,7 +367,7 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio, * seqno, protected by submitqueue lock * @lock: submitqueue lock * @ref: reference count - * @entity: the submit job-queue + * @entity: the submit job-queue */ struct msm_gpu_submitqueue { int id; @@ -333,7 +379,7 @@ struct msm_gpu_submitqueue { struct idr fence_idr; struct mutex lock; struct kref ref; - struct drm_sched_entity entity; + struct drm_sched_entity *entity; }; struct msm_gpu_state_bo { @@ -421,6 +467,33 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val) int msm_gpu_pm_suspend(struct msm_gpu *gpu); int msm_gpu_pm_resume(struct msm_gpu *gpu); +int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx); +struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx, + u32 id); +int msm_submitqueue_create(struct drm_device *drm, + struct msm_file_private *ctx, + u32 prio, u32 flags, u32 *id); +int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx, + struct drm_msm_submitqueue_query *args); +int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id); +void msm_submitqueue_close(struct msm_file_private *ctx); + +void msm_submitqueue_destroy(struct kref *kref); + +void __msm_file_private_destroy(struct kref *kref); + +static inline void msm_file_private_put(struct msm_file_private *ctx) +{ + kref_put(&ctx->ref, __msm_file_private_destroy); +} + +static inline struct msm_file_private *msm_file_private_get( + struct msm_file_private *ctx) +{ + kref_get(&ctx->ref); + return ctx; +} + void msm_devfreq_init(struct msm_gpu *gpu); void msm_devfreq_cleanup(struct msm_gpu *gpu); void msm_devfreq_resume(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index 0a1ee20296a2..8b7473f69cb8 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -88,8 +88,12 @@ static struct devfreq_dev_profile msm_devfreq_profile = { .get_cur_freq = msm_devfreq_get_cur_freq, }; +static void msm_devfreq_idle_work(struct kthread_work *work); + void msm_devfreq_init(struct msm_gpu *gpu) { + struct msm_gpu_devfreq *df = &gpu->devfreq; + /* We need target support to do devfreq */ if (!gpu->funcs->gpu_busy) return; @@ -105,25 +109,27 @@ void msm_devfreq_init(struct msm_gpu *gpu) msm_devfreq_profile.freq_table = NULL; msm_devfreq_profile.max_state = 0; - gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev, + df->devfreq = devm_devfreq_add_device(&gpu->pdev->dev, &msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL); - if (IS_ERR(gpu->devfreq.devfreq)) { + if (IS_ERR(df->devfreq)) { DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); - gpu->devfreq.devfreq = NULL; + df->devfreq = NULL; return; } - devfreq_suspend_device(gpu->devfreq.devfreq); + devfreq_suspend_device(df->devfreq); - gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, - gpu->devfreq.devfreq); + gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, df->devfreq); if (IS_ERR(gpu->cooling)) { DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't register GPU cooling device\n"); gpu->cooling = NULL; } + + msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); } void msm_devfreq_cleanup(struct msm_gpu *gpu) @@ -151,6 +157,14 @@ void msm_devfreq_active(struct msm_gpu *gpu) unsigned int idle_time; unsigned long target_freq = df->idle_freq; + if (!df->devfreq) + return; + + 
/* + * Cancel any pending transition to idle frequency: + */ + hrtimer_cancel(&df->idle_work.timer); + /* * Hold devfreq lock to synchronize with get_dev_status()/ * target() callbacks @@ -181,11 +195,17 @@ void msm_devfreq_active(struct msm_gpu *gpu) mutex_unlock(&df->devfreq->lock); } -void msm_devfreq_idle(struct msm_gpu *gpu) + +static void msm_devfreq_idle_work(struct kthread_work *work) { - struct msm_gpu_devfreq *df = &gpu->devfreq; + struct msm_gpu_devfreq *df = container_of(work, + struct msm_gpu_devfreq, idle_work.work); + struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq); unsigned long idle_freq, target_freq = 0; + if (!df->devfreq) + return; + /* * Hold devfreq lock to synchronize with get_dev_status()/ * target() callbacks @@ -194,10 +214,19 @@ void msm_devfreq_idle(struct msm_gpu *gpu) idle_freq = get_freq(gpu); - msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0); + if (gpu->clamp_to_idle) + msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0); df->idle_time = ktime_get(); df->idle_freq = idle_freq; mutex_unlock(&df->devfreq->lock); } + +void msm_devfreq_idle(struct msm_gpu *gpu) +{ + struct msm_gpu_devfreq *df = &gpu->devfreq; + + msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1), + HRTIMER_MODE_ABS); +} diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index de2bc3467bb5..6a42b819abc4 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -136,8 +136,7 @@ struct msm_kms; * shortly before vblank to flush pending async updates. */ struct msm_pending_timer { - struct hrtimer timer; - struct kthread_work work; + struct msm_hrtimer_work work; struct kthread_worker *worker; struct msm_kms *kms; unsigned crtc_idx; diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index 32a55d81b58b..7cb158bcbcf6 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -7,6 +7,24 @@ #include "msm_gpu.h" +void __msm_file_private_destroy(struct kref *kref) +{ + struct msm_file_private *ctx = container_of(kref, + struct msm_file_private, ref); + int i; + + for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) { + if (!ctx->entities[i]) + continue; + + drm_sched_entity_destroy(ctx->entities[i]); + kfree(ctx->entities[i]); + } + + msm_gem_address_space_put(ctx->aspace); + kfree(ctx); +} + void msm_submitqueue_destroy(struct kref *kref) { struct msm_gpu_submitqueue *queue = container_of(kref, @@ -14,8 +32,6 @@ void msm_submitqueue_destroy(struct kref *kref) idr_destroy(&queue->fence_idr); - drm_sched_entity_destroy(&queue->entity); - msm_file_private_put(queue->ctx); kfree(queue); @@ -61,13 +77,48 @@ void msm_submitqueue_close(struct msm_file_private *ctx) } } +static struct drm_sched_entity * +get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring, + unsigned ring_nr, enum drm_sched_priority sched_prio) +{ + static DEFINE_MUTEX(entity_lock); + unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio; + + /* We should have already validated that the requested priority is + * valid by the time we get here. 
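get_sched_entity(), whose body continues below, creates each (ring, priority) scheduler entity on first use under a function-local mutex and caches it in the per-process table, so every submitqueue at a given priority shares one entity, preserving the FIFO ordering the msm_file_private comment above calls out. The bare shape of that lazy-init pattern, with hypothetical entry/make_entry stand-ins for the scheduler types:

#include <linux/bug.h>
#include <linux/mutex.h>

struct entry;                           /* opaque cached object */
struct entry *make_entry(void);         /* allocates + inits, NULL on error */

#define NUM_SLOTS 12                    /* e.g. rings * priority levels */
static struct entry *slots[NUM_SLOTS];

static struct entry *get_slot(unsigned int idx)
{
        static DEFINE_MUTEX(slot_lock);

        if (WARN_ON(idx >= NUM_SLOTS))
                return NULL;

        mutex_lock(&slot_lock);
        if (!slots[idx])
                slots[idx] = make_entry();      /* stays NULL on failure */
        mutex_unlock(&slot_lock);

        return slots[idx];
}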
+ */ + if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities))) + return ERR_PTR(-EINVAL); + + mutex_lock(&entity_lock); + + if (!ctx->entities[idx]) { + struct drm_sched_entity *entity; + struct drm_gpu_scheduler *sched = &ring->sched; + int ret; + + entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL); + + ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL); + if (ret) { + mutex_unlock(&entity_lock); + kfree(entity); + return ERR_PTR(ret); + } + + ctx->entities[idx] = entity; + } + + mutex_unlock(&entity_lock); + + return ctx->entities[idx]; +} + int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, u32 prio, u32 flags, u32 *id) { struct msm_drm_private *priv = drm->dev_private; struct msm_gpu_submitqueue *queue; - struct msm_ringbuffer *ring; - struct drm_gpu_scheduler *sched; enum drm_sched_priority sched_prio; unsigned ring_nr; int ret; @@ -91,12 +142,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, queue->flags = flags; queue->ring_nr = ring_nr; - ring = priv->gpu->rb[ring_nr]; - sched = &ring->sched; - - ret = drm_sched_entity_init(&queue->entity, - sched_prio, &sched, 1, NULL); - if (ret) { + queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr], + ring_nr, sched_prio); + if (IS_ERR(queue->entity)) { + ret = PTR_ERR(queue->entity); kfree(queue); return ret; } @@ -140,10 +189,6 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx) */ default_prio = DIV_ROUND_UP(max_priority, 2); - INIT_LIST_HEAD(&ctx->submitqueues); - - rwlock_init(&ctx->queuelock); - return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL); } diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index ec0432fe1bdf..86d78634a979 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -173,7 +173,11 @@ static void mxsfb_irq_disable(struct drm_device *drm) struct mxsfb_drm_private *mxsfb = drm->dev_private; mxsfb_enable_axi_clk(mxsfb); - mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc); + + /* Disable and clear VBLANK IRQ */ + writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR); + writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR); + mxsfb_disable_axi_clk(mxsfb); } diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c index 49eb8e9fef22..29428e770f14 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crc.c +++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c @@ -682,6 +682,7 @@ static const struct file_operations nv50_crc_flip_threshold_fops = { .open = nv50_crc_debugfs_flip_threshold_open, .read = seq_read, .write = nv50_crc_debugfs_flip_threshold_set, + .release = single_release, }; int nv50_head_crc_late_register(struct nv50_head *head) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 23fa9ecc2296..ae1f41205520 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1415,7 +1415,7 @@ nv50_mstm_prepare(struct nv50_mstm *mstm) struct drm_encoder *encoder; NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name); - drm_dp_update_payload_part1(&mstm->mgr); + drm_dp_update_payload_part1(&mstm->mgr, 1); drm_for_each_encoder(encoder, mstm->outp->base.base.dev) { if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index f8438a886b64..c3c57be54e1c 100644 --- 
a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -52,6 +52,7 @@ nv50_head_flush_clr(struct nv50_head *head, void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh) { + if (asyh->set.curs ) head->func->curs_set(head, asyh); if (asyh->set.olut ) { asyh->olut.offset = nv50_lut_load(&head->olut, asyh->olut.buffer, @@ -67,7 +68,6 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) if (asyh->set.view ) head->func->view (head, asyh); if (asyh->set.mode ) head->func->mode (head, asyh); if (asyh->set.core ) head->func->core_set(head, asyh); - if (asyh->set.curs ) head->func->curs_set(head, asyh); if (asyh->set.base ) head->func->base (head, asyh); if (asyh->set.ovly ) head->func->ovly (head, asyh); if (asyh->set.dither ) head->func->dither (head, asyh); diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index c68cc957248e..a582c0cb0cb0 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -71,6 +71,7 @@ #define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f #define VOLTA_CHANNEL_GPFIFO_A /* clc36f.h */ 0x0000c36f #define TURING_CHANNEL_GPFIFO_A /* clc36f.h */ 0x0000c46f +#define AMPERE_CHANNEL_GPFIFO_B /* clc36f.h */ 0x0000c76f #define NV50_DISP /* cl5070.h */ 0x00005070 #define G82_DISP /* cl5070.h */ 0x00008270 @@ -200,6 +201,7 @@ #define PASCAL_DMA_COPY_B 0x0000c1b5 #define VOLTA_DMA_COPY_A 0x0000c3b5 #define TURING_DMA_COPY_A 0x0000c5b5 +#define AMPERE_DMA_COPY_B 0x0000c7b5 #define FERMI_DECOMPRESS 0x000090b8 diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h index 54fab7cc36c1..64ee82c7c1be 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h @@ -77,4 +77,5 @@ int gp100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct int gp10b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); int gv100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); int tu102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int ga102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 7c15f6448428..6140db756d06 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -364,7 +364,6 @@ void * nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { struct acpi_device *acpidev; - acpi_handle handle; int type, ret; void *edid; @@ -377,12 +376,8 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) return NULL; } - handle = ACPI_HANDLE(dev->dev); - if (!handle) - return NULL; - - ret = acpi_bus_get_device(handle, &acpidev); - if (ret) + acpidev = ACPI_COMPANION(dev->dev); + if (!acpidev) return NULL; ret = acpi_video_get_edid(acpidev, type, -1, &edid); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 17a0a3ece485..fa73fe57f97b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -844,6 +844,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) struct ttm_resource *, struct ttm_resource *); int (*init)(struct nouveau_channel *, u32 handle); } _methods[] = { 
+ { "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init }, { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init }, { "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init }, { "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init }, diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 80099ef75702..ea7769135b0d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -250,7 +250,8 @@ static int nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device, u64 runlist, bool priv, struct nouveau_channel **pchan) { - static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A, + static const u16 oclasses[] = { AMPERE_CHANNEL_GPFIFO_B, + TURING_CHANNEL_GPFIFO_A, VOLTA_CHANNEL_GPFIFO_A, PASCAL_CHANNEL_GPFIFO_A, MAXWELL_CHANNEL_GPFIFO_A, @@ -386,7 +387,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) nvif_object_map(&chan->user, NULL, 0); - if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) { + if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO && + chan->user.oclass < AMPERE_CHANNEL_GPFIFO_B) { ret = nvif_notify_ctor(&chan->user, "abi16ChanKilled", nouveau_channel_killed, true, NV906F_V0_NTFY_KILLED, diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index c2bc05eb2e54..1cbe01048b93 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -207,6 +207,7 @@ static const struct file_operations nouveau_pstate_fops = { .open = nouveau_debugfs_pstate_open, .read = seq_read, .write = nouveau_debugfs_pstate_set, + .release = single_release, }; static struct drm_info_list nouveau_debugfs_list[] = { diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 92987daa5e17..3828aafd3ac4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm, goto error_dma_unmap; mutex_unlock(&svmm->mutex); - args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + args->dst[0] = migrate_pfn(page_to_pfn(dpage)); return 0; error_dma_unmap: @@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT); if (src & MIGRATE_PFN_WRITE) *pfn |= NVIF_VMM_PFNMAP_V0_W; - return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + return migrate_pfn(page_to_pfn(dpage)); out_dma_unmap: dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 1f828c9f691c..e7efd9ede8e4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -345,6 +345,9 @@ nouveau_accel_gr_init(struct nouveau_drm *drm) u32 arg0, arg1; int ret; + if (device->info.family >= NV_DEVICE_INFO_V0_AMPERE) + return; + /* Allocate channel that has access to the graphics engine. 
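Adding Ampere here is mostly table maintenance: both the copy-engine _methods[] table above and the channel-class list in nouveau_chan.c are ordered newest class first, and the probe takes the first class the device actually exposes. A generic sketch of that loop; dev_supports() and pick_class() are illustrative helpers, the class values mirror the DMA_COPY defines above:

#include <linux/errno.h>
#include <linux/kernel.h>       /* ARRAY_SIZE */
#include <linux/types.h>

bool dev_supports(void *dev, u16 oclass);       /* assumed probe helper */

static const u16 copy_classes[] = {
        0xc7b5,         /* AMPERE_DMA_COPY_B, newest first */
        0xc5b5,         /* TURING_DMA_COPY_A */
        0xc3b5,         /* VOLTA_DMA_COPY_A */
};

static int pick_class(void *dev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(copy_classes); i++) {
                if (dev_supports(dev, copy_classes[i]))
                        return copy_classes[i]; /* first hit wins */
        }

        return -ENODEV;
}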
*/ if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR); @@ -469,6 +472,7 @@ nouveau_accel_init(struct nouveau_drm *drm) case PASCAL_CHANNEL_GPFIFO_A: case VOLTA_CHANNEL_GPFIFO_A: case TURING_CHANNEL_GPFIFO_A: + case AMPERE_CHANNEL_GPFIFO_B: ret = nvc0_fence_create(drm); break; default: @@ -558,6 +562,7 @@ nouveau_drm_device_init(struct drm_device *dev) nvkm_dbgopt(nouveau_debug, "DRM"); INIT_LIST_HEAD(&drm->clients); + mutex_init(&drm->clients_lock); spin_lock_init(&drm->tile.lock); /* workaround an odd issue on nvc1 by disabling the device's @@ -628,6 +633,7 @@ fail_alloc: static void nouveau_drm_device_fini(struct drm_device *dev) { + struct nouveau_cli *cli, *temp_cli; struct nouveau_drm *drm = nouveau_drm(dev); if (nouveau_pmops_runtime()) { @@ -652,9 +658,28 @@ nouveau_drm_device_fini(struct drm_device *dev) nouveau_ttm_fini(drm); nouveau_vga_fini(drm); + /* + * There may be existing clients from as-yet unclosed files. For now, + * clean them up here rather than deferring until the file is closed, + * but this likely not correct if we want to support hot-unplugging + * properly. + */ + mutex_lock(&drm->clients_lock); + list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) { + list_del(&cli->head); + mutex_lock(&cli->mutex); + if (cli->abi16) + nouveau_abi16_fini(cli->abi16); + mutex_unlock(&cli->mutex); + nouveau_cli_fini(cli); + kfree(cli); + } + mutex_unlock(&drm->clients_lock); + nouveau_cli_fini(&drm->client); nouveau_cli_fini(&drm->master); nvif_parent_dtor(&drm->parent); + mutex_destroy(&drm->clients_lock); kfree(drm); } @@ -792,7 +817,7 @@ nouveau_drm_device_remove(struct drm_device *dev) struct nvkm_client *client; struct nvkm_device *device; - drm_dev_unregister(dev); + drm_dev_unplug(dev); client = nvxx_client(&drm->client.base); device = nvkm_device_find(client->device); @@ -1086,9 +1111,9 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) fpriv->driver_priv = cli; - mutex_lock(&drm->client.mutex); + mutex_lock(&drm->clients_lock); list_add(&cli->head, &drm->clients); - mutex_unlock(&drm->client.mutex); + mutex_unlock(&drm->clients_lock); done: if (ret && cli) { @@ -1106,6 +1131,16 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) { struct nouveau_cli *cli = nouveau_cli(fpriv); struct nouveau_drm *drm = nouveau_drm(dev); + int dev_index; + + /* + * The device is gone, and as it currently stands all clients are + * cleaned up in the removal codepath. In the future this may change + * so that we can support hot-unplugging, but for now we immediately + * return to avoid a double-free situation. 
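The device-fini hunk above has to walk drm->clients while freeing entries, hence list_for_each_entry_safe(), and it takes the new clients_lock rather than borrowing an unrelated per-client mutex. The generic shape of that locked teardown, with an assumed client struct:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct client {
        struct list_head head;
        /* ... per-client state ... */
};

static LIST_HEAD(clients);
static DEFINE_MUTEX(clients_lock);

static void teardown_clients(void)
{
        struct client *cli, *tmp;

        mutex_lock(&clients_lock);
        list_for_each_entry_safe(cli, tmp, &clients, head) {
                list_del(&cli->head);   /* tmp keeps the cursor valid */
                kfree(cli);
        }
        mutex_unlock(&clients_lock);
}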
+ */ + if (!drm_dev_enter(dev, &dev_index)) + return; pm_runtime_get_sync(dev->dev); @@ -1114,14 +1149,15 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) nouveau_abi16_fini(cli->abi16); mutex_unlock(&cli->mutex); - mutex_lock(&drm->client.mutex); + mutex_lock(&drm->clients_lock); list_del(&cli->head); - mutex_unlock(&drm->client.mutex); + mutex_unlock(&drm->clients_lock); nouveau_cli_fini(cli); kfree(cli); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); + drm_dev_exit(dev_index); } static const struct drm_ioctl_desc diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index ba65f136cf48..b2a970aa9bf4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -139,6 +139,11 @@ struct nouveau_drm { struct list_head clients; + /** + * @clients_lock: Protects access to the @clients list of &struct nouveau_cli. + */ + struct mutex clients_lock; + u8 old_pm_cap; struct { diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index d476940ee97c..9416bee92141 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -56,7 +56,7 @@ static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf) nouveau_bo_del_io_reserve_lru(bo); prot = vm_get_page_prot(vma->vm_flags); - ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1); + ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT); nouveau_bo_add_io_reserve_lru(bo); if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) return ret; @@ -247,10 +247,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, } ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL); - if (ret) { - nouveau_bo_ref(NULL, &nvbo); + if (ret) return ret; - } /* we restrict allowed domains on nv50+ to only the types * that were requested at creation time. 
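The nouveau_gem_new() hunk above removes the nouveau_bo_ref(NULL, &nvbo) in the error path because nouveau_bo_init() already drops the BO reference when it fails; unreferencing again in the caller was a double free. The ownership rule it encodes, sketched with hypothetical alloc_obj()/init_obj() helpers:

#include <linux/errno.h>

struct obj;
struct obj *alloc_obj(void);            /* assumed: NULL on failure */
int init_obj(struct obj *o);            /* assumed: frees o when it fails */

static int create_obj(struct obj **out)
{
        struct obj *o = alloc_obj();
        int ret;

        if (!o)
                return -ENOMEM;

        ret = init_obj(o);
        if (ret)
                return ret;     /* o is already gone: do NOT free it here */

        *out = o;
        return 0;
}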
not possibly on diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index b0c3422cb01f..266809e511e2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -162,10 +162,14 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, */ mm = get_task_mm(current); + if (!mm) { + return -EINVAL; + } mmap_read_lock(mm); if (!cli->svm.svmm) { mmap_read_unlock(mm); + mmput(mm); return -EINVAL; } @@ -992,7 +996,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id) if (ret) return ret; - buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL); + buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL); if (!buffer->fault) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 7c9c928c3196..c3526a8622e3 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -204,7 +204,7 @@ nv84_fence_create(struct nouveau_drm *drm) priv->base.context_new = nv84_fence_context_new; priv->base.context_del = nv84_fence_context_del; - priv->base.uevent = true; + priv->base.uevent = drm->client.device.info.family < NV_DEVICE_INFO_V0_AMPERE; mutex_init(&priv->mutex); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c index 704df0f2d1f1..09a112af2f89 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c @@ -78,6 +78,6 @@ int gt215_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_falcon_new_(>215_ce, device, type, inst, + return nvkm_falcon_new_(>215_ce, device, type, -1, (device->chipset != 0xaf), 0x104000, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 93ddf63d1114..b51d690f375f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2602,6 +2602,7 @@ nv172_chipset = { .top = { 0x00000001, ga100_top_new }, .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, ga102_fifo_new }, }; static const struct nvkm_device_chip @@ -2622,6 +2623,7 @@ nv174_chipset = { .top = { 0x00000001, ga100_top_new }, .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, ga102_fifo_new }, }; static const struct nvkm_device_chip @@ -2642,6 +2644,7 @@ nv177_chipset = { .top = { 0x00000001, ga100_top_new }, .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, ga102_fifo_new }, }; static int @@ -3144,8 +3147,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, WARN_ON(device->chip->ptr.inst & ~((1 << ARRAY_SIZE(device->ptr)) - 1)); \ for (j = 0; device->chip->ptr.inst && j < ARRAY_SIZE(device->ptr); j++) { \ if ((device->chip->ptr.inst & BIT(j)) && (subdev_mask & BIT_ULL(type))) { \ - int inst = (device->chip->ptr.inst == 1) ? 
-1 : (j); \ - ret = device->chip->ptr.ctor(device, (type), inst, &device->ptr[j]); \ + ret = device->chip->ptr.ctor(device, (type), (j), &device->ptr[j]); \ subdev = nvkm_device_subdev(device, (type), (j)); \ if (ret) { \ nvkm_subdev_del(&subdev); \ diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c index b0ece71aefde..ce774579c89d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c @@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size) args->v0.count = 0; args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE; args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE; - args->v0.pwrsrc = -ENOSYS; + args->v0.pwrsrc = -ENODEV; args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild index 3209eb7af65f..5e831d347a95 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild @@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/fifo/gp100.o nvkm-y += nvkm/engine/fifo/gp10b.o nvkm-y += nvkm/engine/fifo/gv100.o nvkm-y += nvkm/engine/fifo/tu102.o +nvkm-y += nvkm/engine/fifo/ga102.o nvkm-y += nvkm/engine/fifo/chan.o nvkm-y += nvkm/engine/fifo/channv50.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c index 353b77d9b3dc..3492c561f2cf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c @@ -82,7 +82,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base, if (offset < 0) return 0; - engn = fifo->base.func->engine_id(&fifo->base, engine); + engn = fifo->base.func->engine_id(&fifo->base, engine) - 1; save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn); nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12); done = nvkm_msec(device, 2000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c new file mode 100644 index 000000000000..c630dbd2911a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c @@ -0,0 +1,311 @@ +/* + * Copyright 2021 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
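The ga102_fifo()/ga102_chan() macros that open the new file below are ordinary container_of() down-casts: given a pointer to the embedded base object, recover the wrapping driver structure. A self-contained illustration with a hypothetical wrapper type:

#include <linux/kernel.h>       /* container_of() */

struct base {
        int id;
};

struct wrapper {
        struct base base;       /* embedded by value, not a pointer */
        int extra;
};

#define to_wrapper(p) container_of((p), struct wrapper, base)

static int wrapper_extra(struct base *b)
{
        return to_wrapper(b)->extra;    /* walk back to the outer struct */
}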
+ */ +#define ga102_fifo(p) container_of((p), struct ga102_fifo, base.engine) +#define ga102_chan(p) container_of((p), struct ga102_chan, object) +#include <engine/fifo.h> +#include "user.h" + +#include <core/memory.h> +#include <subdev/mmu.h> +#include <subdev/timer.h> +#include <subdev/top.h> + +#include <nvif/cl0080.h> +#include <nvif/clc36f.h> +#include <nvif/class.h> + +struct ga102_fifo { + struct nvkm_fifo base; +}; + +struct ga102_chan { + struct nvkm_object object; + + struct { + u32 runl; + u32 chan; + } ctrl; + + struct nvkm_memory *mthd; + struct nvkm_memory *inst; + struct nvkm_memory *user; + struct nvkm_memory *runl; + + struct nvkm_vmm *vmm; +}; + +static int +ga102_chan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass) +{ + if (index == 0) { + oclass->ctor = nvkm_object_new; + oclass->base = (struct nvkm_sclass) { -1, -1, AMPERE_DMA_COPY_B }; + return 0; + } + + return -EINVAL; +} + +static int +ga102_chan_map(struct nvkm_object *object, void *argv, u32 argc, + enum nvkm_object_map *type, u64 *addr, u64 *size) +{ + struct ga102_chan *chan = ga102_chan(object); + struct nvkm_device *device = chan->object.engine->subdev.device; + u64 bar2 = nvkm_memory_bar2(chan->user); + + if (bar2 == ~0ULL) + return -EFAULT; + + *type = NVKM_OBJECT_MAP_IO; + *addr = device->func->resource_addr(device, 3) + bar2; + *size = 0x1000; + return 0; +} + +static int +ga102_chan_fini(struct nvkm_object *object, bool suspend) +{ + struct ga102_chan *chan = ga102_chan(object); + struct nvkm_device *device = chan->object.engine->subdev.device; + + nvkm_wr32(device, chan->ctrl.chan, 0x00000003); + + nvkm_wr32(device, chan->ctrl.runl + 0x098, 0x01000000); + nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, chan->ctrl.runl + 0x098) & 0x00100000)) + break; + ); + + nvkm_wr32(device, chan->ctrl.runl + 0x088, 0); + + nvkm_wr32(device, chan->ctrl.chan, 0xffffffff); + return 0; +} + +static int +ga102_chan_init(struct nvkm_object *object) +{ + struct ga102_chan *chan = ga102_chan(object); + struct nvkm_device *device = chan->object.engine->subdev.device; + + nvkm_mask(device, chan->ctrl.runl + 0x300, 0x80000000, 0x80000000); + + nvkm_wr32(device, chan->ctrl.runl + 0x080, lower_32_bits(nvkm_memory_addr(chan->runl))); + nvkm_wr32(device, chan->ctrl.runl + 0x084, upper_32_bits(nvkm_memory_addr(chan->runl))); + nvkm_wr32(device, chan->ctrl.runl + 0x088, 2); + + nvkm_wr32(device, chan->ctrl.chan, 0x00000002); + nvkm_wr32(device, chan->ctrl.runl + 0x0090, 0); + return 0; +} + +static void * +ga102_chan_dtor(struct nvkm_object *object) +{ + struct ga102_chan *chan = ga102_chan(object); + + if (chan->vmm) { + nvkm_vmm_part(chan->vmm, chan->inst); + nvkm_vmm_unref(&chan->vmm); + } + + nvkm_memory_unref(&chan->runl); + nvkm_memory_unref(&chan->user); + nvkm_memory_unref(&chan->inst); + nvkm_memory_unref(&chan->mthd); + return chan; +} + +static const struct nvkm_object_func +ga102_chan = { + .dtor = ga102_chan_dtor, + .init = ga102_chan_init, + .fini = ga102_chan_fini, + .map = ga102_chan_map, + .sclass = ga102_chan_sclass, +}; + +static int +ga102_chan_new(struct nvkm_device *device, + const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject) +{ + struct volta_channel_gpfifo_a_v0 *args = argv; + struct nvkm_top_device *tdev; + struct nvkm_vmm *vmm; + struct ga102_chan *chan; + int ret; + + if (argc != sizeof(*args)) + return -ENOSYS; + + vmm = nvkm_uvmm_search(oclass->client, args->vmm); + if (IS_ERR(vmm)) + return PTR_ERR(vmm); + + if (!(chan = 
kzalloc(sizeof(*chan), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&ga102_chan, oclass, &chan->object); + *pobject = &chan->object; + + list_for_each_entry(tdev, &device->top->device, head) { + if (tdev->type == NVKM_ENGINE_CE) { + chan->ctrl.runl = tdev->runlist; + break; + } + } + + if (!chan->ctrl.runl) + return -ENODEV; + + chan->ctrl.chan = nvkm_rd32(device, chan->ctrl.runl + 0x004) & 0xfffffff0; + + args->chid = 0; + args->inst = 0; + args->token = nvkm_rd32(device, chan->ctrl.runl + 0x008) & 0xffff0000; + + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->mthd); + if (ret) + return ret; + + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->inst); + if (ret) + return ret; + + nvkm_kmap(chan->inst); + nvkm_wo32(chan->inst, 0x010, 0x0000face); + nvkm_wo32(chan->inst, 0x030, 0x7ffff902); + nvkm_wo32(chan->inst, 0x048, lower_32_bits(args->ioffset)); + nvkm_wo32(chan->inst, 0x04c, upper_32_bits(args->ioffset) | + (order_base_2(args->ilength / 8) << 16)); + nvkm_wo32(chan->inst, 0x084, 0x20400000); + nvkm_wo32(chan->inst, 0x094, 0x30000001); + nvkm_wo32(chan->inst, 0x0ac, 0x00020000); + nvkm_wo32(chan->inst, 0x0e4, 0x00000000); + nvkm_wo32(chan->inst, 0x0e8, 0); + nvkm_wo32(chan->inst, 0x0f4, 0x00001000); + nvkm_wo32(chan->inst, 0x0f8, 0x10003080); + nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000); + nvkm_wo32(chan->inst, 0x220, lower_32_bits(nvkm_memory_bar2(chan->mthd))); + nvkm_wo32(chan->inst, 0x224, upper_32_bits(nvkm_memory_bar2(chan->mthd))); + nvkm_done(chan->inst); + + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->user); + if (ret) + return ret; + + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->runl); + if (ret) + return ret; + + nvkm_kmap(chan->runl); + nvkm_wo32(chan->runl, 0x00, 0x80030001); + nvkm_wo32(chan->runl, 0x04, 1); + nvkm_wo32(chan->runl, 0x08, 0); + nvkm_wo32(chan->runl, 0x0c, 0x00000000); + nvkm_wo32(chan->runl, 0x10, lower_32_bits(nvkm_memory_addr(chan->user))); + nvkm_wo32(chan->runl, 0x14, upper_32_bits(nvkm_memory_addr(chan->user))); + nvkm_wo32(chan->runl, 0x18, lower_32_bits(nvkm_memory_addr(chan->inst))); + nvkm_wo32(chan->runl, 0x1c, upper_32_bits(nvkm_memory_addr(chan->inst))); + nvkm_done(chan->runl); + + ret = nvkm_vmm_join(vmm, chan->inst); + if (ret) + return ret; + + chan->vmm = nvkm_vmm_ref(vmm); + return 0; +} + +static const struct nvkm_device_oclass +ga102_chan_oclass = { + .ctor = ga102_chan_new, +}; + +static int +ga102_user_new(struct nvkm_device *device, + const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject) +{ + return tu102_fifo_user_new(oclass, argv, argc, pobject); +} + +static const struct nvkm_device_oclass +ga102_user_oclass = { + .ctor = ga102_user_new, +}; + +static int +ga102_fifo_sclass(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class) +{ + if (index == 0) { + oclass->base = (struct nvkm_sclass) { -1, -1, VOLTA_USERMODE_A }; + *class = &ga102_user_oclass; + return 0; + } else + if (index == 1) { + oclass->base = (struct nvkm_sclass) { 0, 0, AMPERE_CHANNEL_GPFIFO_B }; + *class = &ga102_chan_oclass; + return 0; + } + + return 2; +} + +static int +ga102_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data) +{ + switch (mthd) { + case NV_DEVICE_HOST_CHANNELS: *data = 1; return 0; + default: + break; + } + + return -ENOSYS; +} + +static void * +ga102_fifo_dtor(struct nvkm_engine *engine) +{ + return ga102_fifo(engine); +} + 
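/*
 * [Editorial aside, not part of the patch] The ga102_fifo()/ga102_chan()
 * macros at the top of this new file are the standard container_of()
 * downcast idiom: the specialised object embeds the generic one, and the
 * macro recovers the container from a pointer to the embedded member. A
 * minimal standalone sketch of the same pattern, with hypothetical names
 * (engine/my_fifo) so it builds outside the kernel:
 */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct engine { int id; };

struct my_fifo {
	struct engine base;	/* embedded generic object */
	int depth;
};
#define my_fifo(p) container_of((p), struct my_fifo, base)

static int my_fifo_depth(struct engine *e)
{
	/* downcast from the embedded base back to its container */
	return my_fifo(e)->depth;
}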
+static const struct nvkm_engine_func +ga102_fifo = { + .dtor = ga102_fifo_dtor, + .info = ga102_fifo_info, + .base.sclass = ga102_fifo_sclass, +}; + +int +ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) +{ + struct ga102_fifo *fifo; + + if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL))) + return -ENOMEM; + + nvkm_engine_ctor(&ga102_fifo, device, type, inst, true, &fifo->base.engine); + *pfifo = &fifo->base; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c index 31933f3e5a07..c982d834c8d9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c @@ -54,7 +54,7 @@ ga100_top_oneinit(struct nvkm_top *top) info->reset = (data & 0x0000001f); break; case 2: - info->runlist = (data & 0x0000fc00) >> 10; + info->runlist = (data & 0x00fffc00); info->engine = (data & 0x00000003); break; default: @@ -85,9 +85,10 @@ ga100_top_oneinit(struct nvkm_top *top) } nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d " - "runlist %2d engine %2d reset %2d\n", type, inst, + "runlist %6x engine %2d reset %2d\n", type, inst, info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type], - info->addr, info->fault, info->runlist, info->engine, info->reset); + info->addr, info->fault, info->runlist < 0 ? 0 : info->runlist, + info->engine, info->reset); info = NULL; } diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index f4cde3a169d8..809f86cfc540 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -11,6 +11,8 @@ #include "omap_drv.h" +MODULE_IMPORT_NS(DMA_BUF); + /* ----------------------------------------------------------------------------- * DMABUF Export */ diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index c64670707abf..7770b1802291 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -338,6 +338,7 @@ config DRM_PANEL_OLIMEX_LCD_OLINUXINO depends on OF depends on I2C depends on BACKLIGHT_CLASS_DEVICE + select CRC32 help The panel is used with different sizes LCDs, from 480x272 to 1280x800, and 24 bit per pixel. 
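[Editorial aside, not part of the patch] Several hunks in this merge (omapdrm above, tegra/gem.c and vmwgfx/ttm_object.c further down) add MODULE_IMPORT_NS(DMA_BUF). In v5.16 the dma-buf exports were moved into the DMA_BUF symbol namespace, so any module that calls them must declare the import or modpost rejects the unresolved namespace reference at build time. A minimal sketch of the pattern, with a hypothetical module name:

#include <linux/module.h>
#include <linux/dma-buf.h>

/*
 * Without this, modpost fails the build, complaining that the module uses
 * a symbol from namespace DMA_BUF but does not import it.
 */
MODULE_IMPORT_NS(DMA_BUF);

static int __init demo_init(void)
{
	/* dma_buf_get(), dma_buf_put(), ... now live in the DMA_BUF namespace */
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");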
diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c index 1b5d8f755b12..f043b484055b 100644 --- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c +++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c @@ -146,8 +146,8 @@ static const struct reg_sequence y030xx067a_init_sequence[] = { { 0x09, REG09_SUB_BRIGHT_R(0x20) }, { 0x0a, REG0A_SUB_BRIGHT_B(0x20) }, { 0x0b, REG0B_HD_FREERUN | REG0B_VD_FREERUN }, - { 0x0c, REG0C_CONTRAST_R(0x10) }, - { 0x0d, REG0D_CONTRAST_G(0x10) }, + { 0x0c, REG0C_CONTRAST_R(0x00) }, + { 0x0d, REG0D_CONTRAST_G(0x00) }, { 0x0e, REG0E_CONTRAST_B(0x10) }, { 0x0f, 0 }, { 0x10, REG10_BRIGHT(0x7f) }, diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 754a0c775801..ba30d11547ad 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -800,14 +800,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = { .clock = 69700, .hdisplay = 800, - .hsync_start = 800 + 6, - .hsync_end = 800 + 6 + 15, - .htotal = 800 + 6 + 15 + 16, + .hsync_start = 800 + 52, + .hsync_end = 800 + 52 + 8, + .htotal = 800 + 52 + 8 + 48, .vdisplay = 1280, - .vsync_start = 1280 + 8, - .vsync_end = 1280 + 8 + 48, - .vtotal = 1280 + 8 + 48 + 52, + .vsync_start = 1280 + 16, + .vsync_end = 1280 + 16 + 6, + .vtotal = 1280 + 16 + 6 + 15, .width_mm = 135, .height_mm = 217, diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c index 26001c2de9e9..dde0501aea68 100644 --- a/drivers/gpu/drm/r128/ati_pcigart.c +++ b/drivers/gpu/drm/r128/ati_pcigart.c @@ -215,7 +215,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga } ret = 0; -#if defined(__i386__) || defined(__x86_64__) +#ifdef CONFIG_X86 wbinvd(); #else mb(); diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index ec867fa880a4..751c2c075e09 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -423,7 +423,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode) drm_dp_mst_allocate_vcpi(&radeon_connector->mst_port->mst_mgr, radeon_connector->port, mst_enc->pbn, slots); - drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr); + drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1); radeon_dp_mst_set_be_cntl(primary, mst_enc, radeon_connector->mst_port->hpd.hpd, true); @@ -452,7 +452,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode) return; drm_dp_mst_reset_vcpi_slots(&radeon_connector->mst_port->mst_mgr, mst_enc->port); - drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr); + drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1); drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr); /* and this can also fail */ diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 458f92a70887..a36a4f2c76b0 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -61,7 +61,7 @@ static vm_fault_t radeon_gem_fault(struct vm_fault *vmf) goto unlock_resv; ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, - TTM_BO_VM_NUM_PREFAULT, 1); + TTM_BO_VM_NUM_PREFAULT); if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) goto unlock_mclk; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index b28ea5d6a3e2..3e8d9e2d1b67 
100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1176,26 +1176,24 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, * * Action plan: * - * 1. When DRM gives us a mode, we should add 999 Hz to it. That way - * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to - * make 60000 kHz then the clock framework will actually give us - * the right clock. + * 1. Try to set the exact rate first, and confirm the clock framework + * can provide it. * - * NOTE: if the PLL (maybe through a divider) could actually make - * a clock rate 999 Hz higher instead of the one we want then this - * could be a problem. Unfortunately there's not much we can do - * since it's baked into DRM to use kHz. It shouldn't matter in - * practice since Rockchip PLLs are controlled by tables and - * even if there is a divider in the middle I wouldn't expect PLL - * rates in the table that are just a few kHz different. + * 2. If the clock framework cannot provide the exact rate, we should + * add 999 Hz to the requested rate. That way if the clock we need + * is 60000001 Hz (~60 MHz) and DRM tells us to make 60000 kHz then + * the clock framework will actually give us the right clock. * - * 2. Get the clock framework to round the rate for us to tell us + * 3. Get the clock framework to round the rate for us to tell us * what it will actually make. * - * 3. Store the rounded up rate so that we don't need to worry about + * 4. Store the rounded up rate so that we don't need to worry about * this in the actual clk_set_rate(). */ - rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999); + rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000); + if (rate / 1000 != adjusted_mode->clock) + rate = clk_round_rate(vop->dclk, + adjusted_mode->clock * 1000 + 999); adjusted_mode->clock = DIV_ROUND_UP(rate, 1000); return true; diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c index 1c19a5d3eefb..8d8d8e214c28 100644 --- a/drivers/gpu/drm/selftests/test-drm_damage_helper.c +++ b/drivers/gpu/drm/selftests/test-drm_damage_helper.c @@ -30,6 +30,7 @@ static void mock_setup(struct drm_plane_state *state) mock_device.driver = &mock_driver; mock_device.mode_config.prop_fb_damage_clips = &mock_prop; mock_plane.dev = &mock_device; + mock_obj_props.count = 0; mock_plane.base.properties = &mock_obj_props; mock_prop.base.id = 1; /* 0 is an invalid id */ mock_prop.dev = &mock_device; diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index 21d473deb757..a8d75fd7e9f4 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -213,11 +213,13 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, goto err_disable_clk_tmds; } + ret = sun8i_hdmi_phy_init(hdmi->phy); + if (ret) + goto err_disable_clk_tmds; + drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs); drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); - sun8i_hdmi_phy_init(hdmi->phy); - plat_data->mode_valid = hdmi->quirks->mode_valid; plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe; sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data); @@ -259,6 +261,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master, struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev); dw_hdmi_unbind(hdmi->hdmi); + sun8i_hdmi_phy_deinit(hdmi->phy); clk_disable_unprepare(hdmi->clk_tmds); 
reset_control_assert(hdmi->rst_ctrl); gpiod_set_value(hdmi->ddc_en, 0); diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h index 74f6ed0e2570..bffe1b9cd3dc 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h @@ -169,6 +169,7 @@ struct sun8i_hdmi_phy { struct clk *clk_phy; struct clk *clk_pll0; struct clk *clk_pll1; + struct device *dev; unsigned int rcal; struct regmap *regs; struct reset_control *rst_phy; @@ -205,7 +206,8 @@ encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder) int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node); -void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy); +int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy); +void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy); void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy, struct dw_hdmi_plat_data *plat_data); diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index c9239708d398..b64d93da651d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -506,9 +506,60 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy) phy->rcal = (val & SUN8I_HDMI_PHY_ANA_STS_RCAL_MASK) >> 2; } -void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy) +int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy) { + int ret; + + ret = reset_control_deassert(phy->rst_phy); + if (ret) { + dev_err(phy->dev, "Cannot deassert phy reset control: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(phy->clk_bus); + if (ret) { + dev_err(phy->dev, "Cannot enable bus clock: %d\n", ret); + goto err_assert_rst_phy; + } + + ret = clk_prepare_enable(phy->clk_mod); + if (ret) { + dev_err(phy->dev, "Cannot enable mod clock: %d\n", ret); + goto err_disable_clk_bus; + } + + if (phy->variant->has_phy_clk) { + ret = sun8i_phy_clk_create(phy, phy->dev, + phy->variant->has_second_pll); + if (ret) { + dev_err(phy->dev, "Couldn't create the PHY clock\n"); + goto err_disable_clk_mod; + } + + clk_prepare_enable(phy->clk_phy); + } + phy->variant->phy_init(phy); + + return 0; + +err_disable_clk_mod: + clk_disable_unprepare(phy->clk_mod); +err_disable_clk_bus: + clk_disable_unprepare(phy->clk_bus); +err_assert_rst_phy: + reset_control_assert(phy->rst_phy); + + return ret; +} + +void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy) +{ + clk_disable_unprepare(phy->clk_mod); + clk_disable_unprepare(phy->clk_bus); + clk_disable_unprepare(phy->clk_phy); + + reset_control_assert(phy->rst_phy); } void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy, @@ -638,6 +689,7 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev) return -ENOMEM; phy->variant = (struct sun8i_hdmi_phy_variant *)match->data; + phy->dev = dev; ret = of_address_to_resource(node, 0, &res); if (ret) { @@ -696,47 +748,10 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev) goto err_put_clk_pll1; } - ret = reset_control_deassert(phy->rst_phy); - if (ret) { - dev_err(dev, "Cannot deassert phy reset control: %d\n", ret); - goto err_put_rst_phy; - } - - ret = clk_prepare_enable(phy->clk_bus); - if (ret) { - dev_err(dev, "Cannot enable bus clock: %d\n", ret); - goto err_deassert_rst_phy; - } - - ret = clk_prepare_enable(phy->clk_mod); - if (ret) { - dev_err(dev, "Cannot enable mod clock: %d\n", ret); - goto err_disable_clk_bus; - } - - if (phy->variant->has_phy_clk) { - ret = sun8i_phy_clk_create(phy, dev, - phy->variant->has_second_pll); - if (ret) { - dev_err(dev, "Couldn't create the 
PHY clock\n"); - goto err_disable_clk_mod; - } - - clk_prepare_enable(phy->clk_phy); - } - platform_set_drvdata(pdev, phy); return 0; -err_disable_clk_mod: - clk_disable_unprepare(phy->clk_mod); -err_disable_clk_bus: - clk_disable_unprepare(phy->clk_bus); -err_deassert_rst_phy: - reset_control_assert(phy->rst_phy); -err_put_rst_phy: - reset_control_put(phy->rst_phy); err_put_clk_pll1: clk_put(phy->clk_pll1); err_put_clk_pll0: @@ -753,12 +768,6 @@ static int sun8i_hdmi_phy_remove(struct platform_device *pdev) { struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev); - clk_disable_unprepare(phy->clk_mod); - clk_disable_unprepare(phy->clk_bus); - clk_disable_unprepare(phy->clk_phy); - - reset_control_assert(phy->rst_phy); - reset_control_put(phy->rst_phy); clk_put(phy->clk_pll0); diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 16c7aabb94d3..a29d64f87563 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1845,7 +1845,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc, bool prepare_bandwidth_transition) { const struct tegra_plane_state *old_tegra_state, *new_tegra_state; - const struct tegra_dc_state *old_dc_state, *new_dc_state; u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw; const struct drm_plane_state *old_plane_state; const struct drm_crtc_state *old_crtc_state; @@ -1858,8 +1857,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc, return; old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); - old_dc_state = to_const_dc_state(old_crtc_state); - new_dc_state = to_const_dc_state(crtc->state); if (!crtc->state->active) { if (!old_crtc_state->active) diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h index f0cb691852a1..40378308d527 100644 --- a/drivers/gpu/drm/tegra/dc.h +++ b/drivers/gpu/drm/tegra/dc.h @@ -35,12 +35,6 @@ static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state) return NULL; } -static inline const struct tegra_dc_state * -to_const_dc_state(const struct drm_crtc_state *state) -{ - return to_dc_state((struct drm_crtc_state *)state); -} - struct tegra_dc_stats { unsigned long frames; unsigned long vblank; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 6ec598f5d5b3..d38fd7e12b57 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -12,6 +12,7 @@ #include <linux/dma-buf.h> #include <linux/iommu.h> +#include <linux/module.h> #include <drm/drm_drv.h> #include <drm/drm_prime.h> @@ -20,6 +21,8 @@ #include "drm.h" #include "gem.h" +MODULE_IMPORT_NS(DMA_BUF); + static void tegra_bo_put(struct host1x_bo *bo) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c index dc16a24f4dbe..690a339c52ec 100644 --- a/drivers/gpu/drm/tegra/uapi.c +++ b/drivers/gpu/drm/tegra/uapi.c @@ -222,7 +222,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f mapping->iova = sg_dma_address(mapping->sgt->sgl); } - mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->size; + mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size; err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX), GFP_KERNEL); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 6b03f89a98d4..3ddb7c710a3d 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -186,10 +186,8 @@ static void tilcdc_fini(struct drm_device 
*dev) if (priv->mmio) iounmap(priv->mmio); - if (priv->wq) { - flush_workqueue(priv->wq); + if (priv->wq) destroy_workqueue(priv->wq); - } dev->dev_private = NULL; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index f349ec8b91a1..e4a20a3a5d16 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -619,7 +619,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, *busy = !ret; } - if (ret && place && !bo->bdev->funcs->eviction_valuable(bo, place)) { + if (ret && place && (bo->resource->mem_type != place->mem_type || + !bo->bdev->funcs->eviction_valuable(bo, place))) { ret = false; if (*locked) { dma_resv_unlock(bo->base.resv); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index a342d701c91c..72a94301bc95 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -189,6 +189,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo) struct ttm_transfer_obj *fbo; fbo = container_of(bo, struct ttm_transfer_obj, base); + dma_resv_fini(&fbo->base.base._resv); ttm_bo_put(fbo->bo); kfree(fbo); } diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 33680c94127c..08ba083a80d2 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -173,89 +173,6 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_vm_reserve); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -/** - * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults - * @vmf: Fault data - * @bo: The buffer object - * @page_offset: Page offset from bo start - * @fault_page_size: The size of the fault in pages. - * @pgprot: The page protections. - * Does additional checking whether it's possible to insert a PUD or PMD - * pfn and performs the insertion. - * - * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if - * a huge fault was not possible, or on insertion error. - */ -static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, - struct ttm_buffer_object *bo, - pgoff_t page_offset, - pgoff_t fault_page_size, - pgprot_t pgprot) -{ - pgoff_t i; - vm_fault_t ret; - unsigned long pfn; - pfn_t pfnt; - struct ttm_tt *ttm = bo->ttm; - bool write = vmf->flags & FAULT_FLAG_WRITE; - - /* Fault should not cross bo boundary. */ - page_offset &= ~(fault_page_size - 1); - if (page_offset + fault_page_size > bo->resource->num_pages) - goto out_fallback; - - if (bo->resource->bus.is_iomem) - pfn = ttm_bo_io_mem_pfn(bo, page_offset); - else - pfn = page_to_pfn(ttm->pages[page_offset]); - - /* pfn must be fault_page_size aligned. */ - if ((pfn & (fault_page_size - 1)) != 0) - goto out_fallback; - - /* Check that memory is contiguous. 
*/ - if (!bo->resource->bus.is_iomem) { - for (i = 1; i < fault_page_size; ++i) { - if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i) - goto out_fallback; - } - } else if (bo->bdev->funcs->io_mem_pfn) { - for (i = 1; i < fault_page_size; ++i) { - if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i) - goto out_fallback; - } - } - - pfnt = __pfn_to_pfn_t(pfn, PFN_DEV); - if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT)) - ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write); -#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD - else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT)) - ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write); -#endif - else - WARN_ON_ONCE(ret = VM_FAULT_FALLBACK); - - if (ret != VM_FAULT_NOPAGE) - goto out_fallback; - - return VM_FAULT_NOPAGE; -out_fallback: - count_vm_event(THP_FAULT_FALLBACK); - return VM_FAULT_FALLBACK; -} -#else -static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, - struct ttm_buffer_object *bo, - pgoff_t page_offset, - pgoff_t fault_page_size, - pgprot_t pgprot) -{ - return VM_FAULT_FALLBACK; -} -#endif - /** * ttm_bo_vm_fault_reserved - TTM fault helper * @vmf: The struct vm_fault given as argument to the fault callback @@ -263,7 +180,6 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, * @num_prefault: Maximum number of prefault pages. The caller may want to * specify this based on madvice settings and the size of the GPU object * backed by the memory. - * @fault_page_size: The size of the fault in pages. * * This function inserts one or more page table entries pointing to the * memory backing the buffer object, and then returns a return code @@ -277,8 +193,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, */ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, pgprot_t prot, - pgoff_t num_prefault, - pgoff_t fault_page_size) + pgoff_t num_prefault) { struct vm_area_struct *vma = vmf->vma; struct ttm_buffer_object *bo = vma->vm_private_data; @@ -329,11 +244,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, prot = pgprot_decrypted(prot); } - /* We don't prefault on huge faults. Yet. */ - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) - return ttm_bo_vm_insert_huge(vmf, bo, page_offset, - fault_page_size, prot); - /* * Speculatively prefault a number of pages. Only error on * first page. 
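[Editorial aside, not part of the patch] The hunks above delete TTM's transparent-huge-page fault path: ttm_bo_vm_insert_huge() is removed and ttm_bo_vm_fault_reserved() loses its fault_page_size parameter, so faults are now always handled at PTE granularity. A condensed sketch of a driver fault handler against the new signature, modeled on the radeon_gem_fault() hunk earlier in this diff (hypothetical name, error paths trimmed):

#include <linux/mm.h>
#include <drm/ttm/ttm_bo_api.h>

static vm_fault_t demo_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	pgprot_t prot = vmf->vma->vm_page_prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* no fault_page_size argument any more */
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;	/* reservation dropped on the retry path */

	dma_resv_unlock(bo->base.resv);
	return ret;
}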
@@ -429,7 +339,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) prot = vma->vm_page_prot; if (drm_dev_enter(ddev, &idx)) { - ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1); + ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT); drm_dev_exit(idx); } else { ret = ttm_bo_vm_dummy_page(vmf, prot); diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index 3750fd216131..930574ad2bca 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -30,7 +30,7 @@ static int udl_get_edid_block(void *data, u8 *buf, unsigned int block, int bval = (i + block * EDID_LENGTH) << 8; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x02, (0x80 | (0x02 << 5)), bval, - 0xA1, read_buff, 2, HZ); + 0xA1, read_buff, 2, 1000); if (ret < 1) { DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); kfree(read_buff); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index fab9b93e1b84..053fbaf765ca 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1620,14 +1620,6 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data, return 0; } -static const struct snd_soc_dapm_widget vc4_hdmi_audio_widgets[] = { - SND_SOC_DAPM_OUTPUT("TX"), -}; - -static const struct snd_soc_dapm_route vc4_hdmi_audio_routes[] = { - { "TX", NULL, "Playback" }, -}; - static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = { .name = "vc4-hdmi-cpu-dai-component", }; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 749db18dcfa2..d86e1ad4a972 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -163,10 +163,11 @@ static __poll_t virtio_gpu_poll(struct file *filp, struct drm_file *drm_file = filp->private_data; struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv; struct drm_device *dev = drm_file->minor->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; struct drm_pending_event *e = NULL; __poll_t mask = 0; - if (!vfpriv->ring_idx_mask) + if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask) return drm_poll(filp, wait); poll_wait(filp, &drm_file->event_wait, wait); diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c index edd17c30d5a5..7f7fe35fc21d 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_memory.c +++ b/drivers/gpu/drm/vmwgfx/ttm_memory.c @@ -468,7 +468,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) struct ttm_mem_zone *zone; unsigned int i; - flush_workqueue(glob->swap_queue); destroy_workqueue(glob->swap_queue); glob->swap_queue = NULL; for (i = 0; i < glob->num_zones; ++i) { diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c index 04789b2bb2a2..899945f54dc7 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.c +++ b/drivers/gpu/drm/vmwgfx/ttm_object.c @@ -48,8 +48,11 @@ #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/atomic.h> +#include <linux/module.h> #include "ttm_object.h" +MODULE_IMPORT_NS(DMA_BUF); + /** * struct ttm_object_file * diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index ab9a1750e1df..bfd71c86faa5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -29,7 +29,7 @@ #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/mem_encrypt.h> +#include <linux/cc_platform.h> #include <drm/drm_aperture.h> #include 
<drm/drm_drv.h> @@ -666,7 +666,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) [vmw_dma_map_bind] = "Giving up DMA mappings early."}; /* TTM currently doesn't fully support SEV encryption. */ - if (mem_encrypt_active()) + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) return -EINVAL; if (vmw_force_coherent) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index a833751099b5..858aff99a3fe 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1550,10 +1550,6 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, pgoff_t start, pgoff_t end); vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf); vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, - enum page_entry_size pe_size); -#endif /* Transparent hugepage support - vmwgfx_thp.c */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index e50fb82a3030..2aceac7856e2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -28,7 +28,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/mem_encrypt.h> +#include <linux/cc_platform.h> #include <asm/hypervisor.h> #include <drm/drm_ioctl.h> @@ -160,7 +160,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel, unsigned long msg_len = strlen(msg); /* HB port can't access encrypted memory. */ - if (hb && !mem_encrypt_active()) { + if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { unsigned long bp = channel->cookie_high; u32 channel_id = (channel->channel_id << 16); @@ -216,7 +216,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, unsigned long si, di, eax, ebx, ecx, edx; /* HB port can't access encrypted memory */ - if (hb && !mem_encrypt_active()) { + if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { unsigned long bp = channel->cookie_low; u32 channel_id = (channel->channel_id << 16); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index e5a9a5cbd01a..922317d1acc8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -477,7 +477,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) else prot = vm_get_page_prot(vma->vm_flags); - ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault, 1); + ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault); if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) return ret; @@ -486,73 +486,3 @@ out_unlock: return ret; } - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, - enum page_entry_size pe_size) -{ - struct vm_area_struct *vma = vmf->vma; - struct ttm_buffer_object *bo = (struct ttm_buffer_object *) - vma->vm_private_data; - struct vmw_buffer_object *vbo = - container_of(bo, struct vmw_buffer_object, base); - pgprot_t prot; - vm_fault_t ret; - pgoff_t fault_page_size; - bool write = vmf->flags & FAULT_FLAG_WRITE; - - switch (pe_size) { - case PE_SIZE_PMD: - fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT; - break; -#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD - case PE_SIZE_PUD: - fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT; - break; -#endif - default: - WARN_ON_ONCE(1); - return VM_FAULT_FALLBACK; - } - - /* Always do write dirty-tracking and COW on PTE level. 
*/ - if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping(vma->vm_flags))) - return VM_FAULT_FALLBACK; - - ret = ttm_bo_vm_reserve(bo, vmf); - if (ret) - return ret; - - if (vbo->dirty) { - pgoff_t allowed_prefault; - unsigned long page_offset; - - page_offset = vmf->pgoff - - drm_vma_node_start(&bo->base.vma_node); - if (page_offset >= bo->resource->num_pages || - vmw_resources_clean(vbo, page_offset, - page_offset + PAGE_SIZE, - &allowed_prefault)) { - ret = VM_FAULT_SIGBUS; - goto out_unlock; - } - - /* - * Write protect, so we get a new fault on write, and can - * split. - */ - prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED); - } else { - prot = vm_get_page_prot(vma->vm_flags); - } - - ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size); - if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) - return ret; - -out_unlock: - dma_resv_unlock(bo->base.resv); - - return ret; -} -#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index e6b1f98ec99f..0a4c340252ec 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -61,9 +61,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) .fault = vmw_bo_vm_fault, .open = ttm_bo_vm_open, .close = ttm_bo_vm_close, -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - .huge_fault = vmw_bo_vm_huge_fault, -#endif }; struct drm_file *file_priv = filp->private_data; struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); diff --git a/drivers/gpu/host1x/fence.c b/drivers/gpu/host1x/fence.c index 6941add95d0f..ecab72882192 100644 --- a/drivers/gpu/host1x/fence.c +++ b/drivers/gpu/host1x/fence.c @@ -15,7 +15,7 @@ #include "intr.h" #include "syncpt.h" -DEFINE_SPINLOCK(lock); +static DEFINE_SPINLOCK(lock); struct host1x_syncpt_fence { struct dma_fence base; @@ -152,8 +152,10 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold) return ERR_PTR(-ENOMEM); fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL); - if (!fence->waiter) + if (!fence->waiter) { + kfree(fence); return ERR_PTR(-ENOMEM); + } fence->sp = sp; fence->threshold = threshold; diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c index 8ae301eef643..a9639d098893 100644 --- a/drivers/gpu/ipu-v3/ipu-csi.c +++ b/drivers/gpu/ipu-v3/ipu-csi.c @@ -259,10 +259,24 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code, cfg->data_width = IPU_CSI_DATA_WIDTH_8; break; case MEDIA_BUS_FMT_UYVY8_1X16: + if (mbus_type == V4L2_MBUS_BT656) { + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY; + cfg->data_width = IPU_CSI_DATA_WIDTH_8; + } else { + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; + cfg->data_width = IPU_CSI_DATA_WIDTH_16; + } + cfg->mipi_dt = MIPI_DT_YUV422; + break; case MEDIA_BUS_FMT_YUYV8_1X16: - cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; + if (mbus_type == V4L2_MBUS_BT656) { + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV; + cfg->data_width = IPU_CSI_DATA_WIDTH_8; + } else { + cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; + cfg->data_width = IPU_CSI_DATA_WIDTH_16; + } cfg->mipi_dt = MIPI_DT_YUV422; - cfg->data_width = IPU_CSI_DATA_WIDTH_16; break; case MEDIA_BUS_FMT_SBGGR8_1X8: case MEDIA_BUS_FMT_SGBRG8_1X8: @@ -332,7 +346,7 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg, const struct v4l2_mbus_config *mbus_cfg, const struct v4l2_mbus_framefmt *mbus_fmt) { - int ret; + int ret, is_bt1120; memset(csicfg, 0, sizeof(*csicfg)); @@ -353,11 +367,18 @@ static int 
fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg, break; case V4L2_MBUS_BT656: csicfg->ext_vsync = 0; + /* UYVY10_1X20 etc. should be supported as well */ + is_bt1120 = mbus_fmt->code == MEDIA_BUS_FMT_UYVY8_1X16 || + mbus_fmt->code == MEDIA_BUS_FMT_YUYV8_1X16; if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) || mbus_fmt->field == V4L2_FIELD_ALTERNATE) - csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED; + csicfg->clk_mode = is_bt1120 ? + IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR : + IPU_CSI_CLK_MODE_CCIR656_INTERLACED; else - csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE; + csicfg->clk_mode = is_bt1120 ? + IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR : + IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE; break; case V4L2_MBUS_CSI2_DPHY: /* |
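[Editorial aside, not part of the patch; the diff is truncated above] The ipu-csi hunk teaches fill_csi_bus_cfg() to distinguish BT.1120 from plain BT.656 on the same mbus type: a 1X16 YUV bus code implies the wider BT.1120 timing, and interlacing here stands for V4L2_FIELD_HAS_BOTH(field) || field == V4L2_FIELD_ALTERNATE as in the hunk. A standalone restatement of the added branch, using only constants visible in the hunk:

#include <linux/media-bus-format.h>
#include <video/imx-ipu-v3.h>

static enum ipu_csi_clk_mode pick_ccir_clk_mode(u32 mbus_code, bool interlaced)
{
	/* 1X16 YUV codes on a BT.656-style bus mean BT.1120 timing */
	bool is_bt1120 = mbus_code == MEDIA_BUS_FMT_UYVY8_1X16 ||
			 mbus_code == MEDIA_BUS_FMT_YUYV8_1X16;

	if (interlaced)
		return is_bt1120 ? IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR :
				   IPU_CSI_CLK_MODE_CCIR656_INTERLACED;

	return is_bt1120 ? IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR :
			   IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
}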