166 files changed, 9037 insertions, 7470 deletions
diff --git a/Documentation/gpu/amdgpu/apu-asic-info-table.csv b/Documentation/gpu/amdgpu/apu-asic-info-table.csv index 98c6988e424e..395a7b7bfaef 100644 --- a/Documentation/gpu/amdgpu/apu-asic-info-table.csv +++ b/Documentation/gpu/amdgpu/apu-asic-info-table.csv @@ -1,8 +1,10 @@ -Product Name, Code Reference, DCN/DCE version, GC version, VCE/UVD/VCN version, SDMA version -Radeon R* Graphics, CARRIZO/STONEY, DCE 11, 8, VCE 3 / UVD 6, 3 -Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN/PICASSO, DCN 1.0, 9.1.0, VCN 1.0, 4.1.0 -Ryzen 4000 series, RENOIR, DCN 2.1, 9.3, VCN 2.2, 4.1.2 -Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN2, DCN 1.0, 9.2.2, VCN 1.0.1, 4.1.1 -SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1 -Ryzen 5000 series, GREEN SARDINE, DCN 2.1, 9.3, VCN 2.2, 4.1.1 -Ryzen 6000 Zen, YELLOW CARP, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3 +Product Name, Code Reference, DCN/DCE version, GC version, VCE/UVD/VCN version, SDMA version, MP0 version +Radeon R* Graphics, CARRIZO/STONEY, DCE 11, 8, VCE 3 / UVD 6, 3, n/a +Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN/PICASSO, DCN 1.0, 9.1.0, VCN 1.0, 4.1.0, 10.0.0 +Ryzen 4000 series, RENOIR, DCN 2.1, 9.3, VCN 2.2, 4.1.2, 11.0.3 +Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN2, DCN 1.0, 9.2.2, VCN 1.0.1, 4.1.1, 10.0.1 +SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1, 11.5.0 +Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barcelo-R, DCN 2.1, 9.3, VCN 2.2, 4.1.1, 12.0.1 +Ryzen 6000 series / Ryzen 7x35 series, YELLOW CARP / Rembrandt / Rembrandt+, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3 +Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5 +Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8 diff --git a/Documentation/gpu/amdgpu/dgpu-asic-info-table.csv b/Documentation/gpu/amdgpu/dgpu-asic-info-table.csv index 84617aa35dab..882d2518f8ed 100644 --- a/Documentation/gpu/amdgpu/dgpu-asic-info-table.csv +++ b/Documentation/gpu/amdgpu/dgpu-asic-info-table.csv @@ -22,3 +22,5 @@ AMD Radeon RX 6800(XT) /6900(XT) /W6800, SIENNA_CICHLID, DCN 3.0.0, 10.3.0, VCN AMD Radeon RX 6700 XT / 6800M / 6700M, NAVY_FLOUNDER, DCN 3.0.0, 10.3.2, VCN 3.0.0, 5.2.2 AMD Radeon RX 6600(XT) /6600M /W6600 /W6600M, DIMGREY_CAVEFISH, DCN 3.0.2, 10.3.4, VCN 3.0.16, 5.2.4 AMD Radeon RX 6500M /6300M /W6500M /W6300M, BEIGE_GOBY, DCN 3.0.3, 10.3.5, VCN 3.0.33, 5.2.5 +AMD Radeon RX 7900 XT /XTX, , DCN 3.2.0, 11.0.0, VCN 4.0.0, 6.0.0 +AMD Radeon RX 7600M (XT) /7700S /7600S, , DCN 3.2.1, 11.0.2, VCN 4.0.4, 6.0.2 diff --git a/Documentation/gpu/amdgpu/driver-misc.rst b/Documentation/gpu/amdgpu/driver-misc.rst index 1800543d45f7..be131e963d87 100644 --- a/Documentation/gpu/amdgpu/driver-misc.rst +++ b/Documentation/gpu/amdgpu/driver-misc.rst @@ -37,7 +37,7 @@ Accelerated Processing Units (APU) Info .. 
csv-table:: :header-rows: 1 - :widths: 3, 2, 2, 1, 1, 1 + :widths: 3, 2, 2, 1, 1, 1, 1 :file: ./apu-asic-info-table.csv Discrete GPU Info diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 332cf8bda7a2..5df603192cdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -137,6 +137,7 @@ amdgpu-y += \ gfx_v10_0.o \ imu_v11_0.o \ gfx_v11_0.o \ + gfx_v11_0_3.o \ imu_v11_0_3.o # add async DMA block diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 76a2b4a4de10..1257745fb202 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2076,6 +2076,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) struct drm_device *dev = adev_to_drm(adev); struct pci_dev *parent; int i, r; + bool total; amdgpu_device_enable_virtual_display(adev); @@ -2159,6 +2160,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; + total = true; for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { DRM_ERROR("disabled ip block: %d <%s>\n", @@ -2172,7 +2174,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) } else if (r) { DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); - return r; + total = false; } else { adev->ip_blocks[i].status.valid = true; } @@ -2203,6 +2205,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) } } + if (!total) + return -ENODEV; adev->cg_flags &= amdgpu_cg_mask; adev->pg_flags &= amdgpu_pg_mask; @@ -5854,8 +5858,8 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, int amdgpu_in_reset(struct amdgpu_device *adev) { return atomic_read(&adev->reset_domain->in_gpu_reset); - } - +} + /** * amdgpu_device_halt() - bring hardware to some kind of halt state * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e3fed38a0d90..7bb12a76631f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -106,9 +106,10 @@ * - 3.49.0 - Add gang submit into CS IOCTL * - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock * Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock + * 3.51.0 - Return the PCIe gen and lanes from the INFO ioctl */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 50 +#define KMS_DRIVER_MINOR 51 #define KMS_DRIVER_PATCHLEVEL 0 unsigned int amdgpu_vram_limit = UINT_MAX; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 42a939cd2eac..35ed46b9249c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -156,6 +156,9 @@ static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev) return amdgpu_compute_multipipe == 1; } + if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)) + return true; + /* FIXME: spreading the queues across pipes causes perf regressions * on POLARIS11 compute workloads */ if (adev->asic_type == CHIP_POLARIS11) @@ -696,6 +699,50 @@ late_fini: return r; } +int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev) +{ + int err = 0; + struct amdgpu_gfx_ras *ras = NULL; + + /* adev->gfx.ras is NULL, which means gfx does not + * support ras function, then do nothing 
here. + */ + if (!adev->gfx.ras) + return 0; + + ras = adev->gfx.ras; + + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register gfx ras block!\n"); + return err; + } + + strcpy(ras->ras_block.ras_comm.name, "gfx"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->gfx.ras_if = &ras->ras_block.ras_comm; + + /* If not define special ras_late_init function, use gfx default ras_late_init */ + if (!ras->ras_block.ras_late_init) + ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + + /* If not defined special ras_cb function, use default ras_cb */ + if (!ras->ras_block.ras_cb) + ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb; + + return 0; +} + +int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler) + return adev->gfx.ras->poison_consumption_handler(adev, entry); + + return 0; +} + int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, struct amdgpu_iv_entry *entry) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index b3df4787877e..86ec9d0d12c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -210,6 +210,11 @@ struct amdgpu_gfx_ras { struct amdgpu_ras_block_object ras_block; void (*enable_watchdog_timer)(struct amdgpu_device *adev); bool (*query_utcl2_poison_status)(struct amdgpu_device *adev); + int (*rlc_gc_fed_irq)(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); + int (*poison_consumption_handler)(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry); }; struct amdgpu_gfx_funcs { @@ -323,6 +328,7 @@ struct amdgpu_gfx { struct amdgpu_irq_src priv_inst_irq; struct amdgpu_irq_src cp_ecc_error_irq; struct amdgpu_irq_src sq_irq; + struct amdgpu_irq_src rlc_gc_fed_irq; struct sq_work sq_work; /* gfx status */ @@ -432,4 +438,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev); void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id); +int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev); +int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index fcb711a11a5b..3f07b1a2ce47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -497,6 +497,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, !--id_mgr->reserved_use_count) { /* give the reserved ID back to normal round robin */ list_add(&id_mgr->reserved->list, &id_mgr->ids_lru); + id_mgr->reserved = NULL; } vm->reserved_vmid[vmhub] = false; mutex_unlock(&id_mgr->lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 9e549923622b..c3d9d75143f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -161,8 +161,14 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) struct dma_fence *f; unsigned i; - /* use sched fence if available */ - f = job->base.s_fence ? 
&job->base.s_fence->finished : &job->hw_fence; + /* Check if any fences where initialized */ + if (job->base.s_fence && job->base.s_fence->finished.ops) + f = &job->base.s_fence->finished; + else if (job->hw_fence.ops) + f = &job->hw_fence; + else + f = NULL; + for (i = 0; i < job->num_ibs; ++i) amdgpu_ib_free(ring->adev, &job->ibs[i], f); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 2947159d7d78..ca945055e683 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -43,6 +43,7 @@ #include "amdgpu_gem.h" #include "amdgpu_display.h" #include "amdgpu_ras.h" +#include "amd_pcie.h" void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev) { @@ -767,6 +768,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) case AMDGPU_INFO_DEV_INFO: { struct drm_amdgpu_info_device *dev_info; uint64_t vm_size; + uint32_t pcie_gen_mask; int ret; dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); @@ -799,7 +801,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se * adev->gfx.config.max_shader_engines; dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; - dev_info->_pad = 0; dev_info->ids_flags = 0; if (adev->flags & AMD_IS_APU) dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION; @@ -853,6 +854,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask; + /* Combine the chip gen mask with the platform (CPU/mobo) mask. */ + pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16); + dev_info->pcie_gen = fls(pcie_gen_mask); + dev_info->pcie_num_lanes = + adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 : + adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 : + adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 : + adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 : + adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 : + adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1; + ret = copy_to_user(out, dev_info, min((size_t)size, sizeof(*dev_info))) ? 
-EFAULT : 0; kfree(dev_info); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 25a68d8888e0..981010de0a28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1574,9 +1574,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) attachment = READ_ONCE(bo->tbo.base.import_attach); if (attachment) - seq_printf(m, " imported from %p", dma_buf); + seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino); else if (dma_buf) - seq_printf(m, " exported as %p", dma_buf); + seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino); amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED); amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 0b59465b1494..a8391f269cd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -336,7 +336,7 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) { /* runtime db doesn't exist, exit */ - dev_warn(adev->dev, "PSP runtime database doesn't exist\n"); + dev_dbg(adev->dev, "PSP runtime database doesn't exist\n"); return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index d06beb884a16..6e543558386d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -920,9 +920,6 @@ static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_de if (block >= AMDGPU_RAS_BLOCK__LAST) return NULL; - if (!amdgpu_ras_is_supported(adev, block)) - return NULL; - list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { if (!node->ras_obj) { dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); @@ -1620,14 +1617,14 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager * struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, obj->head.block, 0); - if (!block_obj || !block_obj->hw_ops) + if (!block_obj) return; /* both query_poison_status and handle_poison_consumption are optional, * but at least one of them should be implemented if we need poison * consumption handler */ - if (block_obj->hw_ops->query_poison_status) { + if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) { poison_stat = block_obj->hw_ops->query_poison_status(adev); if (!poison_stat) { /* Not poison consumption interrupt, no need to handle it */ @@ -1641,7 +1638,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager * if (!adev->gmc.xgmi.connected_to_cpu) amdgpu_umc_poison_handler(adev, false); - if (block_obj->hw_ops->handle_poison_consumption) + if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption) poison_stat = block_obj->hw_ops->handle_poison_consumption(adev); /* gpu reset is fallback for failed and default cases */ @@ -1649,6 +1646,8 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager * dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n", block_obj->ras_comm.name); amdgpu_ras_reset_gpu(adev); + } else { + amdgpu_gfx_poison_consumption_handler(adev, entry); } } @@ -3023,11 +3022,26 @@ int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_co int amdgpu_ras_is_supported(struct amdgpu_device *adev, unsigned int block) { + int ret = 0; struct amdgpu_ras 
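The AMDGPU_INFO_DEV_INFO change above (the 3.51.0 interface bump) derives pcie_gen and pcie_num_lanes from adev->pm.pcie_gen_mask and adev->pm.pcie_mlw_mask. Below is a minimal standalone sketch of that derivation, assuming the gen mask packs ASIC capabilities in the low 16 bits and platform (CPU/mobo) capabilities in the upper 16 bits; the LINK_WIDTH_SUPPORT_* bit positions are placeholders for the real CAIL_PCIE_LINK_WIDTH_SUPPORT_* values in amd_pcie.h, and the helper names are illustrative only.

#include <stdio.h>

/* Placeholder link-width capability bits; the real definitions live in
 * amd_pcie.h and may use different positions. */
#define LINK_WIDTH_SUPPORT_X2   (1u << 1)
#define LINK_WIDTH_SUPPORT_X4   (1u << 2)
#define LINK_WIDTH_SUPPORT_X8   (1u << 3)
#define LINK_WIDTH_SUPPORT_X12  (1u << 4)
#define LINK_WIDTH_SUPPORT_X16  (1u << 5)
#define LINK_WIDTH_SUPPORT_X32  (1u << 6)

/* find-last-set, mirroring the kernel's fls(): 1-based bit index, 0 for 0 */
static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Intersect the ASIC gen capabilities (low half) with the platform
 * capabilities (high half) and report the highest common generation. */
static unsigned int query_pcie_gen(unsigned int pcie_gen_mask)
{
	return fls32(pcie_gen_mask & (pcie_gen_mask >> 16));
}

/* Same priority ladder as the new AMDGPU_INFO_DEV_INFO code: widest
 * supported link width wins, with x1 as the fallback. */
static unsigned int query_pcie_lanes(unsigned int mlw_mask)
{
	return mlw_mask & LINK_WIDTH_SUPPORT_X32 ? 32 :
	       mlw_mask & LINK_WIDTH_SUPPORT_X16 ? 16 :
	       mlw_mask & LINK_WIDTH_SUPPORT_X12 ? 12 :
	       mlw_mask & LINK_WIDTH_SUPPORT_X8  ?  8 :
	       mlw_mask & LINK_WIDTH_SUPPORT_X4  ?  4 :
	       mlw_mask & LINK_WIDTH_SUPPORT_X2  ?  2 : 1;
}

int main(void)
{
	/* e.g. ASIC supports gen 1-4 (bits 0-3), platform gen 1-3 (bits 16-18) */
	unsigned int gen_mask = 0x0007000fu;
	unsigned int mlw_mask = LINK_WIDTH_SUPPORT_X16 | LINK_WIDTH_SUPPORT_X8;

	printf("pcie_gen=%u pcie_num_lanes=%u\n",
	       query_pcie_gen(gen_mask), query_pcie_lanes(mlw_mask));
	return 0;
}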
*ras = amdgpu_ras_get_context(adev); if (block >= AMDGPU_RAS_BLOCK_COUNT) return 0; - return ras && (adev->ras_enabled & (1 << block)); + + ret = ras && (adev->ras_enabled & (1 << block)); + + /* For the special asic with mem ecc enabled but sram ecc + * not enabled, even if the ras block is not supported on + * .ras_enabled, if the asic supports poison mode and the + * ras block has ras configuration, it can be considered + * that the ras block supports ras function. + */ + if (!ret && + amdgpu_ras_is_poison_mode_supported(adev) && + amdgpu_ras_get_ras_block(adev, block, 0)) + ret = 1; + + return ret; } int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index e9b78739b9ff..231ca06bc9c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -305,3 +305,38 @@ void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev) } } } + +int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev) +{ + int err = 0; + struct amdgpu_sdma_ras *ras = NULL; + + /* adev->sdma.ras is NULL, which means sdma does not + * support ras function, then do nothing here. + */ + if (!adev->sdma.ras) + return 0; + + ras = adev->sdma.ras; + + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register sdma ras block!\n"); + return err; + } + + strcpy(ras->ras_block.ras_comm.name, "sdma"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->sdma.ras_if = &ras->ras_block.ras_comm; + + /* If not define special ras_late_init function, use default ras_late_init */ + if (!ras->ras_block.ras_late_init) + ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init; + + /* If not defined special ras_cb function, use default ras_cb */ + if (!ras->ras_block.ras_cb) + ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 2d16e6d36728..fc8528812598 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -129,5 +129,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance, void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev, bool duplicate); void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev); +int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 677ad2016976..98d91ebf5c26 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -153,10 +153,10 @@ TRACE_EVENT(amdgpu_cs, TP_fast_assign( __entry->bo_list = p->bo_list; - __entry->ring = to_amdgpu_ring(job->base.sched)->idx; + __entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx; __entry->dw = ib->length_dw; __entry->fences = amdgpu_fence_count_emitted( - to_amdgpu_ring(job->base.sched)); + to_amdgpu_ring(job->base.entity->rq->sched)); ), TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", __entry->bo_list, __entry->ring, __entry->dw, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 47549d659d9b..380b89114341 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -1072,7 +1072,6 @@ static const char 
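The reworked amdgpu_ras_is_supported() above boils down to the predicate sketched below; the struct and helper names are illustrative stand-ins for the driver state it consults, not the real API.

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the state amdgpu_ras_is_supported() examines. */
struct ras_state {
	bool     have_ras_context;      /* amdgpu_ras_get_context(adev) != NULL       */
	uint32_t ras_enabled;           /* adev->ras_enabled bitmap                    */
	bool     poison_mode_supported; /* amdgpu_ras_is_poison_mode_supported(adev)   */
	bool     block_registered;      /* amdgpu_ras_get_ras_block(adev, block, 0)    */
};

static bool ras_block_supported(const struct ras_state *s, unsigned int block)
{
	bool supported = s->have_ras_context &&
			 (s->ras_enabled & (1u << block));

	/* Special case from the hunk above: with memory ECC enabled but SRAM
	 * ECC disabled, the block is absent from ras_enabled, yet if the ASIC
	 * supports poison mode and the block registered a RAS object it is
	 * still treated as supported. */
	if (!supported && s->poison_mode_supported && s->block_registered)
		supported = true;

	return supported;
}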
*amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl default: return NULL; } - break; case IP_VERSION(10, 0, 0): case IP_VERSION(10, 0, 1): if (adev->asic_type == CHIP_RAVEN) { @@ -1087,6 +1086,8 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl return "navi10"; case IP_VERSION(11, 0, 2): return "vega20"; + case IP_VERSION(11, 0, 3): + return "renoir"; case IP_VERSION(11, 0, 4): return "arcturus"; case IP_VERSION(11, 0, 5): @@ -1104,12 +1105,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl case IP_VERSION(11, 5, 0): return "vangogh"; case IP_VERSION(12, 0, 1): - if (adev->asic_type == CHIP_RENOIR) { - if (adev->apu_flags & AMD_APU_IS_RENOIR) - return "renoir"; - return "green_sardine"; - } - break; + return "green_sardine"; case IP_VERSION(13, 0, 2): return "aldebaran"; case IP_VERSION(13, 0, 1): diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index a7eae84c7bf7..25217b05c0ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -36,26 +36,26 @@ #include "soc15d.h" /* Firmware Names */ -#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin" -#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin" -#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin" -#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin" -#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin" -#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin" -#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin" -#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin" -#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin" -#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin" -#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin" -#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin" +#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin" +#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin" +#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin" +#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin" +#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin" +#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin" +#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin" +#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin" +#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin" +#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin" +#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin" +#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin" #define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin" -#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin" -#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin" -#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin" -#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin" -#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin" -#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin" -#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin" +#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin" +#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin" +#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin" +#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin" +#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin" +#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin" +#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); MODULE_FIRMWARE(FIRMWARE_PICASSO); @@ -110,84 +110,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; i++) atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); - switch (adev->ip_versions[UVD_HWIP][0]) { - 
case IP_VERSION(1, 0, 0): - case IP_VERSION(1, 0, 1): - case IP_VERSION(2, 5, 0): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(2, 2, 0): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(2, 6, 0): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(2, 0, 0): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(2, 0, 2): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(3, 0, 0): - case IP_VERSION(3, 0, 64): - case IP_VERSION(3, 0, 192): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(3, 0, 2): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(3, 0, 16): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(3, 0, 33): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(3, 1, 1): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(3, 1, 2): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(4, 0, 0): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(4, 0, 2): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case IP_VERSION(4, 0, 4): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - default: - return -EINVAL; - } + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; hdr = (const struct common_firmware_header *)adev->vcn.fw->data; adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 4b9e7b050ccd..4340d08f7607 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -29,13 +29,16 @@ #include "df/df_3_6_offset.h" #include "xgmi/xgmi_4_0_0_smn.h" #include "xgmi/xgmi_4_0_0_sh_mask.h" +#include "xgmi/xgmi_6_1_0_sh_mask.h" #include "wafl/wafl2_4_0_0_smn.h" #include "wafl/wafl2_4_0_0_sh_mask.h" #include "amdgpu_reset.h" #define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c +#define smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK 0x11a00218 #define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210 +#define smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK 
0x12200218 static DEFINE_MUTEX(xgmi_mutex); @@ -79,11 +82,27 @@ static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = { smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000 }; +static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[] = { + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x200000, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x300000, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x400000, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x500000, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x600000, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x700000 +}; + static const int walf_pcs_err_status_reg_aldebaran[] = { smnPCS_GOPX1_PCS_ERROR_STATUS, smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000 }; +static const int walf_pcs_err_noncorrectable_mask_reg_aldebaran[] = { + smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK, + smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000 +}; + static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = { {"XGMI PCS DataLossErr", SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)}, @@ -162,6 +181,67 @@ static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = { SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, }; +static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = { + {"XGMI3X16 PCS DataLossErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataLossErr)}, + {"XGMI3X16 PCS TrainingErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TrainingErr)}, + {"XGMI3X16 PCS FlowCtrlAckErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlAckErr)}, + {"XGMI3X16 PCS RxFifoUnderflowErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoUnderflowErr)}, + {"XGMI3X16 PCS RxFifoOverflowErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoOverflowErr)}, + {"XGMI3X16 PCS CRCErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, CRCErr)}, + {"XGMI3X16 PCS BERExceededErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, BERExceededErr)}, + {"XGMI3X16 PCS TxVcidDataErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxVcidDataErr)}, + {"XGMI3X16 PCS ReplayBufParityErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayBufParityErr)}, + {"XGMI3X16 PCS DataParityErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataParityErr)}, + {"XGMI3X16 PCS ReplayFifoOverflowErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)}, + {"XGMI3X16 PCS ReplayFifoUnderflowErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)}, + {"XGMI3X16 PCS ElasticFifoOverflowErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)}, + {"XGMI3X16 PCS DeskewErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DeskewErr)}, + {"XGMI3X16 PCS FlowCtrlCRCErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlCRCErr)}, + {"XGMI3X16 PCS DataStartupLimitErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataStartupLimitErr)}, + {"XGMI3X16 PCS FCInitTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FCInitTimeoutErr)}, + {"XGMI3X16 PCS RecoveryTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryTimeoutErr)}, + {"XGMI3X16 PCS ReadySerialTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)}, + {"XGMI3X16 PCS ReadySerialAttemptErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialAttemptErr)}, + {"XGMI3X16 PCS 
RecoveryAttemptErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryAttemptErr)}, + {"XGMI3X16 PCS RecoveryRelockAttemptErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, + {"XGMI3X16 PCS ReplayAttemptErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayAttemptErr)}, + {"XGMI3X16 PCS SyncHdrErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, SyncHdrErr)}, + {"XGMI3X16 PCS TxReplayTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxReplayTimeoutErr)}, + {"XGMI3X16 PCS RxReplayTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxReplayTimeoutErr)}, + {"XGMI3X16 PCS LinkSubTxTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubTxTimeoutErr)}, + {"XGMI3X16 PCS LinkSubRxTimeoutErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubRxTimeoutErr)}, + {"XGMI3X16 PCS RxCMDPktErr", + SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)}, +}; + /** * DOC: AMDGPU XGMI Support * @@ -809,39 +889,47 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev) static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, uint32_t value, + uint32_t mask_value, uint32_t *ue_count, uint32_t *ce_count, - bool is_xgmi_pcs) + bool is_xgmi_pcs, + bool check_mask) { int i; - int ue_cnt; + int ue_cnt = 0; + const struct amdgpu_pcs_ras_field *pcs_ras_fields = NULL; + uint32_t field_array_size = 0; if (is_xgmi_pcs) { - /* query xgmi pcs error status, - * only ue is supported */ - for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i ++) { - ue_cnt = (value & - xgmi_pcs_ras_fields[i].pcs_err_mask) >> - xgmi_pcs_ras_fields[i].pcs_err_shift; - if (ue_cnt) { - dev_info(adev->dev, "%s detected\n", - xgmi_pcs_ras_fields[i].err_name); - *ue_count += ue_cnt; - } + if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) { + pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0]; + field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields); + } else { + pcs_ras_fields = &xgmi_pcs_ras_fields[0]; + field_array_size = ARRAY_SIZE(xgmi_pcs_ras_fields); } } else { - /* query wafl pcs error status, - * only ue is supported */ - for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) { - ue_cnt = (value & - wafl_pcs_ras_fields[i].pcs_err_mask) >> - wafl_pcs_ras_fields[i].pcs_err_shift; - if (ue_cnt) { - dev_info(adev->dev, "%s detected\n", - wafl_pcs_ras_fields[i].err_name); - *ue_count += ue_cnt; - } + pcs_ras_fields = &wafl_pcs_ras_fields[0]; + field_array_size = ARRAY_SIZE(wafl_pcs_ras_fields); + } + + if (check_mask) + value = value & ~mask_value; + + /* query xgmi/walf pcs error status, + * only ue is supported */ + for (i = 0; value && i < field_array_size; i++) { + ue_cnt = (value & + pcs_ras_fields[i].pcs_err_mask) >> + pcs_ras_fields[i].pcs_err_shift; + if (ue_cnt) { + dev_info(adev->dev, "%s detected\n", + pcs_ras_fields[i].err_name); + *ue_count += ue_cnt; } + + /* reset bit value if the bit is checked */ + value &= ~(pcs_ras_fields[i].pcs_err_mask); } return 0; @@ -852,7 +940,7 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; int i; - uint32_t data; + uint32_t data, mask_data = 0; uint32_t ue_cnt = 0, ce_cnt = 0; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL)) @@ -867,15 +955,15 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) { data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]); if (data) - 
amdgpu_xgmi_query_pcs_error_status(adev, - data, &ue_cnt, &ce_cnt, true); + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, true, false); } /* check wafl pcs error */ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) { data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]); if (data) - amdgpu_xgmi_query_pcs_error_status(adev, - data, &ue_cnt, &ce_cnt, false); + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, false, false); } break; case CHIP_VEGA20: @@ -883,31 +971,35 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) { data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]); if (data) - amdgpu_xgmi_query_pcs_error_status(adev, - data, &ue_cnt, &ce_cnt, true); + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, true, false); } /* check wafl pcs error */ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) { data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]); if (data) - amdgpu_xgmi_query_pcs_error_status(adev, - data, &ue_cnt, &ce_cnt, false); + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, false, false); } break; case CHIP_ALDEBARAN: /* check xgmi3x16 pcs error */ for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) { data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]); + mask_data = + RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[i]); if (data) - amdgpu_xgmi_query_pcs_error_status(adev, - data, &ue_cnt, &ce_cnt, true); + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, true, true); } /* check wafl pcs error */ for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) { data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]); + mask_data = + RREG32_PCIE(walf_pcs_err_noncorrectable_mask_reg_aldebaran[i]); if (data) - amdgpu_xgmi_query_pcs_error_status(adev, - data, &ue_cnt, &ce_cnt, false); + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, false, true); } break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 985fe704203e..c621b2ad7ba3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -46,6 +46,7 @@ #include "clearstate_gfx11.h" #include "v11_structs.h" #include "gfx_v11_0.h" +#include "gfx_v11_0_3.h" #include "nbio_v4_3.h" #include "mes_v11_0.h" @@ -815,7 +816,14 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev) switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 2): + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + break; case IP_VERSION(11, 0, 3): + adev->gfx.ras = &gfx_v11_0_3_ras; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -1251,10 +1259,8 @@ static int gfx_v11_0_sw_init(void *handle) switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(11, 0, 0): - case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): - case IP_VERSION(11, 0, 4): adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; @@ -1262,6 +1268,15 @@ static int 
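The amdgpu_xgmi_query_pcs_error_status() rework above selects the field table by XGMI IP version and, on Aldebaran, filters the status value with the per-link *_ERROR_NONCORRECTABLE_MASK register before counting. A compact sketch of that scan follows; the two-entry field table and its bit layout are made up for illustration and merely stand in for the xgmi3x16/xgmi/wafl tables.

#include <stdint.h>
#include <stdio.h>

struct pcs_ras_field {
	const char *err_name;
	uint32_t    mask;   /* pcs_err_mask  */
	uint32_t    shift;  /* pcs_err_shift */
};

/* Hypothetical two-field table; the driver picks the real table based on
 * the XGMI IP version and whether the link is XGMI or WAFL. */
static const struct pcs_ras_field fields[] = {
	{ "DataLossErr", 0x00000001u, 0 },
	{ "CRCErr",      0x00000020u, 5 },
};

static uint32_t count_pcs_ue(uint32_t status, uint32_t mask_value, int check_mask)
{
	uint32_t ue_count = 0;
	size_t i;

	/* On ASICs that provide a mask register (check_mask true), bits set in
	 * it are dropped from the status before counting, as in the hunk above. */
	if (check_mask)
		status &= ~mask_value;

	for (i = 0; status && i < sizeof(fields) / sizeof(fields[0]); i++) {
		uint32_t cnt = (status & fields[i].mask) >> fields[i].shift;

		if (cnt) {
			printf("%s detected\n", fields[i].err_name);
			ue_count += cnt;
		}
		/* clear the checked bit so the loop can terminate early */
		status &= ~fields[i].mask;
	}
	return ue_count;
}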
gfx_v11_0_sw_init(void *handle) adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 4; break; + case IP_VERSION(11, 0, 1): + case IP_VERSION(11, 0, 4): + adev->gfx.me.num_me = 1; + adev->gfx.me.num_pipe_per_me = 1; + adev->gfx.me.num_queue_per_pipe = 1; + adev->gfx.mec.num_mec = 1; + adev->gfx.mec.num_pipe_per_mec = 4; + adev->gfx.mec.num_queue_per_pipe = 4; + break; default: adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; @@ -1293,6 +1308,20 @@ static int gfx_v11_0_sw_init(void *handle) if (r) return r; + /* ECC error */ + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, + GFX_11_0_0__SRCID__CP_ECC_ERROR, + &adev->gfx.cp_ecc_error_irq); + if (r) + return r; + + /* FED error */ + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, + GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT, + &adev->gfx.rlc_gc_fed_irq); + if (r) + return r; + adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; if (adev->gfx.imu.funcs) { @@ -1380,6 +1409,11 @@ static int gfx_v11_0_sw_init(void *handle) if (r) return r; + if (amdgpu_gfx_ras_sw_init(adev)) { + dev_err(adev->dev, "Failed to initialize gfx ras block!\n"); + return -EINVAL; + } + return 0; } @@ -4372,6 +4406,7 @@ static int gfx_v11_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; + amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -5803,6 +5838,36 @@ static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev } } +#define CP_ME1_PIPE_INST_ADDR_INTERVAL 0x1 +#define SET_ECC_ME_PIPE_STATE(reg_addr, state) \ + do { \ + uint32_t tmp = RREG32_SOC15_IP(GC, reg_addr); \ + tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, state); \ + WREG32_SOC15_IP(GC, reg_addr, tmp); \ + } while (0) + +static int gfx_v11_0_set_cp_ecc_error_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + uint32_t ecc_irq_state = 0; + uint32_t pipe0_int_cntl_addr = 0; + int i = 0; + + ecc_irq_state = (state == AMDGPU_IRQ_STATE_ENABLE) ? 
1 : 0; + + pipe0_int_cntl_addr = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); + + WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, ecc_irq_state); + + for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) + SET_ECC_ME_PIPE_STATE(pipe0_int_cntl_addr + i * CP_ME1_PIPE_INST_ADDR_INTERVAL, + ecc_irq_state); + + return 0; +} + static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -5979,6 +6044,16 @@ static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev, return 0; } +static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq) + return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry); + + return 0; +} + #if 0 static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, @@ -6209,6 +6284,15 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = { .process = gfx_v11_0_priv_inst_irq, }; +static const struct amdgpu_irq_src_funcs gfx_v11_0_cp_ecc_error_irq_funcs = { + .set = gfx_v11_0_set_cp_ecc_error_state, + .process = amdgpu_gfx_cp_ecc_error_irq, +}; + +static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = { + .process = gfx_v11_0_rlc_gc_fed_irq, +}; + static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) { adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; @@ -6219,6 +6303,13 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs; + + adev->gfx.cp_ecc_error_irq.num_types = 1; /* CP ECC error */ + adev->gfx.cp_ecc_error_irq.funcs = &gfx_v11_0_cp_ecc_error_irq_funcs; + + adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */ + adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs; + } static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c new file mode 100644 index 000000000000..b07a72ca25d9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c @@ -0,0 +1,88 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "amdgpu.h" +#include "soc21.h" +#include "gc/gc_11_0_3_offset.h" +#include "gc/gc_11_0_3_sh_mask.h" +#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h" +#include "soc15.h" +#include "soc15d.h" +#include "gfx_v11_0.h" + + +static int gfx_v11_0_3_rlc_gc_fed_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t rlc_status0 = 0, rlc_status1 = 0; + struct ras_common_if *ras_if = NULL; + struct ras_dispatch_if ih_data = { + .entry = entry, + }; + + rlc_status0 = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_RLCS_FED_STATUS_0)); + rlc_status1 = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_RLCS_FED_STATUS_1)); + + if (!rlc_status0 && !rlc_status1) { + dev_warn(adev->dev, "RLC_GC_FED irq is generated, but rlc_status0 and rlc_status1 are empty!\n"); + return 0; + } + + /* Use RLC_RLCS_FED_STATUS_0/1 to distinguish FED error block. */ + if (REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA0_FED_ERR) || + REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA1_FED_ERR)) + ras_if = adev->sdma.ras_if; + else + ras_if = adev->gfx.ras_if; + + if (!ras_if) { + dev_err(adev->dev, "Gfx or sdma ras block not initialized, rlc_status0:0x%x.\n", + rlc_status0); + return -EINVAL; + } + + ih_data.head = *ras_if; + + dev_warn(adev->dev, "RLC %s FED IRQ\n", ras_if->name); + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + + return 0; +} + +static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + /* Workaround: when vmid and pasid are both zero, trigger gpu reset in KGD. */ + if (entry && (entry->client_id == SOC21_IH_CLIENTID_GFX) && + (entry->src_id == GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT) && + !entry->vmid && !entry->pasid) + amdgpu_ras_reset_gpu(adev); + + return 0; +} + +struct amdgpu_gfx_ras gfx_v11_0_3_ras = { + .rlc_gc_fed_irq = gfx_v11_0_3_rlc_gc_fed_irq, + .poison_consumption_handler = gfx_v11_0_3_poison_consumption_handler, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h new file mode 100644 index 000000000000..672c7920b3d0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __GFX_V11_0_3_H__ +#define __GFX_V11_0_3_H__ + +extern struct amdgpu_gfx_ras gfx_v11_0_3_ras; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 4fb577d047fd..b1f2684d854a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1003,7 +1003,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); if (err == -ENODEV) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); - err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); } } else { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index e80685d1e6c6..8ad5c03506f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1345,7 +1345,7 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name); if (err) - return err; + goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1); amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT); @@ -1355,13 +1355,14 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, else snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); + /* ignore failures to load */ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name); if (!err) { amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2); amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT); } else { err = 0; - adev->gfx.mec2_fw = NULL; + amdgpu_ucode_release(&adev->gfx.mec2_fw); } } else { adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version; @@ -1370,10 +1371,10 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, gfx_v9_0_check_if_need_gfxoff(adev); gfx_v9_0_check_fw_write_wait(adev); - if (err) { + +out: + if (err) amdgpu_ucode_release(&adev->gfx.mec_fw); - amdgpu_ucode_release(&adev->gfx.mec2_fw); - } return err; } @@ -1935,27 +1936,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) break; } - if (adev->gfx.ras) { - err = amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block); - if (err) { - DRM_ERROR("Failed to register gfx ras block!\n"); - return err; - } - - strcpy(adev->gfx.ras->ras_block.ras_comm.name, "gfx"); - adev->gfx.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX; - adev->gfx.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - adev->gfx.ras_if = &adev->gfx.ras->ras_block.ras_comm; - - /* If not define special ras_late_init function, use gfx default ras_late_init */ - if (!adev->gfx.ras->ras_block.ras_late_init) - adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init; - - /* If not defined special ras_cb function, use default ras_cb */ - if (!adev->gfx.ras->ras_block.ras_cb) - adev->gfx.ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb; - } - adev->gfx.config.gb_addr_config = gb_addr_config; adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << @@ -2197,6 +2177,11 @@ static int gfx_v9_0_sw_init(void *handle) if (r) return r; + if (amdgpu_gfx_ras_sw_init(adev)) { + dev_err(adev->dev, "Failed to initialize gfx ras block!\n"); + return -EINVAL; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c index 
e9dcd6fcde7f..ae9cd1a4cfee 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c @@ -169,23 +169,23 @@ static void mmhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev) uint64_t value; uint32_t tmp; - if (!amdgpu_sriov_vf(adev)) { - /* - * the new L1 policy will block SRIOV guest from writing - * these regs, and they will be programed at host. - * so skip programing these regs. - */ - /* Disable AGP. */ - WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0); - WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0); - WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF); - - /* Program the system aperture low logical page number. */ - WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); - WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); - } + if (amdgpu_sriov_vf(adev)) + return; + + /* + * the new L1 policy will block SRIOV guest from writing + * these regs, and they will be programed at host. + * so skip programing these regs. + */ + /* Disable AGP. */ + WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF); + /* Program the system aperture low logical page number. */ + WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, + adev->gmc.vram_start >> 18); + WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + adev->gmc.vram_end >> 18); /* Set default page address. */ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start + diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 6853b93ac82e..d972025f0d20 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -98,7 +98,7 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = }; /* Sienna Cichlid */ -static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] = +static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, @@ -110,10 +110,27 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] = {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; -static const struct amdgpu_video_codecs sc_video_codecs_decode = +static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = { - .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array), - .codec_array = sc_video_codecs_decode_array, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn0 = +{ + .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn0), + .codec_array = sc_video_codecs_decode_array_vcn0, +}; + +static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = +{ + .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn1), + .codec_array = 
sc_video_codecs_decode_array_vcn1, }; /* SRIOV Sienna Cichlid, not const since data is controlled by host */ @@ -123,7 +140,7 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, }; -static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] = +static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, @@ -135,16 +152,33 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] = {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; +static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, +}; + static struct amdgpu_video_codecs sriov_sc_video_codecs_encode = { .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array), .codec_array = sriov_sc_video_codecs_encode_array, }; -static struct amdgpu_video_codecs sriov_sc_video_codecs_decode = +static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn0 = { - .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array), - .codec_array = sriov_sc_video_codecs_decode_array, + .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0), + .codec_array = sriov_sc_video_codecs_decode_array_vcn0, +}; + +static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn1 = +{ + .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1), + .codec_array = sriov_sc_video_codecs_decode_array_vcn1, }; /* Beige Goby*/ @@ -181,20 +215,37 @@ static const struct amdgpu_video_codecs yc_video_codecs_decode = { static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { + if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config)) + return -EINVAL; + switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 64): case IP_VERSION(3, 0, 192): if (amdgpu_sriov_vf(adev)) { - if (encode) - *codecs = &sriov_sc_video_codecs_encode; - else - *codecs = &sriov_sc_video_codecs_decode; + if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { + if (encode) + *codecs = &sriov_sc_video_codecs_encode; + else + *codecs = &sriov_sc_video_codecs_decode_vcn1; + } else { + if (encode) + *codecs = &sriov_sc_video_codecs_encode; + else + *codecs = &sriov_sc_video_codecs_decode_vcn0; + } } else { - if (encode) - *codecs = &nv_video_codecs_encode; - else - *codecs = &sc_video_codecs_decode; + if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { + if (encode) + *codecs = &nv_video_codecs_encode; + else + *codecs = &sc_video_codecs_decode_vcn1; + } else { + if (encode) + *codecs = &nv_video_codecs_encode; + else + *codecs = &sc_video_codecs_decode_vcn0; + } } return 0; case IP_VERSION(3, 0, 16): @@ -202,7 +253,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, 
bool encode, if (encode) *codecs = &nv_video_codecs_encode; else - *codecs = &sc_video_codecs_decode; + *codecs = &sc_video_codecs_decode_vcn0; return 0; case IP_VERSION(3, 1, 1): case IP_VERSION(3, 1, 2): @@ -993,9 +1044,19 @@ static int nv_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_get_irq(adev); - amdgpu_virt_update_sriov_video_codec(adev, - sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array), - sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array)); + if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { + amdgpu_virt_update_sriov_video_codec(adev, + sriov_sc_video_codecs_encode_array, + ARRAY_SIZE(sriov_sc_video_codecs_encode_array), + sriov_sc_video_codecs_decode_array_vcn1, + ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1)); + } else { + amdgpu_virt_update_sriov_video_codec(adev, + sriov_sc_video_codecs_encode_array, + ARRAY_SIZE(sriov_sc_video_codecs_encode_array), + sriov_sc_video_codecs_decode_array_vcn1, + ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1)); + } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 017ae298558e..b5affba22156 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1851,6 +1851,11 @@ static int sdma_v4_0_sw_init(void *handle) } } + if (amdgpu_sdma_ras_sw_init(adev)) { + dev_err(adev->dev, "Failed to initialize sdma ras block!\n"); + return -EINVAL; + } + return r; } @@ -2688,22 +2693,6 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev) break; } - if (adev->sdma.ras) { - amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block); - - strcpy(adev->sdma.ras->ras_block.ras_comm.name, "sdma"); - adev->sdma.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA; - adev->sdma.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - adev->sdma.ras_if = &adev->sdma.ras->ras_block.ras_comm; - - /* If don't define special ras_late_init function, use default ras_late_init */ - if (!adev->sdma.ras->ras_block.ras_late_init) - adev->sdma.ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init; - - /* If not defined special ras_cb function, use default ras_cb */ - if (!adev->sdma.ras->ras_block.ras_cb) - adev->sdma.ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb; - } } const struct amdgpu_ip_block_version sdma_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index bf1fa5e8d2f9..3d36329be384 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -1211,6 +1211,24 @@ static void sdma_v6_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } +static struct amdgpu_sdma_ras sdma_v6_0_3_ras = { + .ras_block = { + .ras_late_init = amdgpu_ras_block_late_init, + }, +}; + +static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(6, 0, 3): + adev->sdma.ras = &sdma_v6_0_3_ras; + break; + default: + break; + } + +} + static int sdma_v6_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1220,6 +1238,7 @@ static int sdma_v6_0_early_init(void *handle) sdma_v6_0_set_vm_pte_funcs(adev); sdma_v6_0_set_irq_funcs(adev); sdma_v6_0_set_mqd_funcs(adev); + sdma_v6_0_set_ras_funcs(adev); return 0; } @@ -1264,6 +1283,11 @@ static int sdma_v6_0_sw_init(void *handle) return 
r; } + if (amdgpu_sdma_ras_sw_init(adev)) { + dev_err(adev->dev, "Failed to initialize sdma ras block!\n"); + return -EINVAL; + } + return r; } @@ -1403,10 +1427,12 @@ static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev, u32 reg_offset = sdma_v6_0_get_reg_offset(adev, type, regSDMA0_CNTL); - sdma_cntl = RREG32(reg_offset); - sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); - WREG32(reg_offset, sdma_cntl); + if (!amdgpu_sriov_vf(adev)) { + sdma_cntl = RREG32(reg_offset); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32(reg_offset, sdma_cntl); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 5562670b7b52..9c4a29d50f1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -48,19 +48,31 @@ static const struct amd_ip_funcs soc21_common_ip_funcs; /* SOC21 */ -static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] = +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, }; -static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode = +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = { - .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array), - .codec_array = vcn_4_0_0_video_codecs_encode_array, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, +}; + +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = +{ + .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0), + .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0, }; -static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] = +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 = +{ + .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1), + .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1, +}; + +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, @@ -69,23 +81,46 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; -static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode = +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 = { - .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array), - .codec_array = vcn_4_0_0_video_codecs_decode_array, + .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0), + .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0, +}; + +static 
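The sdma_v6_0 hunk above skips the SDMA0_CNTL read-modify-write when running as an SR-IOV virtual function, leaving trap-interrupt enablement to the host. A minimal sketch of that guard pattern, with a plain variable standing in for the MMIO register.

```c
/* Model of "a VF must not program this register" guarding.
 * The variable stands in for MMIO; names are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define TRAP_ENABLE_BIT (1u << 0)

static uint32_t fake_sdma_cntl;	/* stand-in for SDMA0_CNTL */

static void set_trap_irq_state(bool is_vf, bool enable)
{
	if (is_vf)
		return;		/* host (PF) owns interrupt enablement */

	uint32_t v = fake_sdma_cntl;		/* RREG32 */
	if (enable)
		v |= TRAP_ENABLE_BIT;
	else
		v &= ~TRAP_ENABLE_BIT;
	fake_sdma_cntl = v;			/* WREG32 */
}

int main(void)
{
	set_trap_irq_state(true, true);
	printf("as VF : cntl=0x%x (untouched)\n", fake_sdma_cntl);
	set_trap_irq_state(false, true);
	printf("as PF : cntl=0x%x\n", fake_sdma_cntl);
	return 0;
}
```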
const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = +{ + .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1), + .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1, }; static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { - switch (adev->ip_versions[UVD_HWIP][0]) { + if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config)) + return -EINVAL; + switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(4, 0, 0): case IP_VERSION(4, 0, 2): - if (encode) - *codecs = &vcn_4_0_0_video_codecs_encode; - else - *codecs = &vcn_4_0_0_video_codecs_decode; + if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { + if (encode) + *codecs = &vcn_4_0_0_video_codecs_encode_vcn1; + else + *codecs = &vcn_4_0_0_video_codecs_decode_vcn1; + } else { + if (encode) + *codecs = &vcn_4_0_0_video_codecs_encode_vcn0; + else + *codecs = &vcn_4_0_0_video_codecs_decode_vcn0; + } return 0; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index 72fd963f178b..e08e25a3a1a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -57,13 +57,6 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev, return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst; } -static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev, - uint32_t umc_inst, - uint32_t ch_inst) -{ - return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; -} - static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev, uint64_t mc_umc_status, uint32_t umc_reg_offset) { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index bd228512424a..66439388faee 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -1771,6 +1771,10 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p, if (atomic_read(&job->base.entity->fence_seq)) return -EINVAL; + /* if VCN0 is harvested, we can't support AV1 */ + if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) + return -EINVAL; + scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC] [AMDGPU_RING_PRIO_DEFAULT].sched; drm_sched_entity_modify_sched(job->base.entity, scheds, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index a79b6088374b..efb22d0975b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -1632,6 +1632,10 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p, if (atomic_read(&job->base.entity->fence_seq)) return -EINVAL; + /* if VCN0 is harvested, we can't support AV1 */ + if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) + return -EINVAL; + scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC] [AMDGPU_RING_PRIO_0].sched; drm_sched_entity_modify_sched(job->base.entity, scheds, 1); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c index d119070956fb..8b2dd2670ab7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -59,30 +59,27 @@ static int update_qpd_v9(struct device_queue_manager *dqm, /* check if sh_mem_config register already configured */ if (qpd->sh_mem_config == 0) { - qpd->sh_mem_config = - SH_MEM_ALIGNMENT_MODE_UNALIGNED << + 
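Two related checks appear in the hunks above: the codec query bails out when every VCN instance is harvested (the instance count equals the population count of the harvest mask), and the VCN scheduling helpers reject AV1 work when VCN0 specifically is harvested. A small user-space model of both, using a portable popcount in place of hweight8().

```c
/* Model of the "all VCN instances harvested" and "AV1 needs VCN0" checks.
 * hweight8() is modelled with a plain popcount; names are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define HARVEST_VCN0 (1u << 0)
#define HARVEST_VCN1 (1u << 1)

static unsigned int popcount8(uint8_t v)
{
	unsigned int n = 0;
	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

/* -1: no usable VCN at all, -2: AV1 not supported, 0: ok */
static int check_decode_job(unsigned int num_vcn_inst, uint8_t harvest, int is_av1)
{
	if (num_vcn_inst == popcount8(harvest))
		return -1;			/* every instance harvested */
	if (is_av1 && (harvest & HARVEST_VCN0))
		return -2;			/* AV1 only runs on VCN0 */
	return 0;
}

int main(void)
{
	printf("%d\n", check_decode_job(2, HARVEST_VCN0 | HARVEST_VCN1, 0)); /* -1 */
	printf("%d\n", check_decode_job(2, HARVEST_VCN0, 1));                /* -2 */
	printf("%d\n", check_decode_job(2, HARVEST_VCN1, 1));                /*  0 */
	return 0;
}
```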
qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; - if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) { - /* Aldebaran can safely support different XNACK modes - * per process - */ - if (!pdd->process->xnack_enabled) - qpd->sh_mem_config |= - 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; - } else if (dqm->dev->noretry && - !dqm->dev->use_iommu_v2) { - qpd->sh_mem_config |= - 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; - } + if (dqm->dev->noretry && !dqm->dev->use_iommu_v2) + qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; qpd->sh_mem_ape1_limit = 0; qpd->sh_mem_ape1_base = 0; } + if (KFD_SUPPORT_XNACK_PER_PROCESS(dqm->dev)) { + if (!pdd->process->xnack_enabled) + qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; + else + qpd->sh_mem_config &= ~(1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT); + } + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd); - pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases); + pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases, + qpd->sh_mem_config); return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 552c3ac85a13..bfa30d12406b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -206,6 +206,8 @@ enum cache_policy { #define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0]) #define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1))) +#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\ + (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) struct kfd_event_interrupt_class { bool (*interrupt_isr)(struct kfd_dev *dev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 71db24fefe05..72df6286e240 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1330,7 +1330,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) * per-process XNACK mode selection. But let the dev->noretry * setting still influence the default XNACK mode. 
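The update_qpd_v9 rework above reduces the per-process XNACK decision to a single bit toggle on sh_mem_config: set the retry-disable bit when XNACK is off for the process, clear it when XNACK is on. A compact model of that bit handling; the shift position used here is arbitrary, not the real register layout.

```c
/* Model of toggling a RETRY_DISABLE-style bit from a per-process flag. */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define RETRY_DISABLE_SHIFT 3	/* hypothetical bit position */

static uint32_t apply_xnack_mode(uint32_t sh_mem_config, bool xnack_enabled)
{
	if (!xnack_enabled)
		sh_mem_config |= 1u << RETRY_DISABLE_SHIFT;	/* no retry on fault */
	else
		sh_mem_config &= ~(1u << RETRY_DISABLE_SHIFT);	/* allow retry (XNACK) */
	return sh_mem_config;
}

int main(void)
{
	uint32_t cfg = 0;

	cfg = apply_xnack_mode(cfg, false);
	printf("xnack off: 0x%x\n", cfg);
	cfg = apply_xnack_mode(cfg, true);
	printf("xnack on : 0x%x\n", cfg);
	return 0;
}
```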
*/ - if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) + if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) continue; /* GFXv10 and later GPUs do not support shader preemption diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2db449fed300..8e4b668faa35 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -66,7 +66,6 @@ #include "ivsrcid/ivsrcid_vislands30.h" -#include "i2caux_interface.h" #include <linux/backlight.h> #include <linux/module.h> #include <linux/moduleparam.h> @@ -1504,8 +1503,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) case IP_VERSION(3, 0, 1): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): - case IP_VERSION(3, 1, 4): - case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): init_data.flags.gpu_vm_support = true; break; @@ -1734,15 +1731,11 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) adev->dm.vblank_control_workqueue = NULL; } - for (i = 0; i < adev->dm.display_indexes_num; i++) { - drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); - } - amdgpu_dm_destroy_drm_device(&adev->dm); #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) if (adev->dm.secure_display_ctxs) { - for (i = 0; i < adev->dm.dc->caps.max_links; i++) { + for (i = 0; i < adev->mode_info.num_crtc; i++) { if (adev->dm.secure_display_ctxs[i].crtc) { flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); @@ -1949,10 +1942,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) dmub_asic = DMUB_ASIC_DCN21; break; case IP_VERSION(3, 0, 0): - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) - dmub_asic = DMUB_ASIC_DCN30; - else - dmub_asic = DMUB_ASIC_DCN30; + dmub_asic = DMUB_ASIC_DCN30; break; case IP_VERSION(3, 0, 1): dmub_asic = DMUB_ASIC_DCN301; @@ -5342,8 +5332,6 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->aspect_ratio = get_aspect_ratio(mode_in); - stream->output_color_space = get_output_color_space(timing_out); - stream->out_transfer_func->type = TF_TYPE_PREDEFINED; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { @@ -5354,6 +5342,8 @@ static void fill_stream_properties_from_drm_display_mode( adjust_colour_depth_from_display_info(timing_out, info); } } + + stream->output_color_space = get_output_color_space(timing_out); } static void fill_audio_info(struct audio_info *audio_info, @@ -9685,8 +9675,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } - if (dm_old_con_state->abm_level != - dm_new_con_state->abm_level) + if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || + dm_old_con_state->scaling != dm_new_con_state->scaling) new_crtc_state->connectors_changed = true; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 8841c447d0e2..8873ecada27c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -223,7 +223,7 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) /* Disable secure_display if it was enabled */ if (!enable) { - for (i = 0; i < adev->dm.dc->caps.max_links; i++) { + for (i = 0; i < adev->mode_info.num_crtc; i++) { if (adev->dm.secure_display_ctxs[i].crtc == crtc) { /* stop ROI update on this crtc */ 
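One display fix above moves the output_color_space derivation to after adjust_colour_depth_from_display_info(), so the derived value is computed from the final timing rather than an intermediate one. The sketch below isolates that ordering hazard with invented field names and an illustrative derivation rule.

```c
/* Model of "derive a value only after all of its inputs are final". */
#include <stdio.h>

struct timing {
	int depth;		/* colour depth in bits */
	int pixel_encoding;	/* 0 = RGB, 1 = YCbCr */
};

static int derive_color_space(const struct timing *t)
{
	/* Illustrative rule only: YCbCr output selects a YCC colour space. */
	return t->pixel_encoding ? 2 : 1;
}

static void adjust_for_display(struct timing *t)
{
	/* Display capabilities may force a different encoding/depth. */
	t->pixel_encoding = 1;
	t->depth = 10;
}

int main(void)
{
	struct timing t = { .depth = 8, .pixel_encoding = 0 };

	int premature = derive_color_space(&t);	/* derived too early */
	adjust_for_display(&t);
	int correct = derive_color_space(&t);	/* derived from the final timing */

	printf("premature=%d correct=%d\n", premature, correct);
	return 0;
}
```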
flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); @@ -544,12 +544,14 @@ amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev) struct secure_display_context *secure_display_ctxs = NULL; int i; - secure_display_ctxs = kcalloc(AMDGPU_MAX_CRTCS, sizeof(struct secure_display_context), GFP_KERNEL); + secure_display_ctxs = kcalloc(adev->mode_info.num_crtc, + sizeof(struct secure_display_context), + GFP_KERNEL); if (!secure_display_ctxs) return NULL; - for (i = 0; i < adev->dm.dc->caps.max_links; i++) { + for (i = 0; i < adev->mode_info.num_crtc; i++) { INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window); INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 22125daf9dcf..1e39d0939700 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -105,8 +105,7 @@ static void vblank_control_worker(struct work_struct *work) else if (dm->active_vblank_irq_count) dm->active_vblank_irq_count--; - dc_allow_idle_optimizations( - dm->dc, dm->active_vblank_irq_count == 0 ? true : false); + dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0); DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index ae54a9719910..704860e6ba84 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -35,6 +35,7 @@ #include "resource.h" #include "dsc.h" #include "dc_link_dp.h" +#include "dc_link.h" #include "link_hwss.h" #include "dc/dc_dmub_srv.h" @@ -3395,7 +3396,7 @@ static int trigger_hpd_mst_set(void *data, u64 val) continue; link = aconnector->dc_link; - dp_receiver_power_ctrl(link, false); + dc_link_dp_receiver_power_ctrl(link, false); drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_port->mst_mgr, false); link->mst_stream_alloc_table.stream_count = 0; memset(link->mst_stream_alloc_table.stream_allocations, 0, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index a7fd98f57f94..8e572f07ec47 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -170,9 +170,10 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; struct mod_hdcp_display_query query; + unsigned int conn_index = aconnector->base.index; mutex_lock(&hdcp_w->mutex); - hdcp_w->aconnector = aconnector; + hdcp_w->aconnector[conn_index] = aconnector; query.display = NULL; mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query); @@ -204,7 +205,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); } else { display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; - hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; cancel_delayed_work(&hdcp_w->property_validate_dwork); } @@ -223,9 +224,10 @@ static void 
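The secure-display hunks above allocate and walk the per-CRTC context array with one consistent bound, the actual CRTC count, instead of mixing a worst-case constant with the link count. A user-space model of that sizing, with calloc standing in for kcalloc and an illustrative context layout.

```c
/* Model of allocating and walking a per-CRTC context array with one bound. */
#include <stdio.h>
#include <stdlib.h>

struct secure_display_ctx {
	int crtc_index;
	int roi_enabled;
};

static struct secure_display_ctx *create_contexts(unsigned int num_crtc)
{
	struct secure_display_ctx *ctxs =
		calloc(num_crtc, sizeof(*ctxs));	/* zeroed, sized by the real count */
	if (!ctxs)
		return NULL;

	for (unsigned int i = 0; i < num_crtc; i++)
		ctxs[i].crtc_index = (int)i;		/* one context per CRTC */
	return ctxs;
}

int main(void)
{
	unsigned int num_crtc = 4;
	struct secure_display_ctx *ctxs = create_contexts(num_crtc);

	if (!ctxs)
		return 1;
	for (unsigned int i = 0; i < num_crtc; i++)	/* same bound on teardown */
		printf("ctx %u -> crtc %d\n", i, ctxs[i].crtc_index);
	free(ctxs);
	return 0;
}
```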
hdcp_remove_display(struct hdcp_workqueue *hdcp_work, { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct drm_connector_state *conn_state = aconnector->base.state; + unsigned int conn_index = aconnector->base.index; mutex_lock(&hdcp_w->mutex); - hdcp_w->aconnector = aconnector; + hdcp_w->aconnector[conn_index] = aconnector; /* the removal of display will invoke auth reset -> hdcp destroy and * we'd expect the Content Protection (CP) property changed back to @@ -247,13 +249,18 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + unsigned int conn_index; mutex_lock(&hdcp_w->mutex); mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); cancel_delayed_work(&hdcp_w->property_validate_dwork); - hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { + hdcp_w->encryption_status[conn_index] = + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } process_output(hdcp_w); @@ -290,49 +297,80 @@ static void event_callback(struct work_struct *work) } + static void event_property_update(struct work_struct *work) { - struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); - struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; - struct drm_device *dev = hdcp_work->aconnector->base.dev; + struct amdgpu_dm_connector *aconnector = NULL; + struct drm_device *dev; long ret; + unsigned int conn_index; + struct drm_connector *connector; + struct drm_connector_state *conn_state; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - mutex_lock(&hdcp_work->mutex); + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { + aconnector = hdcp_work->aconnector[conn_index]; + if (!aconnector) + continue; - if (aconnector->base.state && aconnector->base.state->commit) { - ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ); + connector = &aconnector->base; - if (ret == 0) { - DRM_ERROR("HDCP state unknown! Setting it to DESIRED"); - hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - } - } + /* check if display connected */ + if (connector->status != connector_status_connected) + continue; - if (aconnector->base.state) { - if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { - if (aconnector->base.state->hdcp_content_type == + conn_state = aconnector->base.state; + + if (!conn_state) + continue; + + dev = connector->dev; + + if (!dev) + continue; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + mutex_lock(&hdcp_work->mutex); + + if (conn_state->commit) { + ret = wait_for_completion_interruptible_timeout( + &conn_state->commit->hw_done, 10 * HZ); + if (ret == 0) { + DRM_ERROR( + "HDCP state unknown! 
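The HDCP workqueue now tracks the connector pointer and encryption status per connector index rather than keeping only the most recent one, and the reset path clears every slot. A minimal model of turning a single-field tracker into indexed arrays with a reset loop; the array size and status values are illustrative.

```c
/* Model of per-connector HDCP state kept in fixed-size arrays. */
#include <stdio.h>

#define MAX_DISPLAY_INDEX 6

enum enc_status { ENC_OFF = 0, ENC_TYPE0_ON, ENC_TYPE1_ON };

struct hdcp_work {
	const char *connector[MAX_DISPLAY_INDEX];	/* NULL = slot unused */
	enum enc_status status[MAX_DISPLAY_INDEX];
};

static void hdcp_update_display(struct hdcp_work *w, unsigned int idx,
				const char *name, enum enc_status st)
{
	w->connector[idx] = name;	/* remember which connector owns this slot */
	w->status[idx] = st;
}

static void hdcp_reset(struct hdcp_work *w)
{
	for (unsigned int i = 0; i < MAX_DISPLAY_INDEX; i++)
		w->status[i] = ENC_OFF;	/* every tracked display back to OFF */
}

int main(void)
{
	struct hdcp_work w = { 0 };

	hdcp_update_display(&w, 0, "DP-1", ENC_TYPE0_ON);
	hdcp_update_display(&w, 2, "HDMI-1", ENC_TYPE1_ON);
	hdcp_reset(&w);
	for (unsigned int i = 0; i < MAX_DISPLAY_INDEX; i++)
		if (w.connector[i])
			printf("%s -> %d\n", w.connector[i], w.status[i]);
	return 0;
}
```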
Setting it to DESIRED"); + hdcp_work->encryption_status[conn_index] = + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } + } + if (hdcp_work->encryption_status[conn_index] != + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { + if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 && - hdcp_work->encryption_status <= - MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) - drm_hdcp_update_content_protection(&aconnector->base, + hdcp_work->encryption_status[conn_index] <= + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) { + + DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n"); + drm_hdcp_update_content_protection( + connector, DRM_MODE_CONTENT_PROTECTION_ENABLED); - else if (aconnector->base.state->hdcp_content_type == + } else if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 && - hdcp_work->encryption_status == - MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) - drm_hdcp_update_content_protection(&aconnector->base, + hdcp_work->encryption_status[conn_index] == + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) { + drm_hdcp_update_content_protection( + connector, DRM_MODE_CONTENT_PROTECTION_ENABLED); + } } else { - drm_hdcp_update_content_protection(&aconnector->base, - DRM_MODE_CONTENT_PROTECTION_DESIRED); + DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n"); + drm_hdcp_update_content_protection( + connector, DRM_MODE_CONTENT_PROTECTION_DESIRED); + } + mutex_unlock(&hdcp_work->mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); } - - mutex_unlock(&hdcp_work->mutex); - drm_modeset_unlock(&dev->mode_config.connection_mutex); } static void event_property_validate(struct work_struct *work) @@ -340,19 +378,47 @@ static void event_property_validate(struct work_struct *work) struct hdcp_workqueue *hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork); struct mod_hdcp_display_query query; - struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; - - if (!aconnector) - return; + struct amdgpu_dm_connector *aconnector; + unsigned int conn_index; mutex_lock(&hdcp_work->mutex); - query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query); + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; + conn_index++) { + aconnector = hdcp_work->aconnector[conn_index]; + + if (!aconnector) + continue; + + /* check if display connected */ + if (aconnector->base.status != connector_status_connected) + continue; - if (query.encryption_status != hdcp_work->encryption_status) { - hdcp_work->encryption_status = query.encryption_status; - schedule_work(&hdcp_work->property_update_work); + if (!aconnector->base.state) + continue; + + query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, + &query); + + DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n", + aconnector->base.index, + aconnector->base.state->content_protection, + query.encryption_status, + hdcp_work->encryption_status[conn_index]); + + if (query.encryption_status != + hdcp_work->encryption_status[conn_index]) { + DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n", + hdcp_work->encryption_status[conn_index], query.encryption_status); + + hdcp_work->encryption_status[conn_index] = + query.encryption_status; + + DRM_DEBUG_DRIVER("[HDCP_DM] trigger property_update_work\n"); + + schedule_work(&hdcp_work->property_update_work); + } } mutex_unlock(&hdcp_work->mutex); @@ 
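Both the property-update and property-validate workers above now loop over every tracked connector, skip empty or disconnected slots, and only schedule further work when the freshly queried status differs from the cached one. A condensed model of that scan-skip-compare loop, with a stand-in query function.

```c
/* Model of the scan-skip-compare pattern used by the HDCP validate worker. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DISPLAY_INDEX 6

struct slot {
	bool present;
	bool connected;
	int cached_status;
};

static int query_status(unsigned int idx)
{
	return (int)idx % 2;	/* pretend hardware answer */
}

static unsigned int validate_all(struct slot *slots, unsigned int n)
{
	unsigned int scheduled = 0;

	for (unsigned int i = 0; i < n; i++) {
		if (!slots[i].present || !slots[i].connected)
			continue;			/* nothing to do for this slot */

		int now = query_status(i);
		if (now != slots[i].cached_status) {
			slots[i].cached_status = now;	/* remember the new state */
			scheduled++;			/* would schedule_work() here */
		}
	}
	return scheduled;
}

int main(void)
{
	struct slot slots[MAX_DISPLAY_INDEX] = {
		[0] = { true, true, 1 },
		[1] = { true, true, 0 },
		[3] = { true, false, 0 },
	};
	printf("updates scheduled: %u\n", validate_all(slots, MAX_DISPLAY_INDEX));
	return 0;
}
```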
-686,6 +752,13 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; + + memset(hdcp_work[i].aconnector, 0, + sizeof(struct amdgpu_dm_connector *) * + AMDGPU_DM_MAX_DISPLAY_INDEX); + memset(hdcp_work[i].encryption_status, 0, + sizeof(enum mod_hdcp_encryption_status) * + AMDGPU_DM_MAX_DISPLAY_INDEX); } cp_psp->funcs.update_stream_config = update_config; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h index bbbf7d0eff82..69b445b011c8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -43,7 +43,7 @@ struct hdcp_workqueue { struct delayed_work callback_dwork; struct delayed_work watchdog_timer_dwork; struct delayed_work property_validate_dwork; - struct amdgpu_dm_connector *aconnector; + struct amdgpu_dm_connector *aconnector[AMDGPU_DM_MAX_DISPLAY_INDEX]; struct mutex mutex; struct mod_hdcp hdcp; @@ -51,8 +51,7 @@ struct hdcp_workqueue { struct mod_hdcp_display display; struct mod_hdcp_link link; - enum mod_hdcp_encryption_status encryption_status; - + enum mod_hdcp_encryption_status encryption_status[AMDGPU_DM_MAX_DISPLAY_INDEX]; /* when display is unplugged from mst hub, connctor will be * destroyed within dm_dp_mst_connector_destroy. connector * hdcp perperties, like type, undesired, desired, enabled, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 41f35d75d0a8..5fa9bab95038 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -39,12 +39,10 @@ #include "dc.h" #include "dm_helpers.h" -#include "dc_link_ddc.h" #include "dc_link_dp.h" #include "ddc_service_types.h" #include "dpcd_defs.h" -#include "i2caux_interface.h" #include "dmub_cmd.h" #if defined(CONFIG_DEBUG_FS) #include "amdgpu_dm_debugfs.h" @@ -494,7 +492,6 @@ static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); - kfree(encoder); } static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index b9effadfc4bb..98c508313350 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -64,9 +64,9 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI include $(AMD_DC) -DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ -dc_surface.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \ -dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o +DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ +dc_surface.o dc_link_dp.o dc_debug.o dc_stream.o \ +dc_link_enc_cfg.o DISPLAY_CORE += dc_vm_helper.o diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index a1a00f432168..27af9d3c2b73 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -33,7 +33,6 @@ #include "include/gpio_service_interface.h" #include 
"include/grph_object_ctrl_defs.h" #include "include/bios_parser_interface.h" -#include "include/i2caux_interface.h" #include "include/logger_interface.h" #include "command_table.h" diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 074e70a5c458..e381de2429fa 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -32,7 +32,6 @@ #include "dc_bios_types.h" #include "include/grph_object_ctrl_defs.h" #include "include/bios_parser_interface.h" -#include "include/i2caux_interface.h" #include "include/logger_interface.h" #include "command_table2.h" @@ -1698,14 +1697,15 @@ static enum bp_result bios_parser_enable_disp_power_gating( static enum bp_result bios_parser_enable_lvtma_control( struct dc_bios *dcb, uint8_t uc_pwr_on, - uint8_t panel_instance) + uint8_t panel_instance, + uint8_t bypass_panel_control_wait) { struct bios_parser *bp = BP_FROM_DCB(dcb); if (!bp->cmd_tbl.enable_lvtma_control) return BP_RESULT_FAILURE; - return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance); + return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance, bypass_panel_control_wait); } static bool bios_parser_is_accelerated_mode( @@ -2929,7 +2929,6 @@ static enum bp_result construct_integrated_info( struct atom_common_table_header *header; struct atom_data_revision revision; - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -3032,14 +3031,8 @@ static enum bp_result construct_integrated_info( for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) { for (j = i; j > 0; --j) { if (info->disp_clk_voltage[j].max_supported_clk < - info->disp_clk_voltage[j-1].max_supported_clk - ) { - /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; - } + info->disp_clk_voltage[j-1].max_supported_clk) + swap(info->disp_clk_voltage[j-1], info->disp_clk_voltage[j]); } } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index f52f7ff7ead4..1ef9e4053bb7 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -986,7 +986,8 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id) static enum bp_result enable_lvtma_control( struct bios_parser *bp, uint8_t uc_pwr_on, - uint8_t panel_instance); + uint8_t panel_instance, + uint8_t bypass_panel_control_wait); static void init_enable_lvtma_control(struct bios_parser *bp) { @@ -998,7 +999,8 @@ static void init_enable_lvtma_control(struct bios_parser *bp) static void enable_lvtma_control_dmcub( struct dc_dmub_srv *dmcub, uint8_t uc_pwr_on, - uint8_t panel_instance) + uint8_t panel_instance, + uint8_t bypass_panel_control_wait) { union dmub_rb_cmd cmd; @@ -1012,6 +1014,8 @@ static void enable_lvtma_control_dmcub( uc_pwr_on; cmd.lvtma_control.data.panel_inst = panel_instance; + cmd.lvtma_control.data.bypass_panel_control_wait = + bypass_panel_control_wait; dc_dmub_srv_cmd_queue(dmcub, &cmd); dc_dmub_srv_cmd_execute(dmcub); dc_dmub_srv_wait_idle(dmcub); @@ -1021,7 +1025,8 @@ static void enable_lvtma_control_dmcub( static enum bp_result enable_lvtma_control( struct bios_parser *bp, uint8_t uc_pwr_on, - uint8_t panel_instance) + uint8_t panel_instance, + uint8_t bypass_panel_control_wait) { enum bp_result result = BP_RESULT_FAILURE; @@ -1029,7 +1034,8 @@ 
static enum bp_result enable_lvtma_control( bp->base.ctx->dc->debug.dmub_command_table) { enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv, uc_pwr_on, - panel_instance); + panel_instance, + bypass_panel_control_wait); return BP_RESULT_OK; } return result; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h index be060b4b87db..b6d09bf6cf72 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h @@ -96,7 +96,8 @@ struct cmd_tbl { struct bios_parser *bp, uint8_t id); enum bp_result (*enable_lvtma_control)(struct bios_parser *bp, uint8_t uc_pwr_on, - uint8_t panel_instance); + uint8_t panel_instance, + uint8_t bypass_panel_control_wait); }; void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c index f47cfe6b42bd..0765334f0825 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c @@ -146,6 +146,9 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && param == TABLE_WATERMARKS) DC_LOG_WARNING("Watermarks table not configured properly by SMU"); + else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq || + msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk) + DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS"); else ASSERT(0); REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index ba9814f88f48..352c977d1495 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -34,6 +34,7 @@ #include "core_types.h" #include "dm_helpers.h" #include "dc_link_dp.h" +#include "link.h" #include "atomfirmware.h" #include "smu13_driver_if.h" @@ -255,6 +256,94 @@ static void dcn32_update_dppclk_dispclk_freq(struct clk_mgr_internal *clk_mgr, s } } +static void dcn32_update_clocks_update_dentist( + struct clk_mgr_internal *clk_mgr, + struct dc_state *context, + uint32_t old_dispclk_khz) +{ + uint32_t new_disp_divider = 0; + uint32_t old_disp_divider = 0; + uint32_t new_dispclk_wdivider = 0; + uint32_t old_dispclk_wdivider = 0; + uint32_t i; + + if (old_dispclk_khz == 0 || clk_mgr->base.clks.dispclk_khz == 0) + return; + + new_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz; + old_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz / old_dispclk_khz; + + new_dispclk_wdivider = dentist_get_did_from_divider(new_disp_divider); + old_dispclk_wdivider = dentist_get_did_from_divider(old_disp_divider); + + /* When changing divider to or from 127, some extra programming is required to prevent corruption */ + if (old_dispclk_wdivider == 127 && new_dispclk_wdivider != 127) { + for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + uint32_t fifo_level; + struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg; + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; + int32_t N; + int32_t j; + + if (!pipe_ctx->stream) + continue; + /* Virtual encoders don't have this function */ + if 
(!stream_enc->funcs->get_fifo_cal_average_level) + continue; + fifo_level = stream_enc->funcs->get_fifo_cal_average_level( + stream_enc); + N = fifo_level / 4; + dccg->funcs->set_fifo_errdet_ovr_en( + dccg, + true); + for (j = 0; j < N - 4; j++) + dccg->funcs->otg_drop_pixel( + dccg, + pipe_ctx->stream_res.tg->inst); + dccg->funcs->set_fifo_errdet_ovr_en( + dccg, + false); + } + } else if (new_dispclk_wdivider == 127 && old_dispclk_wdivider != 127) { + /* request clock with 126 divider first */ + uint32_t temp_disp_divider = dentist_get_divider_from_did(126); + uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider; + + if (clk_mgr->smu_present) + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz)); + + for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg; + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; + uint32_t fifo_level; + int32_t N; + int32_t j; + + if (!pipe_ctx->stream) + continue; + /* Virtual encoders don't have this function */ + if (!stream_enc->funcs->get_fifo_cal_average_level) + continue; + fifo_level = stream_enc->funcs->get_fifo_cal_average_level( + stream_enc); + N = fifo_level / 4; + dccg->funcs->set_fifo_errdet_ovr_en(dccg, true); + for (j = 0; j < 12 - N; j++) + dccg->funcs->otg_add_pixel(dccg, + pipe_ctx->stream_res.tg->inst); + dccg->funcs->set_fifo_errdet_ovr_en(dccg, false); + } + } + + /* do requested DISPCLK updates*/ + if (clk_mgr->smu_present) + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz)); +} + static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) @@ -273,6 +362,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, bool p_state_change_support; bool fclk_p_state_change_support; int total_plane_count; + int old_dispclk_khz = clk_mgr_base->clks.dispclk_khz; if (dc->work_arounds.skip_clock_update) return; @@ -396,9 +486,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; - if (clk_mgr->smu_present) - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dispclk_khz)); - update_dispclk = true; } @@ -418,13 +505,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, if (dpp_clock_lowered) { /* if clock is being lowered, increase DTO before lowering refclk */ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); - dcn20_update_clocks_update_dentist(clk_mgr, context); + dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz); if (clk_mgr->smu_present) dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz)); } else { /* if clock is being raised, increase refclk before lowering DTO */ if (update_dppclk || update_dispclk) - dcn20_update_clocks_update_dentist(clk_mgr, context); + dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz); /* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures * that we do not lower dto when it is not safe to lower. We do not need to * compare the current and new dppclk before calling this function. 
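dcn32_update_clocks_update_dentist above derives the old and new DENTIST display-clock dividers from the VCO frequency and the requested DISPCLK, then applies extra FIFO programming only when the transition enters or leaves the special value 127. The sketch below models the divider arithmetic and the crossing check; the scale factor is a placeholder, and the raw divider is compared directly for brevity where the driver compares the encoded divider ID (DID).

```c
/* Model of deriving DENTIST-style clock dividers and spotting a
 * transition to/from the special divider value 127.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define SCALE_FACTOR 4	/* placeholder for the driver's divider scale constant */

static uint32_t divider_from_clocks(uint32_t vco_khz, uint32_t dispclk_khz)
{
	if (dispclk_khz == 0)
		return 0;
	return SCALE_FACTOR * vco_khz / dispclk_khz;
}

static bool crosses_special_divider(uint32_t old_div, uint32_t new_div)
{
	/* Extra FIFO pixel add/drop programming is needed when entering or leaving 127. */
	return (old_div == 127) != (new_div == 127);
}

int main(void)
{
	uint32_t vco_khz = 3600000;	/* illustrative VCO frequency */
	uint32_t old_khz = 1200000, new_khz = 600000;

	uint32_t old_div = divider_from_clocks(vco_khz, old_khz);
	uint32_t new_div = divider_from_clocks(vco_khz, new_khz);

	printf("old divider %u, new divider %u, special transition: %s\n",
	       old_div, new_div,
	       crosses_special_divider(old_div, new_div) ? "yes" : "no");
	return 0;
}
```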
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 2c18c8527079..53e586fc1501 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -33,6 +33,7 @@ #include "resource.h" +#include "gpio_service_interface.h" #include "clk_mgr.h" #include "clock_source.h" #include "dc_bios_types.h" @@ -53,7 +54,7 @@ #include "link_enc_cfg.h" #include "dc_link.h" -#include "dc_link_ddc.h" +#include "link.h" #include "dm_helpers.h" #include "mem_input.h" @@ -68,8 +69,6 @@ #include "dmub/dmub_srv.h" -#include "i2caux_interface.h" - #include "dce/dmub_psr.h" #include "dce/dmub_hw_lock_mgr.h" @@ -871,6 +870,7 @@ static bool dc_construct_ctx(struct dc *dc, dc_ctx->perf_trace = dc_perf_trace_create(); if (!dc_ctx->perf_trace) { + kfree(dc_ctx); ASSERT_CRITICAL(false); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 471078fc3900..652270a0b498 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -90,8 +90,8 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = { { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3, 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }, { COLOR_SPACE_YCBCR2020_TYPE, - { 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2, - 0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} }, + { 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2, + 0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} }, { COLOR_SPACE_YCBCR709_BLACK_TYPE, { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} }, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index ee20b4d3afd4..d9e490eca10f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -33,9 +33,10 @@ #include "gpio_service_interface.h" #include "core_status.h" #include "dc_link_dp.h" -#include "dc_link_dpia.h" -#include "dc_link_ddc.h" +#include "link/link_dp_dpia.h" +#include "link/link_ddc.h" #include "link_hwss.h" +#include "link.h" #include "opp.h" #include "link_encoder.h" @@ -50,8 +51,12 @@ #include "dmub/dmub_srv.h" #include "inc/hw/panel_cntl.h" #include "inc/link_enc_cfg.h" -#include "inc/link_dpcd.h" +#include "link/link_dpcd.h" #include "link/link_dp_trace.h" +#include "link/link_hpd.h" +#include "link/link_dp_training.h" +#include "link/link_dp_phy.h" +#include "link/link_dp_capability.h" #include "dc/dcn30/dcn30_vpg.h" @@ -78,7 +83,7 @@ static void dc_link_destruct(struct dc_link *link) } if (link->ddc) - dal_ddc_service_destroy(&link->ddc); + link_destroy_ddc_service(&link->ddc); if (link->panel_cntl) link->panel_cntl->funcs->destroy(&link->panel_cntl); @@ -102,108 +107,6 @@ static void dc_link_destruct(struct dc_link *link) dc_sink_release(link->remote_sinks[i]); } -struct gpio *get_hpd_gpio(struct dc_bios *dcb, - struct graphics_object_id link_id, - struct gpio_service *gpio_service) -{ - enum bp_result bp_result; - struct graphics_object_hpd_info hpd_info; - struct gpio_pin_info pin_info; - - if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK) - return NULL; - - bp_result = dcb->funcs->get_gpio_pin_info(dcb, - hpd_info.hpd_int_gpio_uid, &pin_info); - - if (bp_result != BP_RESULT_OK) { - ASSERT(bp_result == BP_RESULT_NORECORD); - return NULL; - } - - return dal_gpio_service_create_irq(gpio_service, - 
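The dc_construct_ctx fix above frees the partially built context when the perf-trace allocation fails, instead of leaking it on the error path. A generic user-space model of that construct-or-unwind pattern, with simplified types.

```c
/* Model of unwinding a partially constructed object when a later
 * allocation fails, mirroring the added kfree() on the error path.
 */
#include <stdio.h>
#include <stdlib.h>

struct perf_trace { int dummy; };

struct ctx {
	struct perf_trace *trace;
};

static struct ctx *construct_ctx(int fail_trace)
{
	struct ctx *c = calloc(1, sizeof(*c));
	if (!c)
		return NULL;

	c->trace = fail_trace ? NULL : calloc(1, sizeof(*c->trace));
	if (!c->trace) {
		free(c);	/* do not leak the earlier allocation */
		return NULL;
	}
	return c;
}

int main(void)
{
	struct ctx *ok = construct_ctx(0);
	struct ctx *bad = construct_ctx(1);

	printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
	if (ok) {
		free(ok->trace);
		free(ok);
	}
	return 0;
}
```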
pin_info.offset, - pin_info.mask); -} - -/* - * Function: program_hpd_filter - * - * @brief - * Programs HPD filter on associated HPD line - * - * @param [in] delay_on_connect_in_ms: Connect filter timeout - * @param [in] delay_on_disconnect_in_ms: Disconnect filter timeout - * - * @return - * true on success, false otherwise - */ -static bool program_hpd_filter(const struct dc_link *link) -{ - bool result = false; - struct gpio *hpd; - int delay_on_connect_in_ms = 0; - int delay_on_disconnect_in_ms = 0; - - if (link->is_hpd_filter_disabled) - return false; - /* Verify feature is supported */ - switch (link->connector_signal) { - case SIGNAL_TYPE_DVI_SINGLE_LINK: - case SIGNAL_TYPE_DVI_DUAL_LINK: - case SIGNAL_TYPE_HDMI_TYPE_A: - /* Program hpd filter */ - delay_on_connect_in_ms = 500; - delay_on_disconnect_in_ms = 100; - break; - case SIGNAL_TYPE_DISPLAY_PORT: - case SIGNAL_TYPE_DISPLAY_PORT_MST: - /* Program hpd filter to allow DP signal to settle */ - /* 500: not able to detect MST <-> SST switch as HPD is low for - * only 100ms on DELL U2413 - * 0: some passive dongle still show aux mode instead of i2c - * 20-50: not enough to hide bouncing HPD with passive dongle. - * also see intermittent i2c read issues. - */ - delay_on_connect_in_ms = 80; - delay_on_disconnect_in_ms = 0; - break; - case SIGNAL_TYPE_LVDS: - case SIGNAL_TYPE_EDP: - default: - /* Don't program hpd filter */ - return false; - } - - /* Obtain HPD handle */ - hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, - link->ctx->gpio_service); - - if (!hpd) - return result; - - /* Setup HPD filtering */ - if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { - struct gpio_hpd_config config; - - config.delay_on_connect = delay_on_connect_in_ms; - config.delay_on_disconnect = delay_on_disconnect_in_ms; - - dal_irq_setup_hpd_filter(hpd, &config); - - dal_gpio_close(hpd); - - result = true; - } else { - ASSERT_CRITICAL(false); - } - - /* Release HPD handle */ - dal_gpio_destroy_irq(&hpd); - - return result; -} - bool dc_link_wait_for_t12(struct dc_link *link) { if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) { @@ -226,7 +129,6 @@ bool dc_link_wait_for_t12(struct dc_link *link) bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) { uint32_t is_hpd_high = 0; - struct gpio *hpd_pin; if (link->connector_signal == SIGNAL_TYPE_LVDS) { *type = dc_connection_single; @@ -250,17 +152,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) return true; } - /* todo: may need to lock gpio access */ - hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, - link->ctx->gpio_service); - if (!hpd_pin) + if (!query_hpd_status(link, &is_hpd_high)) goto hpd_gpio_failure; - dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT); - dal_gpio_get_value(hpd_pin, &is_hpd_high); - dal_gpio_close(hpd_pin); - dal_gpio_destroy_irq(&hpd_pin); - if (is_hpd_high) { *type = dc_connection_single; /* TODO: need to do the actual detection */ @@ -386,7 +280,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) (connector_id == CONNECTOR_ID_EDP) || (connector_id == CONNECTOR_ID_USBC)); - ddc = dal_ddc_service_get_ddc_pin(link->ddc); + ddc = get_ddc_pin(link->ddc); if (!ddc) { BREAK_TO_DEBUGGER(); @@ -531,11 +425,179 @@ static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_do return signal; } +static bool i2c_read( + struct ddc_service *ddc, + uint32_t address, + uint8_t *buffer, + uint32_t len) +{ + uint8_t offs_data = 0; + struct i2c_payload 
payloads[2] = { + { + .write = true, + .address = address, + .length = 1, + .data = &offs_data }, + { + .write = false, + .address = address, + .length = len, + .data = buffer } }; + + struct i2c_command command = { + .payloads = payloads, + .number_of_payloads = 2, + .engine = DDC_I2C_COMMAND_ENGINE, + .speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; + + return dm_helpers_submit_i2c( + ddc->ctx, + ddc->link, + &command); +} + +enum { + DP_SINK_CAP_SIZE = + DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1 +}; + +static void query_dp_dual_mode_adaptor( + struct ddc_service *ddc, + struct display_sink_capability *sink_cap) +{ + uint8_t i; + bool is_valid_hdmi_signature; + enum display_dongle_type *dongle = &sink_cap->dongle_type; + uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE]; + bool is_type2_dongle = false; + int retry_count = 2; + struct dp_hdmi_dongle_signature_data *dongle_signature; + + /* Assume we have no valid DP passive dongle connected */ + *dongle = DISPLAY_DONGLE_NONE; + sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK; + + /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/ + if (!i2c_read( + ddc, + DP_HDMI_DONGLE_ADDRESS, + type2_dongle_buf, + sizeof(type2_dongle_buf))) { + /* Passive HDMI dongles can sometimes fail here without retrying*/ + while (retry_count > 0) { + if (i2c_read(ddc, + DP_HDMI_DONGLE_ADDRESS, + type2_dongle_buf, + sizeof(type2_dongle_buf))) + break; + retry_count--; + } + if (retry_count == 0) { + *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; + sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), + "DP-DVI passive dongle %dMhz: ", + DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); + return; + } + } + + /* Check if Type 2 dongle.*/ + if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID) + is_type2_dongle = true; + + dongle_signature = + (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf; + + is_valid_hdmi_signature = true; + + /* Check EOT */ + if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) { + is_valid_hdmi_signature = false; + } + + /* Check signature */ + for (i = 0; i < sizeof(dongle_signature->id); ++i) { + /* If its not the right signature, + * skip mismatch in subversion byte.*/ + if (dongle_signature->id[i] != + dp_hdmi_dongle_signature_str[i] && i != 3) { + + if (is_type2_dongle) { + is_valid_hdmi_signature = false; + break; + } + + } + } + + if (is_type2_dongle) { + uint32_t max_tmds_clk = + type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK]; + + max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2; + + if (0 == max_tmds_clk || + max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK || + max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) { + *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "DP-DVI passive dongle %dMhz: ", + DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); + } else { + if (is_valid_hdmi_signature == true) { + *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "Type 2 DP-HDMI passive dongle %dMhz: ", + max_tmds_clk); + } else { + *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ", + max_tmds_clk); + + } + + /* Multiply by 1000 to convert to kHz. 
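The relocated dongle query above issues a two-payload I2C transaction (write the register offset, then read back a block) and, for type-2 dongles, scales the reported TMDS clock by 2.5 using integer arithmetic (x*2 + x/2). A stand-alone model of both pieces; the fake register contents and sizes are invented.

```c
/* Model of the passive-dongle query: an offset-write + block-read I2C
 * transaction against a fake device, and the 2.5x TMDS clock scaling.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DONGLE_BUF_SIZE 16

/* Fake I2C device: a tiny register file. */
static const uint8_t fake_regs[32] = { [10] = 60 /* max TMDS clk code */ };

static int i2c_offset_read(uint8_t offset, uint8_t *buf, size_t len)
{
	/* payload 1: write the starting offset; payload 2: read 'len' bytes */
	if (offset + len > sizeof(fake_regs))
		return -1;
	memcpy(buf, &fake_regs[offset], len);
	return 0;
}

static uint32_t scale_tmds_clk(uint32_t reg_val)
{
	/* 2.5x in integer math, as in the type-2 dongle path */
	return reg_val * 2 + reg_val / 2;
}

int main(void)
{
	uint8_t buf[DONGLE_BUF_SIZE];

	if (i2c_offset_read(0, buf, sizeof(buf)))
		return 1;
	printf("max TMDS clk code %u -> scaled %u\n",
	       buf[10], scale_tmds_clk(buf[10]));
	return 0;
}
```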
*/ + sink_cap->max_hdmi_pixel_clock = + max_tmds_clk * 1000; + } + sink_cap->is_dongle_type_one = false; + + } else { + if (is_valid_hdmi_signature == true) { + *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "Type 1 DP-HDMI passive dongle %dMhz: ", + sink_cap->max_hdmi_pixel_clock / 1000); + } else { + *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ", + sink_cap->max_hdmi_pixel_clock / 1000); + } + sink_cap->is_dongle_type_one = true; + } + + return; +} + static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc, struct display_sink_capability *sink_cap, struct audio_support *audio_support) { - dal_ddc_service_i2c_query_dp_dual_mode_adaptor(ddc, sink_cap); + query_dp_dual_mode_adaptor(ddc, sink_cap); return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type, audio_support); @@ -775,7 +837,7 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link) return true; is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc); - DC_LOG_WARNING("DP Alt mode state on HPD: %d\n", is_in_alt_mode); + DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode); if (is_in_alt_mode) return true; @@ -971,7 +1033,7 @@ static bool should_verify_link_capability_destructively(struct dc_link *link, dc_is_embedded_signal(link->local_sink->sink_signal) || link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { destrictive = false; - } else if (dp_get_link_encoding_format(&max_link_cap) == + } else if (link_dp_get_encoding_format(&max_link_cap) == DP_8b_10b_ENCODING) { if (link->dpcd_caps.is_mst_capable || is_link_enc_unavailable) { @@ -1155,11 +1217,11 @@ static bool detect_link_and_local_sink(struct dc_link *link, else link->dpcd_sink_count = 1; - dal_ddc_service_set_transaction_type(link->ddc, + set_ddc_transaction_type(link->ddc, sink_caps.transaction_type); link->aux_mode = - dal_ddc_service_is_in_aux_transaction_mode(link->ddc); + link_is_in_aux_transaction_mode(link->ddc); sink_init_data.link = link; sink_init_data.sink_signal = sink_caps.signal; @@ -1367,58 +1429,6 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr; } -bool dc_link_get_hpd_state(struct dc_link *dc_link) -{ - uint32_t state; - - dal_gpio_lock_pin(dc_link->hpd_gpio); - dal_gpio_get_value(dc_link->hpd_gpio, &state); - dal_gpio_unlock_pin(dc_link->hpd_gpio); - - return state; -} - -static enum hpd_source_id get_hpd_line(struct dc_link *link) -{ - struct gpio *hpd; - enum hpd_source_id hpd_id; - - hpd_id = HPD_SOURCEID_UNKNOWN; - - hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, - link->ctx->gpio_service); - - if (hpd) { - switch (dal_irq_get_source(hpd)) { - case DC_IRQ_SOURCE_HPD1: - hpd_id = HPD_SOURCEID1; - break; - case DC_IRQ_SOURCE_HPD2: - hpd_id = HPD_SOURCEID2; - break; - case DC_IRQ_SOURCE_HPD3: - hpd_id = HPD_SOURCEID3; - break; - case DC_IRQ_SOURCE_HPD4: - hpd_id = HPD_SOURCEID4; - break; - case DC_IRQ_SOURCE_HPD5: - hpd_id = HPD_SOURCEID5; - break; - case DC_IRQ_SOURCE_HPD6: - hpd_id = HPD_SOURCEID6; - break; - default: - BREAK_TO_DEBUGGER(); - break; - } - - dal_gpio_destroy_irq(&hpd); - } - - return hpd_id; -} - static enum channel_id get_ddc_line(struct dc_link *link) { struct ddc *ddc; @@ -1426,7 +1436,7 @@ static enum channel_id get_ddc_line(struct dc_link *link) channel = 
CHANNEL_ID_UNKNOWN; - ddc = dal_ddc_service_get_ddc_pin(link->ddc); + ddc = get_ddc_pin(link->ddc); if (ddc) { switch (dal_ddc_get_line(ddc)) { @@ -1583,7 +1593,7 @@ static bool dc_link_construct_legacy(struct dc_link *link, if (link->dc->res_pool->funcs->link_init) link->dc->res_pool->funcs->link_init(link); - link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->hpd_gpio = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); if (link->hpd_gpio) { @@ -1663,7 +1673,7 @@ static bool dc_link_construct_legacy(struct dc_link *link, ddc_service_init_data.ctx = link->ctx; ddc_service_init_data.id = link->link_id; ddc_service_init_data.link = link; - link->ddc = dal_ddc_service_create(&ddc_service_init_data); + link->ddc = link_create_ddc_service(&ddc_service_init_data); if (!link->ddc) { DC_ERROR("Failed to create ddc_service!\n"); @@ -1676,7 +1686,7 @@ static bool dc_link_construct_legacy(struct dc_link *link, } link->ddc_hw_inst = - dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc)); + dal_ddc_get_line(get_ddc_pin(link->ddc)); if (link->dc->res_pool->funcs->panel_cntl_create && @@ -1813,7 +1823,7 @@ link_enc_create_fail: if (link->panel_cntl != NULL) link->panel_cntl->funcs->destroy(&link->panel_cntl); panel_cntl_create_fail: - dal_ddc_service_destroy(&link->ddc); + link_destroy_ddc_service(&link->ddc); ddc_create_fail: create_fail: @@ -1871,7 +1881,7 @@ static bool dc_link_construct_dpia(struct dc_link *link, /* Set indicator for dpia link so that ddc won't be created */ ddc_service_init_data.is_dpia_link = true; - link->ddc = dal_ddc_service_create(&ddc_service_init_data); + link->ddc = link_create_ddc_service(&ddc_service_init_data); if (!link->ddc) { DC_ERROR("Failed to create ddc_service!\n"); goto ddc_create_fail; @@ -1996,7 +2006,7 @@ static enum dc_status enable_link_dp(struct dc_state *state, * Temporary w/a to get DP2.0 link rates to work with SST. * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved. 
*/ - if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING && + if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING && pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && link->dc->debug.set_mst_en_for_sst) { dp_enable_mst_on_sink(link, true); @@ -2009,7 +2019,7 @@ static enum dc_status enable_link_dp(struct dc_state *state, link->dc->hwss.edp_wait_for_hpd_ready(link, true); } - if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */ } else { pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = @@ -2050,7 +2060,7 @@ static enum dc_status enable_link_dp(struct dc_state *state, else fec_enable = true; - if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) dp_set_fec_enable(link, fec_enable); // during mode set we do DP_SET_POWER off then on, aux writes are lost @@ -2166,7 +2176,7 @@ void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init) } if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) - dp_receiver_power_ctrl(link, false); + dc_link_dp_receiver_power_ctrl(link, false); } } @@ -2339,7 +2349,7 @@ static void write_i2c_retimer_setting( value = settings->reg_settings[i].i2c_reg_val; else { i2c_success = - dal_ddc_service_query_ddc_data( + link_query_ddc_data( pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) @@ -2389,7 +2399,7 @@ static void write_i2c_retimer_setting( value = settings->reg_settings_6g[i].i2c_reg_val; else { i2c_success = - dal_ddc_service_query_ddc_data( + link_query_ddc_data( pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) @@ -2631,7 +2641,7 @@ static void disable_link(struct dc_link *link, const struct link_resource *link_ if (dc_is_dp_sst_signal(signal) || link->mst_stream_alloc_table.stream_count == 0) { - if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) { + if (link_dp_get_encoding_format(&link_settings) == DP_8b_10b_ENCODING) { dp_set_fec_enable(link, false); dp_set_fec_ready(link, link_res, false); } @@ -2687,7 +2697,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) } if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) - dal_ddc_service_write_scdc_data( + write_scdc_data( stream->link->ddc, stream->phy_pix_clk, stream->timing.flags.LTE_340MCSC_SCRAMBLE); @@ -2708,7 +2718,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) stream->phy_pix_clk); if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) - dal_ddc_service_read_scdc_data(link->ddc); + read_scdc_data(link->ddc); } static void enable_link_lvds(struct pipe_ctx *pipe_ctx) @@ -3679,7 +3689,7 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, } /* slot X.Y for SST payload allocate */ - if (allocate && dp_get_link_encoding_format(&link->cur_link_settings) == + if (allocate && link_dp_get_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) { avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link); @@ -3762,7 +3772,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) /* program DP source TX for payload */ if (link_hwss->ext.update_stream_allocation_table == NULL || - dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { + link_dp_get_encoding_format(&link->cur_link_settings) == 
DP_UNKNOWN_ENCODING) { DC_LOG_ERROR("Failure: unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } @@ -3878,7 +3888,7 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw /* update mst stream allocation table hardware state */ if (link_hwss->ext.update_stream_allocation_table == NULL || - dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { + link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { DC_LOG_ERROR("Failure: unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } @@ -3945,7 +3955,7 @@ enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t /* update mst stream allocation table hardware state */ if (link_hwss->ext.update_stream_allocation_table == NULL || - dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { + link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { DC_LOG_ERROR("Failure: unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } @@ -4058,7 +4068,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) /* update mst stream allocation table hardware state */ if (link_hwss->ext.update_stream_allocation_table == NULL || - dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { + link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { DC_LOG_DEBUG("Unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } @@ -4106,7 +4116,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) /* stream encoder index */ config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; - if (is_dp_128b_132b_signal(pipe_ctx)) + if (link_is_dp_128b_132b_signal(pipe_ctx)) config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; @@ -4115,7 +4125,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) /* link encoder index */ config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; - if (is_dp_128b_132b_signal(pipe_ctx)) + if (link_is_dp_128b_132b_signal(pipe_ctx)) config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst; /* dio output index is dpia index for DPIA endpoint & dcio index by default */ @@ -4136,7 +4146,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0; config.mst_enabled = (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0; - config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0; + config.dp2_enabled = link_is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0; config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? 
1 : 0; config.dpms_off = dpms_off; @@ -4239,7 +4249,7 @@ void core_link_enable_stream( struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - if (is_dp_128b_132b_signal(pipe_ctx)) + if (link_is_dp_128b_132b_signal(pipe_ctx)) vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -4261,7 +4271,7 @@ void core_link_enable_stream( ASSERT(link_enc); if (!dc_is_virtual_signal(pipe_ctx->stream->signal) - && !is_dp_128b_132b_signal(pipe_ctx)) { + && !link_is_dp_128b_132b_signal(pipe_ctx)) { if (link_enc) link_enc->funcs->setup( link_enc, @@ -4271,7 +4281,7 @@ void core_link_enable_stream( pipe_ctx->stream->link->link_state_valid = true; if (pipe_ctx->stream_res.tg->funcs->set_out_mux) { - if (is_dp_128b_132b_signal(pipe_ctx)) + if (link_is_dp_128b_132b_signal(pipe_ctx)) otg_out_dest = OUT_MUX_HPO_DP; else otg_out_dest = OUT_MUX_DIO; @@ -4373,7 +4383,7 @@ void core_link_enable_stream( * from transmitter control. */ if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || - is_dp_128b_132b_signal(pipe_ctx))) + link_is_dp_128b_132b_signal(pipe_ctx))) if (link_enc) link_enc->funcs->setup( link_enc, @@ -4393,7 +4403,7 @@ void core_link_enable_stream( if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_link_allocate_mst_payload(pipe_ctx); else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && - is_dp_128b_132b_signal(pipe_ctx)) + link_is_dp_128b_132b_signal(pipe_ctx)) dc_link_update_sst_payload(pipe_ctx, true); dc->hwss.unblank_stream(pipe_ctx, @@ -4411,7 +4421,7 @@ void core_link_enable_stream( dc->hwss.enable_audio_stream(pipe_ctx); } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - if (is_dp_128b_132b_signal(pipe_ctx)) + if (link_is_dp_128b_132b_signal(pipe_ctx)) fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx); if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) @@ -4430,7 +4440,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) struct dc_link *link = stream->sink->link; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; - if (is_dp_128b_132b_signal(pipe_ctx)) + if (link_is_dp_128b_132b_signal(pipe_ctx)) vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -4463,7 +4473,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && - is_dp_128b_132b_signal(pipe_ctx)) + link_is_dp_128b_132b_signal(pipe_ctx)) dc_link_update_sst_payload(pipe_ctx, false); if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { @@ -4473,7 +4483,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) unsigned short masked_chip_caps = link->chip_caps & EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; //Need to inform that sink is going to use legacy HDMI mode. - dal_ddc_service_write_scdc_data( + write_scdc_data( link->ddc, 165000,//vbios only handles 165Mhz. false); @@ -4492,7 +4502,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) } if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && - !is_dp_128b_132b_signal(pipe_ctx)) { + !link_is_dp_128b_132b_signal(pipe_ctx)) { /* In DP1.x SST mode, our encoder will go to TPS1 * when link is on but stream is off. 
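
The repeated is_dp_128b_132b_signal() -> link_is_dp_128b_132b_signal() substitutions in the enable/disable stream hunks above all feed one decision: a DP stream carried with 128b/132b channel coding (the DP 2.x UHBR rates) is routed through the HPO stream encoder and its VPG, while 8b/10b streams stay on the legacy DIO path. A minimal standalone sketch of that selection, using local enum names purely for illustration rather than the driver's own types:

#include <stdbool.h>
#include <stdio.h>

enum sketch_encoding { ENC_8B_10B, ENC_128B_132B, ENC_UNKNOWN };
enum sketch_out_mux  { MUX_DIO, MUX_HPO_DP };

/* DP streams running 128b/132b use the HPO encoder; everything else uses DIO. */
static enum sketch_out_mux pick_out_mux(bool is_dp, enum sketch_encoding enc)
{
        if (is_dp && enc == ENC_128B_132B)
                return MUX_HPO_DP;
        return MUX_DIO;
}

int main(void)
{
        printf("8b/10b    -> %s\n",
               pick_out_mux(true, ENC_8B_10B) == MUX_HPO_DP ? "HPO" : "DIO");
        printf("128b/132b -> %s\n",
               pick_out_mux(true, ENC_128B_132B) == MUX_HPO_DP ? "HPO" : "DIO");
        return 0;
}
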
@@ -4512,7 +4522,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, false); } - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { if (pipe_ctx->stream_res.tg->funcs->set_out_mux) pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); } @@ -4531,51 +4541,6 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) dc->hwss.set_avmute(pipe_ctx, enable); } -/** - * dc_link_enable_hpd_filter: - * If enable is true, programs HPD filter on associated HPD line using - * delay_on_disconnect/delay_on_connect values dependent on - * link->connector_signal - * - * If enable is false, programs HPD filter on associated HPD line with no - * delays on connect or disconnect - * - * @link: pointer to the dc link - * @enable: boolean specifying whether to enable hbd - */ -void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) -{ - struct gpio *hpd; - - if (enable) { - link->is_hpd_filter_disabled = false; - program_hpd_filter(link); - } else { - link->is_hpd_filter_disabled = true; - /* Obtain HPD handle */ - hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); - - if (!hpd) - return; - - /* Setup HPD filtering */ - if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { - struct gpio_hpd_config config; - - config.delay_on_connect = 0; - config.delay_on_disconnect = 0; - - dal_irq_setup_hpd_filter(hpd, &config); - - dal_gpio_close(hpd); - } else { - ASSERT_CRITICAL(false); - } - /* Release HPD handle */ - dal_gpio_destroy_irq(&hpd); - } -} - void dc_link_set_drive_settings(struct dc *dc, struct link_training_settings *lt_settings, const struct dc_link *link) @@ -4632,7 +4597,7 @@ void dc_link_set_preferred_link_settings(struct dc *dc, if (link_stream->dpms_off) return; - if (decide_link_settings(link_stream, &store_settings)) + if (link_decide_link_settings(link_stream, &store_settings)) dp_retrain_link_dp_test(link, &store_settings, false); } @@ -4663,16 +4628,6 @@ void dc_link_set_preferred_training_settings(struct dc *dc, dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link); } -void dc_link_enable_hpd(const struct dc_link *link) -{ - dc_link_dp_enable_hpd(link); -} - -void dc_link_disable_hpd(const struct dc_link *link) -{ - dc_link_dp_disable_hpd(link); -} - void dc_link_set_test_pattern(struct dc_link *link, enum dp_test_pattern test_pattern, enum dp_test_pattern_color_space test_pattern_color_space, @@ -4697,7 +4652,7 @@ uint32_t dc_link_bandwidth_kbps( uint32_t total_data_bw_efficiency_x10000 = 0; uint32_t link_rate_per_lane_kbps = 0; - switch (dp_get_link_encoding_format(link_setting)) { + switch (link_dp_get_encoding_format(link_setting)) { case DP_8b_10b_ENCODING: /* For 8b/10b encoding: * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane. 
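
The dc_link_bandwidth_kbps() hunk above only touches the encoding-format query, but the arithmetic around it is worth spelling out: payload bandwidth is the per-lane line rate times the lane count, scaled by a channel-coding efficiency kept as a x10000 fixed-point value (8b/10b carries 8 data bits per 10 line bits, i.e. 80%; 128b/132b is roughly 9697 before further protocol overhead — the driver's exact constants and rate units may differ). A self-contained sketch of that calculation, with hypothetical names and the raw per-lane rate simplified to plain kbps:

#include <stdint.h>
#include <stdio.h>

/*
 * Effective DP main-link payload bandwidth in kbps.
 * per_lane_kbps       - raw line rate of one lane, in kbps
 * lane_count          - 1, 2 or 4
 * efficiency_x10000   - coding efficiency in 0.01% units
 *                       (8b/10b: 8000, 128b/132b: ~9697)
 */
static uint64_t dp_payload_kbps(uint64_t per_lane_kbps, uint32_t lane_count,
                                uint32_t efficiency_x10000)
{
        /* Divide before the final multiply, the same ordering the function's
         * closing return uses, so the intermediate stays within 64 bits. */
        return per_lane_kbps * lane_count / 10000 * efficiency_x10000;
}

int main(void)
{
        /* HBR3: 8.1 Gbps per lane, 4 lanes, 8b/10b -> 25.92 Gbps of payload */
        printf("%llu kbps\n",
               (unsigned long long)dp_payload_kbps(8100000, 4, 8000));
        return 0;
}
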
@@ -4726,57 +4681,6 @@ uint32_t dc_link_bandwidth_kbps( return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000; } -const struct dc_link_settings *dc_link_get_link_cap( - const struct dc_link *link) -{ - if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN && - link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) - return &link->preferred_link_setting; - return &link->verified_link_cap; -} - -void dc_link_overwrite_extended_receiver_cap( - struct dc_link *link) -{ - dp_overwrite_extended_receiver_cap(link); -} - -bool dc_link_is_fec_supported(const struct dc_link *link) -{ - /* TODO - use asic cap instead of link_enc->features - * we no longer know which link enc to use for this link before commit - */ - struct link_encoder *link_enc = NULL; - - link_enc = link_enc_cfg_get_link_enc(link); - ASSERT(link_enc); - - return (dc_is_dp_signal(link->connector_signal) && link_enc && - link_enc->features.fec_supported && - link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && - !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); -} - -bool dc_link_should_enable_fec(const struct dc_link *link) -{ - bool force_disable = false; - - if (link->fec_state == dc_link_fec_enabled) - force_disable = false; - else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && - link->local_sink && - link->local_sink->edid_caps.panel_patch.disable_fec) - force_disable = true; - else if (link->connector_signal == SIGNAL_TYPE_EDP - && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields. - dsc_support.DSC_SUPPORT == false - || link->panel_config.dsc.disable_dsc_edp - || !link->dc->caps.edp_dsc_support)) - force_disable = true; - - return !force_disable && dc_link_is_fec_supported(link); -} - uint32_t dc_bandwidth_in_kbps_from_timing( const struct dc_crtc_timing *timing) { @@ -4881,8 +4785,8 @@ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) for (i = 0; i < dc->caps.max_links; i++) { link = dc->links[i]; if (link->link_status.link_active && - dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING && - dp_get_link_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING) + link_dp_get_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING && + link_dp_get_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING) /* hpo dp link encoder is considered as recycled, when RX reports 128b/132b encoding capability * but current link doesn't use it. 
*/ @@ -4925,7 +4829,7 @@ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) if ((hpo_dp_recycle_map & (1 << i)) == 0) { link = dc->links[i]; if (link->type != dc_connection_none && - dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { + link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { if (available_hpo_dp_count > 0) available_hpo_dp_count--; else @@ -4939,7 +4843,7 @@ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) if ((hpo_dp_recycle_map & (1 << i)) != 0) { link = dc->links[i]; if (link->type != dc_connection_none && - dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { + link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { if (available_hpo_dp_count > 0) available_hpo_dp_count--; else diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index d74ffc89810f..6747e4b199de 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -27,737 +27,31 @@ #include "dm_helpers.h" #include "opp.h" #include "dsc.h" -#include "clk_mgr.h" #include "resource.h" #include "inc/core_types.h" #include "link_hwss.h" -#include "dc_link_ddc.h" +#include "link/link_ddc.h" #include "core_status.h" #include "dpcd_defs.h" + #include "dc_dmub_srv.h" #include "dce/dmub_hw_lock_mgr.h" -#include "inc/dc_link_dpia.h" +#include "link/link_dp_dpia.h" #include "inc/link_enc_cfg.h" +#include "clk_mgr.h" #include "link/link_dp_trace.h" - -/*Travis*/ -static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT"; -/*Nutmeg*/ -static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA"; - +#include "link/link_dp_training.h" +#include "link/link_dp_training_fixed_vs_pe_retimer.h" +#include "link/link_dp_training_dpia.h" +#include "link/link_dp_training_auxless.h" +#include "link/link_dp_phy.h" +#include "link/link_dp_capability.h" #define DC_LOGGER \ link->ctx->logger -#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ - -#include "link_dpcd.h" - -#ifndef MAX -#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) -#endif -#ifndef MIN -#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) -#endif - - /* maximum pre emphasis level allowed for each voltage swing level*/ - static const enum dc_pre_emphasis - voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3, - PRE_EMPHASIS_LEVEL2, - PRE_EMPHASIS_LEVEL1, - PRE_EMPHASIS_DISABLED }; - -enum { - POST_LT_ADJ_REQ_LIMIT = 6, - POST_LT_ADJ_REQ_TIMEOUT = 200 -}; - -struct dp_lt_fallback_entry { - enum dc_lane_count lane_count; - enum dc_link_rate link_rate; -}; - -static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = { - /* This link training fallback array is ordered by - * link bandwidth from highest to lowest. - * DP specs makes it a normative policy to always - * choose the next highest link bandwidth during - * link training fallback. 
- */ - {LANE_COUNT_FOUR, LINK_RATE_UHBR20}, - {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5}, - {LANE_COUNT_TWO, LINK_RATE_UHBR20}, - {LANE_COUNT_FOUR, LINK_RATE_UHBR10}, - {LANE_COUNT_TWO, LINK_RATE_UHBR13_5}, - {LANE_COUNT_FOUR, LINK_RATE_HIGH3}, - {LANE_COUNT_ONE, LINK_RATE_UHBR20}, - {LANE_COUNT_TWO, LINK_RATE_UHBR10}, - {LANE_COUNT_FOUR, LINK_RATE_HIGH2}, - {LANE_COUNT_ONE, LINK_RATE_UHBR13_5}, - {LANE_COUNT_TWO, LINK_RATE_HIGH3}, - {LANE_COUNT_ONE, LINK_RATE_UHBR10}, - {LANE_COUNT_TWO, LINK_RATE_HIGH2}, - {LANE_COUNT_FOUR, LINK_RATE_HIGH}, - {LANE_COUNT_ONE, LINK_RATE_HIGH3}, - {LANE_COUNT_FOUR, LINK_RATE_LOW}, - {LANE_COUNT_ONE, LINK_RATE_HIGH2}, - {LANE_COUNT_TWO, LINK_RATE_HIGH}, - {LANE_COUNT_TWO, LINK_RATE_LOW}, - {LANE_COUNT_ONE, LINK_RATE_HIGH}, - {LANE_COUNT_ONE, LINK_RATE_LOW}, -}; - -static const struct dc_link_settings fail_safe_link_settings = { - .lane_count = LANE_COUNT_ONE, - .link_rate = LINK_RATE_LOW, - .link_spread = LINK_SPREAD_DISABLED, -}; - -static bool decide_fallback_link_setting( - struct dc_link *link, - struct dc_link_settings *max, - struct dc_link_settings *cur, - enum link_training_result training_result); -static void maximize_lane_settings(const struct link_training_settings *lt_settings, - struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); -static void override_lane_settings(const struct link_training_settings *lt_settings, - struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); - -static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link, - const struct dc_link_settings *link_settings) -{ - union training_aux_rd_interval training_rd_interval; - uint32_t wait_in_micro_secs = 100; - - memset(&training_rd_interval, 0, sizeof(training_rd_interval)); - if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING && - link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { - core_link_read_dpcd( - link, - DP_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); - if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) - wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; - } - - return wait_in_micro_secs; -} - -static uint32_t get_eq_training_aux_rd_interval( - struct dc_link *link, - const struct dc_link_settings *link_settings) -{ - union training_aux_rd_interval training_rd_interval; - - memset(&training_rd_interval, 0, sizeof(training_rd_interval)); - if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { - core_link_read_dpcd( - link, - DP_128b_132b_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); - } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING && - link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { - core_link_read_dpcd( - link, - DP_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); - } - - switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) { - case 0: return 400; - case 1: return 4000; - case 2: return 8000; - case 3: return 12000; - case 4: return 16000; - case 5: return 32000; - case 6: return 64000; - default: return 400; - } -} - -void dp_wait_for_training_aux_rd_interval( - struct dc_link *link, - uint32_t wait_in_micro_secs) -{ - if (wait_in_micro_secs > 1000) - msleep(wait_in_micro_secs/1000); - else - udelay(wait_in_micro_secs); - - DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", - __func__, - wait_in_micro_secs); -} - -enum dpcd_training_patterns - dc_dp_training_pattern_to_dpcd_training_pattern( - struct dc_link *link, - 
enum dc_dp_training_pattern pattern) -{ - enum dpcd_training_patterns dpcd_tr_pattern = - DPCD_TRAINING_PATTERN_VIDEOIDLE; - - switch (pattern) { - case DP_TRAINING_PATTERN_SEQUENCE_1: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1; - break; - case DP_TRAINING_PATTERN_SEQUENCE_2: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2; - break; - case DP_TRAINING_PATTERN_SEQUENCE_3: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3; - break; - case DP_TRAINING_PATTERN_SEQUENCE_4: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; - break; - case DP_128b_132b_TPS1: - dpcd_tr_pattern = DPCD_128b_132b_TPS1; - break; - case DP_128b_132b_TPS2: - dpcd_tr_pattern = DPCD_128b_132b_TPS2; - break; - case DP_128b_132b_TPS2_CDS: - dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS; - break; - case DP_TRAINING_PATTERN_VIDEOIDLE: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; - break; - default: - ASSERT(0); - DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", - __func__, pattern); - break; - } - - return dpcd_tr_pattern; -} - -static void dpcd_set_training_pattern( - struct dc_link *link, - enum dc_dp_training_pattern training_pattern) -{ - union dpcd_training_pattern dpcd_pattern = {0}; - - dpcd_pattern.v1_4.TRAINING_PATTERN_SET = - dc_dp_training_pattern_to_dpcd_training_pattern( - link, training_pattern); - - core_link_write_dpcd( - link, - DP_TRAINING_PATTERN_SET, - &dpcd_pattern.raw, - 1); - - DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n", - __func__, - DP_TRAINING_PATTERN_SET, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); -} - -static enum dc_dp_training_pattern decide_cr_training_pattern( - const struct dc_link_settings *link_settings) -{ - switch (dp_get_link_encoding_format(link_settings)) { - case DP_8b_10b_ENCODING: - default: - return DP_TRAINING_PATTERN_SEQUENCE_1; - case DP_128b_132b_ENCODING: - return DP_128b_132b_TPS1; - } -} - -static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, - const struct dc_link_settings *link_settings) -{ - struct link_encoder *link_enc; - struct encoder_feature_support *enc_caps; - struct dpcd_caps *rx_caps = &link->dpcd_caps; - enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; - - link_enc = link_enc_cfg_get_link_enc(link); - ASSERT(link_enc); - enc_caps = &link_enc->features; - - switch (dp_get_link_encoding_format(link_settings)) { - case DP_8b_10b_ENCODING: - if (enc_caps->flags.bits.IS_TPS4_CAPABLE && - rx_caps->max_down_spread.bits.TPS4_SUPPORTED) - pattern = DP_TRAINING_PATTERN_SEQUENCE_4; - else if (enc_caps->flags.bits.IS_TPS3_CAPABLE && - rx_caps->max_ln_count.bits.TPS3_SUPPORTED) - pattern = DP_TRAINING_PATTERN_SEQUENCE_3; - else - pattern = DP_TRAINING_PATTERN_SEQUENCE_2; - break; - case DP_128b_132b_ENCODING: - pattern = DP_128b_132b_TPS2; - break; - default: - pattern = DP_TRAINING_PATTERN_SEQUENCE_2; - break; - } - return pattern; -} - -static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) -{ - uint8_t link_rate = 0; - enum dp_link_encoding encoding = dp_get_link_encoding_format(link_settings); - - if (encoding == DP_128b_132b_ENCODING) - switch (link_settings->link_rate) { - case LINK_RATE_UHBR10: - link_rate = 0x1; - break; - case LINK_RATE_UHBR20: - link_rate = 0x2; - break; - case LINK_RATE_UHBR13_5: - link_rate = 0x4; - break; - default: - link_rate = 0; - break; - } - else if (encoding == DP_8b_10b_ENCODING) - link_rate = (uint8_t) link_settings->link_rate; - else - link_rate = 0; - - return link_rate; -} - -static void dp_fixed_vs_pe_read_lane_adjust( - struct dc_link *link, - 
union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) -{ - const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; - const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; - const uint8_t offset = dp_convert_to_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - uint32_t vendor_lttpr_write_address = 0xF004F; - uint32_t vendor_lttpr_read_address = 0xF0053; - uint8_t dprx_vs = 0; - uint8_t dprx_pe = 0; - uint8_t lane; - - if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - vendor_lttpr_read_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - } - - /* W/A to read lane settings requested by DPRX */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_read_dpcd( - link, - vendor_lttpr_read_address, - &dprx_vs, - 1); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); - core_link_read_dpcd( - link, - vendor_lttpr_read_address, - &dprx_pe, - 1); - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3; - dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3; - } -} - -static void dp_fixed_vs_pe_set_retimer_lane_settings( - struct dc_link *link, - const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], - uint8_t lane_count) -{ - const uint8_t offset = dp_convert_to_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; - uint32_t vendor_lttpr_write_address = 0xF004F; - uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; - uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - uint8_t lane = 0; - - if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - } - - for (lane = 0; lane < lane_count; lane++) { - vendor_lttpr_write_data_vs[3] |= - dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); - vendor_lttpr_write_data_pe[3] |= - dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane); - } - - /* Force LTTPR to output desired VS and PE */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_reset[0], - sizeof(vendor_lttpr_write_data_reset)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); -} - -enum dc_status dpcd_set_link_settings( - struct dc_link *link, - const struct link_training_settings *lt_settings) -{ - uint8_t rate; - enum dc_status status; - - union down_spread_ctrl downspread = {0}; - union lane_count_set lane_count_set = {0}; - - downspread.raw = (uint8_t) - (lt_settings->link_settings.link_spread); - - lane_count_set.bits.LANE_COUNT_SET = - lt_settings->link_settings.lane_count; - - lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - - - if (link->ep_type == DISPLAY_ENDPOINT_PHY && - lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = - link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; - } - - status = 
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, - &downspread.raw, sizeof(downspread)); - - status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, - &lane_count_set.raw, 1); - - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && - lt_settings->link_settings.use_link_rate_set == true) { - rate = 0; - /* WA for some MUX chips that will power down with eDP and lose supported - * link rate set for eDP 1.4. Source reads DPCD 0x010 again to ensure - * MUX chip gets link rate set back before link training. - */ - if (link->connector_signal == SIGNAL_TYPE_EDP) { - uint8_t supported_link_rates[16]; - - core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, - supported_link_rates, sizeof(supported_link_rates)); - } - status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); - status = core_link_write_dpcd(link, DP_LINK_RATE_SET, - <_settings->link_settings.link_rate_set, 1); - } else { - rate = get_dpcd_link_rate(<_settings->link_settings); - - status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); - } - - if (rate) { - DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", - __func__, - DP_LINK_BW_SET, - lt_settings->link_settings.link_rate, - DP_LANE_COUNT_SET, - lt_settings->link_settings.lane_count, - lt_settings->enhanced_framing, - DP_DOWNSPREAD_CTRL, - lt_settings->link_settings.link_spread); - } else { - DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n", - __func__, - DP_LINK_RATE_SET, - lt_settings->link_settings.link_rate_set, - DP_LANE_COUNT_SET, - lt_settings->link_settings.lane_count, - lt_settings->enhanced_framing, - DP_DOWNSPREAD_CTRL, - lt_settings->link_settings.link_spread); - } - - return status; -} - -uint8_t dc_dp_initialize_scrambling_data_symbols( - struct dc_link *link, - enum dc_dp_training_pattern pattern) -{ - uint8_t disable_scrabled_data_symbols = 0; - - switch (pattern) { - case DP_TRAINING_PATTERN_SEQUENCE_1: - case DP_TRAINING_PATTERN_SEQUENCE_2: - case DP_TRAINING_PATTERN_SEQUENCE_3: - disable_scrabled_data_symbols = 1; - break; - case DP_TRAINING_PATTERN_SEQUENCE_4: - case DP_128b_132b_TPS1: - case DP_128b_132b_TPS2: - disable_scrabled_data_symbols = 0; - break; - default: - ASSERT(0); - DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", - __func__, pattern); - break; - } - return disable_scrabled_data_symbols; -} - -static inline bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset) -{ - return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0); -} - -static void dpcd_set_lt_pattern_and_lane_settings( - struct dc_link *link, - const struct link_training_settings *lt_settings, - enum dc_dp_training_pattern pattern, - uint32_t offset) -{ - uint32_t dpcd_base_lt_offset; - - uint8_t dpcd_lt_buffer[5] = {0}; - union dpcd_training_pattern dpcd_pattern = {0}; - uint32_t size_in_bytes; - bool edp_workaround = false; /* TODO link_prop.INTERNAL */ - dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; - - if (is_repeater(lt_settings, offset)) - dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - - /***************************************************************** - * DpcdAddress_TrainingPatternSet - *****************************************************************/ - dpcd_pattern.v1_4.TRAINING_PATTERN_SET = - dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); - - dpcd_pattern.v1_4.SCRAMBLING_DISABLE = - 
dc_dp_initialize_scrambling_data_symbols(link, pattern); - - dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] - = dpcd_pattern.raw; - - if (is_repeater(lt_settings, offset)) { - DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", - __func__, - offset, - dpcd_base_lt_offset, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); - } else { - DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", - __func__, - dpcd_base_lt_offset, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); - } - - /* concatenate everything into one buffer*/ - size_in_bytes = lt_settings->link_settings.lane_count * - sizeof(lt_settings->dpcd_lane_settings[0]); - - // 0x00103 - 0x00102 - memmove( - &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], - lt_settings->dpcd_lane_settings, - size_in_bytes); - - if (is_repeater(lt_settings, offset)) { - if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_128b_132b_ENCODING) - DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" - " 0x%X TX_FFE_PRESET_VALUE = %x\n", - __func__, - offset, - dpcd_base_lt_offset, - lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); - else if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_8b_10b_ENCODING) - DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" - " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - offset, - dpcd_base_lt_offset, - lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, - lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, - lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, - lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); - } else { - if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_128b_132b_ENCODING) - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", - __func__, - dpcd_base_lt_offset, - lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); - else if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_8b_10b_ENCODING) - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - dpcd_base_lt_offset, - lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, - lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, - lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, - lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); - } - if (edp_workaround) { - /* for eDP write in 2 parts because the 5-byte burst is - * causing issues on some eDP panels (EPR#366724) - */ - core_link_write_dpcd( - link, - DP_TRAINING_PATTERN_SET, - &dpcd_pattern.raw, - sizeof(dpcd_pattern.raw)); - - core_link_write_dpcd( - link, - DP_TRAINING_LANE0_SET, - (uint8_t *)(lt_settings->dpcd_lane_settings), - size_in_bytes); - - } else if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_128b_132b_ENCODING) { - core_link_write_dpcd( - link, - dpcd_base_lt_offset, - dpcd_lt_buffer, - sizeof(dpcd_lt_buffer)); - } else - /* write it all in (1 + number-of-lanes)-byte burst*/ - core_link_write_dpcd( - link, - dpcd_base_lt_offset, - dpcd_lt_buffer, - size_in_bytes + sizeof(dpcd_pattern.raw)); -} - -bool dp_is_cr_done(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status) -{ - uint32_t lane; - /*LANEx_CR_DONE bits All 1's?*/ - for (lane = 0; lane < (uint32_t)(ln_count); lane++) { - if (!dpcd_lane_status[lane].bits.CR_DONE_0) - return false; - } - return true; -} - -bool dp_is_ch_eq_done(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status) 
-{ - bool done = true; - uint32_t lane; - for (lane = 0; lane < (uint32_t)(ln_count); lane++) - if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0) - done = false; - return done; -} - -bool dp_is_symbol_locked(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status) -{ - bool locked = true; - uint32_t lane; - for (lane = 0; lane < (uint32_t)(ln_count); lane++) - if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0) - locked = false; - return locked; -} - -bool dp_is_interlane_aligned(union lane_align_status_updated align_status) -{ - return align_status.bits.INTERLANE_ALIGN_DONE == 1; -} - -void dp_hw_to_dpcd_lane_settings( - const struct link_training_settings *lt_settings, - const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], - union dpcd_training_lane dpcd_lane_settings[]) -{ - uint8_t lane = 0; - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_8b_10b_ENCODING) { - dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = - (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING); - dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET = - (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS); - dpcd_lane_settings[lane].bits.MAX_SWING_REACHED = - (hw_lane_settings[lane].VOLTAGE_SWING == - VOLTAGE_SWING_MAX_LEVEL ? 1 : 0); - dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED = - (hw_lane_settings[lane].PRE_EMPHASIS == - PRE_EMPHASIS_MAX_LEVEL ? 1 : 0); - } - else if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_128b_132b_ENCODING) { - dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE = - hw_lane_settings[lane].FFE_PRESET.settings.level; - } - } -} - -void dp_decide_lane_settings( - const struct link_training_settings *lt_settings, - const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], - struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], - union dpcd_training_lane dpcd_lane_settings[]) -{ - uint32_t lane; - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_8b_10b_ENCODING) { - hw_lane_settings[lane].VOLTAGE_SWING = - (enum dc_voltage_swing)(ln_adjust[lane].bits. - VOLTAGE_SWING_LANE); - hw_lane_settings[lane].PRE_EMPHASIS = - (enum dc_pre_emphasis)(ln_adjust[lane].bits. - PRE_EMPHASIS_LANE); - } - else if (dp_get_link_encoding_format(<_settings->link_settings) == - DP_128b_132b_ENCODING) { - hw_lane_settings[lane].FFE_PRESET.raw = - ln_adjust[lane].tx_ffe.PRESET_VALUE; - } - } - dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); - - if (lt_settings->disallow_per_lane_settings) { - /* we find the maximum of the requested settings across all lanes*/ - /* and set this maximum for all lanes*/ - maximize_lane_settings(lt_settings, hw_lane_settings); - override_lane_settings(lt_settings, hw_lane_settings); - - if (lt_settings->always_match_dpcd_with_hw_lane_settings) - dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); - } -} +#define DC_TRACE_LEVEL_MESSAGE(...) 
/* do nothing */ +#include "link/link_dpcd.h" static uint8_t get_nibble_at_index(const uint8_t *buf, uint32_t index) @@ -773,2368 +67,7 @@ static uint8_t get_nibble_at_index(const uint8_t *buf, return nibble; } -static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing( - enum dc_voltage_swing voltage) -{ - enum dc_pre_emphasis pre_emphasis; - pre_emphasis = PRE_EMPHASIS_MAX_LEVEL; - - if (voltage <= VOLTAGE_SWING_MAX_LEVEL) - pre_emphasis = voltage_swing_to_pre_emphasis[voltage]; - - return pre_emphasis; - -} - -static void maximize_lane_settings(const struct link_training_settings *lt_settings, - struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) -{ - uint32_t lane; - struct dc_lane_settings max_requested; - - max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING; - max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS; - max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET; - - /* Determine what the maximum of the requested settings are*/ - for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) { - if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING) - max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING; - - if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS) - max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS; - if (lane_settings[lane].FFE_PRESET.settings.level > - max_requested.FFE_PRESET.settings.level) - max_requested.FFE_PRESET.settings.level = - lane_settings[lane].FFE_PRESET.settings.level; - } - - /* make sure the requested settings are - * not higher than maximum settings*/ - if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL) - max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL; - - if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL) - max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL; - if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL) - max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL; - - /* make sure the pre-emphasis matches the voltage swing*/ - if (max_requested.PRE_EMPHASIS > - get_max_pre_emphasis_for_voltage_swing( - max_requested.VOLTAGE_SWING)) - max_requested.PRE_EMPHASIS = - get_max_pre_emphasis_for_voltage_swing( - max_requested.VOLTAGE_SWING); - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING; - lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS; - lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET; - } -} - -static void override_lane_settings(const struct link_training_settings *lt_settings, - struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) -{ - uint32_t lane; - - if (lt_settings->voltage_swing == NULL && - lt_settings->pre_emphasis == NULL && - lt_settings->ffe_preset == NULL && - lt_settings->post_cursor2 == NULL) - - return; - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - if (lt_settings->voltage_swing) - lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; - if (lt_settings->pre_emphasis) - lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis; - if (lt_settings->post_cursor2) - lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2; - if (lt_settings->ffe_preset) - lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset; - } -} - -enum dc_status dp_get_lane_status_and_lane_adjust( - struct dc_link *link, - const struct link_training_settings *link_training_setting, - union lane_status ln_status[LANE_COUNT_DP_MAX], - union lane_align_status_updated *ln_align, - union 
lane_adjust ln_adjust[LANE_COUNT_DP_MAX], - uint32_t offset) -{ - unsigned int lane01_status_address = DP_LANE0_1_STATUS; - uint8_t lane_adjust_offset = 4; - unsigned int lane01_adjust_address; - uint8_t dpcd_buf[6] = {0}; - uint32_t lane; - enum dc_status status; - - if (is_repeater(link_training_setting, offset)) { - lane01_status_address = - DP_LANE0_1_STATUS_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - lane_adjust_offset = 3; - } - - status = core_link_read_dpcd( - link, - lane01_status_address, - (uint8_t *)(dpcd_buf), - sizeof(dpcd_buf)); - - if (status != DC_OK) { - DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X," - " keep current lane status and lane adjust unchanged", - __func__, - lane01_status_address); - return status; - } - - for (lane = 0; lane < - (uint32_t)(link_training_setting->link_settings.lane_count); - lane++) { - - ln_status[lane].raw = - get_nibble_at_index(&dpcd_buf[0], lane); - ln_adjust[lane].raw = - get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); - } - - ln_align->raw = dpcd_buf[2]; - - if (is_repeater(link_training_setting, offset)) { - DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" - " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", - __func__, - offset, - lane01_status_address, dpcd_buf[0], - lane01_status_address + 1, dpcd_buf[1]); - - lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - - DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" - " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", - __func__, - offset, - lane01_adjust_address, - dpcd_buf[lane_adjust_offset], - lane01_adjust_address + 1, - dpcd_buf[lane_adjust_offset + 1]); - } else { - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", - __func__, - lane01_status_address, dpcd_buf[0], - lane01_status_address + 1, dpcd_buf[1]); - - lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; - - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", - __func__, - lane01_adjust_address, - dpcd_buf[lane_adjust_offset], - lane01_adjust_address + 1, - dpcd_buf[lane_adjust_offset + 1]); - } - - return status; -} - -static enum dc_status dpcd_128b_132b_set_lane_settings( - struct dc_link *link, - const struct link_training_settings *link_training_setting) -{ - enum dc_status status = core_link_write_dpcd(link, - DP_TRAINING_LANE0_SET, - (uint8_t *)(link_training_setting->dpcd_lane_settings), - sizeof(link_training_setting->dpcd_lane_settings)); - - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", - __func__, - DP_TRAINING_LANE0_SET, - link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); - return status; -} - - -enum dc_status dpcd_set_lane_settings( - struct dc_link *link, - const struct link_training_settings *link_training_setting, - uint32_t offset) -{ - unsigned int lane0_set_address; - enum dc_status status; - - lane0_set_address = DP_TRAINING_LANE0_SET; - - if (is_repeater(link_training_setting, offset)) - lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - - status = core_link_write_dpcd(link, - lane0_set_address, - (uint8_t *)(link_training_setting->dpcd_lane_settings), - link_training_setting->link_settings.lane_count); - - if (is_repeater(link_training_setting, offset)) { - DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" - " 0x%X VS set = %x PE set 
= %x max VS Reached = %x max PE Reached = %x\n", - __func__, - offset, - lane0_set_address, - link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, - link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, - link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, - link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); - - } else { - DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - lane0_set_address, - link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, - link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, - link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, - link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); - } - - return status; -} - -bool dp_is_max_vs_reached( - const struct link_training_settings *lt_settings) -{ - uint32_t lane; - for (lane = 0; lane < - (uint32_t)(lt_settings->link_settings.lane_count); - lane++) { - if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET - == VOLTAGE_SWING_MAX_LEVEL) - return true; - } - return false; - -} - -static bool perform_post_lt_adj_req_sequence( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - enum dc_lane_count lane_count = - lt_settings->link_settings.lane_count; - - uint32_t adj_req_count; - uint32_t adj_req_timer; - bool req_drv_setting_changed; - uint32_t lane; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; - union lane_align_status_updated dpcd_lane_status_updated = {0}; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - - req_drv_setting_changed = false; - for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT; - adj_req_count++) { - - req_drv_setting_changed = false; - - for (adj_req_timer = 0; - adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT; - adj_req_timer++) { - - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - DPRX); - - if (dpcd_lane_status_updated.bits. 
- POST_LT_ADJ_REQ_IN_PROGRESS == 0) - return true; - - if (!dp_is_cr_done(lane_count, dpcd_lane_status)) - return false; - - if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) || - !dp_is_symbol_locked(lane_count, dpcd_lane_status) || - !dp_is_interlane_aligned(dpcd_lane_status_updated)) - return false; - - for (lane = 0; lane < (uint32_t)(lane_count); lane++) { - - if (lt_settings-> - dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET != - dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE || - lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET != - dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) { - - req_drv_setting_changed = true; - break; - } - } - - if (req_drv_setting_changed) { - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - - dc_link_dp_set_drive_settings(link, - link_res, - lt_settings); - break; - } - - msleep(1); - } - - if (!req_drv_setting_changed) { - DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n", - __func__); - - ASSERT(0); - return true; - } - } - DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n", - __func__); - - ASSERT(0); - return true; - -} - -/* Only used for channel equalization */ -uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval) -{ - unsigned int aux_rd_interval_us = 400; - - switch (dpcd_aux_read_interval) { - case 0x01: - aux_rd_interval_us = 4000; - break; - case 0x02: - aux_rd_interval_us = 8000; - break; - case 0x03: - aux_rd_interval_us = 12000; - break; - case 0x04: - aux_rd_interval_us = 16000; - break; - case 0x05: - aux_rd_interval_us = 32000; - break; - case 0x06: - aux_rd_interval_us = 64000; - break; - default: - break; - } - - return aux_rd_interval_us; -} - -enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status) -{ - enum link_training_result result = LINK_TRAINING_SUCCESS; - - if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0) - result = LINK_TRAINING_CR_FAIL_LANE0; - else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0) - result = LINK_TRAINING_CR_FAIL_LANE1; - else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0) - result = LINK_TRAINING_CR_FAIL_LANE23; - else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0) - result = LINK_TRAINING_CR_FAIL_LANE23; - return result; -} - -static enum link_training_result perform_channel_equalization_sequence( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings, - uint32_t offset) -{ - enum dc_dp_training_pattern tr_pattern; - uint32_t retries_ch_eq; - uint32_t wait_time_microsec; - enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_align_status_updated dpcd_lane_status_updated = {0}; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - - /* Note: also check that TPS4 is a supported feature*/ - tr_pattern = lt_settings->pattern_for_eq; - - if (is_repeater(lt_settings, offset) && dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) - tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; - - dp_set_hw_training_pattern(link, link_res, tr_pattern, offset); - - for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; - retries_ch_eq++) { - - dp_set_hw_lane_settings(link, link_res, lt_settings, offset); - - /* 2. 
update DPCD*/ - if (!retries_ch_eq) - /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration - */ - - dpcd_set_lt_pattern_and_lane_settings( - link, - lt_settings, - tr_pattern, offset); - else - dpcd_set_lane_settings(link, lt_settings, offset); - - /* 3. wait for receiver to lock-on*/ - wait_time_microsec = lt_settings->eq_pattern_time; - - if (is_repeater(lt_settings, offset)) - wait_time_microsec = - dp_translate_training_aux_read_interval( - link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); - - dp_wait_for_training_aux_rd_interval( - link, - wait_time_microsec); - - /* 4. Read lane status and requested - * drive settings as set by the sink*/ - - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - offset); - - /* 5. check CR done*/ - if (!dp_is_cr_done(lane_count, dpcd_lane_status)) - return dpcd_lane_status[0].bits.CR_DONE_0 ? - LINK_TRAINING_EQ_FAIL_CR_PARTIAL : - LINK_TRAINING_EQ_FAIL_CR; - - /* 6. check CHEQ done*/ - if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && - dp_is_symbol_locked(lane_count, dpcd_lane_status) && - dp_is_interlane_aligned(dpcd_lane_status_updated)) - return LINK_TRAINING_SUCCESS; - - /* 7. update VS/PE/PC2 in lt_settings*/ - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - } - - return LINK_TRAINING_EQ_FAIL_EQ; - -} - -static void start_clock_recovery_pattern_early(struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings, - uint32_t offset) -{ - DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n", - __func__); - dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); - dp_set_hw_lane_settings(link, link_res, lt_settings, offset); - udelay(400); -} - -static enum link_training_result perform_clock_recovery_sequence( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings, - uint32_t offset) -{ - uint32_t retries_cr; - uint32_t retry_count; - uint32_t wait_time_microsec; - enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; - union lane_align_status_updated dpcd_lane_status_updated; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - - retries_cr = 0; - retry_count = 0; - - memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); - memset(&dpcd_lane_status_updated, '\0', - sizeof(dpcd_lane_status_updated)); - - if (!link->ctx->dc->work_arounds.lt_early_cr_pattern) - dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); - - /* najeeb - The synaptics MST hub can put the LT in - * infinite loop by switching the VS - */ - /* between level 0 and level 1 continuously, here - * we try for CR lock for LinkTrainingMaxCRRetry count*/ - while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && - (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { - - - /* 1. call HWSS to set lane settings*/ - dp_set_hw_lane_settings( - link, - link_res, - lt_settings, - offset); - - /* 2. update DPCD of the receiver*/ - if (!retry_count) - /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration.*/ - dpcd_set_lt_pattern_and_lane_settings( - link, - lt_settings, - lt_settings->pattern_for_cr, - offset); - else - dpcd_set_lane_settings( - link, - lt_settings, - offset); - - /* 3. 
wait receiver to lock-on*/ - wait_time_microsec = lt_settings->cr_pattern_time; - - dp_wait_for_training_aux_rd_interval( - link, - wait_time_microsec); - - /* 4. Read lane status and requested drive - * settings as set by the sink - */ - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - offset); - - /* 5. check CR done*/ - if (dp_is_cr_done(lane_count, dpcd_lane_status)) - return LINK_TRAINING_SUCCESS; - - /* 6. max VS reached*/ - if ((dp_get_link_encoding_format(<_settings->link_settings) == - DP_8b_10b_ENCODING) && - dp_is_max_vs_reached(lt_settings)) - break; - - /* 7. same lane settings*/ - /* Note: settings are the same for all lanes, - * so comparing first lane is sufficient*/ - if ((dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) && - lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == - dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) - retries_cr++; - else if ((dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) && - lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE == - dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE) - retries_cr++; - else - retries_cr = 0; - - /* 8. update VS/PE/PC2 in lt_settings*/ - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - retry_count++; - } - - if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { - ASSERT(0); - DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue", - __func__, - LINK_TRAINING_MAX_CR_RETRY); - - } - - return dp_get_cr_failure(lane_count, dpcd_lane_status); -} - -static inline enum link_training_result dp_transition_to_video_idle( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings, - enum link_training_result status) -{ - union lane_count_set lane_count_set = {0}; - - /* 4. mainlink output idle pattern*/ - dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); - - /* - * 5. post training adjust if required - * If the upstream DPTX and downstream DPRX both support TPS4, - * TPS4 must be used instead of POST_LT_ADJ_REQ. - */ - if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || - lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) { - /* delay 5ms after Main Link output idle pattern and then check - * DPCD 0202h. 
- */ - if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) { - msleep(5); - status = dp_check_link_loss_status(link, lt_settings); - } - return status; - } - - if (status == LINK_TRAINING_SUCCESS && - perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false) - status = LINK_TRAINING_LQA_FAIL; - - lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; - lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - - core_link_write_dpcd( - link, - DP_LANE_COUNT_SET, - &lane_count_set.raw, - sizeof(lane_count_set)); - - return status; -} - -enum link_training_result dp_check_link_loss_status( - struct dc_link *link, - const struct link_training_settings *link_training_setting) -{ - enum link_training_result status = LINK_TRAINING_SUCCESS; - union lane_status lane_status; - uint8_t dpcd_buf[6] = {0}; - uint32_t lane; - - core_link_read_dpcd( - link, - DP_SINK_COUNT, - (uint8_t *)(dpcd_buf), - sizeof(dpcd_buf)); - - /*parse lane status*/ - for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { - /* - * check lanes status - */ - lane_status.raw = get_nibble_at_index(&dpcd_buf[2], lane); - - if (!lane_status.bits.CHANNEL_EQ_DONE_0 || - !lane_status.bits.CR_DONE_0 || - !lane_status.bits.SYMBOL_LOCKED_0) { - /* if one of the channel equalization, clock - * recovery or symbol lock is dropped - * consider it as (link has been - * dropped) dp sink status has changed - */ - status = LINK_TRAINING_LINK_LOSS; - break; - } - } - - return status; -} - -static inline void decide_8b_10b_training_settings( - struct dc_link *link, - const struct dc_link_settings *link_setting, - struct link_training_settings *lt_settings) -{ - memset(lt_settings, '\0', sizeof(struct link_training_settings)); - - /* Initialize link settings */ - lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set; - lt_settings->link_settings.link_rate_set = link_setting->link_rate_set; - lt_settings->link_settings.link_rate = link_setting->link_rate; - lt_settings->link_settings.lane_count = link_setting->lane_count; - /* TODO hard coded to SS for now - * lt_settings.link_settings.link_spread = - * dal_display_path_is_ss_supported( - * path_mode->display_path) ? - * LINK_SPREAD_05_DOWNSPREAD_30KHZ : - * LINK_SPREAD_DISABLED; - */ - lt_settings->link_settings.link_spread = link->dp_ss_off ? 
- LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ; - lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting); - lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting); - lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting); - lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); - lt_settings->enhanced_framing = 1; - lt_settings->should_set_fec_ready = true; - lt_settings->disallow_per_lane_settings = true; - lt_settings->always_match_dpcd_with_hw_lane_settings = true; - lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link); - dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -} - -static inline void decide_128b_132b_training_settings(struct dc_link *link, - const struct dc_link_settings *link_settings, - struct link_training_settings *lt_settings) -{ - memset(lt_settings, 0, sizeof(*lt_settings)); - - lt_settings->link_settings = *link_settings; - /* TODO: should decide link spread when populating link_settings */ - lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED : - LINK_SPREAD_05_DOWNSPREAD_30KHZ; - - lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings); - lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings); - lt_settings->eq_pattern_time = 2500; - lt_settings->eq_wait_time_limit = 400000; - lt_settings->eq_loop_count_limit = 20; - lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS; - lt_settings->cds_pattern_time = 2500; - lt_settings->cds_wait_time_limit = (dp_convert_to_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000; - lt_settings->disallow_per_lane_settings = true; - lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link); - dp_hw_to_dpcd_lane_settings(lt_settings, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -} - -void dp_decide_training_settings( - struct dc_link *link, - const struct dc_link_settings *link_settings, - struct link_training_settings *lt_settings) -{ - if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) - decide_8b_10b_training_settings(link, link_settings, lt_settings); - else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) - decide_128b_132b_training_settings(link, link_settings, lt_settings); -} - -static void override_training_settings( - struct dc_link *link, - const struct dc_link_training_overrides *overrides, - struct link_training_settings *lt_settings) -{ - uint32_t lane; - - /* Override link spread */ - if (!link->dp_ss_off && overrides->downspread != NULL) - lt_settings->link_settings.link_spread = *overrides->downspread ? 
- LINK_SPREAD_05_DOWNSPREAD_30KHZ - : LINK_SPREAD_DISABLED; - - /* Override lane settings */ - if (overrides->voltage_swing != NULL) - lt_settings->voltage_swing = overrides->voltage_swing; - if (overrides->pre_emphasis != NULL) - lt_settings->pre_emphasis = overrides->pre_emphasis; - if (overrides->post_cursor2 != NULL) - lt_settings->post_cursor2 = overrides->post_cursor2; - if (overrides->ffe_preset != NULL) - lt_settings->ffe_preset = overrides->ffe_preset; - /* Override HW lane settings with BIOS forced values if present */ - if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && - lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) { - lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING; - lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS; - lt_settings->always_match_dpcd_with_hw_lane_settings = false; - } - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = - lt_settings->voltage_swing != NULL ? - *lt_settings->voltage_swing : - VOLTAGE_SWING_LEVEL0; - lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = - lt_settings->pre_emphasis != NULL ? - *lt_settings->pre_emphasis - : PRE_EMPHASIS_DISABLED; - lt_settings->hw_lane_settings[lane].POST_CURSOR2 = - lt_settings->post_cursor2 != NULL ? - *lt_settings->post_cursor2 - : POST_CURSOR2_DISABLED; - } - - if (lt_settings->always_match_dpcd_with_hw_lane_settings) - dp_hw_to_dpcd_lane_settings(lt_settings, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - - /* Initialize training timings */ - if (overrides->cr_pattern_time != NULL) - lt_settings->cr_pattern_time = *overrides->cr_pattern_time; - - if (overrides->eq_pattern_time != NULL) - lt_settings->eq_pattern_time = *overrides->eq_pattern_time; - - if (overrides->pattern_for_cr != NULL) - lt_settings->pattern_for_cr = *overrides->pattern_for_cr; - if (overrides->pattern_for_eq != NULL) - lt_settings->pattern_for_eq = *overrides->pattern_for_eq; - - if (overrides->enhanced_framing != NULL) - lt_settings->enhanced_framing = *overrides->enhanced_framing; - - if (link->preferred_training_settings.fec_enable != NULL) - lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable; - - #if defined(CONFIG_DRM_AMD_DC_DCN) - /* Check DP tunnel LTTPR mode debug option. 
*/ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr) - lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR; - -#endif - dp_get_lttpr_mode_override(link, <_settings->lttpr_mode); - -} - -uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count) -{ - switch (lttpr_repeater_count) { - case 0x80: // 1 lttpr repeater - return 1; - case 0x40: // 2 lttpr repeaters - return 2; - case 0x20: // 3 lttpr repeaters - return 3; - case 0x10: // 4 lttpr repeaters - return 4; - case 0x08: // 5 lttpr repeaters - return 5; - case 0x04: // 6 lttpr repeaters - return 6; - case 0x02: // 7 lttpr repeaters - return 7; - case 0x01: // 8 lttpr repeaters - return 8; - default: - break; - } - return 0; // invalid value -} - -static enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) -{ - uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; - - DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); - return core_link_write_dpcd(link, - DP_PHY_REPEATER_MODE, - (uint8_t *)&repeater_mode, - sizeof(repeater_mode)); -} - -static enum dc_status configure_lttpr_mode_non_transparent( - struct dc_link *link, - const struct link_training_settings *lt_settings) -{ - /* aux timeout is already set to extended */ - /* RESET/SET lttpr mode to enable non transparent mode */ - uint8_t repeater_cnt; - uint32_t aux_interval_address; - uint8_t repeater_id; - enum dc_status result = DC_ERROR_UNEXPECTED; - uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; - - enum dp_link_encoding encoding = dp_get_link_encoding_format(<_settings->link_settings); - - if (encoding == DP_8b_10b_ENCODING) { - DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); - result = core_link_write_dpcd(link, - DP_PHY_REPEATER_MODE, - (uint8_t *)&repeater_mode, - sizeof(repeater_mode)); - - } - - if (result == DC_OK) { - link->dpcd_caps.lttpr_caps.mode = repeater_mode; - } - - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - - DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); - - repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; - result = core_link_write_dpcd(link, - DP_PHY_REPEATER_MODE, - (uint8_t *)&repeater_mode, - sizeof(repeater_mode)); - - if (result == DC_OK) { - link->dpcd_caps.lttpr_caps.mode = repeater_mode; - } - - if (encoding == DP_8b_10b_ENCODING) { - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - - /* Driver does not need to train the first hop. Skip DPCD read and clear - * AUX_RD_INTERVAL for DPTX-to-DPIA hop. 
- */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) - link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0; - - for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { - aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); - core_link_read_dpcd( - link, - aux_interval_address, - (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], - sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); - link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; - } - } - } - - return result; -} - -static void repeater_training_done(struct dc_link *link, uint32_t offset) -{ - union dpcd_training_pattern dpcd_pattern = {0}; - - const uint32_t dpcd_base_lt_offset = - DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - /* Set training not in progress*/ - dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; - - core_link_write_dpcd( - link, - dpcd_base_lt_offset, - &dpcd_pattern.raw, - 1); - - DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n", - __func__, - offset, - dpcd_base_lt_offset, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); -} - -static void print_status_message( - struct dc_link *link, - const struct link_training_settings *lt_settings, - enum link_training_result status) -{ - char *link_rate = "Unknown"; - char *lt_result = "Unknown"; - char *lt_spread = "Disabled"; - - switch (lt_settings->link_settings.link_rate) { - case LINK_RATE_LOW: - link_rate = "RBR"; - break; - case LINK_RATE_RATE_2: - link_rate = "R2"; - break; - case LINK_RATE_RATE_3: - link_rate = "R3"; - break; - case LINK_RATE_HIGH: - link_rate = "HBR"; - break; - case LINK_RATE_RBR2: - link_rate = "RBR2"; - break; - case LINK_RATE_RATE_6: - link_rate = "R6"; - break; - case LINK_RATE_HIGH2: - link_rate = "HBR2"; - break; - case LINK_RATE_HIGH3: - link_rate = "HBR3"; - break; - case LINK_RATE_UHBR10: - link_rate = "UHBR10"; - break; - case LINK_RATE_UHBR13_5: - link_rate = "UHBR13.5"; - break; - case LINK_RATE_UHBR20: - link_rate = "UHBR20"; - break; - default: - break; - } - - switch (status) { - case LINK_TRAINING_SUCCESS: - lt_result = "pass"; - break; - case LINK_TRAINING_CR_FAIL_LANE0: - lt_result = "CR failed lane0"; - break; - case LINK_TRAINING_CR_FAIL_LANE1: - lt_result = "CR failed lane1"; - break; - case LINK_TRAINING_CR_FAIL_LANE23: - lt_result = "CR failed lane23"; - break; - case LINK_TRAINING_EQ_FAIL_CR: - lt_result = "CR failed in EQ"; - break; - case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: - lt_result = "CR failed in EQ partially"; - break; - case LINK_TRAINING_EQ_FAIL_EQ: - lt_result = "EQ failed"; - break; - case LINK_TRAINING_LQA_FAIL: - lt_result = "LQA failed"; - break; - case LINK_TRAINING_LINK_LOSS: - lt_result = "Link loss"; - break; - case DP_128b_132b_LT_FAILED: - lt_result = "LT_FAILED received"; - break; - case DP_128b_132b_MAX_LOOP_COUNT_REACHED: - lt_result = "max loop count reached"; - break; - case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT: - lt_result = "channel EQ timeout"; - break; - case DP_128b_132b_CDS_DONE_TIMEOUT: - lt_result = "CDS timeout"; - break; - default: - break; - } - - switch (lt_settings->link_settings.link_spread) { - case LINK_SPREAD_DISABLED: - lt_spread = "Disabled"; - break; - case LINK_SPREAD_05_DOWNSPREAD_30KHZ: - lt_spread = "0.5% 30KHz"; - break; - case LINK_SPREAD_05_DOWNSPREAD_33KHZ: - lt_spread = "0.5% 33KHz"; - break; - default: - break; - } - - 
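/*
 * Illustrative sketch (editor's annotation, not part of this commit): the
 * dp_convert_to_count() helper removed above decodes the LTTPR
 * PHY_REPEATER_CNT field, which is one-hot encoded (0x80 = 1 repeater,
 * 0x40 = 2, ... 0x01 = 8 repeaters). Assuming only that encoding, the same
 * mapping can be derived from the bit position; the helper name below is
 * hypothetical and any value that is not exactly one of the eight valid
 * codes is rejected, matching the driver's "return 0" default case.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t lttpr_count_from_phy_repeater_cnt(uint8_t raw)
{
	uint8_t count = 1;

	/* exactly one bit must be set (0x01..0x80), otherwise invalid */
	if (raw == 0 || (raw & (raw - 1)) != 0)
		return 0;

	/* walk the set bit up to bit 7: 0x80 -> 1, 0x40 -> 2, ..., 0x01 -> 8 */
	while (!(raw & 0x80)) {
		raw <<= 1;
		count++;
	}
	return count;
}

int main(void)
{
	/* expected output: 1 6 0 (0x03 is not a valid one-hot value) */
	printf("%u %u %u\n",
	       lttpr_count_from_phy_repeater_cnt(0x80),
	       lttpr_count_from_phy_repeater_cnt(0x04),
	       lttpr_count_from_phy_repeater_cnt(0x03));
	return 0;
}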
/* Connectivity log: link training */
-
-	/* TODO - DP2.0 Log: add connectivity log for FFE PRESET */
-
-	CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s",
-			link_rate,
-			lt_settings->link_settings.lane_count,
-			lt_result,
-			lt_settings->hw_lane_settings[0].VOLTAGE_SWING,
-			lt_settings->hw_lane_settings[0].PRE_EMPHASIS,
-			lt_spread);
-}
-
-void dc_link_dp_set_drive_settings(
-	struct dc_link *link,
-	const struct link_resource *link_res,
-	struct link_training_settings *lt_settings)
-{
-	/* program ASIC PHY settings*/
-	dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
-
-	dp_hw_to_dpcd_lane_settings(lt_settings,
-			lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-
-	/* Notify DP sink the PHY settings from source */
-	dpcd_set_lane_settings(link, lt_settings, DPRX);
-}
-
-bool dc_link_dp_perform_link_training_skip_aux(
-	struct dc_link *link,
-	const struct link_resource *link_res,
-	const struct dc_link_settings *link_setting)
-{
-	struct link_training_settings lt_settings = {0};
-
-	dp_decide_training_settings(
-			link,
-			link_setting,
-			&lt_settings);
-	override_training_settings(
-			link,
-			&link->preferred_training_settings,
-			&lt_settings);
-
-	/* 1. Perform_clock_recovery_sequence. */
-
-	/* transmit training pattern for clock recovery */
-	dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX);
-
-	/* call HWSS to set lane settings*/
-	dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
-
-	/* wait receiver to lock-on*/
-	dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
-
-	/* 2. Perform_channel_equalization_sequence. */
-
-	/* transmit training pattern for channel equalization. */
-	dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX);
-
-	/* call HWSS to set lane settings*/
-	dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
-
-	/* wait receiver to lock-on. */
-	dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
-
-	/* 3. Perform_link_training_int. */
-
-	/* Mainlink output idle pattern. */
-	dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
-
-	print_status_message(link, &lt_settings, LINK_TRAINING_SUCCESS);
-
-	return true;
-}
-
-enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings)
-{
-	enum dc_status status = DC_OK;
-
-	if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT)
-		status = configure_lttpr_mode_transparent(link);
-
-	else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
-		status = configure_lttpr_mode_non_transparent(link, lt_settings);
-
-	return status;
-}
-
-static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding)
-{
-	uint8_t sink_status = 0;
-	uint8_t i;
-
-	/* clear training pattern set */
-	dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
-
-	if (encoding == DP_128b_132b_ENCODING) {
-		/* poll for intra-hop disable */
-		for (i = 0; i < 10; i++) {
-			if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
-					(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
-				break;
-			udelay(1000);
-		}
-	}
-}
-
-enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
-		struct link_training_settings *lt_settings)
-{
-	enum dp_link_encoding encoding =
-			dp_get_link_encoding_format(
-					&lt_settings->link_settings);
-	enum dc_status status;
-
-	status = core_link_write_dpcd(
-			link,
-			DP_MAIN_LINK_CHANNEL_CODING_SET,
-			(uint8_t *) &encoding,
-			1);
-	DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n",
-			__func__,
-			DP_MAIN_LINK_CHANNEL_CODING_SET,
-			encoding);
-
-	return status;
-}
-
-static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
-		uint32_t *interval_in_us)
-{
-	union dp_128b_132b_training_aux_rd_interval dpcd_interval;
-	uint32_t interval_unit = 0;
-
-	dpcd_interval.raw = 0;
-	core_link_read_dpcd(link, DP_128b_132b_TRAINING_AUX_RD_INTERVAL,
-			&dpcd_interval.raw, sizeof(dpcd_interval.raw));
-	interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */
-	/* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) *
-	 * INTERVAL_UNIT.
The maximum is 256 ms - */ - *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000; -} - -static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - uint8_t loop_count; - uint32_t aux_rd_interval = 0; - uint32_t wait_time = 0; - union lane_align_status_updated dpcd_lane_status_updated = {0}; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; - enum dc_status status = DC_OK; - enum link_training_result result = LINK_TRAINING_SUCCESS; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - - /* Transmit 128b/132b_TPS1 over Main-Link */ - dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX); - /* Set TRAINING_PATTERN_SET to 01h */ - dpcd_set_training_pattern(link, lt_settings->pattern_for_cr); - - /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */ - dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); - dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, - &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); - dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX); - - /* Set loop counter to start from 1 */ - loop_count = 1; - - /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */ - dpcd_set_lt_pattern_and_lane_settings(link, lt_settings, - lt_settings->pattern_for_eq, DPRX); - - /* poll for channel EQ done */ - while (result == LINK_TRAINING_SUCCESS) { - dp_wait_for_training_aux_rd_interval(link, aux_rd_interval); - wait_time += aux_rd_interval; - status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, - &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); - if (status != DC_OK) { - result = LINK_TRAINING_ABORT; - } else if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count, - dpcd_lane_status)) { - /* pass */ - break; - } else if (loop_count >= lt_settings->eq_loop_count_limit) { - result = DP_128b_132b_MAX_LOOP_COUNT_REACHED; - } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { - result = DP_128b_132b_LT_FAILED; - } else { - dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); - dpcd_128b_132b_set_lane_settings(link, lt_settings); - } - loop_count++; - } - - /* poll for EQ interlane align done */ - while (result == LINK_TRAINING_SUCCESS) { - if (status != DC_OK) { - result = LINK_TRAINING_ABORT; - } else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) { - /* pass */ - break; - } else if (wait_time >= lt_settings->eq_wait_time_limit) { - result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT; - } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { - result = DP_128b_132b_LT_FAILED; - } else { - dp_wait_for_training_aux_rd_interval(link, - lt_settings->eq_pattern_time); - wait_time += lt_settings->eq_pattern_time; - status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, - &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); - } - } - - return result; -} - -static enum link_training_result dp_perform_128b_132b_cds_done_sequence( - 
struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - /* Assumption: assume hardware has transmitted eq pattern */ - enum dc_status status = DC_OK; - enum link_training_result result = LINK_TRAINING_SUCCESS; - union lane_align_status_updated dpcd_lane_status_updated = {0}; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - uint32_t wait_time = 0; - - /* initiate CDS done sequence */ - dpcd_set_training_pattern(link, lt_settings->pattern_for_cds); - - /* poll for CDS interlane align done and symbol lock */ - while (result == LINK_TRAINING_SUCCESS) { - dp_wait_for_training_aux_rd_interval(link, - lt_settings->cds_pattern_time); - wait_time += lt_settings->cds_pattern_time; - status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, - &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); - if (status != DC_OK) { - result = LINK_TRAINING_ABORT; - } else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) && - dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) { - /* pass */ - break; - } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { - result = DP_128b_132b_LT_FAILED; - } else if (wait_time >= lt_settings->cds_wait_time_limit) { - result = DP_128b_132b_CDS_DONE_TIMEOUT; - } - } - - return result; -} - -static enum link_training_result dp_perform_8b_10b_link_training( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - enum link_training_result status = LINK_TRAINING_SUCCESS; - - uint8_t repeater_cnt; - uint8_t repeater_id; - uint8_t lane = 0; - - if (link->ctx->dc->work_arounds.lt_early_cr_pattern) - start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); - - /* 1. set link rate, lane count and spread. */ - dpcd_set_link_settings(link, lt_settings); - - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - - /* 2. 
perform link training (set link training done - * to false is done as well) - */ - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - - for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); - repeater_id--) { - status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); - - if (status != LINK_TRAINING_SUCCESS) { - repeater_training_done(link, repeater_id); - break; - } - - status = perform_channel_equalization_sequence(link, - link_res, - lt_settings, - repeater_id); - - repeater_training_done(link, repeater_id); - - if (status != LINK_TRAINING_SUCCESS) - break; - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - lt_settings->dpcd_lane_settings[lane].raw = 0; - lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; - lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; - } - } - } - - if (status == LINK_TRAINING_SUCCESS) { - status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX); - if (status == LINK_TRAINING_SUCCESS) { - status = perform_channel_equalization_sequence(link, - link_res, - lt_settings, - DPRX); - } - } - - return status; -} - -static enum link_training_result dp_perform_128b_132b_link_training( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - enum link_training_result result = LINK_TRAINING_SUCCESS; - - /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */ - if (link->dc->debug.legacy_dp2_lt) { - struct link_training_settings legacy_settings; - - decide_8b_10b_training_settings(link, - <_settings->link_settings, - &legacy_settings); - return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings); - } - - dpcd_set_link_settings(link, lt_settings); - - if (result == LINK_TRAINING_SUCCESS) - result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings); - - if (result == LINK_TRAINING_SUCCESS) - result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings); - - return result; -} - -static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - enum link_training_result status = LINK_TRAINING_SUCCESS; - uint8_t lane = 0; - uint8_t toggle_rate = 0x6; - uint8_t target_rate = 0x6; - bool apply_toggle_rate_wa = false; - uint8_t repeater_cnt; - uint8_t repeater_id; - - /* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */ - if (lt_settings->cr_pattern_time < 16000) - lt_settings->cr_pattern_time = 16000; - - /* Fixed VS/PE specific: Toggle link rate */ - apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate); - target_rate = get_dpcd_link_rate(<_settings->link_settings); - toggle_rate = (target_rate == 0x6) ? 0xA : 0x6; - - if (apply_toggle_rate_wa) - lt_settings->link_settings.link_rate = toggle_rate; - - if (link->ctx->dc->work_arounds.lt_early_cr_pattern) - start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); - - /* 1. set link rate, lane count and spread. */ - dpcd_set_link_settings(link, lt_settings); - - /* Fixed VS/PE specific: Toggle link rate back*/ - if (apply_toggle_rate_wa) { - core_link_write_dpcd( - link, - DP_LINK_BW_SET, - &target_rate, - 1); - } - - link->vendor_specific_lttpr_link_rate_wa = target_rate; - - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - - /* 2. 
perform link training (set link training done - * to false is done as well) - */ - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - - for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); - repeater_id--) { - status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); - - if (status != LINK_TRAINING_SUCCESS) { - repeater_training_done(link, repeater_id); - break; - } - - status = perform_channel_equalization_sequence(link, - link_res, - lt_settings, - repeater_id); - - repeater_training_done(link, repeater_id); - - if (status != LINK_TRAINING_SUCCESS) - break; - - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - lt_settings->dpcd_lane_settings[lane].raw = 0; - lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; - lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; - } - } - } - - if (status == LINK_TRAINING_SUCCESS) { - status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX); - if (status == LINK_TRAINING_SUCCESS) { - status = perform_channel_equalization_sequence(link, - link_res, - lt_settings, - DPRX); - } - } - - return status; -} - -static enum link_training_result dp_perform_fixed_vs_pe_training_sequence( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; - const uint8_t offset = dp_convert_to_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; - const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; - uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; - uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; - uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - uint32_t vendor_lttpr_write_address = 0xF004F; - enum link_training_result status = LINK_TRAINING_SUCCESS; - uint8_t lane = 0; - union down_spread_ctrl downspread = {0}; - union lane_count_set lane_count_set = {0}; - uint8_t toggle_rate; - uint8_t rate; - - /* Only 8b/10b is supported */ - ASSERT(dp_get_link_encoding_format(<_settings->link_settings) == - DP_8b_10b_ENCODING); - - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); - return status; - } - - if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - - /* Certain display and cable configuration require extra delay */ - if (offset > 2) - pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; - } - - /* Vendor specific: Reset lane settings */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_reset[0], - sizeof(vendor_lttpr_write_data_reset)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); - - /* Vendor specific: Enable intercept */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_intercept_en[0], - sizeof(vendor_lttpr_write_data_intercept_en)); - - /* 1. set link rate, lane count and spread. 
*/ - - downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); - - lane_count_set.bits.LANE_COUNT_SET = - lt_settings->link_settings.lane_count; - - lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - - - if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = - link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; - } - - core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, - &downspread.raw, sizeof(downspread)); - - core_link_write_dpcd(link, DP_LANE_COUNT_SET, - &lane_count_set.raw, 1); - - rate = get_dpcd_link_rate(<_settings->link_settings); - - /* Vendor specific: Toggle link rate */ - toggle_rate = (rate == 0x6) ? 0xA : 0x6; - - if (link->vendor_specific_lttpr_link_rate_wa == rate) { - core_link_write_dpcd( - link, - DP_LINK_BW_SET, - &toggle_rate, - 1); - } - - link->vendor_specific_lttpr_link_rate_wa = rate; - - core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); - - DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", - __func__, - DP_LINK_BW_SET, - lt_settings->link_settings.link_rate, - DP_LANE_COUNT_SET, - lt_settings->link_settings.lane_count, - lt_settings->enhanced_framing, - DP_DOWNSPREAD_CTRL, - lt_settings->link_settings.link_spread); - - /* 2. Perform link training */ - - /* Perform Clock Recovery Sequence */ - if (status == LINK_TRAINING_SUCCESS) { - const uint8_t max_vendor_dpcd_retries = 10; - uint32_t retries_cr; - uint32_t retry_count; - uint32_t wait_time_microsec; - enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; - union lane_align_status_updated dpcd_lane_status_updated; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - enum dc_status dpcd_status = DC_OK; - uint8_t i = 0; - - retries_cr = 0; - retry_count = 0; - - memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); - memset(&dpcd_lane_status_updated, '\0', - sizeof(dpcd_lane_status_updated)); - - while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && - (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { - - - /* 1. call HWSS to set lane settings */ - dp_set_hw_lane_settings( - link, - link_res, - lt_settings, - 0); - - /* 2. update DPCD of the receiver */ - if (!retry_count) { - /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration. 
- */ - dpcd_set_lt_pattern_and_lane_settings( - link, - lt_settings, - lt_settings->pattern_for_cr, - 0); - /* Vendor specific: Disable intercept */ - for (i = 0; i < max_vendor_dpcd_retries; i++) { - msleep(pre_disable_intercept_delay_ms); - dpcd_status = core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_intercept_dis[0], - sizeof(vendor_lttpr_write_data_intercept_dis)); - - if (dpcd_status == DC_OK) - break; - - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_intercept_en[0], - sizeof(vendor_lttpr_write_data_intercept_en)); - } - } else { - vendor_lttpr_write_data_vs[3] = 0; - vendor_lttpr_write_data_pe[3] = 0; - - for (lane = 0; lane < lane_count; lane++) { - vendor_lttpr_write_data_vs[3] |= - lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); - vendor_lttpr_write_data_pe[3] |= - lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); - } - - /* Vendor specific: Update VS and PE to DPRX requested value */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); - - dpcd_set_lane_settings( - link, - lt_settings, - 0); - } - - /* 3. wait receiver to lock-on*/ - wait_time_microsec = lt_settings->cr_pattern_time; - - dp_wait_for_training_aux_rd_interval( - link, - wait_time_microsec); - - /* 4. Read lane status and requested drive - * settings as set by the sink - */ - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - 0); - - /* 5. check CR done*/ - if (dp_is_cr_done(lane_count, dpcd_lane_status)) { - status = LINK_TRAINING_SUCCESS; - break; - } - - /* 6. max VS reached*/ - if (dp_is_max_vs_reached(lt_settings)) - break; - - /* 7. same lane settings */ - /* Note: settings are the same for all lanes, - * so comparing first lane is sufficient - */ - if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == - dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) - retries_cr++; - else - retries_cr = 0; - - /* 8. update VS/PE/PC2 in lt_settings*/ - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - retry_count++; - } - - if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { - ASSERT(0); - DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", - __func__, - LINK_TRAINING_MAX_CR_RETRY); - - } - - status = dp_get_cr_failure(lane_count, dpcd_lane_status); - } - - /* Perform Channel EQ Sequence */ - if (status == LINK_TRAINING_SUCCESS) { - enum dc_dp_training_pattern tr_pattern; - uint32_t retries_ch_eq; - uint32_t wait_time_microsec; - enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_align_status_updated dpcd_lane_status_updated = {0}; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - - /* Note: also check that TPS4 is a supported feature*/ - tr_pattern = lt_settings->pattern_for_eq; - - dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); - - status = LINK_TRAINING_EQ_FAIL_EQ; - - for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; - retries_ch_eq++) { - - dp_set_hw_lane_settings(link, link_res, lt_settings, 0); - - vendor_lttpr_write_data_vs[3] = 0; - vendor_lttpr_write_data_pe[3] = 0; - - for (lane = 0; lane < lane_count; lane++) { - vendor_lttpr_write_data_vs[3] |= - lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); - vendor_lttpr_write_data_pe[3] |= - lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); - } - - /* Vendor specific: Update VS and PE to DPRX requested value */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); - - /* 2. update DPCD*/ - if (!retries_ch_eq) - /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration - */ - - dpcd_set_lt_pattern_and_lane_settings( - link, - lt_settings, - tr_pattern, 0); - else - dpcd_set_lane_settings(link, lt_settings, 0); - - /* 3. wait for receiver to lock-on*/ - wait_time_microsec = lt_settings->eq_pattern_time; - - dp_wait_for_training_aux_rd_interval( - link, - wait_time_microsec); - - /* 4. Read lane status and requested - * drive settings as set by the sink - */ - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - 0); - - /* 5. check CR done*/ - if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { - status = LINK_TRAINING_EQ_FAIL_CR; - break; - } - - /* 6. check CHEQ done*/ - if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && - dp_is_symbol_locked(lane_count, dpcd_lane_status) && - dp_is_interlane_aligned(dpcd_lane_status_updated)) { - status = LINK_TRAINING_SUCCESS; - break; - } - - /* 7. 
update VS/PE/PC2 in lt_settings*/ - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - } - } - - return status; -} - - -enum link_training_result dc_link_dp_perform_link_training( - struct dc_link *link, - const struct link_resource *link_res, - const struct dc_link_settings *link_settings, - bool skip_video_pattern) -{ - enum link_training_result status = LINK_TRAINING_SUCCESS; - struct link_training_settings lt_settings = {0}; - enum dp_link_encoding encoding = - dp_get_link_encoding_format(link_settings); - - /* decide training settings */ - dp_decide_training_settings( - link, - link_settings, - <_settings); - - override_training_settings( - link, - &link->preferred_training_settings, - <_settings); - - /* reset previous training states */ - dpcd_exit_training_mode(link, encoding); - - /* configure link prior to entering training mode */ - dpcd_configure_lttpr_mode(link, <_settings); - dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready); - dpcd_configure_channel_coding(link, <_settings); - - /* enter training mode: - * Per DP specs starting from here, DPTX device shall not issue - * Non-LT AUX transactions inside training mode. - */ - if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && encoding == DP_8b_10b_ENCODING) - status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, <_settings); - else if (encoding == DP_8b_10b_ENCODING) - status = dp_perform_8b_10b_link_training(link, link_res, <_settings); - else if (encoding == DP_128b_132b_ENCODING) - status = dp_perform_128b_132b_link_training(link, link_res, <_settings); - else - ASSERT(0); - - /* exit training mode */ - dpcd_exit_training_mode(link, encoding); - - /* switch to video idle */ - if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) - status = dp_transition_to_video_idle(link, - link_res, - <_settings, - status); - - /* dump debug data */ - print_status_message(link, <_settings, status); - if (status != LINK_TRAINING_SUCCESS) - link->ctx->dc->debug_data.ltFailCount++; - return status; -} - -bool perform_link_training_with_retries( - const struct dc_link_settings *link_setting, - bool skip_video_pattern, - int attempts, - struct pipe_ctx *pipe_ctx, - enum signal_type signal, - bool do_fallback) -{ - int j; - uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY; - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - enum dp_panel_mode panel_mode = dp_get_panel_mode(link); - enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; - struct dc_link_settings cur_link_settings = *link_setting; - struct dc_link_settings max_link_settings = *link_setting; - const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - int fail_count = 0; - bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */ - bool is_link_bw_min = /* RBR x 1 */ - (cur_link_settings.link_rate <= LINK_RATE_LOW) && - (cur_link_settings.lane_count <= LANE_COUNT_ONE); - - dp_trace_commit_lt_init(link); - - if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) - /* We need to do this before the link training to ensure the idle - * pattern in SST mode will be sent right after the link training - */ - link_hwss->setup_stream_encoder(pipe_ctx); - - dp_trace_set_lt_start_timestamp(link, false); - j = 0; - while (j < attempts && fail_count < (attempts * 10)) { - - DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x 
lane(%d)\n", - __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate, - cur_link_settings.lane_count); - - dp_enable_link_phy( - link, - &pipe_ctx->link_res, - signal, - pipe_ctx->clock_source->id, - &cur_link_settings); - - if (stream->sink_patches.dppowerup_delay > 0) { - int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; - - msleep(delay_dp_power_up_in_ms); - } - -#ifdef CONFIG_DRM_AMD_DC_HDCP - if (panel_mode == DP_PANEL_MODE_EDP) { - struct cp_psp *cp_psp = &stream->ctx->cp_psp; - - if (cp_psp && cp_psp->funcs.enable_assr) - /* ASSR is bound to fail with unsigned PSP - * verstage used during devlopment phase. - * Report and continue with eDP panel mode to - * perform eDP link training with right settings - */ - cp_psp->funcs.enable_assr(cp_psp->handle, link); - } -#endif - - dp_set_panel_mode(link, panel_mode); - - if (link->aux_access_disabled) { - dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings); - return true; - } else { - /** @todo Consolidate USB4 DP and DPx.x training. */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { - status = dc_link_dpia_perform_link_training(link, - &pipe_ctx->link_res, - &cur_link_settings, - skip_video_pattern); - - /* Transmit idle pattern once training successful. */ - if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) { - dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); - /* Update verified link settings to current one - * Because DPIA LT might fallback to lower link setting. - */ - if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - link->verified_link_cap.link_rate = link->cur_link_settings.link_rate; - link->verified_link_cap.lane_count = link->cur_link_settings.lane_count; - dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link); - } - } - } else { - status = dc_link_dp_perform_link_training(link, - &pipe_ctx->link_res, - &cur_link_settings, - skip_video_pattern); - } - - dp_trace_lt_total_count_increment(link, false); - dp_trace_lt_result_update(link, status, false); - dp_trace_set_lt_end_timestamp(link, false); - if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) - return true; - } - - fail_count++; - dp_trace_lt_fail_count_update(link, fail_count, false); - if (link->ep_type == DISPLAY_ENDPOINT_PHY) { - /* latest link training still fail or link training is aborted - * skip delay and keep PHY on - */ - if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT)) - break; - } - - DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) : fail reason:(%d)\n", - __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate, - cur_link_settings.lane_count, status); - - dp_disable_link_phy(link, &pipe_ctx->link_res, signal); - - /* Abort link training if failure due to sink being unplugged. */ - if (status == LINK_TRAINING_ABORT) { - enum dc_connection_type type = dc_connection_none; - - dc_link_detect_sink(link, &type); - if (type == dc_connection_none) { - DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__); - break; - } - } - - /* Try to train again at original settings if: - * - not falling back between training attempts; - * - aborted previous attempt due to reasons other than sink unplug; - * - successfully trained but at a link rate lower than that required by stream; - * - reached minimum link bandwidth. 
- */ - if (!do_fallback || (status == LINK_TRAINING_ABORT) || - (status == LINK_TRAINING_SUCCESS && is_link_bw_low) || - is_link_bw_min) { - j++; - cur_link_settings = *link_setting; - delay_between_attempts += LINK_TRAINING_RETRY_DELAY; - is_link_bw_low = false; - is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) && - (cur_link_settings.lane_count <= LANE_COUNT_ONE); - - } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */ - uint32_t req_bw; - uint32_t link_bw; - - decide_fallback_link_setting(link, &max_link_settings, - &cur_link_settings, status); - /* Fail link training if reduced link bandwidth no longer meets - * stream requirements. - */ - req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); - link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings); - is_link_bw_low = (req_bw > link_bw); - is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && - (cur_link_settings.lane_count <= LANE_COUNT_ONE)); - if (is_link_bw_low) - DC_LOG_WARNING( - "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", - __func__, link->link_index, req_bw, link_bw); - } - - msleep(delay_between_attempts); - } - return false; -} - -static enum clock_source_id get_clock_source_id(struct dc_link *link) -{ - enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED; - struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source; - - if (dp_cs != NULL) { - dp_cs_id = dp_cs->id; - } else { - /* - * dp clock source is not initialized for some reason. - * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used - */ - ASSERT(dp_cs); - } - - return dp_cs_id; -} - -static void set_dp_mst_mode(struct dc_link *link, const struct link_resource *link_res, - bool mst_enable) -{ - if (mst_enable == false && - link->type == dc_connection_mst_branch) { - /* Disable MST on link. Use only local sink. */ - dp_disable_link_phy_mst(link, link_res, link->connector_signal); - - link->type = dc_connection_single; - link->local_sink = link->remote_sinks[0]; - link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT; - dc_sink_retain(link->local_sink); - dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); - } else if (mst_enable == true && - link->type == dc_connection_single && - link->remote_sinks[0] != NULL) { - /* Re-enable MST on link. */ - dp_disable_link_phy(link, link_res, link->connector_signal); - dp_enable_mst_on_sink(link, true); - - link->type = dc_connection_mst_branch; - link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST; - } -} - -bool dc_link_dp_sync_lt_begin(struct dc_link *link) -{ - /* Begin Sync LT. During this time, - * DPCD:600h must not be powered down. 
- */ - link->sync_lt_in_progress = true; - - /*Clear any existing preferred settings.*/ - memset(&link->preferred_training_settings, 0, - sizeof(struct dc_link_training_overrides)); - memset(&link->preferred_link_setting, 0, - sizeof(struct dc_link_settings)); - - return true; -} - -enum link_training_result dc_link_dp_sync_lt_attempt( - struct dc_link *link, - const struct link_resource *link_res, - struct dc_link_settings *link_settings, - struct dc_link_training_overrides *lt_overrides) -{ - struct link_training_settings lt_settings = {0}; - enum link_training_result lt_status = LINK_TRAINING_SUCCESS; - enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT; - enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL; - bool fec_enable = false; - - dp_decide_training_settings( - link, - link_settings, - <_settings); - override_training_settings( - link, - lt_overrides, - <_settings); - /* Setup MST Mode */ - if (lt_overrides->mst_enable) - set_dp_mst_mode(link, link_res, *lt_overrides->mst_enable); - - /* Disable link */ - dp_disable_link_phy(link, link_res, link->connector_signal); - - /* Enable link */ - dp_cs_id = get_clock_source_id(link); - dp_enable_link_phy(link, link_res, link->connector_signal, - dp_cs_id, link_settings); - - /* Set FEC enable */ - if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { - fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable; - dp_set_fec_ready(link, NULL, fec_enable); - } - - if (lt_overrides->alternate_scrambler_reset) { - if (*lt_overrides->alternate_scrambler_reset) - panel_mode = DP_PANEL_MODE_EDP; - else - panel_mode = DP_PANEL_MODE_DEFAULT; - } else - panel_mode = dp_get_panel_mode(link); - - dp_set_panel_mode(link, panel_mode); - - /* Attempt to train with given link training settings */ - if (link->ctx->dc->work_arounds.lt_early_cr_pattern) - start_clock_recovery_pattern_early(link, link_res, <_settings, DPRX); - - /* Set link rate, lane count and spread. */ - dpcd_set_link_settings(link, <_settings); - - /* 2. perform link training (set link training done - * to false is done as well) - */ - lt_status = perform_clock_recovery_sequence(link, link_res, <_settings, DPRX); - if (lt_status == LINK_TRAINING_SUCCESS) { - lt_status = perform_channel_equalization_sequence(link, - link_res, - <_settings, - DPRX); - } - - /* 3. Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/ - /* 4. print status message*/ - print_status_message(link, <_settings, lt_status); - - return lt_status; -} - -bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) -{ - /* If input parameter is set, shut down phy. 
- * Still shouldn't turn off dp_receiver (DPCD:600h) - */ - if (link_down == true) { - struct dc_link_settings link_settings = link->cur_link_settings; - dp_disable_link_phy(link, NULL, link->connector_signal); - if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) - dp_set_fec_ready(link, NULL, false); - } - - link->sync_lt_in_progress = false; - return true; -} - -static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) -{ - enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; - - if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20) - lttpr_max_link_rate = LINK_RATE_UHBR20; - else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5) - lttpr_max_link_rate = LINK_RATE_UHBR13_5; - else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10) - lttpr_max_link_rate = LINK_RATE_UHBR10; - - return lttpr_max_link_rate; -} - -static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link) -{ - enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN; - - if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) - cable_max_link_rate = LINK_RATE_UHBR20; - else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) - cable_max_link_rate = LINK_RATE_UHBR13_5; - else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) - cable_max_link_rate = LINK_RATE_UHBR10; - - return cable_max_link_rate; -} - -bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) -{ - struct link_encoder *link_enc = NULL; - - if (!max_link_enc_cap) { - DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__); - return false; - } - - link_enc = link_enc_cfg_get_link_enc(link); - ASSERT(link_enc); - - if (link_enc && link_enc->funcs->get_max_link_cap) { - link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap); - return true; - } - - DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__); - max_link_enc_cap->lane_count = 1; - max_link_enc_cap->link_rate = 6; - return false; -} - - -struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) -{ - struct dc_link_settings max_link_cap = {0}; - enum dc_link_rate lttpr_max_link_rate; - enum dc_link_rate cable_max_link_rate; - struct link_encoder *link_enc = NULL; - - - link_enc = link_enc_cfg_get_link_enc(link); - ASSERT(link_enc); - - /* get max link encoder capability */ - if (link_enc) - link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap); - - /* Lower link settings based on sink's link cap */ - if (link->reported_link_cap.lane_count < max_link_cap.lane_count) - max_link_cap.lane_count = - link->reported_link_cap.lane_count; - if (link->reported_link_cap.link_rate < max_link_cap.link_rate) - max_link_cap.link_rate = - link->reported_link_cap.link_rate; - if (link->reported_link_cap.link_spread < - max_link_cap.link_spread) - max_link_cap.link_spread = - link->reported_link_cap.link_spread; - - /* Lower link settings based on cable attributes - * Cable ID is a DP2 feature to identify max certified link rate that - * a cable can carry. The cable identification method requires both - * cable and display hardware support. Since the specs comes late, it is - * anticipated that the first round of DP2 cables and displays may not - * be fully compatible to reliably return cable ID data. Therefore the - * decision of our cable id policy is that if the cable can return non - * zero cable id data, we will take cable's link rate capability into - * account. 
However if we get zero data, the cable link rate capability - * is considered inconclusive. In this case, we will not take cable's - * capability into account to avoid of over limiting hardware capability - * from users. The max overall link rate capability is still determined - * after actual dp pre-training. Cable id is considered as an auxiliary - * method of determining max link bandwidth capability. - */ - cable_max_link_rate = get_cable_max_link_rate(link); - - if (!link->dc->debug.ignore_cable_id && - cable_max_link_rate != LINK_RATE_UNKNOWN && - cable_max_link_rate < max_link_cap.link_rate) - max_link_cap.link_rate = cable_max_link_rate; - - /* account for lttpr repeaters cap - * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). - */ - if (dp_is_lttpr_present(link)) { - if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) - max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; - lttpr_max_link_rate = get_lttpr_max_link_rate(link); - - if (lttpr_max_link_rate < max_link_cap.link_rate) - max_link_cap.link_rate = lttpr_max_link_rate; - - DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n", - __func__, - max_link_cap.lane_count, - max_link_cap.link_rate); - } - - if (dp_get_link_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING && - link->dc->debug.disable_uhbr) - max_link_cap.link_rate = LINK_RATE_HIGH3; - - return max_link_cap; -} - -static enum dc_status read_hpd_rx_irq_data( +enum dc_status read_hpd_rx_irq_data( struct dc_link *link, union hpd_irq_data *irq_data) { @@ -3249,372 +182,6 @@ bool hpd_rx_irq_check_link_loss_status( return return_code; } -static bool dp_verify_link_cap( - struct dc_link *link, - struct dc_link_settings *known_limit_link_setting, - int *fail_count) -{ - struct dc_link_settings cur_link_settings = {0}; - struct dc_link_settings max_link_settings = *known_limit_link_setting; - bool success = false; - bool skip_video_pattern; - enum clock_source_id dp_cs_id = get_clock_source_id(link); - enum link_training_result status = LINK_TRAINING_SUCCESS; - union hpd_irq_data irq_data; - struct link_resource link_res; - - memset(&irq_data, 0, sizeof(irq_data)); - cur_link_settings = max_link_settings; - - /* Grant extended timeout request */ - if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) { - uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80; - - core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); - } - - do { - if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings)) - continue; - - skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW; - dp_enable_link_phy( - link, - &link_res, - link->connector_signal, - dp_cs_id, - &cur_link_settings); - - status = dc_link_dp_perform_link_training( - link, - &link_res, - &cur_link_settings, - skip_video_pattern); - - if (status == LINK_TRAINING_SUCCESS) { - success = true; - udelay(1000); - if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK && - hpd_rx_irq_check_link_loss_status( - link, - &irq_data)) - (*fail_count)++; - - } else { - (*fail_count)++; - } - dp_trace_lt_total_count_increment(link, true); - dp_trace_lt_result_update(link, status, true); - dp_disable_link_phy(link, &link_res, link->connector_signal); - } while (!success && decide_fallback_link_setting(link, - &max_link_settings, &cur_link_settings, status)); - - link->verified_link_cap = success ? 
- cur_link_settings : fail_safe_link_settings; - return success; -} - -static void apply_usbc_combo_phy_reset_wa(struct dc_link *link, - struct dc_link_settings *link_settings) -{ - /* Temporary Renoir-specific workaround PHY will sometimes be in bad - * state on hotplugging display from certain USB-C dongle, so add extra - * cycle of enabling and disabling the PHY before first link training. - */ - struct link_resource link_res = {0}; - enum clock_source_id dp_cs_id = get_clock_source_id(link); - - dp_enable_link_phy(link, &link_res, link->connector_signal, - dp_cs_id, link_settings); - dp_disable_link_phy(link, &link_res, link->connector_signal); -} - -bool dp_verify_link_cap_with_retries( - struct dc_link *link, - struct dc_link_settings *known_limit_link_setting, - int attempts) -{ - int i = 0; - bool success = false; - int fail_count = 0; - - dp_trace_detect_lt_init(link); - - if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && - link->dc->debug.usbc_combo_phy_reset_wa) - apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting); - - dp_trace_set_lt_start_timestamp(link, false); - for (i = 0; i < attempts; i++) { - enum dc_connection_type type = dc_connection_none; - - memset(&link->verified_link_cap, 0, - sizeof(struct dc_link_settings)); - if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) { - link->verified_link_cap = fail_safe_link_settings; - break; - } else if (dp_verify_link_cap(link, known_limit_link_setting, - &fail_count) && fail_count == 0) { - success = true; - break; - } - msleep(10); - } - - dp_trace_lt_fail_count_update(link, fail_count, true); - dp_trace_set_lt_end_timestamp(link, true); - - return success; -} - -/* in DP compliance test, DPR-120 may have - * a random value in its MAX_LINK_BW dpcd field. - * We map it to the maximum supported link rate that - * is smaller than MAX_LINK_BW in this case. 
- */ -static enum dc_link_rate get_link_rate_from_max_link_bw( - uint8_t max_link_bw) -{ - enum dc_link_rate link_rate; - - if (max_link_bw >= LINK_RATE_HIGH3) { - link_rate = LINK_RATE_HIGH3; - } else if (max_link_bw < LINK_RATE_HIGH3 - && max_link_bw >= LINK_RATE_HIGH2) { - link_rate = LINK_RATE_HIGH2; - } else if (max_link_bw < LINK_RATE_HIGH2 - && max_link_bw >= LINK_RATE_HIGH) { - link_rate = LINK_RATE_HIGH; - } else if (max_link_bw < LINK_RATE_HIGH - && max_link_bw >= LINK_RATE_LOW) { - link_rate = LINK_RATE_LOW; - } else { - link_rate = LINK_RATE_UNKNOWN; - } - - return link_rate; -} - -static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count) -{ - return lane_count <= LANE_COUNT_ONE; -} - -static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate) -{ - return link_rate <= LINK_RATE_LOW; -} - -static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count) -{ - switch (lane_count) { - case LANE_COUNT_FOUR: - return LANE_COUNT_TWO; - case LANE_COUNT_TWO: - return LANE_COUNT_ONE; - case LANE_COUNT_ONE: - return LANE_COUNT_UNKNOWN; - default: - return LANE_COUNT_UNKNOWN; - } -} - -static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate) -{ - switch (link_rate) { - case LINK_RATE_UHBR20: - return LINK_RATE_UHBR13_5; - case LINK_RATE_UHBR13_5: - return LINK_RATE_UHBR10; - case LINK_RATE_UHBR10: - return LINK_RATE_HIGH3; - case LINK_RATE_HIGH3: - return LINK_RATE_HIGH2; - case LINK_RATE_HIGH2: - return LINK_RATE_HIGH; - case LINK_RATE_HIGH: - return LINK_RATE_LOW; - case LINK_RATE_LOW: - return LINK_RATE_UNKNOWN; - default: - return LINK_RATE_UNKNOWN; - } -} - -static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count) -{ - switch (lane_count) { - case LANE_COUNT_ONE: - return LANE_COUNT_TWO; - case LANE_COUNT_TWO: - return LANE_COUNT_FOUR; - default: - return LANE_COUNT_UNKNOWN; - } -} - -static enum dc_link_rate increase_link_rate(struct dc_link *link, - enum dc_link_rate link_rate) -{ - switch (link_rate) { - case LINK_RATE_LOW: - return LINK_RATE_HIGH; - case LINK_RATE_HIGH: - return LINK_RATE_HIGH2; - case LINK_RATE_HIGH2: - return LINK_RATE_HIGH3; - case LINK_RATE_HIGH3: - return LINK_RATE_UHBR10; - case LINK_RATE_UHBR10: - /* upto DP2.x specs UHBR13.5 is the only link rate that could be - * not supported by DPRX when higher link rate is supported. - * so we treat it as a special case for code simplicity. When we - * have new specs with more link rates like this, we should - * consider a more generic solution to handle discrete link - * rate capabilities. - */ - return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ? 
- LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20; - case LINK_RATE_UHBR13_5: - return LINK_RATE_UHBR20; - default: - return LINK_RATE_UNKNOWN; - } -} - -static bool decide_fallback_link_setting_max_bw_policy( - struct dc_link *link, - const struct dc_link_settings *max, - struct dc_link_settings *cur, - enum link_training_result training_result) -{ - uint8_t cur_idx = 0, next_idx; - bool found = false; - - if (training_result == LINK_TRAINING_ABORT) - return false; - - while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks)) - /* find current index */ - if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count && - dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate) - break; - else - cur_idx++; - - next_idx = cur_idx + 1; - - while (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) - /* find next index */ - if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count || - dp_lt_fallbacks[next_idx].link_rate > max->link_rate) - next_idx++; - else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 && - link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0) - /* upto DP2.x specs UHBR13.5 is the only link rate that - * could be not supported by DPRX when higher link rate - * is supported. so we treat it as a special case for - * code simplicity. When we have new specs with more - * link rates like this, we should consider a more - * generic solution to handle discrete link rate - * capabilities. - */ - next_idx++; - else - break; - - if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) { - cur->lane_count = dp_lt_fallbacks[next_idx].lane_count; - cur->link_rate = dp_lt_fallbacks[next_idx].link_rate; - found = true; - } - - return found; -} - -/* - * function: set link rate and lane count fallback based - * on current link setting and last link training result - * return value: - * true - link setting could be set - * false - has reached minimum setting - * and no further fallback could be done - */ -static bool decide_fallback_link_setting( - struct dc_link *link, - struct dc_link_settings *max, - struct dc_link_settings *cur, - enum link_training_result training_result) -{ - if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING || - link->dc->debug.force_dp2_lt_fallback_method) - return decide_fallback_link_setting_max_bw_policy(link, max, cur, - training_result); - - switch (training_result) { - case LINK_TRAINING_CR_FAIL_LANE0: - case LINK_TRAINING_CR_FAIL_LANE1: - case LINK_TRAINING_CR_FAIL_LANE23: - case LINK_TRAINING_LQA_FAIL: - { - if (!reached_minimum_link_rate(cur->link_rate)) { - cur->link_rate = reduce_link_rate(cur->link_rate); - } else if (!reached_minimum_lane_count(cur->lane_count)) { - cur->link_rate = max->link_rate; - if (training_result == LINK_TRAINING_CR_FAIL_LANE0) - return false; - else if (training_result == LINK_TRAINING_CR_FAIL_LANE1) - cur->lane_count = LANE_COUNT_ONE; - else if (training_result == LINK_TRAINING_CR_FAIL_LANE23) - cur->lane_count = LANE_COUNT_TWO; - else - cur->lane_count = reduce_lane_count(cur->lane_count); - } else { - return false; - } - break; - } - case LINK_TRAINING_EQ_FAIL_EQ: - case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: - { - if (!reached_minimum_lane_count(cur->lane_count)) { - cur->lane_count = reduce_lane_count(cur->lane_count); - } else if (!reached_minimum_link_rate(cur->link_rate)) { - cur->link_rate = reduce_link_rate(cur->link_rate); - /* Reduce max link rate to avoid potential infinite loop. - * Needed so that any subsequent CR_FAIL fallback can't - * re-set the link rate higher than the link rate from - * the latest EQ_FAIL fallback. 
- */ - max->link_rate = cur->link_rate; - cur->lane_count = max->lane_count; - } else { - return false; - } - break; - } - case LINK_TRAINING_EQ_FAIL_CR: - { - if (!reached_minimum_link_rate(cur->link_rate)) { - cur->link_rate = reduce_link_rate(cur->link_rate); - /* Reduce max link rate to avoid potential infinite loop. - * Needed so that any subsequent CR_FAIL fallback can't - * re-set the link rate higher than the link rate from - * the latest EQ_FAIL fallback. - */ - max->link_rate = cur->link_rate; - cur->lane_count = max->lane_count; - } else { - return false; - } - break; - } - default: - return false; - } - return true; -} - bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing) @@ -3666,306 +233,6 @@ bool dp_validate_mode_timing( return false; } -static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw) -{ - struct dc_link_settings initial_link_setting = { - LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0}; - struct dc_link_settings current_link_setting = - initial_link_setting; - uint32_t link_bw; - - if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) - return false; - - /* search for the minimum link setting that: - * 1. is supported according to the link training result - * 2. could support the b/w requested by the timing - */ - while (current_link_setting.link_rate <= - link->verified_link_cap.link_rate) { - link_bw = dc_link_bandwidth_kbps( - link, - ¤t_link_setting); - if (req_bw <= link_bw) { - *link_setting = current_link_setting; - return true; - } - - if (current_link_setting.lane_count < - link->verified_link_cap.lane_count) { - current_link_setting.lane_count = - increase_lane_count( - current_link_setting.lane_count); - } else { - current_link_setting.link_rate = - increase_link_rate(link, - current_link_setting.link_rate); - current_link_setting.lane_count = - initial_link_setting.lane_count; - } - } - - return false; -} - -bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw) -{ - struct dc_link_settings initial_link_setting; - struct dc_link_settings current_link_setting; - uint32_t link_bw; - - /* - * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. - * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" - */ - if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || - link->dpcd_caps.edp_supported_link_rates_count == 0) { - *link_setting = link->verified_link_cap; - return true; - } - - memset(&initial_link_setting, 0, sizeof(initial_link_setting)); - initial_link_setting.lane_count = LANE_COUNT_ONE; - initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; - initial_link_setting.link_spread = LINK_SPREAD_DISABLED; - initial_link_setting.use_link_rate_set = true; - initial_link_setting.link_rate_set = 0; - current_link_setting = initial_link_setting; - - /* search for the minimum link setting that: - * 1. is supported according to the link training result - * 2. 
could support the b/w requested by the timing - */ - while (current_link_setting.link_rate <= - link->verified_link_cap.link_rate) { - link_bw = dc_link_bandwidth_kbps( - link, - ¤t_link_setting); - if (req_bw <= link_bw) { - *link_setting = current_link_setting; - return true; - } - - if (current_link_setting.lane_count < - link->verified_link_cap.lane_count) { - current_link_setting.lane_count = - increase_lane_count( - current_link_setting.lane_count); - } else { - if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { - current_link_setting.link_rate_set++; - current_link_setting.link_rate = - link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; - current_link_setting.lane_count = - initial_link_setting.lane_count; - } else - break; - } - } - return false; -} - -static bool decide_edp_link_settings_with_dsc(struct dc_link *link, - struct dc_link_settings *link_setting, - uint32_t req_bw, - enum dc_link_rate max_link_rate) -{ - struct dc_link_settings initial_link_setting; - struct dc_link_settings current_link_setting; - uint32_t link_bw; - - unsigned int policy = 0; - - policy = link->panel_config.dsc.force_dsc_edp_policy; - if (max_link_rate == LINK_RATE_UNKNOWN) - max_link_rate = link->verified_link_cap.link_rate; - /* - * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. - * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" - */ - if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || - link->dpcd_caps.edp_supported_link_rates_count == 0)) { - /* for DSC enabled case, we search for minimum lane count */ - memset(&initial_link_setting, 0, sizeof(initial_link_setting)); - initial_link_setting.lane_count = LANE_COUNT_ONE; - initial_link_setting.link_rate = LINK_RATE_LOW; - initial_link_setting.link_spread = LINK_SPREAD_DISABLED; - initial_link_setting.use_link_rate_set = false; - initial_link_setting.link_rate_set = 0; - current_link_setting = initial_link_setting; - if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) - return false; - - /* search for the minimum link setting that: - * 1. is supported according to the link training result - * 2. 
could support the b/w requested by the timing - */ - while (current_link_setting.link_rate <= - max_link_rate) { - link_bw = dc_link_bandwidth_kbps( - link, - ¤t_link_setting); - if (req_bw <= link_bw) { - *link_setting = current_link_setting; - return true; - } - if (policy) { - /* minimize lane */ - if (current_link_setting.link_rate < max_link_rate) { - current_link_setting.link_rate = - increase_link_rate(link, - current_link_setting.link_rate); - } else { - if (current_link_setting.lane_count < - link->verified_link_cap.lane_count) { - current_link_setting.lane_count = - increase_lane_count( - current_link_setting.lane_count); - current_link_setting.link_rate = initial_link_setting.link_rate; - } else - break; - } - } else { - /* minimize link rate */ - if (current_link_setting.lane_count < - link->verified_link_cap.lane_count) { - current_link_setting.lane_count = - increase_lane_count( - current_link_setting.lane_count); - } else { - current_link_setting.link_rate = - increase_link_rate(link, - current_link_setting.link_rate); - current_link_setting.lane_count = - initial_link_setting.lane_count; - } - } - } - return false; - } - - /* if optimize edp link is supported */ - memset(&initial_link_setting, 0, sizeof(initial_link_setting)); - initial_link_setting.lane_count = LANE_COUNT_ONE; - initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; - initial_link_setting.link_spread = LINK_SPREAD_DISABLED; - initial_link_setting.use_link_rate_set = true; - initial_link_setting.link_rate_set = 0; - current_link_setting = initial_link_setting; - - /* search for the minimum link setting that: - * 1. is supported according to the link training result - * 2. could support the b/w requested by the timing - */ - while (current_link_setting.link_rate <= - max_link_rate) { - link_bw = dc_link_bandwidth_kbps( - link, - ¤t_link_setting); - if (req_bw <= link_bw) { - *link_setting = current_link_setting; - return true; - } - if (policy) { - /* minimize lane */ - if (current_link_setting.link_rate_set < - link->dpcd_caps.edp_supported_link_rates_count - && current_link_setting.link_rate < max_link_rate) { - current_link_setting.link_rate_set++; - current_link_setting.link_rate = - link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; - } else { - if (current_link_setting.lane_count < link->verified_link_cap.lane_count) { - current_link_setting.lane_count = - increase_lane_count( - current_link_setting.lane_count); - current_link_setting.link_rate_set = initial_link_setting.link_rate_set; - current_link_setting.link_rate = - link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; - } else - break; - } - } else { - /* minimize link rate */ - if (current_link_setting.lane_count < - link->verified_link_cap.lane_count) { - current_link_setting.lane_count = - increase_lane_count( - current_link_setting.lane_count); - } else { - if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { - current_link_setting.link_rate_set++; - current_link_setting.link_rate = - link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; - current_link_setting.lane_count = - initial_link_setting.lane_count; - } else - break; - } - } - } - return false; -} - -static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting) -{ - *link_setting = link->verified_link_cap; - return true; -} - -bool decide_link_settings(struct dc_stream_state *stream, - struct dc_link_settings 
*link_setting) -{ - struct dc_link *link = stream->link; - uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); - - memset(link_setting, 0, sizeof(*link_setting)); - - /* if preferred is specified through AMDDP, use it, if it's enough - * to drive the mode - */ - if (link->preferred_link_setting.lane_count != - LANE_COUNT_UNKNOWN && - link->preferred_link_setting.link_rate != - LINK_RATE_UNKNOWN) { - *link_setting = link->preferred_link_setting; - return true; - } - - /* MST doesn't perform link training for now - * TODO: add MST specific link training routine - */ - if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - decide_mst_link_settings(link, link_setting); - } else if (link->connector_signal == SIGNAL_TYPE_EDP) { - /* enable edp link optimization for DSC eDP case */ - if (stream->timing.flags.DSC) { - enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN; - - if (link->panel_config.dsc.force_dsc_edp_policy) { - /* calculate link max link rate cap*/ - struct dc_link_settings tmp_link_setting; - struct dc_crtc_timing tmp_timing = stream->timing; - uint32_t orig_req_bw; - - tmp_link_setting.link_rate = LINK_RATE_UNKNOWN; - tmp_timing.flags.DSC = 0; - orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing); - decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw); - max_link_rate = tmp_link_setting.link_rate; - } - decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate); - } else { - decide_edp_link_settings(link, link_setting, req_bw); - } - } else { - decide_dp_link_settings(link, link_setting, req_bw); - } - - return link_setting->lane_count != LANE_COUNT_UNKNOWN && - link_setting->link_rate != LINK_RATE_UNKNOWN; -} - /*************************Short Pulse IRQ***************************/ bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link) { @@ -4139,7 +406,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) /* prepare link training settings */ link_training_settings.link_settings = link->cur_link_settings; - link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings); + link_training_settings.lttpr_mode = dc_link_decide_lttpr_mode(link, &link->cur_link_settings); if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT) @@ -4267,7 +534,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) lane++) { dpcd_lane_adjust.raw = get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane); - if (dp_get_link_encoding_format(&link->cur_link_settings) == + if (link_dp_get_encoding_format(&link->cur_link_settings) == DP_8b_10b_ENCODING) { link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING = (enum dc_voltage_swing) @@ -4278,7 +545,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) link_training_settings.hw_lane_settings[lane].POST_CURSOR2 = (enum dc_post_cursor2) ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); - } else if (dp_get_link_encoding_format(&link->cur_link_settings) == + } else if (link_dp_get_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) { link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.level = dpcd_lane_adjust.tx_ffe.PRESET_VALUE; @@ -4724,1164 +991,6 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd return status; } -/*query dpcd for version and mst cap addresses*/ -bool is_mst_supported(struct dc_link *link) -{ - bool mst = false; - enum dc_status st = DC_OK; - union dpcd_rev rev; - union 
mstm_cap cap; - - if (link->preferred_training_settings.mst_enable && - *link->preferred_training_settings.mst_enable == false) { - return false; - } - - rev.raw = 0; - cap.raw = 0; - - st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw, - sizeof(rev)); - - if (st == DC_OK && rev.raw >= DPCD_REV_12) { - - st = core_link_read_dpcd(link, DP_MSTM_CAP, - &cap.raw, sizeof(cap)); - if (st == DC_OK && cap.bits.MST_CAP == 1) - mst = true; - } - return mst; - -} - -bool is_dp_active_dongle(const struct dc_link *link) -{ - return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) && - (link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER); -} - -bool is_dp_branch_device(const struct dc_link *link) -{ - return link->dpcd_caps.is_branch_dev; -} - -static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc) -{ - switch (bpc) { - case DOWN_STREAM_MAX_8BPC: - return 8; - case DOWN_STREAM_MAX_10BPC: - return 10; - case DOWN_STREAM_MAX_12BPC: - return 12; - case DOWN_STREAM_MAX_16BPC: - return 16; - default: - break; - } - - return -1; -} - -#if defined(CONFIG_DRM_AMD_DC_DCN) -uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) -{ - switch (bw) { - case 0b001: - return 9000000; - case 0b010: - return 18000000; - case 0b011: - return 24000000; - case 0b100: - return 32000000; - case 0b101: - return 40000000; - case 0b110: - return 48000000; - } - - return 0; -} - -/* - * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw. - */ -static uint32_t intersect_frl_link_bw_support( - const uint32_t max_supported_frl_bw_in_kbps, - const union hdmi_encoded_link_bw hdmi_encoded_link_bw) -{ - uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps; - - // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode) - if (hdmi_encoded_link_bw.bits.FRL_MODE) { - if (hdmi_encoded_link_bw.bits.BW_48Gbps) - supported_bw_in_kbps = 48000000; - else if (hdmi_encoded_link_bw.bits.BW_40Gbps) - supported_bw_in_kbps = 40000000; - else if (hdmi_encoded_link_bw.bits.BW_32Gbps) - supported_bw_in_kbps = 32000000; - else if (hdmi_encoded_link_bw.bits.BW_24Gbps) - supported_bw_in_kbps = 24000000; - else if (hdmi_encoded_link_bw.bits.BW_18Gbps) - supported_bw_in_kbps = 18000000; - else if (hdmi_encoded_link_bw.bits.BW_9Gbps) - supported_bw_in_kbps = 9000000; - } - - return supported_bw_in_kbps; -} -#endif - -static void read_dp_device_vendor_id(struct dc_link *link) -{ - struct dp_device_vendor_id dp_id; - - /* read IEEE branch device id */ - core_link_read_dpcd( - link, - DP_BRANCH_OUI, - (uint8_t *)&dp_id, - sizeof(dp_id)); - - link->dpcd_caps.branch_dev_id = - (dp_id.ieee_oui[0] << 16) + - (dp_id.ieee_oui[1] << 8) + - dp_id.ieee_oui[2]; - - memmove( - link->dpcd_caps.branch_dev_name, - dp_id.ieee_device_id, - sizeof(dp_id.ieee_device_id)); -} - - - -static void get_active_converter_info( - uint8_t data, struct dc_link *link) -{ - union dp_downstream_port_present ds_port = { .byte = data }; - memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps)); - - /* decode converter info*/ - if (!ds_port.fields.PORT_PRESENT) { - link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; - ddc_service_set_dongle_type(link->ddc, - link->dpcd_caps.dongle_type); - link->dpcd_caps.is_branch_dev = false; - return; - } - - /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */ - link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; - - switch (ds_port.fields.PORT_TYPE) { - case DOWNSTREAM_VGA: - 
link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; - break; - case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS: - /* At this point we don't know is it DVI or HDMI or DP++, - * assume DVI.*/ - link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; - break; - default: - link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; - break; - } - - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) { - uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/ - union dwnstream_port_caps_byte0 *port_caps = - (union dwnstream_port_caps_byte0 *)det_caps; - if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0, - det_caps, sizeof(det_caps)) == DC_OK) { - - switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { - /*Handle DP case as DONGLE_NONE*/ - case DOWN_STREAM_DETAILED_DP: - link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; - break; - case DOWN_STREAM_DETAILED_VGA: - link->dpcd_caps.dongle_type = - DISPLAY_DONGLE_DP_VGA_CONVERTER; - break; - case DOWN_STREAM_DETAILED_DVI: - link->dpcd_caps.dongle_type = - DISPLAY_DONGLE_DP_DVI_CONVERTER; - break; - case DOWN_STREAM_DETAILED_HDMI: - case DOWN_STREAM_DETAILED_DP_PLUS_PLUS: - /*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/ - link->dpcd_caps.dongle_type = - DISPLAY_DONGLE_DP_HDMI_CONVERTER; - - link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type; - if (ds_port.fields.DETAILED_CAPS) { - - union dwnstream_port_caps_byte3_hdmi - hdmi_caps = {.raw = det_caps[3] }; - union dwnstream_port_caps_byte2 - hdmi_color_caps = {.raw = det_caps[2] }; - link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz = - det_caps[1] * 2500; - - link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = - hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; - /*YCBCR capability only for HDMI case*/ - if (port_caps->bits.DWN_STRM_PORTX_TYPE - == DOWN_STREAM_DETAILED_HDMI) { - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = - hdmi_caps.bits.YCrCr422_PASS_THROUGH; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = - hdmi_caps.bits.YCrCr420_PASS_THROUGH; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = - hdmi_caps.bits.YCrCr422_CONVERSION; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = - hdmi_caps.bits.YCrCr420_CONVERSION; - } - - link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = - translate_dpcd_max_bpc( - hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (link->dc->caps.dp_hdmi21_pcon_support) { - union hdmi_encoded_link_bw hdmi_encoded_link_bw; - - link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = - dc_link_bw_kbps_from_raw_frl_link_rate_data( - hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT); - - // Intersect reported max link bw support with the supported link rate post FRL link training - if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS, - &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) { - link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support( - link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps, - hdmi_encoded_link_bw); - } - - if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0) - link->dpcd_caps.dongle_caps.extendedCapValid = true; - } -#endif - - if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0) - link->dpcd_caps.dongle_caps.extendedCapValid = true; - } - - break; - } - } - } - - ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); - - { - struct 
dp_sink_hw_fw_revision dp_hw_fw_revision; - - core_link_read_dpcd( - link, - DP_BRANCH_REVISION_START, - (uint8_t *)&dp_hw_fw_revision, - sizeof(dp_hw_fw_revision)); - - link->dpcd_caps.branch_hw_revision = - dp_hw_fw_revision.ieee_hw_rev; - - memmove( - link->dpcd_caps.branch_fw_revision, - dp_hw_fw_revision.ieee_fw_rev, - sizeof(dp_hw_fw_revision.ieee_fw_rev)); - } - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && - link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { - union dp_dfp_cap_ext dfp_cap_ext; - memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext)); - core_link_read_dpcd( - link, - DP_DFP_CAPABILITY_EXTENSION_SUPPORT, - dfp_cap_ext.raw, - sizeof(dfp_cap_ext.raw)); - link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported; - link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps = - dfp_cap_ext.fields.max_pixel_rate_in_mps[0] + - (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8); - link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width = - dfp_cap_ext.fields.max_video_h_active_width[0] + - (dfp_cap_ext.fields.max_video_h_active_width[1] << 8); - link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height = - dfp_cap_ext.fields.max_video_v_active_height[0] + - (dfp_cap_ext.fields.max_video_v_active_height[1] << 8); - link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps = - dfp_cap_ext.fields.encoding_format_caps; - link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps = - dfp_cap_ext.fields.rgb_color_depth_caps; - link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps = - dfp_cap_ext.fields.ycbcr444_color_depth_caps; - link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps = - dfp_cap_ext.fields.ycbcr422_color_depth_caps; - link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps = - dfp_cap_ext.fields.ycbcr420_color_depth_caps; - DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index); - DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false"); - DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps); - DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width); - DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height); - } -} - -static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, - int length) -{ - int retry = 0; - - if (!link->dpcd_caps.dpcd_rev.raw) { - do { - dp_receiver_power_ctrl(link, true); - core_link_read_dpcd(link, DP_DPCD_REV, - dpcd_data, length); - link->dpcd_caps.dpcd_rev.raw = dpcd_data[ - DP_DPCD_REV - - DP_DPCD_REV]; - } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw); - } - - if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) { - switch (link->dpcd_caps.branch_dev_id) { - /* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down - * all internal circuits including AUX communication preventing - * reading DPCD table and EDID (spec violation). - * Encoder will skip DP RX power down on disable_output to - * keep receiver powered all the time.*/ - case DP_BRANCH_DEVICE_ID_0010FA: - case DP_BRANCH_DEVICE_ID_0080E1: - case DP_BRANCH_DEVICE_ID_00E04C: - link->wa_flags.dp_keep_receiver_powered = true; - break; - - /* TODO: May need work around for other dongles. 
*/ - default: - link->wa_flags.dp_keep_receiver_powered = false; - break; - } - } else - link->wa_flags.dp_keep_receiver_powered = false; -} - -/* Read additional sink caps defined in source specific DPCD area - * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP) - */ -static bool dpcd_read_sink_ext_caps(struct dc_link *link) -{ - uint8_t dpcd_data; - - if (!link) - return false; - - if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK) - return false; - - link->dpcd_sink_ext_caps.raw = dpcd_data; - return true; -} - -enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) -{ - uint8_t lttpr_dpcd_data[8]; - enum dc_status status = DC_ERROR_UNEXPECTED; - bool is_lttpr_present = false; - - /* Logic to determine LTTPR support*/ - bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; - - if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support) - return false; - - /* By reading LTTPR capability, RX assumes that we will enable - * LTTPR extended aux timeout if LTTPR is present. - */ - status = core_link_read_dpcd(link, - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, - lttpr_dpcd_data, - sizeof(lttpr_dpcd_data)); - - link->dpcd_caps.lttpr_caps.revision.raw = - lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - link->dpcd_caps.lttpr_caps.max_link_rate = - lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - link->dpcd_caps.lttpr_caps.phy_repeater_cnt = - lttpr_dpcd_data[DP_PHY_REPEATER_CNT - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - link->dpcd_caps.lttpr_caps.max_lane_count = - lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - link->dpcd_caps.lttpr_caps.mode = - lttpr_dpcd_data[DP_PHY_REPEATER_MODE - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - link->dpcd_caps.lttpr_caps.max_ext_timeout = - lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw = - lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw = - lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - - /* If this chip cap is set, at least one retimer must exist in the chain - * Override count to 1 if we receive a known bad count (0 or an invalid value) - */ - if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && - (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) { - ASSERT(0); - link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80; - DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - } - - /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. 
*/ - is_lttpr_present = dp_is_lttpr_present(link); - - if (is_lttpr_present) - CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); - - DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present); - return status; -} - -bool dp_is_lttpr_present(struct dc_link *link) -{ - return (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && - link->dpcd_caps.lttpr_caps.max_lane_count > 0 && - link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && - link->dpcd_caps.lttpr_caps.revision.raw >= 0x14); -} - -enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting) -{ - enum dp_link_encoding encoding = dp_get_link_encoding_format(link_setting); - - if (encoding == DP_8b_10b_ENCODING) - return dp_decide_8b_10b_lttpr_mode(link); - else if (encoding == DP_128b_132b_ENCODING) - return dp_decide_128b_132b_lttpr_mode(link); - - ASSERT(0); - return LTTPR_MODE_NON_LTTPR; -} - -void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override) -{ - if (!dp_is_lttpr_present(link)) - return; - - if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) { - *override = LTTPR_MODE_TRANSPARENT; - } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) { - *override = LTTPR_MODE_NON_TRANSPARENT; - } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) { - *override = LTTPR_MODE_NON_LTTPR; - } - DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override)); -} - -enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link) -{ - bool is_lttpr_present = dp_is_lttpr_present(link); - bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable; - bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware; - - if (!is_lttpr_present) - return LTTPR_MODE_NON_LTTPR; - - if (vbios_lttpr_aware) { - if (vbios_lttpr_force_non_transparent) { - DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); - return LTTPR_MODE_NON_TRANSPARENT; - } else { - DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); - return LTTPR_MODE_TRANSPARENT; - } - } - - if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A && - link->dc->caps.extended_aux_timeout_support) { - DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n"); - return LTTPR_MODE_NON_TRANSPARENT; - } - - DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n"); - return LTTPR_MODE_NON_LTTPR; -} - -enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link) -{ - enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR; - - if (dp_is_lttpr_present(link)) - mode = LTTPR_MODE_NON_TRANSPARENT; - - DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode); - return mode; -} - -static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) -{ - union dmub_rb_cmd cmd; - - if (!link->ctx->dmub_srv || - link->ep_type != DISPLAY_ENDPOINT_PHY || - link->link_enc->features.flags.bits.DP_IS_USB_C == 0) - return false; - - memset(&cmd, 0, sizeof(cmd)); - cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID; - cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); - cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( - link->dc, link->link_enc->transmitter); - if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) && - cmd.cable_id.header.ret_status == 1) { - cable_id->raw = 
cmd.cable_id.data.output_raw; - DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw); - } - return cmd.cable_id.header.ret_status == 1; -} - -static union dp_cable_id intersect_cable_id( - union dp_cable_id *a, union dp_cable_id *b) -{ - union dp_cable_id out; - - out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY, - b->bits.UHBR10_20_CAPABILITY); - out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY, - b->bits.UHBR13_5_CAPABILITY); - out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE); - - return out; -} - -static void retrieve_cable_id(struct dc_link *link) -{ - union dp_cable_id usbc_cable_id; - - link->dpcd_caps.cable_id.raw = 0; - core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, - &link->dpcd_caps.cable_id.raw, sizeof(uint8_t)); - - if (get_usbc_cable_id(link, &usbc_cable_id)) - link->dpcd_caps.cable_id = intersect_cable_id( - &link->dpcd_caps.cable_id, &usbc_cable_id); -} - -static enum dc_status wake_up_aux_channel(struct dc_link *link) -{ - enum dc_status status = DC_ERROR_UNEXPECTED; - uint32_t aux_channel_retry_cnt = 0; - uint8_t dpcd_power_state = '\0'; - - while (status != DC_OK && aux_channel_retry_cnt < 10) { - status = core_link_read_dpcd(link, DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - - /* Delay 1 ms if AUX CH is in power down state. Based on spec - * section 2.3.1.2, if AUX CH may be powered down due to - * write to DPCD 600h = 2. Sink AUX CH is monitoring differential - * signal and may need up to 1 ms before being able to reply. - */ - if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) { - udelay(1000); - aux_channel_retry_cnt++; - } - } - - if (status != DC_OK) { - dpcd_power_state = DP_SET_POWER_D0; - status = core_link_write_dpcd( - link, - DP_SET_POWER, - &dpcd_power_state, - sizeof(dpcd_power_state)); - - dpcd_power_state = DP_SET_POWER_D3; - status = core_link_write_dpcd( - link, - DP_SET_POWER, - &dpcd_power_state, - sizeof(dpcd_power_state)); - return DC_ERROR_UNEXPECTED; - } - - return DC_OK; -} - -static bool retrieve_link_cap(struct dc_link *link) -{ - /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, - * which means size 16 will be good for both of those DPCD register block reads - */ - uint8_t dpcd_data[16]; - /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. - */ - uint8_t dpcd_dprx_data = '\0'; - - struct dp_device_vendor_id sink_id; - union down_stream_port_count down_strm_port_count; - union edp_configuration_cap edp_config_cap; - union dp_downstream_port_present ds_port = { 0 }; - enum dc_status status = DC_ERROR_UNEXPECTED; - uint32_t read_dpcd_retry_cnt = 3; - int i; - struct dp_sink_hw_fw_revision dp_hw_fw_revision; - const uint32_t post_oui_delay = 30; // 30ms - - memset(dpcd_data, '\0', sizeof(dpcd_data)); - memset(&down_strm_port_count, - '\0', sizeof(union down_stream_port_count)); - memset(&edp_config_cap, '\0', - sizeof(union edp_configuration_cap)); - - /* if extended timeout is supported in hardware, - * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer - * CTS 4.2.1.1 regression introduced by CTS specs requirement update. 
- */ - dc_link_aux_try_to_configure_timeout(link->ddc, - LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); - - status = dp_retrieve_lttpr_cap(link); - - if (status != DC_OK) { - status = wake_up_aux_channel(link); - if (status == DC_OK) - dp_retrieve_lttpr_cap(link); - else - return false; - } - - if (dp_is_lttpr_present(link)) - configure_lttpr_mode_transparent(link); - - /* Read DP tunneling information. */ - status = dpcd_get_tunneling_device_data(link); - - dpcd_set_source_specific_data(link); - /* Sink may need to configure internals based on vendor, so allow some - * time before proceeding with possibly vendor specific transactions - */ - msleep(post_oui_delay); - - for (i = 0; i < read_dpcd_retry_cnt; i++) { - status = core_link_read_dpcd( - link, - DP_DPCD_REV, - dpcd_data, - sizeof(dpcd_data)); - if (status == DC_OK) - break; - } - - if (status != DC_OK) { - dm_error("%s: Read receiver caps dpcd data failed.\n", __func__); - return false; - } - - if (!dp_is_lttpr_present(link)) - dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); - - { - union training_aux_rd_interval aux_rd_interval; - - aux_rd_interval.raw = - dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; - - link->dpcd_caps.ext_receiver_cap_field_present = - aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1; - - if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) { - uint8_t ext_cap_data[16]; - - memset(ext_cap_data, '\0', sizeof(ext_cap_data)); - for (i = 0; i < read_dpcd_retry_cnt; i++) { - status = core_link_read_dpcd( - link, - DP_DP13_DPCD_REV, - ext_cap_data, - sizeof(ext_cap_data)); - if (status == DC_OK) { - memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data)); - break; - } - } - if (status != DC_OK) - dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__); - } - } - - link->dpcd_caps.dpcd_rev.raw = - dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; - - if (link->dpcd_caps.ext_receiver_cap_field_present) { - for (i = 0; i < read_dpcd_retry_cnt; i++) { - status = core_link_read_dpcd( - link, - DP_DPRX_FEATURE_ENUMERATION_LIST, - &dpcd_dprx_data, - sizeof(dpcd_dprx_data)); - if (status == DC_OK) - break; - } - - link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data; - - if (status != DC_OK) - dm_error("%s: Read DPRX caps data failed.\n", __func__); - } - - else { - link->dpcd_caps.dprx_feature.raw = 0; - } - - - /* Error condition checking... - * It is impossible for Sink to report Max Lane Count = 0. - * It is possible for Sink to report Max Link Rate = 0, if it is - * an eDP device that is reporting specialized link rates in the - * SUPPORTED_LINK_RATE table. 
- */ - if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) - return false; - - ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - - DP_DPCD_REV]; - - read_dp_device_vendor_id(link); - - /* TODO - decouple raw mst capability from policy decision */ - link->dpcd_caps.is_mst_capable = is_mst_supported(link); - - get_active_converter_info(ds_port.byte, link); - - dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); - - down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - - DP_DPCD_REV]; - - link->dpcd_caps.allow_invalid_MSA_timing_param = - down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; - - link->dpcd_caps.max_ln_count.raw = dpcd_data[ - DP_MAX_LANE_COUNT - DP_DPCD_REV]; - - link->dpcd_caps.max_down_spread.raw = dpcd_data[ - DP_MAX_DOWNSPREAD - DP_DPCD_REV]; - - link->reported_link_cap.lane_count = - link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; - link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw( - dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]); - link->reported_link_cap.link_spread = - link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? - LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; - - edp_config_cap.raw = dpcd_data[ - DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; - link->dpcd_caps.panel_mode_edp = - edp_config_cap.bits.ALT_SCRAMBLER_RESET; - link->dpcd_caps.dpcd_display_control_capable = - edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; - link->dpcd_caps.channel_coding_cap.raw = - dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV]; - link->test_pattern_enabled = false; - link->compliance_test_state.raw = 0; - - /* read sink count */ - core_link_read_dpcd(link, - DP_SINK_COUNT, - &link->dpcd_caps.sink_count.raw, - sizeof(link->dpcd_caps.sink_count.raw)); - - /* read sink ieee oui */ - core_link_read_dpcd(link, - DP_SINK_OUI, - (uint8_t *)(&sink_id), - sizeof(sink_id)); - - link->dpcd_caps.sink_dev_id = - (sink_id.ieee_oui[0] << 16) + - (sink_id.ieee_oui[1] << 8) + - (sink_id.ieee_oui[2]); - - memmove( - link->dpcd_caps.sink_dev_id_str, - sink_id.ieee_device_id, - sizeof(sink_id.ieee_device_id)); - - /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */ - { - uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 }; - - if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && - !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017, - sizeof(str_mbp_2017))) { - link->reported_link_cap.link_rate = 0x0c; - } - } - - core_link_read_dpcd( - link, - DP_SINK_HW_REVISION_START, - (uint8_t *)&dp_hw_fw_revision, - sizeof(dp_hw_fw_revision)); - - link->dpcd_caps.sink_hw_revision = - dp_hw_fw_revision.ieee_hw_rev; - - memmove( - link->dpcd_caps.sink_fw_revision, - dp_hw_fw_revision.ieee_fw_rev, - sizeof(dp_hw_fw_revision.ieee_fw_rev)); - - /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */ - { - uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 }; - uint8_t fwrev_mbp_2018[] = { 7, 4 }; - uint8_t fwrev_mbp_2018_vega[] = { 8, 4 }; - - /* We also check for the firmware revision as 16,1 models have an - * identical device id and are incorrectly quirked otherwise. 
- */ - if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && - !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018, - sizeof(str_mbp_2018)) && - (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018, - sizeof(fwrev_mbp_2018)) || - !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega, - sizeof(fwrev_mbp_2018_vega)))) { - link->reported_link_cap.link_rate = LINK_RATE_RBR2; - } - } - - memset(&link->dpcd_caps.dsc_caps, '\0', - sizeof(link->dpcd_caps.dsc_caps)); - memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); - /* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */ - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) { - status = core_link_read_dpcd( - link, - DP_FEC_CAPABILITY, - &link->dpcd_caps.fec_cap.raw, - sizeof(link->dpcd_caps.fec_cap.raw)); - status = core_link_read_dpcd( - link, - DP_DSC_SUPPORT, - link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, - sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); - if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { - status = core_link_read_dpcd( - link, - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, - link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, - sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); - DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); - DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", - link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); - DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x", - link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1); - DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x", - link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH); - } - - /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode - * only if required. - */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && - link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around && - link->dpcd_caps.is_branch_dev && - link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && - link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 && - (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE || - link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) { - /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA. - * Clear FEC and DSC capabilities as a work around if that is not the case. 
- */ - link->wa_flags.dpia_forced_tbt3_mode = true; - memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); - memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); - DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index); - } else - link->wa_flags.dpia_forced_tbt3_mode = false; - } - - if (!dpcd_read_sink_ext_caps(link)) - link->dpcd_sink_ext_caps.raw = 0; - - if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { - DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index); - - core_link_read_dpcd(link, - DP_128b_132b_SUPPORTED_LINK_RATES, - &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw, - sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw)); - if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20) - link->reported_link_cap.link_rate = LINK_RATE_UHBR20; - else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5) - link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5; - else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10) - link->reported_link_cap.link_rate = LINK_RATE_UHBR10; - else - dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__); - DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index); - DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz", - link->reported_link_cap.link_rate / 100, - link->reported_link_cap.link_rate % 100); - - core_link_read_dpcd(link, - DP_SINK_VIDEO_FALLBACK_FORMATS, - &link->dpcd_caps.fallback_formats.raw, - sizeof(link->dpcd_caps.fallback_formats.raw)); - DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index); - if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support) - DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported"); - if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support) - DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported"); - if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support) - DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported"); - if (link->dpcd_caps.fallback_formats.raw == 0) { - DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported"); - link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1; - } - - core_link_read_dpcd(link, - DP_FEC_CAPABILITY_1, - &link->dpcd_caps.fec_cap1.raw, - sizeof(link->dpcd_caps.fec_cap1.raw)); - DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index); - if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE) - DC_LOG_DP2("\tFEC aggregated error counters are supported"); - } - - retrieve_cable_id(link); - dpcd_write_cable_id_to_dprx(link); - - /* Connectivity log: detection */ - CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); - - return true; -} - -bool dp_overwrite_extended_receiver_cap(struct dc_link *link) -{ - uint8_t dpcd_data[16]; - uint32_t read_dpcd_retry_cnt = 3; - enum dc_status status = DC_ERROR_UNEXPECTED; - union dp_downstream_port_present ds_port = { 0 }; - union down_stream_port_count down_strm_port_count; - union edp_configuration_cap edp_config_cap; - - int i; - - for (i = 0; i < read_dpcd_retry_cnt; i++) { - status = core_link_read_dpcd( - link, - DP_DPCD_REV, - dpcd_data, - sizeof(dpcd_data)); - if (status == DC_OK) - break; - } - - link->dpcd_caps.dpcd_rev.raw = - dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; - - if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) - return 
false; - - ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - - DP_DPCD_REV]; - - get_active_converter_info(ds_port.byte, link); - - down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - - DP_DPCD_REV]; - - link->dpcd_caps.allow_invalid_MSA_timing_param = - down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; - - link->dpcd_caps.max_ln_count.raw = dpcd_data[ - DP_MAX_LANE_COUNT - DP_DPCD_REV]; - - link->dpcd_caps.max_down_spread.raw = dpcd_data[ - DP_MAX_DOWNSPREAD - DP_DPCD_REV]; - - link->reported_link_cap.lane_count = - link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; - link->reported_link_cap.link_rate = dpcd_data[ - DP_MAX_LINK_RATE - DP_DPCD_REV]; - link->reported_link_cap.link_spread = - link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? - LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; - - edp_config_cap.raw = dpcd_data[ - DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; - link->dpcd_caps.panel_mode_edp = - edp_config_cap.bits.ALT_SCRAMBLER_RESET; - link->dpcd_caps.dpcd_display_control_capable = - edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; - - return true; -} - -bool detect_dp_sink_caps(struct dc_link *link) -{ - return retrieve_link_cap(link); -} - -static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz) -{ - enum dc_link_rate link_rate; - // LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation. - switch (link_rate_in_khz) { - case 1620000: - link_rate = LINK_RATE_LOW; // Rate_1 (RBR) - 1.62 Gbps/Lane - break; - case 2160000: - link_rate = LINK_RATE_RATE_2; // Rate_2 - 2.16 Gbps/Lane - break; - case 2430000: - link_rate = LINK_RATE_RATE_3; // Rate_3 - 2.43 Gbps/Lane - break; - case 2700000: - link_rate = LINK_RATE_HIGH; // Rate_4 (HBR) - 2.70 Gbps/Lane - break; - case 3240000: - link_rate = LINK_RATE_RBR2; // Rate_5 (RBR2) - 3.24 Gbps/Lane - break; - case 4320000: - link_rate = LINK_RATE_RATE_6; // Rate_6 - 4.32 Gbps/Lane - break; - case 5400000: - link_rate = LINK_RATE_HIGH2; // Rate_7 (HBR2) - 5.40 Gbps/Lane - break; - case 8100000: - link_rate = LINK_RATE_HIGH3; // Rate_8 (HBR3) - 8.10 Gbps/Lane - break; - default: - link_rate = LINK_RATE_UNKNOWN; - break; - } - return link_rate; -} - -void detect_edp_sink_caps(struct dc_link *link) -{ - uint8_t supported_link_rates[16]; - uint32_t entry; - uint32_t link_rate_in_khz; - enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; - uint8_t backlight_adj_cap; - uint8_t general_edp_cap; - - retrieve_link_cap(link); - link->dpcd_caps.edp_supported_link_rates_count = 0; - memset(supported_link_rates, 0, sizeof(supported_link_rates)); - - /* - * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. - * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" - */ - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && - (link->panel_config.ilr.optimize_edp_link_rate || - link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) { - // Read DPCD 00010h - 0001Fh 16 bytes at one shot - core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, - supported_link_rates, sizeof(supported_link_rates)); - - for (entry = 0; entry < 16; entry += 2) { - // DPCD register reports per-lane link rate = 16-bit link rate capability - // value X 200 kHz. Need multiplier to find link rate in kHz. 
- link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + - supported_link_rates[entry]) * 200; - - if (link_rate_in_khz != 0) { - link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz); - link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate; - link->dpcd_caps.edp_supported_link_rates_count++; - - if (link->reported_link_cap.link_rate < link_rate) - link->reported_link_cap.link_rate = link_rate; - } - } - } - core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP, - &backlight_adj_cap, sizeof(backlight_adj_cap)); - - link->dpcd_caps.dynamic_backlight_capable_edp = - (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false; - - core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1, - &general_edp_cap, sizeof(general_edp_cap)); - - link->dpcd_caps.set_power_state_capable_edp = - (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false; - - dc_link_set_default_brightness_aux(link); - - core_link_read_dpcd(link, DP_EDP_DPCD_REV, - &link->dpcd_caps.edp_rev, - sizeof(link->dpcd_caps.edp_rev)); - /* - * PSR is only valid for eDP v1.3 or higher. - */ - if (link->dpcd_caps.edp_rev >= DP_EDP_13) { - core_link_read_dpcd(link, DP_PSR_SUPPORT, - &link->dpcd_caps.psr_info.psr_version, - sizeof(link->dpcd_caps.psr_info.psr_version)); - if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) - core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY, - &link->dpcd_caps.psr_info.force_psrsu_cap, - sizeof(link->dpcd_caps.psr_info.force_psrsu_cap)); - core_link_read_dpcd(link, DP_PSR_CAPS, - &link->dpcd_caps.psr_info.psr_dpcd_caps.raw, - sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw)); - if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) { - core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY, - &link->dpcd_caps.psr_info.psr2_su_y_granularity_cap, - sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)); - } - } - - /* - * ALPM is only valid for eDP v1.4 or higher. - */ - if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14) - core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP, - &link->dpcd_caps.alpm_caps.raw, - sizeof(link->dpcd_caps.alpm_caps.raw)); -} - -void dc_link_dp_enable_hpd(const struct dc_link *link) -{ - struct link_encoder *encoder = link->link_enc; - - if (encoder != NULL && encoder->funcs->enable_hpd != NULL) - encoder->funcs->enable_hpd(encoder); -} - -void dc_link_dp_disable_hpd(const struct dc_link *link) -{ - struct link_encoder *encoder = link->link_enc; - - if (encoder != NULL && encoder->funcs->enable_hpd != NULL) - encoder->funcs->disable_hpd(encoder); -} - static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern) { if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern && @@ -6542,88 +1651,6 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) } } -void dpcd_set_source_specific_data(struct dc_link *link) -{ - if (!link->dc->vendor_signature.is_valid) { - enum dc_status __maybe_unused result_write_min_hblank = DC_NOT_SUPPORTED; - struct dpcd_amd_signature amd_signature = {0}; - struct dpcd_amd_device_id amd_device_id = {0}; - - amd_device_id.device_id_byte1 = - (uint8_t)(link->ctx->asic_id.chip_id); - amd_device_id.device_id_byte2 = - (uint8_t)(link->ctx->asic_id.chip_id >> 8); - amd_device_id.dce_version = - (uint8_t)(link->ctx->dce_version); - amd_device_id.dal_version_byte1 = 0x0; // needed? where to get? - amd_device_id.dal_version_byte2 = 0x0; // needed? where to get? 
- - core_link_read_dpcd(link, DP_SOURCE_OUI, - (uint8_t *)(&amd_signature), - sizeof(amd_signature)); - - if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) && - (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) && - (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) { - - amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0; - amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0; - amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A; - - core_link_write_dpcd(link, DP_SOURCE_OUI, - (uint8_t *)(&amd_signature), - sizeof(amd_signature)); - } - - core_link_write_dpcd(link, DP_SOURCE_OUI+0x03, - (uint8_t *)(&amd_device_id), - sizeof(amd_device_id)); - - if (link->ctx->dce_version >= DCN_VERSION_2_0 && - link->dc->caps.min_horizontal_blanking_period != 0) { - - uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period; - - result_write_min_hblank = core_link_write_dpcd(link, - DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size), - sizeof(hblank_size)); - } - DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, - WPP_BIT_FLAG_DC_DETECTION_DP_CAPS, - "result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'", - result_write_min_hblank, - link->link_index, - link->ctx->dce_version, - DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, - link->dc->caps.min_horizontal_blanking_period, - link->dpcd_caps.branch_dev_id, - link->dpcd_caps.branch_dev_name[0], - link->dpcd_caps.branch_dev_name[1], - link->dpcd_caps.branch_dev_name[2], - link->dpcd_caps.branch_dev_name[3], - link->dpcd_caps.branch_dev_name[4], - link->dpcd_caps.branch_dev_name[5]); - } else { - core_link_write_dpcd(link, DP_SOURCE_OUI, - link->dc->vendor_signature.data.raw, - sizeof(link->dc->vendor_signature.data.raw)); - } -} - -void dpcd_write_cable_id_to_dprx(struct dc_link *link) -{ - if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED || - link->dpcd_caps.cable_id.raw == 0 || - link->dprx_states.cable_id_written) - return; - - core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, - &link->dpcd_caps.cable_id.raw, - sizeof(link->dpcd_caps.cable_id.raw)); - - link->dprx_states.cable_id_written = 1; -} - bool dc_link_set_backlight_level_nits(struct dc_link *link, bool isHDR, uint32_t backlight_millinits, @@ -6668,9 +1695,9 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link, link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, + if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, dpcd_backlight_get.raw, - sizeof(union dpcd_source_backlight_get)) != DC_OK) + sizeof(union dpcd_source_backlight_get))) return false; *backlight_millinits_avg = @@ -6709,9 +1736,9 @@ bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_milli link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, (uint8_t *) backlight_millinits, - sizeof(uint32_t)) != DC_OK) + sizeof(uint32_t))) return false; return true; @@ -6769,7 +1796,7 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); if (!crtc_timing->flags.DSC) - decide_edp_link_settings(link, &link_setting, req_bw); + dc_link_decide_edp_link_settings(link, &link_setting, req_bw); else decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN); @@ -6783,35 +1810,6 @@ 
bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin return false; } -enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings) -{ - if ((link_settings->link_rate >= LINK_RATE_LOW) && - (link_settings->link_rate <= LINK_RATE_HIGH3)) - return DP_8b_10b_ENCODING; - else if ((link_settings->link_rate >= LINK_RATE_UHBR10) && - (link_settings->link_rate <= LINK_RATE_UHBR20)) - return DP_128b_132b_ENCODING; - return DP_UNKNOWN_ENCODING; -} - -enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link) -{ - struct dc_link_settings link_settings = {0}; - - if (!dc_is_dp_signal(link->connector_signal)) - return DP_UNKNOWN_ENCODING; - - if (link->preferred_link_setting.lane_count != - LANE_COUNT_UNKNOWN && - link->preferred_link_setting.link_rate != - LINK_RATE_UNKNOWN) { - link_settings = link->preferred_link_setting; - } else { - decide_mst_link_settings(link, &link_settings); - } - - return dp_get_link_encoding_format(&link_settings); -} // TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST) static void get_lane_status( @@ -7010,15 +2008,6 @@ struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( return avg_time_slots_per_mtp; } -bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) -{ - /* If this assert is hit then we have a link encoder dynamic management issue */ - ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); - return (pipe_ctx->stream_res.hpo_dp_stream_enc && - pipe_ctx->link_res.hpo_dp_link_enc && - dc_is_dp_signal(pipe_ctx->stream->signal)); -} - void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd) { if (link->connector_signal != SIGNAL_TYPE_EDP) @@ -7036,20 +2025,6 @@ void dc_link_clear_dprx_states(struct dc_link *link) memset(&link->dprx_states, 0, sizeof(link->dprx_states)); } -void dp_receiver_power_ctrl(struct dc_link *link, bool on) -{ - uint8_t state; - - state = on ? 
DP_POWER_STATE_D0 : DP_POWER_STATE_D3; - - if (link->sync_lt_in_progress) - return; - - core_link_write_dpcd(link, DP_SET_POWER, &state, - sizeof(state)); - -} - void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode) { if (link != NULL && link->dc->debug.enable_driver_sequence_debug) @@ -7057,50 +2032,6 @@ void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode) &dp_test_mode, sizeof(dp_test_mode)); } - -static uint8_t convert_to_count(uint8_t lttpr_repeater_count) -{ - switch (lttpr_repeater_count) { - case 0x80: // 1 lttpr repeater - return 1; - case 0x40: // 2 lttpr repeaters - return 2; - case 0x20: // 3 lttpr repeaters - return 3; - case 0x10: // 4 lttpr repeaters - return 4; - case 0x08: // 5 lttpr repeaters - return 5; - case 0x04: // 6 lttpr repeaters - return 6; - case 0x02: // 7 lttpr repeaters - return 7; - case 0x01: // 8 lttpr repeaters - return 8; - default: - break; - } - return 0; // invalid value -} - -static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) -{ - return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset); -} - -void dp_enable_link_phy( - struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal, - enum clock_source_id clock_source, - const struct dc_link_settings *link_settings) -{ - link->cur_link_settings = *link_settings; - link->dc->hwss.enable_dp_link_output(link, link_res, signal, - clock_source, link_settings); - dp_receiver_power_ctrl(link, true); -} - void edp_add_delay_for_T9(struct dc_link *link) { if (link && link->panel_config.pps.extra_delay_backlight_off > 0) @@ -7166,112 +2097,6 @@ bool edp_receiver_ready_T7(struct dc_link *link) return result; } -void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal) -{ - struct dc *dc = link->ctx->dc; - - if (!link->wa_flags.dp_keep_receiver_powered) - dp_receiver_power_ctrl(link, false); - - dc->hwss.disable_link_output(link, link_res, signal); - /* Clear current link setting.*/ - memset(&link->cur_link_settings, 0, - sizeof(link->cur_link_settings)); - - if (dc->clk_mgr->funcs->notify_link_rate_change) - dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); -} - -void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal) -{ - /* MST disable link only when no stream use the link */ - if (link->mst_stream_alloc_table.stream_count > 0) - return; - - dp_disable_link_phy(link, link_res, signal); - - /* set the sink to SST mode after disabling the link */ - dp_enable_mst_on_sink(link, false); -} - -bool dp_set_hw_training_pattern( - struct dc_link *link, - const struct link_resource *link_res, - enum dc_dp_training_pattern pattern, - uint32_t offset) -{ - enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; - - switch (pattern) { - case DP_TRAINING_PATTERN_SEQUENCE_1: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1; - break; - case DP_TRAINING_PATTERN_SEQUENCE_2: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2; - break; - case DP_TRAINING_PATTERN_SEQUENCE_3: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3; - break; - case DP_TRAINING_PATTERN_SEQUENCE_4: - test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; - break; - case DP_128b_132b_TPS1: - test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; - break; - case DP_128b_132b_TPS2: - test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; - break; - default: - break; - } - - 
dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0); - - return true; -} - -void dp_set_hw_lane_settings( - struct dc_link *link, - const struct link_resource *link_res, - const struct link_training_settings *link_settings, - uint32_t offset) -{ - const struct link_hwss *link_hwss = get_link_hwss(link, link_res); - - if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset)) - return; - - if (link_hwss->ext.set_dp_lane_settings) - link_hwss->ext.set_dp_lane_settings(link, link_res, - &link_settings->link_settings, - link_settings->hw_lane_settings); - - memmove(link->cur_lane_setting, - link_settings->hw_lane_settings, - sizeof(link->cur_lane_setting)); -} - -void dp_set_hw_test_pattern( - struct dc_link *link, - const struct link_resource *link_res, - enum dp_test_pattern test_pattern, - uint8_t *custom_pattern, - uint32_t custom_pattern_size) -{ - const struct link_hwss *link_hwss = get_link_hwss(link, link_res); - struct encoder_set_dp_phy_pattern_param pattern_param = {0}; - - pattern_param.dp_phy_pattern = test_pattern; - pattern_param.custom_pattern = custom_pattern; - pattern_param.custom_pattern_size = custom_pattern_size; - pattern_param.dp_panel_mode = dp_get_panel_mode(link); - - if (link_hwss->ext.set_dp_link_test_pattern) - link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param); -} - void dp_retrain_link_dp_test(struct dc_link *link, struct dc_link_settings *link_setting, bool skip_video_pattern) @@ -7390,7 +2215,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) /* Enable DSC in encoder */ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) - && !is_dp_128b_132b_signal(pipe_ctx)) { + && !link_is_dp_128b_132b_signal(pipe_ctx)) { DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); dsc_optc_config_log(dsc, &dsc_optc_cfg); pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, @@ -7416,7 +2241,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) /* disable DSC in stream encoder */ if (dc_is_dp_signal(stream->signal)) { - if (is_dp_128b_132b_signal(pipe_ctx)) + if (link_is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.hpo_dp_stream_enc, false, @@ -7498,7 +2323,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_u memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps)); if (dc_is_dp_signal(stream->signal)) { DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); - if (is_dp_128b_132b_signal(pipe_ctx)) + if (link_is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.hpo_dp_stream_enc, true, @@ -7515,7 +2340,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_u /* disable DSC PPS in stream encoder */ memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps)); if (dc_is_dp_signal(stream->signal)) { - if (is_dp_128b_132b_signal(pipe_ctx)) + if (link_is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.hpo_dp_stream_enc, false, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 
614f022d1cff..fa2ba3fc683b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -25,6 +25,7 @@ #include "link_enc_cfg.h" #include "resource.h" #include "dc_link_dp.h" +#include "link.h" #define DC_LOGGER dc->ctx->logger @@ -48,7 +49,7 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream) /* DIGs do not support DP2.0 streams with 128b/132b encoding. */ struct dc_link_settings link_settings = {0}; - decide_link_settings(stream, &link_settings); + link_decide_link_settings(stream, &link_settings); if ((link_settings.link_rate >= LINK_RATE_LOW) && link_settings.link_rate <= LINK_RATE_HIGH3) { is_dig_stream = true; @@ -305,15 +306,17 @@ void link_enc_cfg_link_encs_assign( for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = streams[i]; + /* skip it if the link is mappable endpoint. */ + if (stream->link->is_dig_mapping_flexible) + continue; + /* Skip stream if not supported by DIG link encoder. */ if (!is_dig_link_enc_stream(stream)) continue; /* Physical endpoints have a fixed mapping to DIG link encoders. */ - if (!stream->link->is_dig_mapping_flexible) { - eng_id = stream->link->eng_id; - add_link_enc_assignment(state, stream, eng_id); - } + eng_id = stream->link->eng_id; + add_link_enc_assignment(state, stream, eng_id); } /* (b) Retain previous assignments for mappable endpoints if encoders still available. */ @@ -325,11 +328,12 @@ void link_enc_cfg_link_encs_assign( for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = state->streams[i]; - /* Skip stream if not supported by DIG link encoder. */ - if (!is_dig_link_enc_stream(stream)) + /* Skip it if the link is NOT mappable endpoint. */ + if (!stream->link->is_dig_mapping_flexible) continue; - if (!stream->link->is_dig_mapping_flexible) + /* Skip stream if not supported by DIG link encoder. */ + if (!is_dig_link_enc_stream(stream)) continue; for (j = 0; j < prev_state->stream_count; j++) { @@ -338,6 +342,7 @@ void link_enc_cfg_link_encs_assign( if (stream == prev_stream && stream->link == prev_stream->link && prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].valid) { eng_id = prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].eng_id; + if (is_avail_link_enc(state, eng_id, stream)) add_link_enc_assignment(state, stream, eng_id); } @@ -350,6 +355,15 @@ void link_enc_cfg_link_encs_assign( for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = streams[i]; + struct link_encoder *link_enc = NULL; + + /* Skip it if the link is NOT mappable endpoint. */ + if (!stream->link->is_dig_mapping_flexible) + continue; + + /* Skip if encoder assignment retained in step (b) above. */ + if (stream->link_enc) + continue; /* Skip stream if not supported by DIG link encoder. */ if (!is_dig_link_enc_stream(stream)) { @@ -358,24 +372,18 @@ void link_enc_cfg_link_encs_assign( } /* Mappable endpoints have a flexible mapping to DIG link encoders. */ - if (stream->link->is_dig_mapping_flexible) { - struct link_encoder *link_enc = NULL; - /* Skip if encoder assignment retained in step (b) above. */ - if (stream->link_enc) - continue; + /* For MST, multiple streams will share the same link / display + * endpoint. These streams should use the same link encoder + * assigned to that endpoint. 
+ */ + link_enc = get_link_enc_used_by_link(state, stream->link); + if (link_enc == NULL) + eng_id = find_first_avail_link_enc(stream->ctx, state); + else + eng_id = link_enc->preferred_engine; - /* For MST, multiple streams will share the same link / display - * endpoint. These streams should use the same link encoder - * assigned to that endpoint. - */ - link_enc = get_link_enc_used_by_link(state, stream->link); - if (link_enc == NULL) - eng_id = find_first_avail_link_enc(stream->ctx, state); - else - eng_id = link_enc->preferred_engine; - add_link_enc_assignment(state, stream, eng_id); - } + add_link_enc_assignment(state, stream, eng_id); } link_enc_cfg_validate(dc, state); @@ -420,10 +428,6 @@ void link_enc_cfg_link_enc_unassign( { enum engine_id eng_id = ENGINE_ID_UNKNOWN; - /* Only DIG link encoders. */ - if (!is_dig_link_enc_stream(stream)) - return; - if (stream->link_enc) eng_id = stream->link_enc->preferred_engine; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 06b5f49e0954..a5b5f8592c1b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -41,6 +41,7 @@ #include "dpcd_defs.h" #include "link_enc_cfg.h" #include "dc_link_dp.h" +#include "link.h" #include "virtual/virtual_link_hwss.h" #include "link/link_hwss_dio.h" #include "link/link_hwss_dpia.h" @@ -2213,7 +2214,7 @@ enum dc_status dc_remove_stream_from_ctx( del_pipe->stream_res.stream_enc, false); - if (is_dp_128b_132b_signal(del_pipe)) { + if (link_is_dp_128b_132b_signal(del_pipe)) { update_hpo_dp_stream_engine_usage( &new_ctx->res_ctx, dc->res_pool, del_pipe->stream_res.hpo_dp_stream_enc, @@ -2513,9 +2514,9 @@ enum dc_status resource_map_pool_resources( * and link settings */ if (dc_is_dp_signal(stream->signal)) { - if (!decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings)) + if (!link_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings)) return DC_FAIL_DP_LINK_BANDWIDTH; - if (dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { + if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { pipe_ctx->stream_res.hpo_dp_stream_enc = find_first_free_match_hpo_dp_stream_enc_for_link( &context->res_ctx, pool, stream); @@ -3763,7 +3764,7 @@ bool get_temp_dp_link_res(struct dc_link *link, memset(link_res, 0, sizeof(*link_res)); - if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx, dc->res_pool, link); if (!link_res->hpo_dp_link_enc) @@ -3810,8 +3811,6 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc, int i; struct pipe_ctx *pipe_ctx, *pipe_ctx_check; - DC_LOGGER_INIT(dc->ctx->logger); - pipe_ctx = &context->res_ctx.pipe_ctx[disabled_master_pipe_idx]; if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) || !IS_PIPE_SYNCD_VALID(pipe_ctx)) @@ -3822,15 +3821,19 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc, pipe_ctx_check = &context->res_ctx.pipe_ctx[i]; if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) && - IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) { - /* On dcn32, this error isn't fatal since hw supports odm transition in fast update*/ - if (dc->ctx->dce_version == DCN_VERSION_3_2 || - dc->ctx->dce_version == 
DCN_VERSION_3_21) - DC_LOG_DEBUG("DC: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n", - i, disabled_master_pipe_idx); - else - DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n", - i, disabled_master_pipe_idx); + IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) { + struct pipe_ctx *first_pipe = pipe_ctx_check; + + while (first_pipe->prev_odm_pipe) + first_pipe = first_pipe->prev_odm_pipe; + /* When ODM combine is enabled, this case is expected. If the disabled pipe + * is part of the ODM tree, then we should not print an error. + * */ + if (first_pipe->pipe_idx == disabled_master_pipe_idx) + continue; + + DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n", + i, disabled_master_pipe_idx); } } } @@ -3995,7 +3998,7 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) { - if (dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { + if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { if (pipe_ctx->stream_res.hpo_dp_stream_enc == NULL) { pipe_ctx->stream_res.hpo_dp_stream_enc = find_first_free_match_hpo_dp_stream_enc_for_link( diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 72963617553e..22e754ad22c8 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.217" +#define DC_VER "3.2.218" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -873,6 +873,7 @@ struct dc_debug_options { unsigned int dsc_delay_factor_wa_x1000; unsigned int min_prefetch_in_strobe_ns; bool disable_unbounded_requesting; + bool dig_fifo_off_in_blank; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 260ac4458870..be9aa1a71847 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -140,7 +140,8 @@ struct dc_vbios_funcs { enum bp_result (*enable_lvtma_control)( struct dc_bios *bios, uint8_t uc_pwr_on, - uint8_t panel_instance); + uint8_t panel_instance, + uint8_t bypass_panel_control_wait); enum bp_result (*get_soc_bb_info)( struct dc_bios *dcb, diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h index 7769bd099a5a..7b036a772b0c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h @@ -77,6 +77,32 @@ struct aux_reply_transaction_data { uint8_t *data; }; +struct aux_payload { + /* set following flag to read/write I2C data, + * reset it to read/write DPCD data */ + bool i2c_over_aux; + /* set following flag to write data, + * reset it to read data */ + bool write; + bool mot; + bool write_status_update; + + uint32_t address; + uint32_t length; + uint8_t *data; + /* + * used to return the reply type of the transaction + * ignored if NULL + */ + uint8_t *reply; + /* expressed in milliseconds + * zero means "use default value" + */ + uint32_t defer_delay; + +}; +#define DEFAULT_AUX_MAX_DATA_SIZE 16 + struct i2c_payload { bool write; uint8_t address; @@ -90,6 +116,8 @@ enum i2c_command_engine { I2C_COMMAND_ENGINE_HW }; +#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW + struct i2c_command { struct i2c_payload 
*payloads; uint8_t number_of_payloads; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 73f58ac3b93f..84da54358922 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -361,14 +361,10 @@ enum dpcd_downstream_port_detailed_type { union dwnstream_port_caps_byte2 { struct { uint8_t MAX_BITS_PER_COLOR_COMPONENT:2; -#if defined(CONFIG_DRM_AMD_DC_DCN) uint8_t MAX_ENCODED_LINK_BW_SUPPORT:3; uint8_t SOURCE_CONTROL_MODE_SUPPORT:1; uint8_t CONCURRENT_LINK_BRING_UP_SEQ_SUPPORT:1; uint8_t RESERVED:1; -#else - uint8_t RESERVED:6; -#endif } bits; uint8_t raw; }; @@ -406,7 +402,6 @@ union dwnstream_port_caps_byte3_hdmi { uint8_t raw; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) union hdmi_sink_encoded_link_bw_support { struct { uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3; @@ -428,7 +423,6 @@ union hdmi_encoded_link_bw { } bits; uint8_t raw; }; -#endif /*4-byte structure for detailed capabilities of a down-stream port (DP-to-TMDS converter).*/ @@ -975,6 +969,9 @@ struct dpcd_usb4_dp_tunneling_info { /* TODO - Use DRM header to replace above once available */ #endif // DP_INTRA_HOP_AUX_REPLY_INDICATION +#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE +#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 +#endif union dp_main_line_channel_coding_cap { struct { uint8_t DP_8b_10b_SUPPORTED :1; diff --git a/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h new file mode 100644 index 000000000000..faf0d175bf19 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h @@ -0,0 +1,114 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef DC_HDMI_TYPES_H +#define DC_HDMI_TYPES_H + +#include "os_types.h" + +/* Address range from 0x00 to 0x1F.*/ +#define DP_ADAPTOR_TYPE2_SIZE 0x20 +#define DP_ADAPTOR_TYPE2_REG_ID 0x10 +#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D +/* Identifies adaptor as Dual-mode adaptor */ +#define DP_ADAPTOR_TYPE2_ID 0xA0 +/* MHz*/ +#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600 +/* MHz*/ +#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25 +/* kHZ*/ +#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000 +/* kHZ*/ +#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000 + +struct dp_hdmi_dongle_signature_data { + int8_t id[15];/* "DP-HDMI ADAPTOR"*/ + uint8_t eot;/* end of transmition '\x4' */ +}; + +/* DP-HDMI dongle slave address for retrieving dongle signature*/ +#define DP_HDMI_DONGLE_ADDRESS 0x40 +static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR"; +#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04 + + +/* SCDC Address defines (HDMI 2.0)*/ +#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3 +#define HDMI_SCDC_ADDRESS 0x54 +#define HDMI_SCDC_SINK_VERSION 0x01 +#define HDMI_SCDC_SOURCE_VERSION 0x02 +#define HDMI_SCDC_UPDATE_0 0x10 +#define HDMI_SCDC_TMDS_CONFIG 0x20 +#define HDMI_SCDC_SCRAMBLER_STATUS 0x21 +#define HDMI_SCDC_CONFIG_0 0x30 +#define HDMI_SCDC_CONFIG_1 0x31 +#define HDMI_SCDC_SOURCE_TEST_REQ 0x35 +#define HDMI_SCDC_STATUS_FLAGS 0x40 +#define HDMI_SCDC_ERR_DETECT 0x50 +#define HDMI_SCDC_TEST_CONFIG 0xC0 + +union hdmi_scdc_update_read_data { + uint8_t byte[2]; + struct { + uint8_t STATUS_UPDATE:1; + uint8_t CED_UPDATE:1; + uint8_t RR_TEST:1; + uint8_t RESERVED:5; + uint8_t RESERVED2:8; + } fields; +}; + +union hdmi_scdc_status_flags_data { + uint8_t byte; + struct { + uint8_t CLOCK_DETECTED:1; + uint8_t CH0_LOCKED:1; + uint8_t CH1_LOCKED:1; + uint8_t CH2_LOCKED:1; + uint8_t RESERVED:4; + } fields; +}; + +union hdmi_scdc_ced_data { + uint8_t byte[11]; + struct { + uint8_t CH0_8LOW:8; + uint8_t CH0_7HIGH:7; + uint8_t CH0_VALID:1; + uint8_t CH1_8LOW:8; + uint8_t CH1_7HIGH:7; + uint8_t CH1_VALID:1; + uint8_t CH2_8LOW:8; + uint8_t CH2_7HIGH:7; + uint8_t CH2_VALID:1; + uint8_t CHECKSUM:8; + uint8_t RESERVED:8; + uint8_t RESERVED2:8; + uint8_t RESERVED3:8; + uint8_t RESERVED4:4; + } fields; +}; + +#endif /* DC_HDMI_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 8565bbb75177..48f6a5b09336 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -31,6 +31,7 @@ #include "grph_object_defs.h" struct link_resource; +enum aux_return_code_type; enum dc_link_fec_state { dc_link_fec_not_ready, @@ -158,11 +159,11 @@ struct dc_panel_config { struct dc_dpia_bw_alloc { int sink_verified_bw; // The Verified BW that sink can allocated and use that has been verified already int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated - int padding_bw; // The Padding "Un-used" BW allocated by CM for padding reasons int sink_max_bw; // The Max BW that sink can require/support int estimated_bw; // The estimated available BW for this DPIA int bw_granularity; // BW Granularity bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM + bool response_ready; // Response ready from the CM side }; /* @@ -293,6 +294,8 @@ struct dc_link { struct gpio *hpd_gpio; enum dc_link_fec_state fec_state; + bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly + struct dc_panel_config panel_config; 
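/* Illustrative note (not part of the patch): the SCDC register offsets that the
 * new dc_hdmi_types.h above carries are intended to be read over the regular
 * DDC/I2C path at HDMI_SCDC_ADDRESS. A minimal status check could look like the
 * sketch below; read_scdc_byte() is a hypothetical helper standing in for
 * whatever I2C read routine the caller already has:
 *
 *	union hdmi_scdc_status_flags_data status = {0};
 *
 *	if (read_scdc_byte(link, HDMI_SCDC_STATUS_FLAGS, &status.byte) &&
 *	    status.fields.CLOCK_DETECTED &&
 *	    status.fields.CH0_LOCKED &&
 *	    status.fields.CH1_LOCKED &&
 *	    status.fields.CH2_LOCKED) {
 *		// TMDS clock detected and all three channels locked
 *	}
 */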
struct phy_state phy_state; }; @@ -338,14 +341,13 @@ static inline bool dc_get_edp_link_panel_inst(const struct dc *dc, int edp_num, i; *inst_out = 0; - if (link->connector_signal != SIGNAL_TYPE_EDP || !link->local_sink) + if (link->connector_signal != SIGNAL_TYPE_EDP) return false; get_edp_links(dc, edp_links, &edp_num); for (i = 0; i < edp_num; i++) { if (link == edp_links[i]) break; - if (edp_links[i]->local_sink) - (*inst_out)++; + (*inst_out)++; } return true; } @@ -459,31 +461,6 @@ void dc_link_dp_set_drive_settings( const struct link_resource *link_res, struct link_training_settings *lt_settings); -bool dc_link_dp_perform_link_training_skip_aux( - struct dc_link *link, - const struct link_resource *link_res, - const struct dc_link_settings *link_setting); - -enum link_training_result dc_link_dp_perform_link_training( - struct dc_link *link, - const struct link_resource *link_res, - const struct dc_link_settings *link_settings, - bool skip_video_pattern); - -bool dc_link_dp_sync_lt_begin(struct dc_link *link); - -enum link_training_result dc_link_dp_sync_lt_attempt( - struct dc_link *link, - const struct link_resource *link_res, - struct dc_link_settings *link_setting, - struct dc_link_training_overrides *lt_settings); - -bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down); - -void dc_link_dp_enable_hpd(const struct dc_link *link); - -void dc_link_dp_disable_hpd(const struct dc_link *link); - bool dc_link_dp_set_test_pattern( struct dc_link *link, enum dp_test_pattern test_pattern, @@ -494,6 +471,21 @@ bool dc_link_dp_set_test_pattern( bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap); +/** + ***************************************************************************** + * Function: dc_link_enable_hpd_filter + * + * @brief + * If enable is true, programs HPD filter on associated HPD line to default + * values dependent on link->connector_signal + * + * If enable is false, programs HPD filter on associated HPD line with no + * delays on connect or disconnect + * + * @param [in] link: pointer to the dc link + * @param [in] enable: boolean specifying whether to enable hbd + ***************************************************************************** + */ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable); bool dc_link_is_dp_sink_present(struct dc_link *link); @@ -566,9 +558,6 @@ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map); /* restore link resource allocation state from a snapshot */ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map); void dc_link_clear_dprx_states(struct dc_link *link); -struct gpio *get_hpd_gpio(struct dc_bios *dcb, - struct graphics_object_id link_id, - struct gpio_service *gpio_service); void dp_trace_reset(struct dc_link *link); bool dc_dp_trace_is_initialized(struct dc_link *link); unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link, @@ -584,4 +573,20 @@ unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link); /* Destruct the mst topology of the link and reset the allocated payload table */ bool reset_cur_dp_mst_topology(struct dc_link *link); + +/* Attempt to transfer the given aux payload. This function does not perform + * retries or handle error states. The reply is returned in the payload->reply + * and the result through operation_result. Returns the number of bytes + * transferred,or -1 on a failure. 
+ */ +int dc_link_aux_transfer_raw(struct ddc_service *ddc, + struct aux_payload *payload, + enum aux_return_code_type *operation_result); + +enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link, + struct dc_link_settings *link_setting); +void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on); +bool dc_link_decide_edp_link_settings(struct dc_link *link, + struct dc_link_settings *link_setting, + uint32_t req_bw); #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index dc78e2404b48..c73a655bd687 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -33,6 +33,7 @@ #include "fixed31_32.h" #include "irq_types.h" #include "dc_dp_types.h" +#include "dc_hdmi_types.h" #include "dc_hw_types.h" #include "dal_types.h" #include "grph_object_defs.h" diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index e69f1899fbf0..c850ed49281f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -26,7 +26,7 @@ #ifndef __DAL_AUX_ENGINE_DCE110_H__ #define __DAL_AUX_ENGINE_DCE110_H__ -#include "i2caux_interface.h" +#include "gpio_service_interface.h" #include "inc/hw/aux_engine.h" enum aux_return_code_type; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 09260c23c3bd..fa314493ffc5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -29,7 +29,6 @@ #include "link_encoder.h" #include "dce_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 16e3b079fc56..a51bd21a796f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -46,6 +46,7 @@ #include "link_encoder.h" #include "link_enc_cfg.h" #include "link_hwss.h" +#include "link.h" #include "dc_link_dp.h" #include "dccg.h" #include "clock_source.h" @@ -54,7 +55,6 @@ #include "audio.h" #include "reg_helper.h" #include "panel_cntl.h" -#include "inc/link_dpcd.h" #include "dpcd_defs.h" /* include DCE11 register header files */ #include "dce/dce_11_0_d.h" @@ -737,7 +737,7 @@ void dce110_edp_wait_for_hpd_ready( /* obtain HPD */ /* TODO what to do with this? 
*/ - hpd = get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service); + hpd = link_get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service); if (!hpd) { BREAK_TO_DEBUGGER(); @@ -875,14 +875,16 @@ void dce110_edp_power_control( if (ctx->dc->ctx->dmub_srv && ctx->dc->debug.dmub_command_table) { - if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) + + if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) { bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, LVTMA_CONTROL_POWER_ON, - panel_instance); - else + panel_instance, link->link_powered_externally); + } else { bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, LVTMA_CONTROL_POWER_OFF, - panel_instance); + panel_instance, link->link_powered_externally); + } } bp_result = link_transmitter_control(ctx->dc_bios, &cntl); @@ -941,7 +943,6 @@ void dce110_edp_wait_for_T12( msleep(t12_duration - time_since_edp_poweroff_ms); } } - /*todo: cloned in stream enc, fix*/ /* * @brief @@ -1020,16 +1021,20 @@ void dce110_edp_backlight_control( DC_LOG_DC("edp_receiver_ready_T7 skipped\n"); } + /* Setting link_powered_externally will bypass delays in the backlight + * as they are not required if the link is being powered by a different + * source. + */ if (ctx->dc->ctx->dmub_srv && ctx->dc->debug.dmub_command_table) { if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, LVTMA_CONTROL_LCD_BLON, - panel_instance); + panel_instance, link->link_powered_externally); else ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, LVTMA_CONTROL_LCD_BLOFF, - panel_instance); + panel_instance, link->link_powered_externally); } link_transmitter_control(ctx->dc_bios, &cntl); @@ -1154,7 +1159,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc); } - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets( pipe_ctx->stream_res.hpo_dp_stream_enc); } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) @@ -1165,7 +1170,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) link_hwss->reset_stream_encoder(pipe_ctx); - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { dto_params.otg_inst = tg->inst; dto_params.timing = &pipe_ctx->stream->timing; dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; @@ -1174,7 +1179,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); } - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { /* TODO: This looks like a bug to me as we are disabling HPO IO when * we are just disabling a single HPO stream. Shouldn't we disable HPO * HW control only when HPOs for all streams are disabled? 
@@ -1216,7 +1221,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) link->dc->hwss.set_abm_immediate_disable(pipe_ctx); } - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank( pipe_ctx->stream_res.hpo_dp_stream_enc); @@ -1421,7 +1426,7 @@ static enum dc_status dce110_enable_stream_timing( if (false == pipe_ctx->clock_source->funcs->program_pix_clk( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, - dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings), + link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings), &pipe_ctx->pll_settings)) { BREAK_TO_DEBUGGER(); return DC_ERROR_UNEXPECTED; @@ -1525,7 +1530,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( * To do so, move calling function enable_stream_timing to only be done AFTER calling * function core_link_enable_stream */ - if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx))) + if (!(hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx))) /* */ /* Do not touch stream timing on seamless boot optimization. */ if (!pipe_ctx->stream->apply_seamless_boot_optimization) @@ -1567,7 +1572,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( * To do so, move calling function enable_stream_timing to only be done AFTER calling * function core_link_enable_stream */ - if (hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)) { + if (hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx)) { if (!pipe_ctx->stream->apply_seamless_boot_optimization) hws->funcs.enable_stream_timing(pipe_ctx, context, dc); } @@ -3047,13 +3052,13 @@ void dce110_enable_dp_link_output( pipes[i].clock_source->funcs->program_pix_clk( pipes[i].clock_source, &pipes[i].stream_res.pix_clk_params, - dp_get_link_encoding_format(link_settings), + link_dp_get_encoding_format(link_settings), &pipes[i].pll_settings); } } } - if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) { if (dc->clk_mgr->funcs->notify_link_rate_change) dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); } diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index 758f4b3b0087..394d83a97f33 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -71,7 +71,7 @@ void dce110_optimize_bandwidth( struct dc *dc, struct dc_state *context); -void dp_receiver_power_ctrl(struct dc_link *link, bool on); +void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on); void dce110_edp_power_control( struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index ba1c0621f0f8..e8752077571a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -172,6 +172,10 @@ struct dcn_hubbub_registers { uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C; uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D; uint32_t SDPIF_REQUEST_RATE_LIMIT; + uint32_t DCHUBBUB_SDPIF_CFG0; + uint32_t DCHUBBUB_SDPIF_CFG1; + uint32_t DCHUBBUB_CLOCK_CNTL; + uint32_t DCHUBBUB_MEM_PWR_MODE_CTRL; }; #define HUBBUB_REG_FIELD_LIST_DCN32(type) \ @@ -362,7 
+366,13 @@ struct dcn_hubbub_registers { type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;\ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;\ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D;\ - type SDPIF_REQUEST_RATE_LIMIT + type SDPIF_REQUEST_RATE_LIMIT;\ + type DISPCLK_R_DCHUBBUB_GATE_DIS;\ + type DCFCLK_R_DCHUBBUB_GATE_DIS;\ + type SDPIF_MAX_NUM_OUTSTANDING;\ + type DCHUBBUB_ARB_MAX_REQ_OUTSTAND;\ + type SDPIF_PORT_CONTROL;\ + type DET_MEM_PWR_LS_MODE struct dcn_hubbub_shift { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index fe2023f18b7d..0a0c930c1626 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -57,7 +57,7 @@ #include "dc_trace.h" #include "dce/dmub_outbox.h" #include "inc/dc_link_dp.h" -#include "inc/link_dpcd.h" +#include "link.h" #define DC_LOGGER_INIT(logger) @@ -921,7 +921,7 @@ enum dc_status dcn10_enable_stream_timing( if (false == pipe_ctx->clock_source->funcs->program_pix_clk( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, - dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings), + link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings), &pipe_ctx->pll_settings)) { BREAK_TO_DEBUGGER(); return DC_ERROR_UNEXPECTED; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index fbccb7263ad2..c4287147b853 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -29,7 +29,6 @@ #include "link_encoder.h" #include "dcn10_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 484e7cdf00b8..1527c3b4fb19 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -28,7 +28,7 @@ #include "dcn10_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "inc/link_dpcd.h" +#include "dc_link_dp.h" #include "dpcd_defs.h" #include "dcn30/dcn30_afmt.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 20c85ef2a957..6bfa16d9135f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -52,10 +52,10 @@ #include "dc_dmub_srv.h" #include "dce/dmub_hw_lock_mgr.h" #include "hw_sequencer.h" -#include "inc/link_dpcd.h" #include "dpcd_defs.h" #include "inc/link_enc_cfg.h" #include "link_hwss.h" +#include "link.h" #define DC_LOGGER_INIT(logger) @@ -712,7 +712,7 @@ enum dc_status dcn20_enable_stream_timing( if (false == pipe_ctx->clock_source->funcs->program_pix_clk( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, - dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings), + link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings), &pipe_ctx->pll_settings)) { BREAK_TO_DEBUGGER(); return DC_ERROR_UNEXPECTED; @@ -2383,7 +2383,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, params.link_settings.link_rate = link_settings->link_rate; - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { /* TODO - DP2.0 HW: 
Set ODM mode in dp hpo encoder here */ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank( pipe_ctx->stream_res.hpo_dp_stream_enc, @@ -2691,12 +2691,12 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) unsigned int k1_div = PIXEL_RATE_DIV_NA; unsigned int k2_div = PIXEL_RATE_DIV_NA; - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { if (dc->hwseq->funcs.setup_hpo_hw_control) dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true); } - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c index 2f9bfaeaba8d..51a57dae1811 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c @@ -29,7 +29,6 @@ #include "link_encoder.h" #include "dcn20_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 71c7237413ef..531f405d2554 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -62,7 +62,6 @@ #include "dml/display_mode_vba.h" #include "dcn20_dccg.h" #include "dcn20_vmid.h" -#include "dc_link_ddc.h" #include "dce/dce_panel_cntl.h" #include "navi10_ip_offset.h" @@ -90,6 +89,7 @@ #include "amdgpu_socbb.h" +#include "link.h" #define DC_LOGGER_INIT(logger) #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL @@ -1214,7 +1214,7 @@ static void dcn20_resource_destruct(struct dcn20_resource_pool *pool) dcn20_pp_smu_destroy(&pool->base.pp_smu); if (pool->base.oem_device != NULL) - dal_ddc_service_destroy(&pool->base.oem_device); + link_destroy_ddc_service(&pool->base.oem_device); } struct hubp *dcn20_hubp_create( @@ -2769,7 +2769,7 @@ static bool dcn20_resource_construct( ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); + pool->base.oem_device = link_create_ddc_service(&ddc_init_data); } else { pool->base.oem_device = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index b40489e678f9..cacf3f5298b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -29,7 +29,7 @@ #include "dcn20_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "inc/link_dpcd.h" +#include "dc_link_dp.h" #include "dpcd_defs.h" #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c index 7f9ec59ef443..8d31fa131cd6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c @@ -29,7 +29,6 @@ #include "link_encoder.h" #include "dcn201_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c index 0a1ba6e7081c..eb9abb9f9698 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -31,7 +31,6 @@ #include "dcn21_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c index 6f3c2fb60790..1fb8fd7afc95 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c @@ -29,7 +29,6 @@ #include "link_encoder.h" #include "dcn30_dio_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" /* #include "dcn3ag/dcn3ag_phy_fw.h" */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 8c5045711264..7360b3ce4283 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -51,7 +51,6 @@ #include "../dcn20/dcn20_hwseq.h" #include "dcn30_resource.h" #include "inc/dc_link_dp.h" -#include "inc/link_dpcd.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index c18c52a60100..feb4bb491525 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -60,7 +60,7 @@ #include "dml/display_mode_vba.h" #include "dcn30/dcn30_dccg.h" #include "dcn10/dcn10_resource.h" -#include "dc_link_ddc.h" +#include "link.h" #include "dce/dce_panel_cntl.h" #include "dcn30/dcn30_dwb.h" @@ -1208,7 +1208,7 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.oem_device != NULL) - dal_ddc_service_destroy(&pool->base.oem_device); + link_destroy_ddc_service(&pool->base.oem_device); } static struct hubp *dcn30_hubp_create( @@ -2590,7 +2590,7 @@ static bool dcn30_resource_construct( ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); + pool->base.oem_device = link_create_ddc_service(&ddc_init_data); } else { pool->base.oem_device = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c index c9fbaed23965..1b39a6e8a1ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c @@ -29,7 +29,6 @@ #include "link_encoder.h" #include "dcn301_dio_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 47cffd0e6830..03ddf4f5f065 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -47,6 +47,7 @@ #include "dcn10/dcn10_resource.h" +#include "link.h" #include "dce/dce_abm.h" #include "dce/dce_audio.h" #include "dce/dce_aux.h" @@ -1125,6 
+1126,9 @@ static void dcn302_resource_destruct(struct resource_pool *pool) if (pool->dccg != NULL) dcn_dccg_destroy(&pool->dccg); + + if (pool->oem_device != NULL) + link_destroy_ddc_service(&pool->oem_device); } static void dcn302_destroy_resource_pool(struct resource_pool **pool) @@ -1216,6 +1220,7 @@ static bool dcn302_resource_construct( int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; + struct ddc_service_init_data ddc_init_data = {0}; ctx->dc_bios->regs = &bios_regs; @@ -1497,6 +1502,17 @@ static bool dcn302_resource_construct( dc->cap_funcs = cap_funcs; + if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { + ddc_init_data.ctx = dc->ctx; + ddc_init_data.link = NULL; + ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + ddc_init_data.id.enum_id = 0; + ddc_init_data.id.type = OBJECT_TYPE_GENERIC; + pool->oem_device = link_create_ddc_service(&ddc_init_data); + } else { + pool->oem_device = NULL; + } + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index c14d35894b2e..31e212064168 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -29,7 +29,7 @@ #include "dcn10/dcn10_resource.h" -#include "dc_link_ddc.h" +#include "link.h" #include "dce/dce_abm.h" #include "dce/dce_audio.h" @@ -1054,7 +1054,7 @@ static void dcn303_resource_destruct(struct resource_pool *pool) dcn_dccg_destroy(&pool->dccg); if (pool->oem_device != NULL) - dal_ddc_service_destroy(&pool->oem_device); + link_destroy_ddc_service(&pool->oem_device); } static void dcn303_destroy_resource_pool(struct resource_pool **pool) @@ -1421,7 +1421,7 @@ static bool dcn303_resource_construct( ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->oem_device = dal_ddc_service_create(&ddc_init_data); + pool->oem_device = link_create_ddc_service(&ddc_init_data); } else { pool->oem_device = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index ab70ebd8f223..275e78c06dee 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -30,7 +30,6 @@ #include "link_encoder.h" #include "dcn31_dio_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 6360dc9502e7..7e7cd5b64e6a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -1008,6 +1008,24 @@ static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub) return false; } +void hubbub31_init(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /*Enable clock gate*/ + if (hubbub->ctx->dc->debug.disable_clock_gate) { + /*done in hwseq*/ + /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ + REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, + DISPCLK_R_DCHUBBUB_GATE_DIS, 0, + DCFCLK_R_DCHUBBUB_GATE_DIS, 0); + } + + /* + only the DCN will determine when to connect the SDP port + */ + REG_UPDATE(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, 1); +} static const struct hubbub_funcs hubbub31_funcs = { 
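/* Illustrative note (not part of the patch): hubbub31_init() above mirrors
 * hubbub32_init() further down in this diff, which additionally bumps the
 * SDPIF/ARB outstanding-request limits to 512. Per the comments in that
 * function, those limits must be restored in zero-frame-buffer mode; a
 * hypothetical helper doing so could look like the sketch below (the helper
 * name and where it would be called from are assumptions):
 *
 *	static void hubbub32_restore_zero_fb_outstanding(struct dcn20_hubbub *hubbub2)
 *	{
 *		// SDPIF_MAX_NUM_OUTSTANDING = 0 selects the default of 256
 *		REG_UPDATE(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, 0);
 *		REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
 *			     DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 256,
 *			     DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 256);
 *	}
 */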
.update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h index 70c60de448ac..e015e5a6c866 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h @@ -42,6 +42,10 @@ SR(DCHUBBUB_COMPBUF_CTRL),\ SR(COMPBUF_RESERVED_SPACE),\ SR(DCHUBBUB_DEBUG_CTRL_0),\ + SR(DCHUBBUB_CLOCK_CNTL),\ + SR(DCHUBBUB_SDPIF_CFG0),\ + SR(DCHUBBUB_SDPIF_CFG1),\ + SR(DCHUBBUB_MEM_PWR_MODE_CTRL),\ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A),\ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A),\ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B),\ @@ -120,7 +124,11 @@ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh) + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ + HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh) int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 165c920ca776..0e1949d9ea58 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -46,7 +46,7 @@ #include "dpcd_defs.h" #include "dce/dmub_outbox.h" #include "dc_link_dp.h" -#include "inc/link_dpcd.h" +#include "link.h" #include "dcn10/dcn10_hw_sequencer.h" #include "inc/link_enc_cfg.h" #include "dcn30/dcn30_vpg.h" @@ -415,7 +415,12 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets( pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream_res.encoder_info_frame); - else { + else if (link_is_dp_128b_132b_signal(pipe_ctx)) { + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets( + pipe_ctx->stream_res.hpo_dp_stream_enc, + &pipe_ctx->stream_res.encoder_info_frame); + return; + } else { pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets( pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream_res.encoder_info_frame); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c index 0926db018338..67f4589f3e23 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c @@ -30,7 +30,7 @@ #include "dcn314_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "inc/link_dpcd.h" +#include "dc_link_dp.h" #include "dpcd_defs.h" #define DC_LOGGER \ @@ -281,7 +281,8 @@ static void enc314_stream_encoder_dp_blank( enc1_stream_encoder_dp_blank(link, enc); /* Disable FIFO after the DP vid stream is disabled to avoid corruption. 
*/ - enc314_disable_fifo(enc); + if (enc->ctx->dc->debug.dig_fifo_off_in_blank) + enc314_disable_fifo(enc); } static void enc314_stream_encoder_dp_unblank( diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index a0741794db62..7980462e3abe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -47,8 +47,8 @@ #include "dpcd_defs.h" #include "dce/dmub_outbox.h" #include "dc_link_dp.h" +#include "link.h" #include "inc/dc_link_dp.h" -#include "inc/link_dpcd.h" #include "dcn10/dcn10_hw_sequencer.h" #include "inc/link_enc_cfg.h" #include "dcn30/dcn30_vpg.h" @@ -348,7 +348,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing); odm_combine_factor = get_odm_config(pipe_ctx, NULL); - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_1; } else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index b4d5076e124c..dc0b49506275 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -1776,7 +1776,7 @@ static bool dcn316_resource_construct( pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 600; dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/ dc->caps.max_cursor_size = 256; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index 076969d928af..501388014855 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -31,7 +31,6 @@ #include "dcn31/dcn31_dio_link_encoder.h" #include "dcn32_dio_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "link_enc_cfg.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c index d19fc93dbc75..f01968f6d182 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c @@ -29,7 +29,7 @@ #include "dcn32_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "inc/link_dpcd.h" +#include "dc_link_dp.h" #include "dpcd_defs.h" #define DC_LOGGER \ @@ -421,6 +421,33 @@ static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pi REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container == 2 ? 0x1 : 0x0); } +static void enc32_reset_fifo(struct stream_encoder *enc, bool reset) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t reset_val = reset ? 
1 : 0; + uint32_t is_symclk_on; + + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val); + REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on); + + if (is_symclk_on) + REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000); + else + udelay(10); +} + +static void enc32_enable_fifo(struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7); + + enc32_reset_fifo(enc, true); + enc32_reset_fifo(enc, false); + + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1); +} + static const struct stream_encoder_funcs dcn32_str_enc_funcs = { .dp_set_odm_combine = enc32_dp_set_odm_combine, @@ -466,6 +493,7 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = { .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, .set_input_mode = enc32_set_dig_input_mode, + .enable_fifo = enc32_enable_fifo, }; void dcn32_dio_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c index 9501403a48a9..eb08ccc38e79 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c @@ -945,6 +945,35 @@ void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub) DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); } +void hubbub32_init(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /* Enable clock gate*/ + if (hubbub->ctx->dc->debug.disable_clock_gate) { + /*done in hwseq*/ + /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ + + REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, + DISPCLK_R_DCHUBBUB_GATE_DIS, 0, + DCFCLK_R_DCHUBBUB_GATE_DIS, 0); + } + /* + ignore the "df_pre_cstate_req" from the SDP port control. 
+ only the DCN will determine when to connect the SDP port + */ + REG_UPDATE(DCHUBBUB_SDPIF_CFG0, + SDPIF_PORT_CONTROL, 1); + /*Set SDP's max outstanding request to 512 + must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/ + REG_UPDATE(DCHUBBUB_SDPIF_CFG1, + SDPIF_MAX_NUM_OUTSTANDING, 1); + /*must set the registers back to 256 in zero frame buffer mode*/ + REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND, + DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 512, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 512); +} + static const struct hubbub_funcs hubbub32_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h index 786f9ce07f92..bdc146890fca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h @@ -83,7 +83,12 @@ SR(DCN_VM_FAULT_ADDR_LSB),\ SR(DCN_VM_FAULT_CNTL),\ SR(DCN_VM_FAULT_STATUS),\ - SR(SDPIF_REQUEST_RATE_LIMIT) + SR(SDPIF_REQUEST_RATE_LIMIT),\ + SR(DCHUBBUB_CLOCK_CNTL),\ + SR(DCHUBBUB_SDPIF_CFG0),\ + SR(DCHUBBUB_SDPIF_CFG1),\ + SR(DCHUBBUB_MEM_PWR_MODE_CTRL) + #define HUBBUB_MASK_SH_LIST_DCN32(mask_sh)\ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ @@ -96,6 +101,7 @@ HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MAX_REQ_OUTSTAND, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \ @@ -161,7 +167,14 @@ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\ - HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh) + HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ + HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\ + HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh) + + bool hubbub32_program_urgent_watermarks( struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c index ac1c6458dd55..fe0cd177744c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c @@ -155,7 +155,11 @@ void hubp32_cursor_set_attributes( else REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false); } - +void hubp32_init(struct hubp *hubp) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8); +} static struct hubp_funcs dcn32_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index c10d8a60380a..3b44006e1a80 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -50,6 +50,7 @@ #include "dmub_subvp_state.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn32_resource.h" +#include "link.h" #include "dc_link_dp.h" #include "dmub/inc/dmub_subvp_state.h" @@ -207,151 +208,31 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc) */ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx) { - int i, j; - struct dc_stream_state *stream = NULL; - struct dc_plane_state *plane = NULL; - uint32_t cursor_size = 0; - uint32_t total_lines = 0; - uint32_t lines_per_way = 0; + int i; uint8_t num_ways = 0; - uint8_t bytes_per_pixel = 0; - uint8_t cursor_bpp = 0; - uint16_t mblk_width = 0; - uint16_t mblk_height = 0; - uint16_t mall_alloc_width_blk_aligned = 0; - uint16_t mall_alloc_height_blk_aligned = 0; - uint16_t num_mblks = 0; - uint32_t bytes_in_mall = 0; - uint32_t cache_lines_used = 0; - uint32_t cache_lines_per_plane = 0; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - - /* If PSR is supported on an eDP panel that's connected, but that panel is - * not in PSR at the time of trying to enter MALL SS, we have to include it - * in the static screen CAB calculation - */ - if (!pipe->stream || !pipe->plane_state || - (pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && - pipe->stream->link->psr_settings.psr_allow_active) || - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) - continue; - - bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; - mblk_width = DCN3_2_MBLK_WIDTH; - mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE; - - /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) - - * FLOOR(vp_x_start, blk_width) - * - * mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c - */ - mall_alloc_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x + - pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) - - (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width); - - /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) - - * FLOOR(vp_y_start, blk_height) - * - * mall_alloc_height_blk_aligned_l/c = full_vp_height_blk_aligned_l/c - */ - mall_alloc_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y + - pipe->plane_res.scl_data.viewport.height + mblk_height - 1) / mblk_height * mblk_height) - - (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height); - - num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) * - ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height); - - /*For DCC: - * meta_num_mblk = CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1) - */ - if (pipe->plane_state->dcc.enable) - num_mblks += (pipe->plane_state->dcc.meta_pitch * pipe->plane_res.scl_data.viewport.height * bytes_per_pixel + - (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES); - - bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES; + uint32_t mall_ss_size_bytes = 0; - /* (cache lines used is total bytes / cache_line size. 
Add +2 for worst case alignment - * (MALL is 64-byte aligned) - */ - cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2; - cache_lines_used += cache_lines_per_plane; - } + mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes; + // TODO add additional logic for PSR active stream exclusion optimization + // mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes; // Include cursor size for CAB allocation - for (j = 0; j < dc->res_pool->pipe_count; j++) { - struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j]; - struct hubp *hubp = pipe->plane_res.hubp; - - if (pipe->stream && pipe->plane_state && hubp) - /* Find the cursor plane and use the exact size instead of - using the max for calculation */ - - if (hubp->curs_attr.width > 0) { - cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; - - switch (pipe->stream->cursor_attributes.color_format) { - case CURSOR_MODE_MONO: - cursor_size /= 2; - cursor_bpp = 4; - break; - case CURSOR_MODE_COLOR_1BIT_AND: - case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: - case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: - cursor_size *= 4; - cursor_bpp = 4; - break; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i]; - case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: - case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: - cursor_size *= 8; - cursor_bpp = 8; - break; - } + if (!pipe->stream || !pipe->plane_state) + continue; - if (pipe->stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor && - cursor_size > 16384) { - /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1) - */ - cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / - DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) / - dc->caps.cache_line_size + 2; - break; - } - } + mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false); } // Convert number of cache lines required to number of ways - total_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; - lines_per_way = total_lines / dc->caps.cache_num_ways; - num_ways = cache_lines_used / lines_per_way; - - if (cache_lines_used % lines_per_way > 0) - num_ways++; - - for (i = 0; i < ctx->stream_count; i++) { - stream = ctx->streams[i]; - for (j = 0; j < ctx->stream_status[i].plane_count; j++) { - plane = ctx->stream_status[i].plane_states[j]; - - if (stream->cursor_position.enable && plane && - dc->debug.alloc_extra_way_for_cursor && - cursor_size > 16384) { - /* Cursor caching is not supported since it won't be on the same line. - * So we need an extra line to accommodate it. With large cursors and a single 4k monitor - * this case triggers corruption. If we're at the edge, then dont trigger display refresh - * from MALL. We only need to cache cursor if its greater that 64x64 at 4 bpp. 
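The 16384-byte cutoff in the cursor handling above is just 64 x 64 pixels at 4 bytes per pixel; a tiny standalone helper makes the arithmetic explicit (an illustration, not driver code).

#include <stdbool.h>
#include <stdint.h>

/* 64 x 64 pixels at 4 bytes per pixel is exactly 16384 bytes, the threshold
 * used in both the removed code here and the new cursor helper: cursors at or
 * below that size fit the DCN cursor buffer and need no MALL allocation. */
static bool cursor_needs_mall_alloc(uint32_t pitch, uint32_t height,
				    uint32_t bytes_per_pixel)
{
	uint32_t cursor_bytes = pitch * height * bytes_per_pixel;

	return cursor_bytes > 64 * 64 * 4;	/* 16384 */
}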
- */ - num_ways++; - /* We only expect one cursor plane */ - break; - } - } - } if (dc->debug.force_mall_ss_num_ways > 0) { num_ways = dc->debug.force_mall_ss_num_ways; + } else { + num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes); } + return num_ways; } @@ -804,6 +685,25 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context) } } +static void dcn32_initialize_min_clocks(struct dc *dc) +{ + struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk; + + clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000; + clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000; + clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000; + clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000; + clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000; + clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; + clocks->fclk_p_state_change_support = true; + clocks->p_state_change_support = true; + + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + dc->current_state, + true); +} + void dcn32_init_hw(struct dc *dc) { struct abm **abms = dc->res_pool->multiple_abms; @@ -898,6 +798,8 @@ void dcn32_init_hw(struct dc *dc) if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); + + dcn32_initialize_min_clocks(dc); } /* In headless boot cases, DIG may be turned @@ -1176,7 +1078,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing); odm_combine_factor = get_odm_config(pipe_ctx, NULL); - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_1; } else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) { @@ -1240,7 +1142,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx, params.link_settings.link_rate = link_settings->link_rate; - if (is_dp_128b_132b_signal(pipe_ctx)) { + if (link_is_dp_128b_132b_signal(pipe_ctx)) { /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank( pipe_ctx->stream_res.hpo_dp_stream_enc, @@ -1267,7 +1169,7 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx) if (!is_h_timing_divisible_by_2(pipe_ctx->stream)) return false; - if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) && + if (dc_is_dp_signal(pipe_ctx->stream->signal) && !link_is_dp_128b_132b_signal(pipe_ctx) && dc->debug.enable_dp_dig_pixel_rate_div_policy) return true; return false; @@ -1301,7 +1203,7 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link) pipe_ctx->clock_source->funcs->program_pix_clk( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, - dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings), + link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings), &pipe_ctx->pll_settings); link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index dfecdf3e25e9..47dc96acdacb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c 
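The boot-time clock seeding added by dcn32_initialize_min_clocks() is essentially a MHz-to-kHz copy of the lowest clock-table entry followed by an update_clocks() call; a simplified sketch with stand-in struct names follows.

#include <stdbool.h>

/* Simplified stand-ins for the driver's clock-table entry and clock state. */
struct clk_entry {
	int dcfclk_mhz, socclk_mhz, memclk_mhz, dppclk_mhz, dispclk_mhz, dtbclk_mhz;
};

struct clk_state {
	int dcfclk_khz, socclk_khz, dramclk_khz, dppclk_khz, dispclk_khz, ref_dtbclk_khz;
	bool fclk_p_state_change_support;
	bool p_state_change_support;
};

/* Seed the boot clocks from DPM entry 0, converting MHz to kHz, and mark
 * p-state switching as allowed, mirroring dcn32_initialize_min_clocks(). */
static void init_min_clocks(struct clk_state *clk, const struct clk_entry *lowest)
{
	clk->dcfclk_khz     = lowest->dcfclk_mhz  * 1000;
	clk->socclk_khz     = lowest->socclk_mhz  * 1000;
	clk->dramclk_khz    = lowest->memclk_mhz  * 1000;
	clk->dppclk_khz     = lowest->dppclk_mhz  * 1000;
	clk->dispclk_khz    = lowest->dispclk_mhz * 1000;
	clk->ref_dtbclk_khz = lowest->dtbclk_mhz  * 1000;
	clk->fclk_p_state_change_support = true;
	clk->p_state_change_support = true;
	/* the driver then calls clk_mgr->funcs->update_clocks() with this state */
}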
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -69,7 +69,7 @@ #include "dml/display_mode_vba.h" #include "dcn32/dcn32_dccg.h" #include "dcn10/dcn10_resource.h" -#include "dc_link_ddc.h" +#include "link.h" #include "dcn31/dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" @@ -1508,7 +1508,7 @@ static void dcn32_resource_destruct(struct dcn32_resource_pool *pool) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.oem_device != NULL) - dal_ddc_service_destroy(&pool->base.oem_device); + link_destroy_ddc_service(&pool->base.oem_device); } @@ -2450,7 +2450,7 @@ static bool dcn32_resource_construct( ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); + pool->base.oem_device = link_create_ddc_service(&ddc_init_data); } else { pool->base.oem_device = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h index 57ce1d670abe..b07d3b0e6a5c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h @@ -96,8 +96,17 @@ void dcn32_calculate_wm_and_dlg( int pipe_cnt, int vlevel); -uint32_t dcn32_helper_calculate_num_ways_for_subvp - (struct dc *dc, +uint32_t dcn32_helper_mall_bytes_to_ways( + struct dc *dc, + uint32_t total_size_in_mall_bytes); + +uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool ignore_cursor_buf); + +uint32_t dcn32_helper_calculate_num_ways_for_subvp( + struct dc *dc, struct dc_state *context); void dcn32_merge_pipes_for_subvp(struct dc *dc, @@ -135,6 +144,8 @@ void dcn32_restore_mall_state(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config); +bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe); + /* definitions for run time init of reg offsets */ /* CLK SRC */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index e5287e5f66d5..0fc79d75ce76 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -33,13 +33,75 @@ static bool is_dual_plane(enum surface_pixel_format format) return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } + +uint32_t dcn32_helper_mall_bytes_to_ways( + struct dc *dc, + uint32_t total_size_in_mall_bytes) +{ + uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways; + + /* add 2 lines for worst case alignment */ + cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2; + + total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; + lines_per_way = total_cache_lines / dc->caps.cache_num_ways; + num_ways = cache_lines_used / lines_per_way; + if (cache_lines_used % lines_per_way > 0) + num_ways++; + + return num_ways; +} + +uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool ignore_cursor_buf) +{ + struct hubp *hubp = pipe_ctx->plane_res.hubp; + uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; + uint32_t cursor_bpp = 4; + uint32_t cursor_mall_size_bytes = 0; + + switch (pipe_ctx->stream->cursor_attributes.color_format) { + case CURSOR_MODE_MONO: + cursor_size /= 2; + cursor_bpp = 4; + break; + case 
CURSOR_MODE_COLOR_1BIT_AND: + case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: + case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: + cursor_size *= 4; + cursor_bpp = 4; + break; + + case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: + case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: + cursor_size *= 8; + cursor_bpp = 8; + break; + } + + /* only count if cursor is enabled, and if additional allocation needed outside of the + * DCN cursor buffer + */ + if (pipe_ctx->stream->cursor_position.enable && (ignore_cursor_buf || + cursor_size > 16384)) { + /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1) + * Note: add 1 mblk in case of cursor misalignment + */ + cursor_mall_size_bytes = ((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / + DCN3_2_MALL_MBLK_SIZE_BYTES + 1) * DCN3_2_MALL_MBLK_SIZE_BYTES; + } + + return cursor_mall_size_bytes; +} + /** * ******************************************************************************************** * dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP * - * This function first checks the bytes required per pixel on the SubVP pipe, then calculates - * the total number of pixels required in the SubVP MALL region. These are used to calculate - * the number of cache lines used (then number of ways required) for SubVP MCLK switching. + * Gets total allocation required for the phantom viewport calculated by DML in bytes and + * converts to number of cache ways. * * @param [in] dc: current dc state * @param [in] context: new dc state @@ -48,106 +110,19 @@ static bool is_dual_plane(enum surface_pixel_format format) * * ******************************************************************************************** */ -uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context) +uint32_t dcn32_helper_calculate_num_ways_for_subvp( + struct dc *dc, + struct dc_state *context) { - uint32_t num_ways = 0; - uint32_t bytes_per_pixel = 0; - uint32_t cache_lines_used = 0; - uint32_t lines_per_way = 0; - uint32_t total_cache_lines = 0; - uint32_t bytes_in_mall = 0; - uint32_t num_mblks = 0; - uint32_t cache_lines_per_plane = 0; - uint32_t i = 0, j = 0; - uint16_t mblk_width = 0; - uint16_t mblk_height = 0; - uint32_t full_vp_width_blk_aligned = 0; - uint32_t full_vp_height_blk_aligned = 0; - uint32_t mall_alloc_width_blk_aligned = 0; - uint32_t mall_alloc_height_blk_aligned = 0; - uint16_t full_vp_height = 0; - bool subvp_in_use = false; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - /* Find the phantom pipes. - * - For pipe split case we need to loop through the bottom and next ODM - * pipes or only half the viewport size is counted - */ - if (pipe->stream && pipe->plane_state && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - struct pipe_ctx *main_pipe = NULL; - - subvp_in_use = true; - /* Get full viewport height from main pipe (required for MBLK calculation) */ - for (j = 0; j < dc->res_pool->pipe_count; j++) { - main_pipe = &context->res_ctx.pipe_ctx[j]; - if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) { - full_vp_height = main_pipe->plane_res.scl_data.viewport.height; - break; - } - } - - bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; - mblk_width = DCN3_2_MBLK_WIDTH; - mblk_height = bytes_per_pixel == 4 ? 
DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE; - - /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) - - * FLOOR(vp_x_start, blk_width) - */ - full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x + - pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) - - (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width); - - /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) - - * FLOOR(vp_y_start, blk_height) - */ - full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y + - full_vp_height + mblk_height - 1) / mblk_height * mblk_height) - - (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height); - - /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */ - mall_alloc_width_blk_aligned = full_vp_width_blk_aligned; - - /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */ - mall_alloc_height_blk_aligned = (pipe->plane_res.scl_data.viewport.height - 1 + mblk_height - 1) / - mblk_height * mblk_height + mblk_height; - - /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c; - * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c; - * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c); - * (Should be divisible, but round up if not) - */ - num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) * - ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height); - - /*For DCC: - * meta_num_mblk = CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1) - */ - if (pipe->plane_state->dcc.enable) - num_mblks += (pipe->plane_state->dcc.meta_pitch * pipe->plane_res.scl_data.viewport.height * bytes_per_pixel + - (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES); - - bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES; - // cache lines used is total bytes / cache_line size. 
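The cache-line comment above describes the conversion that the new dcn32_helper_mall_bytes_to_ways() now centralizes; below is a standalone version of the same arithmetic with the cache geometry passed in explicitly. The capacities in the trailing comment are assumptions for illustration, not real DCN 3.2 values.

#include <stdint.h>

/* Same arithmetic as dcn32_helper_mall_bytes_to_ways(), with the cache
 * geometry supplied as parameters instead of read from dc->caps. */
static uint32_t mall_bytes_to_ways(uint32_t bytes, uint32_t cache_line_size,
				   uint32_t max_cab_bytes, uint32_t cache_num_ways)
{
	/* +2 cache lines cover worst-case 64-byte alignment, as in the driver */
	uint32_t lines_used    = bytes / cache_line_size + 2;
	uint32_t total_lines   = max_cab_bytes / cache_line_size;
	uint32_t lines_per_way = total_lines / cache_num_ways;
	uint32_t num_ways      = lines_used / lines_per_way;

	if (lines_used % lines_per_way)
		num_ways++;	/* round up to whole ways */

	return num_ways;
}

/* Hypothetical numbers: 8 MiB of surfaces, 64-byte lines and a 32 MiB, 16-way
 * CAB give 131074 lines used and 32768 lines per way, i.e. 5 ways; without
 * the +2 alignment lines it would have been exactly 4. */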
Add +2 for worst case alignment - // (MALL is 64-byte aligned) - cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2; - - cache_lines_used += cache_lines_per_plane; + if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) { + if (dc->debug.force_subvp_num_ways) { + return dc->debug.force_subvp_num_ways; + } else { + return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes); } + } else { + return 0; } - - total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; - lines_per_way = total_cache_lines / dc->caps.cache_num_ways; - num_ways = cache_lines_used / lines_per_way; - if (cache_lines_used % lines_per_way > 0) - num_ways++; - - if (subvp_in_use && dc->debug.force_subvp_num_ways > 0) - num_ways = dc->debug.force_subvp_num_ways; - - return num_ways; } void dcn32_merge_pipes_for_subvp(struct dc *dc, @@ -265,6 +240,14 @@ bool dcn32_is_center_timing(struct pipe_ctx *pipe) is_center_timing = true; } } + + if (pipe->plane_state) { + if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height && + pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) { + is_center_timing = true; + } + } + return is_center_timing; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c index fa9b6603cfd3..13be5f06d987 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c @@ -31,7 +31,6 @@ #include "dcn321_dio_link_encoder.h" #include "dcn31/dcn31_dio_link_encoder.h" #include "stream_encoder.h" -#include "i2caux_interface.h" #include "dc_bios_types.h" #include "gpio_service_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 62e400e90b56..260d71ca0205 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -73,7 +73,7 @@ #include "dml/display_mode_vba.h" #include "dcn32/dcn32_dccg.h" #include "dcn10/dcn10_resource.h" -#include "dc_link_ddc.h" +#include "link.h" #include "dcn31/dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" @@ -1493,7 +1493,7 @@ static void dcn321_resource_destruct(struct dcn321_resource_pool *pool) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.oem_device != NULL) - dal_ddc_service_destroy(&pool->base.oem_device); + link_destroy_ddc_service(&pool->base.oem_device); } @@ -1991,7 +1991,7 @@ static bool dcn321_resource_construct( ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; ddc_init_data.id.enum_id = 0; ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); + pool->base.oem_device = link_create_ddc_service(&ddc_init_data); } else { pool->base.oem_device = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index c26da3bb2892..d2b89c50be2a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -32,6 +32,7 @@ #include "dcn21/dcn21_resource.h" #include "clk_mgr/dcn21/rn_clk_mgr.h" +#include "link.h" #include "dcn20_fpu.h" #define DC_LOGGER_INIT(logger) @@ -938,7 +939,7 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { if 
(!context->res_ctx.pipe_ctx[i].stream) continue; - if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) + if (link_is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) return true; } return false; @@ -1302,7 +1303,7 @@ int dcn20_populate_dml_pipes_from_context( case SIGNAL_TYPE_DISPLAY_PORT_MST: case SIGNAL_TYPE_DISPLAY_PORT: pipes[pipe_cnt].dout.output_type = dm_dp; - if (is_dp_128b_132b_signal(&res_ctx->pipe_ctx[i])) + if (link_is_dp_128b_132b_signal(&res_ctx->pipe_ctx[i])) pipes[pipe_cnt].dout.output_type = dm_dp2p0; break; case SIGNAL_TYPE_EDP: diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index e7459fd50bf9..7feb8759e475 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -31,6 +31,7 @@ // We need this includes for WATERMARKS_* defines #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h" #include "dcn30/dcn30_resource.h" +#include "link.h" #define DC_LOGGER_INIT(logger) @@ -693,7 +694,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, */ if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface && - vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { + (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 || + (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && + dcn32_allow_subvp_with_active_margin(pipe)))) { while (pipe) { num_pipes++; pipe = pipe->bottom_pipe; @@ -977,13 +980,12 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) subvp_pipe = pipe; } - // Use ignore_msa_timing_param flag to identify as DRR - if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param) { - // SUBVP + DRR case -- don't enable SubVP + DRR for HDMI VRR cases - if (context->res_ctx.pipe_ctx[vblank_index].stream->allow_freesync) - schedulable = subvp_drr_schedulable(dc, context, &context->res_ctx.pipe_ctx[vblank_index]); - else - schedulable = false; + // Use ignore_msa_timing_param and VRR active, or Freesync flag to identify as DRR On + if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param && + (context->res_ctx.pipe_ctx[vblank_index].stream->allow_freesync || + context->res_ctx.pipe_ctx[vblank_index].stream->vrr_active_variable)) { + // SUBVP + DRR case -- only allowed if run through DRR validation path + schedulable = false; } else if (found) { main_timing = &subvp_pipe->stream->timing; phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; @@ -1087,12 +1089,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, { struct vba_vars_st *vba = &context->bw_ctx.dml.vba; unsigned int dc_pipe_idx = 0; + int i = 0; bool found_supported_config = false; struct pipe_ctx *pipe = NULL; uint32_t non_subvp_pipes = 0; bool drr_pipe_found = false; uint32_t drr_pipe_index = 0; - uint32_t i = 0; dc_assert_fp_enabled(); @@ -1186,11 +1188,11 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported && subvp_validate_static_schedulability(dc, context, *vlevel)) 
{ found_supported_config = true; - } else if (*vlevel < context->bw_ctx.dml.soc.num_states && - vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { - /* Case where 1 SubVP is added, and DML reports MCLK unsupported. This handles - * the case for SubVP + DRR, where the DRR display does not support MCLK switch - * at it's native refresh rate / timing. + } else if (*vlevel < context->bw_ctx.dml.soc.num_states) { + /* Case where 1 SubVP is added, and DML reports MCLK unsupported or DRR is allowed. + * This handles the case for SubVP + DRR, where the DRR display does not support MCLK + * switch at it's native refresh rate / timing, or DRR is allowed for the non-subvp + * display. */ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; @@ -1207,6 +1209,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, // If there is only 1 remaining non SubVP pipe that is DRR, check static // schedulability for SubVP + DRR. if (non_subvp_pipes == 1 && drr_pipe_found) { + /* find lowest vlevel that supports the config */ + for (i = *vlevel; i >= 0; i--) { + if (vba->ModeSupport[i][vba->maxMpcComb]) { + *vlevel = i; + } else { + break; + } + } + found_supported_config = subvp_drr_schedulable(dc, context, &context->res_ctx.pipe_ctx[drr_pipe_index]); } @@ -1255,7 +1266,7 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; - if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) + if (link_is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) return true; } return false; @@ -1283,7 +1294,6 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->bw_ctx.bw.dcn.clk.p_state_change_support = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_unsupported; - context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context); context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); @@ -1307,6 +1317,10 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, unbounded_req_enabled = false; } + context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0; + context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0; + context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0; + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; @@ -1338,6 +1352,29 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, else context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0; context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; + + context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + /* MALL Allocation Sizes */ + /* count from active, top pipes per plane only */ + if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state && + (context->res_ctx.pipe_ctx[i].top_pipe == NULL || + context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) && + context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { + /* SS: all active surfaces stored in MALL */ + if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) { + context->bw_ctx.bw.dcn.mall_ss_size_bytes += 
context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; + + if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) { + /* SS PSR On: all active surfaces part of streams not supporting PSR stored in MALL */ + context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; + } + } else if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + /* SUBVP: phantom surfaces only stored in MALL */ + context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; + } + } + pipe_idx++; } /* If DCN isn't making memory requests we can allow pstate change and lower clocks */ @@ -1358,6 +1395,8 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000; + context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context); + context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1564,7 +1603,6 @@ bool dcn32_internal_validate_bw(struct dc *dc, context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_fclk_and_stutter; - context->bw_ctx.dml.validate_max_state = fast_validate; vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); /* Last attempt with Prefetch mode 2 (dm_prefetch_support_stutter == 3) */ @@ -1573,7 +1611,6 @@ bool dcn32_internal_validate_bw(struct dc *dc, dm_prefetch_support_stutter; vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); } - context->bw_ctx.dml.validate_max_state = false; if (vlevel < context->bw_ctx.dml.soc.num_states) { memset(split, 0, sizeof(split)); @@ -1851,7 +1888,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, bool subvp_in_use = dcn32_subvp_in_use(dc, context); unsigned int min_dram_speed_mts_margin; bool need_fclk_lat_as_dummy = false; - bool is_subvp_p_drr = true; + bool is_subvp_p_drr = false; dc_assert_fp_enabled(); @@ -1859,7 +1896,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, if (subvp_in_use) { /* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching it's VBLANK */ if (!pstate_en) { - context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; + context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; + context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_fclk_and_stutter; pstate_en = true; is_subvp_p_drr = true; } @@ -1877,8 +1915,9 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; if (is_subvp_p_drr) { - context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; + context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; } } @@ -2639,3 +2678,30 @@ void 
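The three MALL totals accumulated above split each top pipe's surface bytes into static-screen, PSR-active (links without PSR support), and SubVP phantom buckets; a simplified classification sketch with stand-in types, not the driver's pipe_ctx fields.

/* Per-pipe bucketing mirroring the accounting above: every non-phantom top
 * pipe counts toward the static-screen total, the subset on links without PSR
 * also counts toward the PSR-active total, and phantom (SubVP) pipes are
 * tracked separately. */
enum pipe_kind { PIPE_NORMAL, PIPE_NORMAL_NO_PSR, PIPE_PHANTOM };

struct mall_totals { unsigned int ss, ss_psr_active, subvp; };

static void account_pipe(struct mall_totals *t, enum pipe_kind kind,
			 unsigned int surface_bytes)
{
	switch (kind) {
	case PIPE_PHANTOM:
		t->subvp += surface_bytes;
		break;
	case PIPE_NORMAL_NO_PSR:
		t->ss_psr_active += surface_bytes;
		/* fall through: non-PSR surfaces still count toward the SS total */
	case PIPE_NORMAL:
		t->ss += surface_bytes;
		break;
	}
}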
dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes, pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; } + +bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) +{ + bool allow = false; + uint32_t refresh_rate = 0; + + /* Allow subvp on displays that have active margin for 2560x1440@60hz displays + * only for now. There must be no scaling as well. + * + * For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60 configs + * for p-state switching. + */ + if (pipe->stream && pipe->plane_state) { + refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) + / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); + if (pipe->stream->timing.v_addressable == 1440 && + pipe->stream->timing.h_addressable == 2560 && + refresh_rate >= 55 && refresh_rate <= 65 && + pipe->plane_state->src_rect.height == 1440 && + pipe->plane_state->src_rect.width == 2560 && + pipe->plane_state->dst_rect.height == 1440 && + pipe->plane_state->dst_rect.width == 2560) + allow = true; + } + return allow; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index bc22078751f8..6c5ab5c26b38 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -387,6 +387,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman mode_lib->vba.NumberOfActiveSurfaces, mode_lib->vba.MALLAllocatedForDCNFinal, mode_lib->vba.UseMALLForStaticScreen, + mode_lib->vba.UsesMALLForPStateChange, mode_lib->vba.DCCEnable, mode_lib->vba.ViewportStationary, mode_lib->vba.ViewportXStartY, @@ -411,6 +412,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->BlockWidthC, v->BlockHeightY, v->BlockHeightC, + mode_lib->vba.DCCMetaPitchY, + mode_lib->vba.DCCMetaPitchC, /* Output */ v->SurfaceSizeInMALL, @@ -1707,7 +1710,7 @@ static void mode_support_configuration(struct vba_vars_st *v, void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) { struct vba_vars_st *v = &mode_lib->vba; - int i, j, start_state; + int i, j; unsigned int k, m; unsigned int MaximumMPCCombine; unsigned int NumberOfNonCombinedSurfaceOfMaximumBandwidth; @@ -1720,10 +1723,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l #endif /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/ - if (mode_lib->validate_max_state) - start_state = v->soc.num_states - 1; - else - start_state = 0; + /*Scale Ratio, taps Support Check*/ mode_lib->vba.ScaleRatioAndTapsSupport = true; @@ -2012,7 +2012,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MPCCombineMethodIncompatible = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsNeededForPStateChangeAndVoltage && v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsPossible; - for (i = start_state; i < v->soc.num_states; i++) { + for (i = 0; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { mode_lib->vba.TotalNumberOfActiveDPP[i][j] = 0; mode_lib->vba.TotalAvailablePipesSupport[i][j] = true; @@ -2289,7 +2289,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = start_state; i < v->soc.num_states; ++i) { + for (i = 0; i 
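dcn32_allow_subvp_with_active_margin() estimates the refresh rate with a ceiling division over the frame's total pixel count; the same formula as a standalone helper, with a hypothetical 2560x1440 timing worked through in the comment.

#include <stdint.h>

/* Same ceiling-division refresh estimate used above; pix_clk_100hz is the
 * pixel clock in units of 100 Hz, as in dc_crtc_timing. */
static uint32_t timing_refresh_hz(uint32_t pix_clk_100hz,
				  uint32_t h_total, uint32_t v_total)
{
	uint64_t pixels_per_frame = (uint64_t)h_total * v_total;

	return (uint32_t)(((uint64_t)pix_clk_100hz * 100 + pixels_per_frame - 1) /
			  pixels_per_frame);
}

/* Hypothetical 2560x1440 timing: 2720 x 1481 total pixels at 241.6992 MHz
 * (pix_clk_100hz = 2416992) yields exactly 60 Hz, inside the 55-65 Hz window
 * the check above accepts. */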
< v->soc.num_states; ++i) { mode_lib->vba.ExceededMultistreamSlots[i] = false; for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k) { @@ -2389,7 +2389,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = start_state; i < v->soc.num_states; ++i) { + for (i = 0; i < v->soc.num_states; ++i) { mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] = false; for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { if (mode_lib->vba.BlendingAndTiming[k] == k @@ -2406,7 +2406,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = start_state; i < v->soc.num_states; ++i) { + for (i = 0; i < v->soc.num_states; ++i) { mode_lib->vba.ODMCombine2To1SupportCheckOK[i] = true; mode_lib->vba.ODMCombine4To1SupportCheckOK[i] = true; for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { @@ -2424,7 +2424,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = start_state; i < v->soc.num_states; i++) { + for (i = 0; i < v->soc.num_states; i++) { mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false; for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { @@ -2461,7 +2461,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /* Check DSC Unit and Slices Support */ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0; - for (i = start_state; i < v->soc.num_states; ++i) { + for (i = 0; i < v->soc.num_states; ++i) { mode_lib->vba.NotEnoughDSCUnits[i] = false; mode_lib->vba.NotEnoughDSCSlices[i] = false; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0; @@ -2496,7 +2496,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } /*DSC Delay per state*/ - for (i = start_state; i < v->soc.num_states; ++i) { + for (i = 0; i < v->soc.num_states; ++i) { for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { mode_lib->vba.DSCDelayPerState[i][k] = dml32_DSCDelayRequirement( mode_lib->vba.RequiresDSC[i][k], mode_lib->vba.ODMCombineEnablePerState[i][k], @@ -2523,7 +2523,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l //Calculate Swath, DET Configuration, DCFCLKDeepSleep // - for (i = start_state; i < (int) v->soc.num_states; ++i) { + for (i = 0; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { mode_lib->vba.RequiredDPPCLKThisState[k] = mode_lib->vba.RequiredDPPCLK[i][j][k]; @@ -2629,6 +2629,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.NumberOfActiveSurfaces, mode_lib->vba.MALLAllocatedForDCNFinal, mode_lib->vba.UseMALLForStaticScreen, + mode_lib->vba.UsesMALLForPStateChange, mode_lib->vba.DCCEnable, mode_lib->vba.ViewportStationary, mode_lib->vba.ViewportXStartY, @@ -2653,12 +2654,14 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MacroTileWidthC, mode_lib->vba.MacroTileHeightY, mode_lib->vba.MacroTileHeightC, + mode_lib->vba.DCCMetaPitchY, + mode_lib->vba.DCCMetaPitchC, /* Output */ mode_lib->vba.SurfaceSizeInMALL, &mode_lib->vba.ExceededMALLSize); - for (i = start_state; i < v->soc.num_states; i++) { + for (i = 0; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { for (k = 
0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { mode_lib->vba.swath_width_luma_ub_this_state[k] = @@ -2885,7 +2888,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } //Calculate Return BW - for (i = start_state; i < (int) v->soc.num_states; ++i) { + for (i = 0; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { @@ -2964,7 +2967,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l &mode_lib->vba.MinPrefetchMode, &mode_lib->vba.MaxPrefetchMode); - for (i = start_state; i < (int) v->soc.num_states; ++i) { + for (i = 0; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) mode_lib->vba.DCFCLKState[i][j] = mode_lib->vba.DCFCLKPerState[i]; } @@ -3086,7 +3089,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DCFCLKState); } // UseMinimumRequiredDCFCLK == true - for (i = start_state; i < (int) v->soc.num_states; ++i) { + for (i = 0; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { mode_lib->vba.ReturnBWPerState[i][j] = dml32_get_return_bw_mbps(&mode_lib->vba.soc, i, mode_lib->vba.HostVMEnable, mode_lib->vba.DCFCLKState[i][j], @@ -3095,7 +3098,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } //Re-ordering Buffer Support Check - for (i = start_state; i < (int) v->soc.num_states; ++i) { + for (i = 0; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024 / mode_lib->vba.ReturnBWPerState[i][j] @@ -3117,7 +3120,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l + mode_lib->vba.ReadBandwidthChroma[k]; } - for (i = start_state; i < (int) v->soc.num_states; ++i) { + for (i = 0; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min3(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKState[i][j] @@ -3141,7 +3144,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /* Prefetch Check */ - for (i = start_state; i < (int) v->soc.num_states; ++i) { + for (i = 0; i < (int) v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { mode_lib->vba.TimeCalc = 24 / mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j]; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index b53feeaf5cf1..0932f49cd819 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -1772,6 +1772,7 @@ void dml32_CalculateSurfaceSizeInMall( unsigned int NumberOfActiveSurfaces, unsigned int MALLAllocatedForDCN, enum dm_use_mall_for_static_screen_mode UseMALLForStaticScreen[], + enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[], bool DCCEnable[], bool ViewportStationary[], unsigned int ViewportXStartY[], @@ -1796,13 +1797,17 @@ void dml32_CalculateSurfaceSizeInMall( unsigned int ReadBlockWidthC[], unsigned int ReadBlockHeightY[], unsigned int ReadBlockHeightC[], + unsigned int DCCMetaPitchY[], + unsigned int DCCMetaPitchC[], /* Output */ unsigned int SurfaceSizeInMALL[], bool *ExceededMALLSize) { - unsigned int TotalSurfaceSizeInMALL = 0; unsigned int k; + unsigned int TotalSurfaceSizeInMALLForSS = 0; 
+ unsigned int TotalSurfaceSizeInMALLForSubVP = 0; + unsigned int MALLAllocatedForDCNInBytes = MALLAllocatedForDCN * 1024 * 1024; for (k = 0; k < NumberOfActiveSurfaces; ++k) { if (ViewportStationary[k]) { @@ -1828,18 +1833,18 @@ void dml32_CalculateSurfaceSizeInMall( } if (DCCEnable[k] == true) { SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] + - dml_min(dml_ceil(SurfaceWidthY[k], 8 * Read256BytesBlockWidthY[k]), + (dml_min(dml_ceil(DCCMetaPitchY[k], 8 * Read256BytesBlockWidthY[k]), dml_floor(ViewportXStartY[k] + ViewportWidthY[k] + 8 * Read256BytesBlockWidthY[k] - 1, 8 * Read256BytesBlockWidthY[k]) - dml_floor(ViewportXStartY[k], 8 * Read256BytesBlockWidthY[k])) * dml_min(dml_ceil(SurfaceHeightY[k], 8 * Read256BytesBlockHeightY[k]), dml_floor(ViewportYStartY[k] + ViewportHeightY[k] + 8 * Read256BytesBlockHeightY[k] - 1, 8 * - Read256BytesBlockHeightY[k]) - dml_floor(ViewportYStartY[k], 8 - * Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256; + Read256BytesBlockHeightY[k]) - dml_floor(ViewportYStartY[k], 8 * + Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256) + (64 * 1024); if (Read256BytesBlockWidthC[k] > 0) { SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] + - dml_min(dml_ceil(SurfaceWidthC[k], 8 * + dml_min(dml_ceil(DCCMetaPitchC[k], 8 * Read256BytesBlockWidthC[k]), dml_floor(ViewportXStartC[k] + ViewportWidthC[k] + 8 * Read256BytesBlockWidthC[k] - 1, 8 * @@ -1872,16 +1877,16 @@ void dml32_CalculateSurfaceSizeInMall( } if (DCCEnable[k] == true) { SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] + - dml_ceil(dml_min(SurfaceWidthY[k], ViewportWidthY[k] + 8 * + (dml_ceil(dml_min(DCCMetaPitchY[k], ViewportWidthY[k] + 8 * Read256BytesBlockWidthY[k] - 1), 8 * Read256BytesBlockWidthY[k]) * dml_ceil(dml_min(SurfaceHeightY[k], ViewportHeightY[k] + 8 * Read256BytesBlockHeightY[k] - 1), 8 * - Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256; + Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256) + (64 * 1024); if (Read256BytesBlockWidthC[k] > 0) { SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] + - dml_ceil(dml_min(SurfaceWidthC[k], ViewportWidthC[k] + 8 * + dml_ceil(dml_min(DCCMetaPitchC[k], ViewportWidthC[k] + 8 * Read256BytesBlockWidthC[k] - 1), 8 * Read256BytesBlockWidthC[k]) * dml_ceil(dml_min(SurfaceHeightC[k], ViewportHeightC[k] + 8 * @@ -1894,10 +1899,14 @@ void dml32_CalculateSurfaceSizeInMall( } for (k = 0; k < NumberOfActiveSurfaces; ++k) { - if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable) - TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k]; + /* SS and Subvp counted separate as they are never used at the same time */ + if (UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) + TotalSurfaceSizeInMALLForSubVP = TotalSurfaceSizeInMALLForSubVP + SurfaceSizeInMALL[k]; + else if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable) + TotalSurfaceSizeInMALLForSS = TotalSurfaceSizeInMALLForSS + SurfaceSizeInMALL[k]; } - *ExceededMALLSize = (TotalSurfaceSizeInMALL > MALLAllocatedForDCN * 1024 * 1024); + *ExceededMALLSize = (TotalSurfaceSizeInMALLForSS > MALLAllocatedForDCNInBytes) || + (TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes); } // CalculateSurfaceSizeInMall void dml32_CalculateVMRowAndSwath( @@ -6245,7 +6254,7 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface double PixelClock[], double VRatioY[], double VRatioC[], - enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[DC__NUM_DPP__MAX]) + enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[]) 
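The reworked ExceededMALLSize check compares the static-screen and SubVP totals against the full MALL budget independently, since the two are never resident at the same time; a minimal sketch of that check.

#include <stdbool.h>

/* SS and SubVP allocations are counted separately above, so each total is
 * checked against the whole MALL budget on its own rather than being summed. */
static bool exceeds_mall(unsigned int ss_bytes, unsigned int subvp_bytes,
			 unsigned int mall_allocated_for_dcn_mb)
{
	unsigned int budget_bytes = mall_allocated_for_dcn_mb * 1024 * 1024;

	return ss_bytes > budget_bytes || subvp_bytes > budget_bytes;
}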
{ int k; double SwathSizeAllSurfaces = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h index 779c6805f599..d41c4d8b0c7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h @@ -334,6 +334,7 @@ void dml32_CalculateSurfaceSizeInMall( unsigned int NumberOfActiveSurfaces, unsigned int MALLAllocatedForDCN, enum dm_use_mall_for_static_screen_mode UseMALLForStaticScreen[], + enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[], bool DCCEnable[], bool ViewportStationary[], unsigned int ViewportXStartY[], @@ -358,6 +359,8 @@ void dml32_CalculateSurfaceSizeInMall( unsigned int ReadBlockWidthC[], unsigned int ReadBlockHeightY[], unsigned int ReadBlockHeightC[], + unsigned int DCCMetaPitchY[], + unsigned int DCCMetaPitchC[], /* Output */ unsigned int SurfaceSizeInMALL[], @@ -1157,6 +1160,6 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface double PixelClock[], double VRatioY[], double VRatioC[], - enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[DC__NUM_DPP__MAX]); + enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[]); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index a9d49ef58fb5..3d643d50c3eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -91,7 +91,6 @@ struct display_mode_lib { struct dal_logger *logger; struct dml_funcs funcs; struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6]; - bool validate_max_state; }; void dml_init_instance(struct display_mode_lib *lib, diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 8e6585dab20e..8cb28b7918db 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -202,6 +202,7 @@ dml_get_pipe_attr_func(vm_group_size_in_bytes, mode_lib->vba.vm_group_bytes); dml_get_pipe_attr_func(dpte_row_height_linear_l, mode_lib->vba.dpte_row_height_linear); dml_get_pipe_attr_func(pte_buffer_mode, mode_lib->vba.PTE_BUFFER_MODE); dml_get_pipe_attr_func(subviewport_lines_needed_in_mall, mode_lib->vba.SubViewportLinesNeededInMALL); +dml_get_pipe_attr_func(surface_size_in_mall, mode_lib->vba.SurfaceSizeInMALL) double get_total_immediate_flip_bytes( struct display_mode_lib *mode_lib, diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 81e53e67cd0b..876b9b517ea2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -143,6 +143,7 @@ dml_get_pipe_attr_decl(vready_at_or_after_vsync); dml_get_pipe_attr_decl(min_dst_y_next_start); dml_get_pipe_attr_decl(vstartup_calculated); dml_get_pipe_attr_decl(subviewport_lines_needed_in_mall); +dml_get_pipe_attr_decl(surface_size_in_mall); double get_total_immediate_flip_bytes( struct display_mode_lib *mode_lib, diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c index 9b63c6c0cc84..e0bd0c722e00 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c +++ 
b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c @@ -138,7 +138,8 @@ static const struct ddc_sh_mask ddc_shift[] = { DDC_MASK_SH_LIST_DCN2(__SHIFT, 3), DDC_MASK_SH_LIST_DCN2(__SHIFT, 4), DDC_MASK_SH_LIST_DCN2(__SHIFT, 5), - DDC_MASK_SH_LIST_DCN2(__SHIFT, 6) + DDC_MASK_SH_LIST_DCN2(__SHIFT, 6), + DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT) }; static const struct ddc_sh_mask ddc_mask[] = { @@ -147,7 +148,8 @@ static const struct ddc_sh_mask ddc_mask[] = { DDC_MASK_SH_LIST_DCN2(_MASK, 3), DDC_MASK_SH_LIST_DCN2(_MASK, 4), DDC_MASK_SH_LIST_DCN2(_MASK, 5), - DDC_MASK_SH_LIST_DCN2(_MASK, 6) + DDC_MASK_SH_LIST_DCN2(_MASK, 6), + DDC_MASK_SH_LIST_DCN2_VGA(_MASK) }; #include "../generic_regs.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c index 687d4f128480..36a5736c58c9 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c @@ -145,7 +145,8 @@ static const struct ddc_sh_mask ddc_shift[] = { DDC_MASK_SH_LIST_DCN2(__SHIFT, 3), DDC_MASK_SH_LIST_DCN2(__SHIFT, 4), DDC_MASK_SH_LIST_DCN2(__SHIFT, 5), - DDC_MASK_SH_LIST_DCN2(__SHIFT, 6) + DDC_MASK_SH_LIST_DCN2(__SHIFT, 6), + DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT) }; static const struct ddc_sh_mask ddc_mask[] = { @@ -154,7 +155,8 @@ static const struct ddc_sh_mask ddc_mask[] = { DDC_MASK_SH_LIST_DCN2(_MASK, 3), DDC_MASK_SH_LIST_DCN2(_MASK, 4), DDC_MASK_SH_LIST_DCN2(_MASK, 5), - DDC_MASK_SH_LIST_DCN2(_MASK, 6) + DDC_MASK_SH_LIST_DCN2(_MASK, 6), + DDC_MASK_SH_LIST_DCN2_VGA(_MASK) }; #include "../generic_regs.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c index 9fd8b269dd79..985f10b39750 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c @@ -149,7 +149,8 @@ static const struct ddc_sh_mask ddc_shift[] = { DDC_MASK_SH_LIST_DCN2(__SHIFT, 3), DDC_MASK_SH_LIST_DCN2(__SHIFT, 4), DDC_MASK_SH_LIST_DCN2(__SHIFT, 5), - DDC_MASK_SH_LIST_DCN2(__SHIFT, 6) + DDC_MASK_SH_LIST_DCN2(__SHIFT, 6), + DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT) }; static const struct ddc_sh_mask ddc_mask[] = { @@ -158,7 +159,8 @@ static const struct ddc_sh_mask ddc_mask[] = { DDC_MASK_SH_LIST_DCN2(_MASK, 3), DDC_MASK_SH_LIST_DCN2(_MASK, 4), DDC_MASK_SH_LIST_DCN2(_MASK, 5), - DDC_MASK_SH_LIST_DCN2(_MASK, 6) + DDC_MASK_SH_LIST_DCN2(_MASK, 6), + DDC_MASK_SH_LIST_DCN2_VGA(_MASK) }; #include "../generic_regs.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h index 308a543178a5..59884ef651b3 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h @@ -113,6 +113,13 @@ (PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\ (DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)} +#define DDC_MASK_SH_LIST_DCN2_VGA(mask_sh) \ + {DDC_MASK_SH_LIST_COMMON(mask_sh),\ + 0,\ + 0,\ + 0,\ + 0} + struct ddc_registers { struct gpio_registers gpio; uint32_t ddc_setup; diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c index 4233955e3c47..906a43e85f6d 100644 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -28,12 +28,11 @@ #include "dm_services.h" #include "dm_helpers.h" #include "include/hdcp_types.h" -#include "include/i2caux_interface.h" 
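A rough sketch of what the DDC_MASK_SH_LIST_DCN2_VGA additions contribute to the ddc_shift/ddc_mask tables; the struct below is a simplified stand-in for ddc_sh_mask, and the field split is read off the macro body, not from register documentation.

/* Simplified stand-in for struct ddc_sh_mask: the new VGA entry fills only the
 * common shift/mask fields and leaves the DCN2-specific pad fields at zero,
 * mirroring the { DDC_MASK_SH_LIST_COMMON(mask_sh), 0, 0, 0, 0 } macro body. */
struct ddc_sh_mask_sketch {
	unsigned int common[6];		/* DDC_MASK_SH_LIST_COMMON portion; count is illustrative */
	unsigned int dcn2_only[4];	/* zero-filled by DDC_MASK_SH_LIST_DCN2_VGA */
};

static const struct ddc_sh_mask_sketch vga_ddc_entry = {
	/* everything defaults to zero except whatever the common macro supplies */
};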
#include "include/signal_types.h" #include "core_types.h" -#include "dc_link_ddc.h" +#include "link.h" #include "link_hwss.h" -#include "inc/link_dpcd.h" +#include "link/link_dpcd.h" #define DC_LOGGER \ link->ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index b093ea495468..bebfcf8737b3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -450,10 +450,11 @@ struct pipe_ctx { struct _vcs_dpi_display_e2e_pipe_params_st dml_input; int det_buffer_size_kb; bool unbounded_req; + unsigned int surface_size_in_mall_bytes; - union pipe_update_flags update_flags; struct dwbc *dwbc; struct mcif_wb *mcif_wb; + union pipe_update_flags update_flags; }; /* Data used for dynamic link encoder assignment. @@ -507,6 +508,9 @@ struct dcn_bw_output { struct dcn_watermark_set watermarks; struct dcn_bw_writeback bw_writeback; int compbuf_size_kb; + unsigned int mall_ss_size_bytes; + unsigned int mall_ss_psr_active_size_bytes; + unsigned int mall_subvp_size_bytes; unsigned int legacy_svp_drr_stream_index; bool legacy_svp_drr_stream_index_valid; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h deleted file mode 100644 index 95fb61d62778..000000000000 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DAL_DDC_SERVICE_H__ -#define __DAL_DDC_SERVICE_H__ - -#include "include/ddc_service_types.h" -#include "include/i2caux_interface.h" - -#define EDID_SEGMENT_SIZE 256 - -/* Address range from 0x00 to 0x1F.*/ -#define DP_ADAPTOR_TYPE2_SIZE 0x20 -#define DP_ADAPTOR_TYPE2_REG_ID 0x10 -#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D -/* Identifies adaptor as Dual-mode adaptor */ -#define DP_ADAPTOR_TYPE2_ID 0xA0 -/* MHz*/ -#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600 -/* MHz*/ -#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25 -/* kHZ*/ -#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000 -/* kHZ*/ -#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000 - -#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW - -struct ddc_service; -struct graphics_object_id; -enum ddc_result; -struct av_sync_data; -struct dp_receiver_id_info; - -struct i2c_payloads; -struct aux_payloads; -enum aux_return_code_type; - -void dal_ddc_i2c_payloads_add( - struct i2c_payloads *payloads, - uint32_t address, - uint32_t len, - uint8_t *data, - bool write); - -struct ddc_service_init_data { - struct graphics_object_id id; - struct dc_context *ctx; - struct dc_link *link; - bool is_dpia_link; -}; - -struct ddc_service *dal_ddc_service_create( - struct ddc_service_init_data *ddc_init_data); - -void dal_ddc_service_destroy(struct ddc_service **ddc); - -enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc); - -void dal_ddc_service_set_transaction_type( - struct ddc_service *ddc, - enum ddc_transaction_type type); - -bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc); - -void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( - struct ddc_service *ddc, - struct display_sink_capability *sink_cap); - -bool dal_ddc_service_query_ddc_data( - struct ddc_service *ddc, - uint32_t address, - uint8_t *write_buf, - uint32_t write_size, - uint8_t *read_buf, - uint32_t read_size); - -bool dal_ddc_submit_aux_command(struct ddc_service *ddc, - struct aux_payload *payload); - -int dc_link_aux_transfer_raw(struct ddc_service *ddc, - struct aux_payload *payload, - enum aux_return_code_type *operation_result); - -bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, - struct aux_payload *payload); - -bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc, - uint32_t timeout); - -void dal_ddc_service_write_scdc_data( - struct ddc_service *ddc_service, - uint32_t pix_clk, - bool lte_340_scramble); - -void dal_ddc_service_read_scdc_data( - struct ddc_service *ddc_service); - -void ddc_service_set_dongle_type(struct ddc_service *ddc, - enum display_dongle_type dongle_type); - -void dal_ddc_service_set_ddc_pin( - struct ddc_service *ddc_service, - struct ddc *ddc); - -struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service); - -uint32_t get_defer_delay(struct ddc_service *ddc); - -#endif /* __DAL_DDC_SERVICE_H__ */ - diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index e8d8c5cb1309..52e1aad1fce8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -28,10 +28,7 @@ #define LINK_TRAINING_ATTEMPTS 4 #define LINK_TRAINING_RETRY_DELAY 50 /* ms */ -#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/ -#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/ #define MAX_MTP_SLOT_COUNT 64 -#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 #define TRAINING_AUX_RD_INTERVAL 100 //us #define LINK_AUX_WAKE_TIMEOUT_MS 1500 // Timeout when trying 
to wake unresponsive DPRX. @@ -40,11 +37,6 @@ struct dc_stream_state; struct dc_link_settings; enum { - LINK_TRAINING_MAX_RETRY_COUNT = 5, - /* to avoid infinite loop where-in the receiver - * switches between different VS - */ - LINK_TRAINING_MAX_CR_RETRY = 100, /* * Some receivers fail to train on first try and are good * on subsequent tries. 2 retries should be plenty. If we @@ -55,7 +47,6 @@ enum { PEAK_FACTOR_X1000 = 1006, }; -struct dc_link_settings dp_get_max_link_cap(struct dc_link *link); bool dp_verify_link_cap_with_retries( struct dc_link *link, @@ -66,35 +57,11 @@ bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing); -bool decide_edp_link_settings(struct dc_link *link, - struct dc_link_settings *link_setting, - uint32_t req_bw); - -bool decide_link_settings( - struct dc_stream_state *stream, - struct dc_link_settings *link_setting); - -bool perform_link_training_with_retries( - const struct dc_link_settings *link_setting, - bool skip_video_pattern, - int attempts, - struct pipe_ctx *pipe_ctx, - enum signal_type signal, - bool do_fallback); - -bool hpd_rx_irq_check_link_loss_status( +bool hpd_rx_irq_check_link_loss_status(struct dc_link *link, + union hpd_irq_data *hpd_irq_dpcd_data); +enum dc_status read_hpd_rx_irq_data( struct dc_link *link, - union hpd_irq_data *hpd_irq_dpcd_data); - -bool is_mst_supported(struct dc_link *link); - -bool detect_dp_sink_caps(struct dc_link *link); - -void detect_edp_sink_caps(struct dc_link *link); - -bool is_dp_active_dongle(const struct dc_link *link); - -bool is_dp_branch_device(const struct dc_link *link); + union hpd_irq_data *irq_data); bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); @@ -103,69 +70,8 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable); enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); -bool dp_overwrite_extended_receiver_cap(struct dc_link *link); - -void dpcd_set_source_specific_data(struct dc_link *link); - void dpcd_write_cable_id_to_dprx(struct dc_link *link); -/* Write DPCD link configuration data. */ -enum dc_status dpcd_set_link_settings( - struct dc_link *link, - const struct link_training_settings *lt_settings); -/* Write DPCD drive settings. */ -enum dc_status dpcd_set_lane_settings( - struct dc_link *link, - const struct link_training_settings *link_training_setting, - uint32_t offset); -/* Read training status and adjustment requests from DPCD. 
*/ -enum dc_status dp_get_lane_status_and_lane_adjust( - struct dc_link *link, - const struct link_training_settings *link_training_setting, - union lane_status ln_status[LANE_COUNT_DP_MAX], - union lane_align_status_updated *ln_align, - union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], - uint32_t offset); - -void dp_wait_for_training_aux_rd_interval( - struct dc_link *link, - uint32_t wait_in_micro_secs); - -bool dp_is_cr_done(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status); - -enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status); - -bool dp_is_ch_eq_done(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status); -bool dp_is_symbol_locked(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status); -bool dp_is_interlane_aligned(union lane_align_status_updated align_status); - -bool dp_is_max_vs_reached( - const struct link_training_settings *lt_settings); -void dp_hw_to_dpcd_lane_settings( - const struct link_training_settings *lt_settings, - const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], - union dpcd_training_lane dpcd_lane_settings[]); -void dp_decide_lane_settings( - const struct link_training_settings *lt_settings, - const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], - struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], - union dpcd_training_lane dpcd_lane_settings[]); - -uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval); - -enum dpcd_training_patterns - dc_dp_training_pattern_to_dpcd_training_pattern( - struct dc_link *link, - enum dc_dp_training_pattern pattern); - -uint8_t dc_dp_initialize_scrambling_data_symbols( - struct dc_link *link, - enum dc_dp_training_pattern pattern); - enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); @@ -180,35 +86,12 @@ void dp_decide_training_settings( const struct dc_link_settings *link_setting, struct link_training_settings *lt_settings); -/* Convert PHY repeater count read from DPCD uint8_t. */ -uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count); - -/* Check DPCD training status registers to detect link loss. 
*/ -enum link_training_result dp_check_link_loss_status( - struct dc_link *link, - const struct link_training_settings *link_training_setting); - -enum dc_status dpcd_configure_lttpr_mode( - struct dc_link *link, - struct link_training_settings *lt_settings); - -enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings); -enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link); -bool dp_is_lttpr_present(struct dc_link *link); -enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting); -void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override); -enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link); -enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link); bool dpcd_write_128b_132b_sst_payload_allocation_table( const struct dc_stream_state *stream, struct dc_link *link, struct link_mst_stream_allocation_table *proposed_table, bool allocate); -enum dc_status dpcd_configure_channel_coding( - struct dc_link *link, - struct link_training_settings *lt_settings); - bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link); struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( @@ -220,48 +103,15 @@ void enable_dp_hpo_output(struct dc_link *link, void disable_dp_hpo_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal); - void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable); -bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx); void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); -void dp_receiver_power_ctrl(struct dc_link *link, bool on); void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode); -void dp_enable_link_phy( - struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal, - enum clock_source_id clock_source, - const struct dc_link_settings *link_settings); void edp_add_delay_for_T9(struct dc_link *link); bool edp_receiver_ready_T9(struct dc_link *link); bool edp_receiver_ready_T7(struct dc_link *link); -void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal); - -void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, - enum signal_type signal); - -bool dp_set_hw_training_pattern( - struct dc_link *link, - const struct link_resource *link_res, - enum dc_dp_training_pattern pattern, - uint32_t offset); - -void dp_set_hw_lane_settings( - struct dc_link *link, - const struct link_resource *link_res, - const struct link_training_settings *link_settings, - uint32_t offset); - -void dp_set_hw_test_pattern( - struct dc_link *link, - const struct link_resource *link_res, - enum dp_test_pattern test_pattern, - uint8_t *custom_pattern, - uint32_t custom_pattern_size); - void dp_retrain_link_dp_test(struct dc_link *link, struct dc_link_settings *link_setting, bool skip_video_pattern); + #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h deleted file mode 100644 index 39c1d1d07357..000000000000 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h +++ /dev/null @@ -1,105 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2021 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_LINK_DPIA_H__ -#define __DC_LINK_DPIA_H__ - -/* This module implements functionality for training DPIA links. */ - -struct dc_link; -struct dc_link_settings; - -/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */ -#define DPIA_CLK_SYNC_DELAY 16000 - -/* Extend interval between training status checks for manual testing. */ -#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000 - -/** @note Can remove once DP tunneling registers in upstream include/drm/drm_dp_helper.h */ -/* DPCD DP Tunneling over USB4 */ -#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d -#define DP_IN_ADAPTER_INFO 0xe000e -#define DP_USB4_DRIVER_ID 0xe000f -#define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b - -/* SET_CONFIG message types sent by driver. */ -enum dpia_set_config_type { - DPIA_SET_CFG_SET_LINK = 0x01, - DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05, - DPIA_SET_CFG_SET_TRAINING = 0x18, - DPIA_SET_CFG_SET_VSPE = 0x19 -}; - -/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */ -enum dpia_set_config_ts { - DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */ - DPIA_TS_TPS1 = 0x01, - DPIA_TS_TPS2 = 0x02, - DPIA_TS_TPS3 = 0x03, - DPIA_TS_TPS4 = 0x07, - DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */ -}; - -/* SET_CONFIG message data associated with messages sent by driver. */ -union dpia_set_config_data { - struct { - uint8_t mode : 1; - uint8_t reserved : 7; - } set_link; - struct { - uint8_t stage; - } set_training; - struct { - uint8_t swing : 2; - uint8_t max_swing_reached : 1; - uint8_t pre_emph : 2; - uint8_t max_pre_emph_reached : 1; - uint8_t reserved : 2; - } set_vspe; - uint8_t raw; -}; - -/* Read tunneling device capability from DPCD and update link capability - * accordingly. - */ -enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link); - -/* Query hot plug status of USB4 DP tunnel. - * Returns true if HPD high. - */ -bool dc_link_dpia_query_hpd_status(struct dc_link *link); - -/* Train DP tunneling link for USB4 DPIA display endpoint. - * DPIA equivalent of dc_link_dp_perfrorm_link_training. - * Aborts link training upon detection of sink unplug. 
- */ -enum link_training_result dc_link_dpia_perform_link_training( - struct dc_link *link, - const struct link_resource *link_res, - const struct dc_link_settings *link_setting, - bool skip_video_pattern); - -#endif /* __DC_LINK_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h index 2ae630bf2aee..7254182b7c72 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h @@ -27,7 +27,6 @@ #define __DAL_AUX_ENGINE_H__ #include "dc_ddc_types.h" -#include "include/i2caux_interface.h" enum aux_return_code_type; @@ -81,7 +80,12 @@ enum i2c_default_speed { I2CAUX_DEFAULT_I2C_SW_SPEED = 50 }; -union aux_config; +union aux_config { + struct { + uint32_t ALLOW_AUX_WHEN_HPD_LOW:1; + } bits; + uint32_t raw; +}; struct aux_engine { uint32_t inst; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 5b0265c0df61..beb26dc8a07f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -187,6 +187,7 @@ struct hubbub_funcs { void (*init_crb)(struct hubbub *hubbub); void (*force_usr_retraining_allow)(struct hubbub *hubbub, bool allow); void (*set_request_limit)(struct hubbub *hubbub, int memory_channel_count, int words_per_channel); + void (*dchubbub_init)(struct hubbub *hubbub); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h new file mode 100644 index 000000000000..3945522fb798 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -0,0 +1,92 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_H__ +#define __DC_LINK_H__ + +/* FILE POLICY AND INTENDED USAGE: + * + * This header declares link functions exposed to dc. All functions must have + * "link_" as prefix. For example link_run_my_function. This header is strictly + * private in dc and should never be included in other header files. dc + * components should include this header in their .c files in order to access + * functions in link folder. This file should never include any header files in + * link folder. 
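 * (For example, struct ddc_service_init_data is defined directly in this
 * header, moved from the deleted dc/inc/dc_link_ddc.h, rather than being
 * pulled in from a header inside the link folder.)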
If there is a need to expose a function declared in one of + * header files in side link folder, you need to move the function declaration + * into this file and prefix it with "link_". + */ +#include "core_types.h" +#include "dc_link.h" + +struct gpio *link_get_hpd_gpio(struct dc_bios *dcb, + struct graphics_object_id link_id, + struct gpio_service *gpio_service); + +struct ddc_service_init_data { + struct graphics_object_id id; + struct dc_context *ctx; + struct dc_link *link; + bool is_dpia_link; +}; + +struct ddc_service *link_create_ddc_service( + struct ddc_service_init_data *ddc_init_data); + +void link_destroy_ddc_service(struct ddc_service **ddc); + +bool link_is_in_aux_transaction_mode(struct ddc_service *ddc); + +bool link_query_ddc_data( + struct ddc_service *ddc, + uint32_t address, + uint8_t *write_buf, + uint32_t write_size, + uint8_t *read_buf, + uint32_t read_size); + + +/* Attempt to submit an aux payload, retrying on timeouts, defers, and busy + * states as outlined in the DP spec. Returns true if the request was + * successful. + * + * NOTE: The function requires explicit mutex on DM side in order to prevent + * potential race condition. DC components should call the dpcd read/write + * function in dm_helpers in order to access dpcd safely + */ +bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc, + struct aux_payload *payload); + +uint32_t link_get_aux_defer_delay(struct ddc_service *ddc); + +bool link_is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx); + +enum dp_link_encoding link_dp_get_encoding_format( + const struct dc_link_settings *link_settings); + +bool link_decide_link_settings( + struct dc_stream_state *stream, + struct dc_link_settings *link_setting); + +#endif /* __DC_LINK_HPD_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile index 054c2a727eb2..4dee0e6248b1 100644 --- a/drivers/gpu/drm/amd/display/dc/link/Makefile +++ b/drivers/gpu/drm/amd/display/dc/link/Makefile @@ -23,7 +23,11 @@ # It abstracts the control and status of back end pipe such as DIO, HPO, DPIA, # PHY, HPD, DDC and etc). -LINK = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o link_dp_trace.o +LINK = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o link_dp_trace.o \ +link_hpd.o link_ddc.o link_dpcd.o link_dp_dpia.o link_dp_training.o \ +link_dp_training_8b_10b.o link_dp_training_128b_132b.o link_dp_training_dpia.o \ +link_dp_training_auxless.o link_dp_training_fixed_vs_pe_retimer.o link_dp_phy.o \ +link_dp_capability.o AMD_DAL_LINK = $(addprefix $(AMDDALPATH)/dc/link/,$(LINK)) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/link_ddc.c index 651231387043..5269125bc2a4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_ddc.c @@ -23,20 +23,20 @@ * */ -#include "dm_services.h" -#include "dm_helpers.h" -#include "gpio_service_interface.h" -#include "include/ddc_service_types.h" -#include "include/grph_object_id.h" -#include "include/dpcd_defs.h" -#include "include/logger_interface.h" -#include "include/vector.h" -#include "core_types.h" -#include "dc_link_ddc.h" +/* FILE POLICY AND INTENDED USAGE: + * + * This file implements generic display communication protocols such as i2c, aux + * and scdc. The file should not contain any specific applications of these + * protocols such as display capability query, detection, or handshaking such as + * link training. 
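 * As an illustrative sketch (not taken from this patch), a dc component
 * outside the link folder would consume the interface above by including
 * inc/link.h and calling link_query_ddc_data(); the variable names below
 * and the visibility of the SCDC register defines to the caller are
 * assumptions:
 *
 *	uint8_t offset = HDMI_SCDC_SINK_VERSION;
 *	uint8_t sink_version = 0;
 *	bool ok = link_query_ddc_data(ddc_service, HDMI_SCDC_ADDRESS,
 *			&offset, sizeof(offset),
 *			&sink_version, sizeof(sink_version));
 *
 * This mirrors the pattern write_scdc_data() uses later in this file, but
 * goes through the public link_ entry point instead of the old
 * dal_ddc_service_* helpers.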
+ */ +#include "link_ddc.h" +#include "vector.h" #include "dce/dce_aux.h" -#include "dmub/inc/dmub_cmd.h" +#include "dal_asic_id.h" #include "link_dpcd.h" -#include "include/dal_asic_id.h" +#include "dm_helpers.h" +#include "atomfirmware.h" #define DC_LOGGER_INIT(logger) @@ -45,86 +45,6 @@ static const uint8_t DP_VGA_DONGLE_BRANCH_DEV_NAME[] = "DpVga"; static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa"; static const uint8_t DP_DVI_CONVERTER_ID_5[] = "3393N2"; -#define AUX_POWER_UP_WA_DELAY 500 -#define I2C_OVER_AUX_DEFER_WA_DELAY 70 -#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40 -#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1 - -/* CV smart dongle slave address for retrieving supported HDTV modes*/ -#define CV_SMART_DONGLE_ADDRESS 0x20 -/* DVI-HDMI dongle slave address for retrieving dongle signature*/ -#define DVI_HDMI_DONGLE_ADDRESS 0x68 -struct dvi_hdmi_dongle_signature_data { - int8_t vendor[3];/* "AMD" */ - uint8_t version[2]; - uint8_t size; - int8_t id[11];/* "6140063500G"*/ -}; -/* DP-HDMI dongle slave address for retrieving dongle signature*/ -#define DP_HDMI_DONGLE_ADDRESS 0x40 -static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR"; -#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04 - -struct dp_hdmi_dongle_signature_data { - int8_t id[15];/* "DP-HDMI ADAPTOR"*/ - uint8_t eot;/* end of transmition '\x4' */ -}; - -/* SCDC Address defines (HDMI 2.0)*/ -#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3 -#define HDMI_SCDC_ADDRESS 0x54 -#define HDMI_SCDC_SINK_VERSION 0x01 -#define HDMI_SCDC_SOURCE_VERSION 0x02 -#define HDMI_SCDC_UPDATE_0 0x10 -#define HDMI_SCDC_TMDS_CONFIG 0x20 -#define HDMI_SCDC_SCRAMBLER_STATUS 0x21 -#define HDMI_SCDC_CONFIG_0 0x30 -#define HDMI_SCDC_STATUS_FLAGS 0x40 -#define HDMI_SCDC_ERR_DETECT 0x50 -#define HDMI_SCDC_TEST_CONFIG 0xC0 - -union hdmi_scdc_update_read_data { - uint8_t byte[2]; - struct { - uint8_t STATUS_UPDATE:1; - uint8_t CED_UPDATE:1; - uint8_t RR_TEST:1; - uint8_t RESERVED:5; - uint8_t RESERVED2:8; - } fields; -}; - -union hdmi_scdc_status_flags_data { - uint8_t byte; - struct { - uint8_t CLOCK_DETECTED:1; - uint8_t CH0_LOCKED:1; - uint8_t CH1_LOCKED:1; - uint8_t CH2_LOCKED:1; - uint8_t RESERVED:4; - } fields; -}; - -union hdmi_scdc_ced_data { - uint8_t byte[7]; - struct { - uint8_t CH0_8LOW:8; - uint8_t CH0_7HIGH:7; - uint8_t CH0_VALID:1; - uint8_t CH1_8LOW:8; - uint8_t CH1_7HIGH:7; - uint8_t CH1_VALID:1; - uint8_t CH2_8LOW:8; - uint8_t CH2_7HIGH:7; - uint8_t CH2_VALID:1; - uint8_t CHECKSUM:8; - uint8_t RESERVED:8; - uint8_t RESERVED2:8; - uint8_t RESERVED3:8; - uint8_t RESERVED4:4; - } fields; -}; - struct i2c_payloads { struct vector payloads; }; @@ -157,7 +77,7 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p) #define DDC_MIN(a, b) (((a) < (b)) ? 
(a) : (b)) -void dal_ddc_i2c_payloads_add( +static void i2c_payloads_add( struct i2c_payloads *payloads, uint32_t address, uint32_t len, @@ -225,7 +145,7 @@ static void ddc_service_construct( ddc_service->wa.raw = 0; } -struct ddc_service *dal_ddc_service_create( +struct ddc_service *link_create_ddc_service( struct ddc_service_init_data *init_data) { struct ddc_service *ddc_service; @@ -245,7 +165,7 @@ static void ddc_service_destruct(struct ddc_service *ddc) dal_gpio_destroy_ddc(&ddc->ddc_pin); } -void dal_ddc_service_destroy(struct ddc_service **ddc) +void link_destroy_ddc_service(struct ddc_service **ddc) { if (!ddc || !*ddc) { BREAK_TO_DEBUGGER(); @@ -256,19 +176,14 @@ void dal_ddc_service_destroy(struct ddc_service **ddc) *ddc = NULL; } -enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc) -{ - return DDC_SERVICE_TYPE_CONNECTOR; -} - -void dal_ddc_service_set_transaction_type( +void set_ddc_transaction_type( struct ddc_service *ddc, enum ddc_transaction_type type) { ddc->transaction_type = type; } -bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc) +bool link_is_in_aux_transaction_mode(struct ddc_service *ddc) { switch (ddc->transaction_type) { case DDC_TRANSACTION_TYPE_I2C_OVER_AUX: @@ -281,7 +196,7 @@ bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc) return false; } -void ddc_service_set_dongle_type(struct ddc_service *ddc, +void set_dongle_type(struct ddc_service *ddc, enum display_dongle_type dongle_type) { ddc->dongle_type = dongle_type; @@ -323,7 +238,7 @@ static uint32_t defer_delay_converter_wa( #define DP_TRANSLATOR_DELAY 5 -uint32_t get_defer_delay(struct ddc_service *ddc) +uint32_t link_get_aux_defer_delay(struct ddc_service *ddc) { uint32_t defer_delay = 0; @@ -351,175 +266,45 @@ uint32_t get_defer_delay(struct ddc_service *ddc) return defer_delay; } -static bool i2c_read( - struct ddc_service *ddc, - uint32_t address, - uint8_t *buffer, - uint32_t len) -{ - uint8_t offs_data = 0; - struct i2c_payload payloads[2] = { - { - .write = true, - .address = address, - .length = 1, - .data = &offs_data }, - { - .write = false, - .address = address, - .length = len, - .data = buffer } }; - - struct i2c_command command = { - .payloads = payloads, - .number_of_payloads = 2, - .engine = DDC_I2C_COMMAND_ENGINE, - .speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; - - return dm_helpers_submit_i2c( - ddc->ctx, - ddc->link, - &command); -} - -void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( - struct ddc_service *ddc, - struct display_sink_capability *sink_cap) +static bool submit_aux_command(struct ddc_service *ddc, + struct aux_payload *payload) { - uint8_t i; - bool is_valid_hdmi_signature; - enum display_dongle_type *dongle = &sink_cap->dongle_type; - uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE]; - bool is_type2_dongle = false; - int retry_count = 2; - struct dp_hdmi_dongle_signature_data *dongle_signature; - - /* Assume we have no valid DP passive dongle connected */ - *dongle = DISPLAY_DONGLE_NONE; - sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK; - - /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/ - if (!i2c_read( - ddc, - DP_HDMI_DONGLE_ADDRESS, - type2_dongle_buf, - sizeof(type2_dongle_buf))) { - /* Passive HDMI dongles can sometimes fail here without retrying*/ - while (retry_count > 0) { - if (i2c_read(ddc, - DP_HDMI_DONGLE_ADDRESS, - type2_dongle_buf, - sizeof(type2_dongle_buf))) - break; - retry_count--; - } - if (retry_count == 0) { - *dongle = 
DISPLAY_DONGLE_DP_DVI_DONGLE; - sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; - - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), - "DP-DVI passive dongle %dMhz: ", - DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); - return; - } - } - - /* Check if Type 2 dongle.*/ - if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID) - is_type2_dongle = true; - - dongle_signature = - (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf; + uint32_t retrieved = 0; + bool ret = false; - is_valid_hdmi_signature = true; + if (!ddc) + return false; - /* Check EOT */ - if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) { - is_valid_hdmi_signature = false; - } + if (!payload) + return false; - /* Check signature */ - for (i = 0; i < sizeof(dongle_signature->id); ++i) { - /* If its not the right signature, - * skip mismatch in subversion byte.*/ - if (dongle_signature->id[i] != - dp_hdmi_dongle_signature_str[i] && i != 3) { + do { + struct aux_payload current_payload; + bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= + payload->length; + uint32_t payload_length = is_end_of_payload ? + payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; - if (is_type2_dongle) { - is_valid_hdmi_signature = false; - break; - } + current_payload.address = payload->address; + current_payload.data = &payload->data[retrieved]; + current_payload.defer_delay = payload->defer_delay; + current_payload.i2c_over_aux = payload->i2c_over_aux; + current_payload.length = payload_length; + /* set mot (middle of transaction) to false if it is the last payload */ + current_payload.mot = is_end_of_payload ? payload->mot:true; + current_payload.write_status_update = false; + current_payload.reply = payload->reply; + current_payload.write = payload->write; - } - } + ret = link_aux_transfer_with_retries_no_mutex(ddc, ¤t_payload); - if (is_type2_dongle) { - uint32_t max_tmds_clk = - type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK]; - - max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2; - - if (0 == max_tmds_clk || - max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK || - max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) { - *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; - - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, - sizeof(type2_dongle_buf), - "DP-DVI passive dongle %dMhz: ", - DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); - } else { - if (is_valid_hdmi_signature == true) { - *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; - - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, - sizeof(type2_dongle_buf), - "Type 2 DP-HDMI passive dongle %dMhz: ", - max_tmds_clk); - } else { - *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; - - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, - sizeof(type2_dongle_buf), - "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ", - max_tmds_clk); - - } - - /* Multiply by 1000 to convert to kHz. 
*/ - sink_cap->max_hdmi_pixel_clock = - max_tmds_clk * 1000; - } - sink_cap->is_dongle_type_one = false; - - } else { - if (is_valid_hdmi_signature == true) { - *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; - - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, - sizeof(type2_dongle_buf), - "Type 1 DP-HDMI passive dongle %dMhz: ", - sink_cap->max_hdmi_pixel_clock / 1000); - } else { - *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; - - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, - sizeof(type2_dongle_buf), - "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ", - sink_cap->max_hdmi_pixel_clock / 1000); - } - sink_cap->is_dongle_type_one = true; - } + retrieved += payload_length; + } while (retrieved < payload->length && ret == true); - return; + return ret; } -enum { - DP_SINK_CAP_SIZE = - DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1 -}; - -bool dal_ddc_service_query_ddc_data( +bool link_query_ddc_data( struct ddc_service *ddc, uint32_t address, uint8_t *write_buf, @@ -529,7 +314,7 @@ bool dal_ddc_service_query_ddc_data( { bool success = true; uint32_t payload_size = - dal_ddc_service_is_in_aux_transaction_mode(ddc) ? + link_is_in_aux_transaction_mode(ddc) ? DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE; uint32_t write_payloads = @@ -543,13 +328,13 @@ bool dal_ddc_service_query_ddc_data( if (!payloads_num) return false; - if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { + if (link_is_in_aux_transaction_mode(ddc)) { struct aux_payload payload; payload.i2c_over_aux = true; payload.address = address; payload.reply = NULL; - payload.defer_delay = get_defer_delay(ddc); + payload.defer_delay = link_get_aux_defer_delay(ddc); payload.write_status_update = false; if (write_size != 0) { @@ -561,7 +346,7 @@ bool dal_ddc_service_query_ddc_data( payload.length = write_size; payload.data = write_buf; - success = dal_ddc_submit_aux_command(ddc, &payload); + success = submit_aux_command(ddc, &payload); } if (read_size != 0 && success) { @@ -573,7 +358,7 @@ bool dal_ddc_service_query_ddc_data( payload.length = read_size; payload.data = read_buf; - success = dal_ddc_submit_aux_command(ddc, &payload); + success = submit_aux_command(ddc, &payload); } } else { struct i2c_command command = {0}; @@ -587,10 +372,10 @@ bool dal_ddc_service_query_ddc_data( command.engine = DDC_I2C_COMMAND_ENGINE; command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz; - dal_ddc_i2c_payloads_add( + i2c_payloads_add( &payloads, address, write_size, write_buf, true); - dal_ddc_i2c_payloads_add( + i2c_payloads_add( &payloads, address, read_size, read_buf, false); command.number_of_payloads = @@ -607,51 +392,6 @@ bool dal_ddc_service_query_ddc_data( return success; } -bool dal_ddc_submit_aux_command(struct ddc_service *ddc, - struct aux_payload *payload) -{ - uint32_t retrieved = 0; - bool ret = false; - - if (!ddc) - return false; - - if (!payload) - return false; - - do { - struct aux_payload current_payload; - bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= - payload->length; - uint32_t payload_length = is_end_of_payload ? - payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; - - current_payload.address = payload->address; - current_payload.data = &payload->data[retrieved]; - current_payload.defer_delay = payload->defer_delay; - current_payload.i2c_over_aux = payload->i2c_over_aux; - current_payload.length = payload_length; - /* set mot (middle of transaction) to false if it is the last payload */ - current_payload.mot = is_end_of_payload ? 
payload->mot:true; - current_payload.write_status_update = false; - current_payload.reply = payload->reply; - current_payload.write = payload->write; - - ret = dc_link_aux_transfer_with_retries(ddc, ¤t_payload); - - retrieved += payload_length; - } while (retrieved < payload->length && ret == true); - - return ret; -} - -/* dc_link_aux_transfer_raw() - Attempt to transfer - * the given aux payload. This function does not perform - * retries or handle error states. The reply is returned - * in the payload->reply and the result through - * *operation_result. Returns the number of bytes transferred, - * or -1 on a failure. - */ int dc_link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_return_code_type *operation_result) @@ -664,22 +404,14 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc, } } -/* dc_link_aux_transfer_with_retries() - Attempt to submit an - * aux payload, retrying on timeouts, defers, and busy states - * as outlined in the DP spec. Returns true if the request - * was successful. - * - * Unless you want to implement your own retry semantics, this - * is probably the one you want. - */ -bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, +bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc, struct aux_payload *payload) { return dce_aux_transfer_with_retries(ddc, payload); } -bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc, +bool try_to_configure_aux_timeout(struct ddc_service *ddc, uint32_t timeout) { bool result = false; @@ -712,20 +444,12 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc, return result; } -/*test only function*/ -void dal_ddc_service_set_ddc_pin( - struct ddc_service *ddc_service, - struct ddc *ddc) -{ - ddc_service->ddc_pin = ddc; -} - -struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service) +struct ddc *get_ddc_pin(struct ddc_service *ddc_service) { return ddc_service->ddc_pin; } -void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service, +void write_scdc_data(struct ddc_service *ddc_service, uint32_t pix_clk, bool lte_340_scramble) { @@ -740,13 +464,13 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service, ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) return; - dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset, + link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &sink_version, sizeof(sink_version)); if (sink_version == 1) { /*Source Version = 1*/ write_buffer[0] = HDMI_SCDC_SOURCE_VERSION; write_buffer[1] = 1; - dal_ddc_service_query_ddc_data(ddc_service, slave_address, + link_query_ddc_data(ddc_service, slave_address, write_buffer, sizeof(write_buffer), NULL, 0); /*Read Request from SCDC caps*/ } @@ -759,11 +483,11 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service, } else { write_buffer[1] = 0; } - dal_ddc_service_query_ddc_data(ddc_service, slave_address, write_buffer, + link_query_ddc_data(ddc_service, slave_address, write_buffer, sizeof(write_buffer), NULL, 0); } -void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service) +void read_scdc_data(struct ddc_service *ddc_service) { uint8_t slave_address = HDMI_SCDC_ADDRESS; uint8_t offset = HDMI_SCDC_TMDS_CONFIG; @@ -773,20 +497,19 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service) ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) return; - dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset, + 
link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &tmds_config, sizeof(tmds_config)); if (tmds_config & 0x1) { union hdmi_scdc_status_flags_data status_data = {0}; uint8_t scramble_status = 0; offset = HDMI_SCDC_SCRAMBLER_STATUS; - dal_ddc_service_query_ddc_data(ddc_service, slave_address, + link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &scramble_status, sizeof(scramble_status)); offset = HDMI_SCDC_STATUS_FLAGS; - dal_ddc_service_query_ddc_data(ddc_service, slave_address, + link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &status_data.byte, sizeof(status_data.byte)); } } - diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/dc/link/link_ddc.h index 418fbf8c5c3a..86e9d2e886d6 100644 --- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_ddc.h @@ -23,60 +23,38 @@ * */ -#ifndef __DAL_I2CAUX_INTERFACE_H__ -#define __DAL_I2CAUX_INTERFACE_H__ +#ifndef __DAL_DDC_SERVICE_H__ +#define __DAL_DDC_SERVICE_H__ -#include "dc_types.h" -#include "gpio_service_interface.h" +#include "link.h" +#define AUX_POWER_UP_WA_DELAY 500 +#define I2C_OVER_AUX_DEFER_WA_DELAY 70 +#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40 +#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1 +#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/ -#define DEFAULT_AUX_MAX_DATA_SIZE 16 -#define AUX_MAX_DEFER_WRITE_RETRY 20 +#define EDID_SEGMENT_SIZE 256 -struct aux_payload { - /* set following flag to read/write I2C data, - * reset it to read/write DPCD data */ - bool i2c_over_aux; - /* set following flag to write data, - * reset it to read data */ - bool write; - bool mot; - bool write_status_update; +void set_ddc_transaction_type( + struct ddc_service *ddc, + enum ddc_transaction_type type); - uint32_t address; - uint32_t length; - uint8_t *data; - /* - * used to return the reply type of the transaction - * ignored if NULL - */ - uint8_t *reply; - /* expressed in milliseconds - * zero means "use default value" - */ - uint32_t defer_delay; +bool try_to_configure_aux_timeout(struct ddc_service *ddc, + uint32_t timeout); -}; +void write_scdc_data( + struct ddc_service *ddc_service, + uint32_t pix_clk, + bool lte_340_scramble); -struct aux_command { - struct aux_payload *payloads; - uint8_t number_of_payloads; +void read_scdc_data( + struct ddc_service *ddc_service); - /* expressed in milliseconds - * zero means "use default value" */ - uint32_t defer_delay; +void set_dongle_type(struct ddc_service *ddc, + enum display_dongle_type dongle_type); - /* zero means "use default value" */ - uint32_t max_defer_write_retry; +struct ddc *get_ddc_pin(struct ddc_service *ddc_service); - enum i2c_mot_mode mot; -}; +#endif /* __DAL_DDC_SERVICE_H__ */ -union aux_config { - struct { - uint32_t ALLOW_AUX_WHEN_HPD_LOW:1; - } bits; - uint32_t raw; -}; - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/link_dp_capability.c new file mode 100644 index 000000000000..e72ad1b8330f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_capability.c @@ -0,0 +1,2169 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements dp specific link capability retrieval sequence. It is + * responsible for retrieving, parsing, overriding, deciding capability obtained + * from dp link. Link capability consists of encoders, DPRXs, cables, retimers, + * usb and all other possible backend capabilities. Other components should + * include this header file in order to access link capability. Accessing link + * capability by dereferencing dc_link outside dp_link_capability is not a + * recommended method as it makes the component dependent on the underlying data + * structure used to represent link capability instead of function interfaces. + */ + +#include "link_dp_capability.h" +#include "link_ddc.h" +#include "link_dpcd.h" +#include "link_dp_dpia.h" +#include "link_dp_phy.h" +#include "link_dp_trace.h" +#include "link_dp_training.h" +#include "atomfirmware.h" +#include "resource.h" +#include "link_enc_cfg.h" +#include "dc_link_dp.h" +#include "dc_dmub_srv.h" + +#define DC_LOGGER \ + link->ctx->logger +#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ + +#ifndef MAX +#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) +#endif +#ifndef MIN +#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) +#endif + +#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/ + +struct dp_lt_fallback_entry { + enum dc_lane_count lane_count; + enum dc_link_rate link_rate; +}; + +static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = { + /* This link training fallback array is ordered by + * link bandwidth from highest to lowest. + * DP specs makes it a normative policy to always + * choose the next highest link bandwidth during + * link training fallback. 
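 * For example, after four-lane UHBR20 fails, the next entry tried is
 * four-lane UHBR13.5, the next highest total bandwidth in the table below.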
+ */ + {LANE_COUNT_FOUR, LINK_RATE_UHBR20}, + {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5}, + {LANE_COUNT_TWO, LINK_RATE_UHBR20}, + {LANE_COUNT_FOUR, LINK_RATE_UHBR10}, + {LANE_COUNT_TWO, LINK_RATE_UHBR13_5}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH3}, + {LANE_COUNT_ONE, LINK_RATE_UHBR20}, + {LANE_COUNT_TWO, LINK_RATE_UHBR10}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH2}, + {LANE_COUNT_ONE, LINK_RATE_UHBR13_5}, + {LANE_COUNT_TWO, LINK_RATE_HIGH3}, + {LANE_COUNT_ONE, LINK_RATE_UHBR10}, + {LANE_COUNT_TWO, LINK_RATE_HIGH2}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH}, + {LANE_COUNT_ONE, LINK_RATE_HIGH3}, + {LANE_COUNT_FOUR, LINK_RATE_LOW}, + {LANE_COUNT_ONE, LINK_RATE_HIGH2}, + {LANE_COUNT_TWO, LINK_RATE_HIGH}, + {LANE_COUNT_TWO, LINK_RATE_LOW}, + {LANE_COUNT_ONE, LINK_RATE_HIGH}, + {LANE_COUNT_ONE, LINK_RATE_LOW}, +}; + +static const struct dc_link_settings fail_safe_link_settings = { + .lane_count = LANE_COUNT_ONE, + .link_rate = LINK_RATE_LOW, + .link_spread = LINK_SPREAD_DISABLED, +}; + +bool is_dp_active_dongle(const struct dc_link *link) +{ + return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) && + (link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER); +} + +bool is_dp_branch_device(const struct dc_link *link) +{ + return link->dpcd_caps.is_branch_dev; +} + +static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc) +{ + switch (bpc) { + case DOWN_STREAM_MAX_8BPC: + return 8; + case DOWN_STREAM_MAX_10BPC: + return 10; + case DOWN_STREAM_MAX_12BPC: + return 12; + case DOWN_STREAM_MAX_16BPC: + return 16; + default: + break; + } + + return -1; +} + +uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count) +{ + switch (lttpr_repeater_count) { + case 0x80: // 1 lttpr repeater + return 1; + case 0x40: // 2 lttpr repeaters + return 2; + case 0x20: // 3 lttpr repeaters + return 3; + case 0x10: // 4 lttpr repeaters + return 4; + case 0x08: // 5 lttpr repeaters + return 5; + case 0x04: // 6 lttpr repeaters + return 6; + case 0x02: // 7 lttpr repeaters + return 7; + case 0x01: // 8 lttpr repeaters + return 8; + default: + break; + } + return 0; // invalid value +} + +uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) +{ + switch (bw) { + case 0b001: + return 9000000; + case 0b010: + return 18000000; + case 0b011: + return 24000000; + case 0b100: + return 32000000; + case 0b101: + return 40000000; + case 0b110: + return 48000000; + } + + return 0; +} + +static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz) +{ + enum dc_link_rate link_rate; + // LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation. 
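	// e.g. 2700000 kHz is 10 x 0.27 Gbps, i.e. LINK_RATE_HIGH (HBR) below,
	// and 8100000 kHz is 30 x 0.27 Gbps, i.e. LINK_RATE_HIGH3 (HBR3).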
+ switch (link_rate_in_khz) { + case 1620000: + link_rate = LINK_RATE_LOW; // Rate_1 (RBR) - 1.62 Gbps/Lane + break; + case 2160000: + link_rate = LINK_RATE_RATE_2; // Rate_2 - 2.16 Gbps/Lane + break; + case 2430000: + link_rate = LINK_RATE_RATE_3; // Rate_3 - 2.43 Gbps/Lane + break; + case 2700000: + link_rate = LINK_RATE_HIGH; // Rate_4 (HBR) - 2.70 Gbps/Lane + break; + case 3240000: + link_rate = LINK_RATE_RBR2; // Rate_5 (RBR2)- 3.24 Gbps/Lane + break; + case 4320000: + link_rate = LINK_RATE_RATE_6; // Rate_6 - 4.32 Gbps/Lane + break; + case 5400000: + link_rate = LINK_RATE_HIGH2; // Rate_7 (HBR2)- 5.40 Gbps/Lane + break; + case 8100000: + link_rate = LINK_RATE_HIGH3; // Rate_8 (HBR3)- 8.10 Gbps/Lane + break; + default: + link_rate = LINK_RATE_UNKNOWN; + break; + } + return link_rate; +} + +static union dp_cable_id intersect_cable_id( + union dp_cable_id *a, union dp_cable_id *b) +{ + union dp_cable_id out; + + out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY, + b->bits.UHBR10_20_CAPABILITY); + out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY, + b->bits.UHBR13_5_CAPABILITY); + out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE); + + return out; +} + +/* + * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw. + */ +static uint32_t intersect_frl_link_bw_support( + const uint32_t max_supported_frl_bw_in_kbps, + const union hdmi_encoded_link_bw hdmi_encoded_link_bw) +{ + uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps; + + // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode) + if (hdmi_encoded_link_bw.bits.FRL_MODE) { + if (hdmi_encoded_link_bw.bits.BW_48Gbps) + supported_bw_in_kbps = 48000000; + else if (hdmi_encoded_link_bw.bits.BW_40Gbps) + supported_bw_in_kbps = 40000000; + else if (hdmi_encoded_link_bw.bits.BW_32Gbps) + supported_bw_in_kbps = 32000000; + else if (hdmi_encoded_link_bw.bits.BW_24Gbps) + supported_bw_in_kbps = 24000000; + else if (hdmi_encoded_link_bw.bits.BW_18Gbps) + supported_bw_in_kbps = 18000000; + else if (hdmi_encoded_link_bw.bits.BW_9Gbps) + supported_bw_in_kbps = 9000000; + } + + return supported_bw_in_kbps; +} + +static enum clock_source_id get_clock_source_id(struct dc_link *link) +{ + enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED; + struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source; + + if (dp_cs != NULL) { + dp_cs_id = dp_cs->id; + } else { + /* + * dp clock source is not initialized for some reason. + * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used + */ + ASSERT(dp_cs); + } + + return dp_cs_id; +} + +static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, + int length) +{ + int retry = 0; + union dp_downstream_port_present ds_port = { 0 }; + + if (!link->dpcd_caps.dpcd_rev.raw) { + do { + dc_link_dp_receiver_power_ctrl(link, true); + core_link_read_dpcd(link, DP_DPCD_REV, + dpcd_data, length); + link->dpcd_caps.dpcd_rev.raw = dpcd_data[ + DP_DPCD_REV - + DP_DPCD_REV]; + } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw); + } + + ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - + DP_DPCD_REV]; + + if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) { + switch (link->dpcd_caps.branch_dev_id) { + /* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down + * all internal circuits including AUX communication preventing + * reading DPCD table and EDID (spec violation). 
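 * (This is why the retry loop above powers the receiver back up via
 * dc_link_dp_receiver_power_ctrl() before each DP_DPCD_REV read attempt.)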
+ * Encoder will skip DP RX power down on disable_output to + * keep receiver powered all the time.*/ + case DP_BRANCH_DEVICE_ID_0010FA: + case DP_BRANCH_DEVICE_ID_0080E1: + case DP_BRANCH_DEVICE_ID_00E04C: + link->wa_flags.dp_keep_receiver_powered = true; + break; + + /* TODO: May need work around for other dongles. */ + default: + link->wa_flags.dp_keep_receiver_powered = false; + break; + } + } else + link->wa_flags.dp_keep_receiver_powered = false; +} + +bool dc_link_is_fec_supported(const struct dc_link *link) +{ + /* TODO - use asic cap instead of link_enc->features + * we no longer know which link enc to use for this link before commit + */ + struct link_encoder *link_enc = NULL; + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + return (dc_is_dp_signal(link->connector_signal) && link_enc && + link_enc->features.fec_supported && + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && + !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); +} + +bool dc_link_should_enable_fec(const struct dc_link *link) +{ + bool force_disable = false; + + if (link->fec_state == dc_link_fec_enabled) + force_disable = false; + else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && + link->local_sink && + link->local_sink->edid_caps.panel_patch.disable_fec) + force_disable = true; + else if (link->connector_signal == SIGNAL_TYPE_EDP + && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields. + dsc_support.DSC_SUPPORT == false + || link->panel_config.dsc.disable_dsc_edp + || !link->dc->caps.edp_dsc_support)) + force_disable = true; + + return !force_disable && dc_link_is_fec_supported(link); +} + +bool link_is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) +{ + /* If this assert is hit then we have a link encoder dynamic management issue */ + ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); + return (pipe_ctx->stream_res.hpo_dp_stream_enc && + pipe_ctx->link_res.hpo_dp_link_enc && + dc_is_dp_signal(pipe_ctx->stream->signal)); +} + +bool dp_is_lttpr_present(struct dc_link *link) +{ + return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && + link->dpcd_caps.lttpr_caps.max_lane_count > 0 && + link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && + link->dpcd_caps.lttpr_caps.revision.raw >= 0x14); +} + +/* in DP compliance test, DPR-120 may have + * a random value in its MAX_LINK_BW dpcd field. + * We map it to the maximum supported link rate that + * is smaller than MAX_LINK_BW in this case. 
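 * For example, a reported value that falls between LINK_RATE_HIGH and
 * LINK_RATE_HIGH2 is clamped down to LINK_RATE_HIGH by the helper below.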
+ */ +static enum dc_link_rate get_link_rate_from_max_link_bw( + uint8_t max_link_bw) +{ + enum dc_link_rate link_rate; + + if (max_link_bw >= LINK_RATE_HIGH3) { + link_rate = LINK_RATE_HIGH3; + } else if (max_link_bw < LINK_RATE_HIGH3 + && max_link_bw >= LINK_RATE_HIGH2) { + link_rate = LINK_RATE_HIGH2; + } else if (max_link_bw < LINK_RATE_HIGH2 + && max_link_bw >= LINK_RATE_HIGH) { + link_rate = LINK_RATE_HIGH; + } else if (max_link_bw < LINK_RATE_HIGH + && max_link_bw >= LINK_RATE_LOW) { + link_rate = LINK_RATE_LOW; + } else { + link_rate = LINK_RATE_UNKNOWN; + } + + return link_rate; +} + +static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) +{ + enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; + + if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20) + lttpr_max_link_rate = LINK_RATE_UHBR20; + else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5) + lttpr_max_link_rate = LINK_RATE_UHBR13_5; + else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10) + lttpr_max_link_rate = LINK_RATE_UHBR10; + + return lttpr_max_link_rate; +} + +static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link) +{ + enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN; + + if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) + cable_max_link_rate = LINK_RATE_UHBR20; + else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) + cable_max_link_rate = LINK_RATE_UHBR13_5; + else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) + cable_max_link_rate = LINK_RATE_UHBR10; + + return cable_max_link_rate; +} + +static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count) +{ + return lane_count <= LANE_COUNT_ONE; +} + +static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate) +{ + return link_rate <= LINK_RATE_LOW; +} + +static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count) +{ + switch (lane_count) { + case LANE_COUNT_FOUR: + return LANE_COUNT_TWO; + case LANE_COUNT_TWO: + return LANE_COUNT_ONE; + case LANE_COUNT_ONE: + return LANE_COUNT_UNKNOWN; + default: + return LANE_COUNT_UNKNOWN; + } +} + +static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate) +{ + switch (link_rate) { + case LINK_RATE_UHBR20: + return LINK_RATE_UHBR13_5; + case LINK_RATE_UHBR13_5: + return LINK_RATE_UHBR10; + case LINK_RATE_UHBR10: + return LINK_RATE_HIGH3; + case LINK_RATE_HIGH3: + return LINK_RATE_HIGH2; + case LINK_RATE_HIGH2: + return LINK_RATE_HIGH; + case LINK_RATE_HIGH: + return LINK_RATE_LOW; + case LINK_RATE_LOW: + return LINK_RATE_UNKNOWN; + default: + return LINK_RATE_UNKNOWN; + } +} + +static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count) +{ + switch (lane_count) { + case LANE_COUNT_ONE: + return LANE_COUNT_TWO; + case LANE_COUNT_TWO: + return LANE_COUNT_FOUR; + default: + return LANE_COUNT_UNKNOWN; + } +} + +static enum dc_link_rate increase_link_rate(struct dc_link *link, + enum dc_link_rate link_rate) +{ + switch (link_rate) { + case LINK_RATE_LOW: + return LINK_RATE_HIGH; + case LINK_RATE_HIGH: + return LINK_RATE_HIGH2; + case LINK_RATE_HIGH2: + return LINK_RATE_HIGH3; + case LINK_RATE_HIGH3: + return LINK_RATE_UHBR10; + case LINK_RATE_UHBR10: + /* upto DP2.x specs UHBR13.5 is the only link rate that could be + * not supported by DPRX when higher link rate is supported. + * so we treat it as a special case for code simplicity. 
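 * (Hence the ternary below: UHBR10 advances to UHBR13.5 only when the DPRX
 * lists it in its 128b/132b supported link rate table, and to UHBR20
 * otherwise.)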
When we + * have new specs with more link rates like this, we should + * consider a more generic solution to handle discrete link + * rate capabilities. + */ + return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ? + LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20; + case LINK_RATE_UHBR13_5: + return LINK_RATE_UHBR20; + default: + return LINK_RATE_UNKNOWN; + } +} + +static bool decide_fallback_link_setting_max_bw_policy( + struct dc_link *link, + const struct dc_link_settings *max, + struct dc_link_settings *cur, + enum link_training_result training_result) +{ + uint8_t cur_idx = 0, next_idx; + bool found = false; + + if (training_result == LINK_TRAINING_ABORT) + return false; + + while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks)) + /* find current index */ + if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count && + dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate) + break; + else + cur_idx++; + + next_idx = cur_idx + 1; + + while (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) + /* find next index */ + if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count || + dp_lt_fallbacks[next_idx].link_rate > max->link_rate) + next_idx++; + else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 && + link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0) + /* upto DP2.x specs UHBR13.5 is the only link rate that + * could be not supported by DPRX when higher link rate + * is supported. so we treat it as a special case for + * code simplicity. When we have new specs with more + * link rates like this, we should consider a more + * generic solution to handle discrete link rate + * capabilities. + */ + next_idx++; + else + break; + + if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) { + cur->lane_count = dp_lt_fallbacks[next_idx].lane_count; + cur->link_rate = dp_lt_fallbacks[next_idx].link_rate; + found = true; + } + + return found; +} + +/* + * function: set link rate and lane count fallback based + * on current link setting and last link training result + * return value: + * true - link setting could be set + * false - has reached minimum setting + * and no further fallback could be done + */ +bool decide_fallback_link_setting( + struct dc_link *link, + struct dc_link_settings *max, + struct dc_link_settings *cur, + enum link_training_result training_result) +{ + if (link_dp_get_encoding_format(max) == DP_128b_132b_ENCODING || + link->dc->debug.force_dp2_lt_fallback_method) + return decide_fallback_link_setting_max_bw_policy(link, max, + cur, training_result); + + switch (training_result) { + case LINK_TRAINING_CR_FAIL_LANE0: + case LINK_TRAINING_CR_FAIL_LANE1: + case LINK_TRAINING_CR_FAIL_LANE23: + case LINK_TRAINING_LQA_FAIL: + { + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + } else if (!reached_minimum_lane_count(cur->lane_count)) { + cur->link_rate = max->link_rate; + if (training_result == LINK_TRAINING_CR_FAIL_LANE0) + return false; + else if (training_result == LINK_TRAINING_CR_FAIL_LANE1) + cur->lane_count = LANE_COUNT_ONE; + else if (training_result == LINK_TRAINING_CR_FAIL_LANE23) + cur->lane_count = LANE_COUNT_TWO; + else + cur->lane_count = reduce_lane_count(cur->lane_count); + } else { + return false; + } + break; + } + case LINK_TRAINING_EQ_FAIL_EQ: + case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: + { + if (!reached_minimum_lane_count(cur->lane_count)) { + cur->lane_count = reduce_lane_count(cur->lane_count); + } else if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = 
reduce_link_rate(cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. + */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; + } else { + return false; + } + break; + } + case LINK_TRAINING_EQ_FAIL_CR: + { + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. + */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; + } else { + return false; + } + break; + } + default: + return false; + } + return true; +} +static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw) +{ + struct dc_link_settings initial_link_setting = { + LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0}; + struct dc_link_settings current_link_setting = + initial_link_setting; + uint32_t link_bw; + + if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) + return false; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + link->verified_link_cap.link_rate) { + link_bw = dc_link_bandwidth_kbps( + link, + &current_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + current_link_setting.link_rate = + increase_link_rate(link, + current_link_setting.link_rate); + current_link_setting.lane_count = + initial_link_setting.lane_count; + } + } + + return false; +} + +bool dc_link_decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw) +{ + struct dc_link_settings initial_link_setting; + struct dc_link_settings current_link_setting; + uint32_t link_bw; + + /* + * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. + * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || + link->dpcd_caps.edp_supported_link_rates_count == 0) { + *link_setting = link->verified_link_cap; + return true; + } + + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = true; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2.
could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + link->verified_link_cap.link_rate) { + link_bw = dc_link_bandwidth_kbps( + link, + &current_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + current_link_setting.link_rate_set++; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + current_link_setting.lane_count = + initial_link_setting.lane_count; + } else + break; + } + } + return false; +} + +bool decide_edp_link_settings_with_dsc(struct dc_link *link, + struct dc_link_settings *link_setting, + uint32_t req_bw, + enum dc_link_rate max_link_rate) +{ + struct dc_link_settings initial_link_setting; + struct dc_link_settings current_link_setting; + uint32_t link_bw; + + unsigned int policy = 0; + + policy = link->panel_config.dsc.force_dsc_edp_policy; + if (max_link_rate == LINK_RATE_UNKNOWN) + max_link_rate = link->verified_link_cap.link_rate; + /* + * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. + * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || + link->dpcd_caps.edp_supported_link_rates_count == 0)) { + /* for DSC enabled case, we search for minimum lane count */ + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = LINK_RATE_LOW; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = false; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) + return false; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2.
could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + max_link_rate) { + link_bw = dc_link_bandwidth_kbps( + link, + &current_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + if (policy) { + /* minimize lane */ + if (current_link_setting.link_rate < max_link_rate) { + current_link_setting.link_rate = + increase_link_rate(link, + current_link_setting.link_rate); + } else { + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + current_link_setting.link_rate = initial_link_setting.link_rate; + } else + break; + } + } else { + /* minimize link rate */ + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + current_link_setting.link_rate = + increase_link_rate(link, + current_link_setting.link_rate); + current_link_setting.lane_count = + initial_link_setting.lane_count; + } + } + } + return false; + } + + /* if optimize edp link is supported */ + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = true; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + max_link_rate) { + link_bw = dc_link_bandwidth_kbps( + link, + &current_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + if (policy) { + /* minimize lane */ + if (current_link_setting.link_rate_set < + link->dpcd_caps.edp_supported_link_rates_count + && current_link_setting.link_rate < max_link_rate) { + current_link_setting.link_rate_set++; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + } else { + if (current_link_setting.lane_count < link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + current_link_setting.link_rate_set = initial_link_setting.link_rate_set; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + } else + break; + } + } else { + /* minimize link rate */ + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + current_link_setting.link_rate_set++; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + current_link_setting.lane_count = + initial_link_setting.lane_count; + } else + break; + } + } + } + return false; +} + +static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting) +{ + *link_setting = link->verified_link_cap; + return true; +} + +bool link_decide_link_settings(struct dc_stream_state *stream, + struct
dc_link_settings *link_setting) +{ + struct dc_link *link = stream->link; + uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); + + memset(link_setting, 0, sizeof(*link_setting)); + + /* if preferred is specified through AMDDP, use it, if it's enough + * to drive the mode + */ + if (link->preferred_link_setting.lane_count != + LANE_COUNT_UNKNOWN && + link->preferred_link_setting.link_rate != + LINK_RATE_UNKNOWN) { + *link_setting = link->preferred_link_setting; + return true; + } + + /* MST doesn't perform link training for now + * TODO: add MST specific link training routine + */ + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + decide_mst_link_settings(link, link_setting); + } else if (link->connector_signal == SIGNAL_TYPE_EDP) { + /* enable edp link optimization for DSC eDP case */ + if (stream->timing.flags.DSC) { + enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN; + + if (link->panel_config.dsc.force_dsc_edp_policy) { + /* calculate link max link rate cap*/ + struct dc_link_settings tmp_link_setting; + struct dc_crtc_timing tmp_timing = stream->timing; + uint32_t orig_req_bw; + + tmp_link_setting.link_rate = LINK_RATE_UNKNOWN; + tmp_timing.flags.DSC = 0; + orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing); + dc_link_decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw); + max_link_rate = tmp_link_setting.link_rate; + } + decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate); + } else { + dc_link_decide_edp_link_settings(link, link_setting, req_bw); + } + } else { + decide_dp_link_settings(link, link_setting, req_bw); + } + + return link_setting->lane_count != LANE_COUNT_UNKNOWN && + link_setting->link_rate != LINK_RATE_UNKNOWN; +} + +enum dp_link_encoding link_dp_get_encoding_format(const struct dc_link_settings *link_settings) +{ + if ((link_settings->link_rate >= LINK_RATE_LOW) && + (link_settings->link_rate <= LINK_RATE_HIGH3)) + return DP_8b_10b_ENCODING; + else if ((link_settings->link_rate >= LINK_RATE_UHBR10) && + (link_settings->link_rate <= LINK_RATE_UHBR20)) + return DP_128b_132b_ENCODING; + return DP_UNKNOWN_ENCODING; +} + +enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link) +{ + struct dc_link_settings link_settings = {0}; + + if (!dc_is_dp_signal(link->connector_signal)) + return DP_UNKNOWN_ENCODING; + + if (link->preferred_link_setting.lane_count != + LANE_COUNT_UNKNOWN && + link->preferred_link_setting.link_rate != + LINK_RATE_UNKNOWN) { + link_settings = link->preferred_link_setting; + } else { + decide_mst_link_settings(link, &link_settings); + } + + return link_dp_get_encoding_format(&link_settings); +} + +static void read_dp_device_vendor_id(struct dc_link *link) +{ + struct dp_device_vendor_id dp_id; + + /* read IEEE branch device id */ + core_link_read_dpcd( + link, + DP_BRANCH_OUI, + (uint8_t *)&dp_id, + sizeof(dp_id)); + + link->dpcd_caps.branch_dev_id = + (dp_id.ieee_oui[0] << 16) + + (dp_id.ieee_oui[1] << 8) + + dp_id.ieee_oui[2]; + + memmove( + link->dpcd_caps.branch_dev_name, + dp_id.ieee_device_id, + sizeof(dp_id.ieee_device_id)); +} + +static enum dc_status wake_up_aux_channel(struct dc_link *link) +{ + enum dc_status status = DC_ERROR_UNEXPECTED; + uint32_t aux_channel_retry_cnt = 0; + uint8_t dpcd_power_state = '\0'; + + while (status != DC_OK && aux_channel_retry_cnt < 10) { + status = core_link_read_dpcd(link, DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + + /* Delay 1 ms if AUX CH is in power down state. 
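Illustrative aside, not part of the patch: link_dp_get_encoding_format() above picks the channel coding purely from the link-rate range, so callers never store the encoding separately; a minimal sketch of the two cases follows.
    struct dc_link_settings cfg = { .lane_count = LANE_COUNT_FOUR, .link_rate = LINK_RATE_HIGH3 };
    enum dp_link_encoding enc = link_dp_get_encoding_format(&cfg);  /* DP_8b_10b_ENCODING */
    cfg.link_rate = LINK_RATE_UHBR10;
    enc = link_dp_get_encoding_format(&cfg);                        /* DP_128b_132b_ENCODING */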
Based on spec + * section 2.3.1.2, if AUX CH may be powered down due to + * write to DPCD 600h = 2. Sink AUX CH is monitoring differential + * signal and may need up to 1 ms before being able to reply. + */ + if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) { + udelay(1000); + aux_channel_retry_cnt++; + } + } + + if (status != DC_OK) { + dpcd_power_state = DP_SET_POWER_D0; + status = core_link_write_dpcd( + link, + DP_SET_POWER, + &dpcd_power_state, + sizeof(dpcd_power_state)); + + dpcd_power_state = DP_SET_POWER_D3; + status = core_link_write_dpcd( + link, + DP_SET_POWER, + &dpcd_power_state, + sizeof(dpcd_power_state)); + return DC_ERROR_UNEXPECTED; + } + + return DC_OK; +} + +static void get_active_converter_info( + uint8_t data, struct dc_link *link) +{ + union dp_downstream_port_present ds_port = { .byte = data }; + memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps)); + + /* decode converter info*/ + if (!ds_port.fields.PORT_PRESENT) { + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; + set_dongle_type(link->ddc, + link->dpcd_caps.dongle_type); + link->dpcd_caps.is_branch_dev = false; + return; + } + + /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */ + link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; + + switch (ds_port.fields.PORT_TYPE) { + case DOWNSTREAM_VGA: + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; + break; + case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS: + /* At this point we don't know is it DVI or HDMI or DP++, + * assume DVI.*/ + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; + break; + default: + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; + break; + } + + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) { + uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/ + union dwnstream_port_caps_byte0 *port_caps = + (union dwnstream_port_caps_byte0 *)det_caps; + if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0, + det_caps, sizeof(det_caps)) == DC_OK) { + + switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { + /*Handle DP case as DONGLE_NONE*/ + case DOWN_STREAM_DETAILED_DP: + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; + break; + case DOWN_STREAM_DETAILED_VGA: + link->dpcd_caps.dongle_type = + DISPLAY_DONGLE_DP_VGA_CONVERTER; + break; + case DOWN_STREAM_DETAILED_DVI: + link->dpcd_caps.dongle_type = + DISPLAY_DONGLE_DP_DVI_CONVERTER; + break; + case DOWN_STREAM_DETAILED_HDMI: + case DOWN_STREAM_DETAILED_DP_PLUS_PLUS: + /*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/ + link->dpcd_caps.dongle_type = + DISPLAY_DONGLE_DP_HDMI_CONVERTER; + + link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type; + if (ds_port.fields.DETAILED_CAPS) { + + union dwnstream_port_caps_byte3_hdmi + hdmi_caps = {.raw = det_caps[3] }; + union dwnstream_port_caps_byte2 + hdmi_color_caps = {.raw = det_caps[2] }; + link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz = + det_caps[1] * 2500; + + link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = + hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; + /*YCBCR capability only for HDMI case*/ + if (port_caps->bits.DWN_STRM_PORTX_TYPE + == DOWN_STREAM_DETAILED_HDMI) { + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = + hdmi_caps.bits.YCrCr422_PASS_THROUGH; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = + hdmi_caps.bits.YCrCr420_PASS_THROUGH; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = + 
hdmi_caps.bits.YCrCr422_CONVERSION; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = + hdmi_caps.bits.YCrCr420_CONVERSION; + } + + link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = + translate_dpcd_max_bpc( + hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); + + if (link->dc->caps.dp_hdmi21_pcon_support) { + union hdmi_encoded_link_bw hdmi_encoded_link_bw; + + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = + dc_link_bw_kbps_from_raw_frl_link_rate_data( + hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT); + + // Intersect reported max link bw support with the supported link rate post FRL link training + if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS, + &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) { + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support( + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps, + hdmi_encoded_link_bw); + } + + if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0) + link->dpcd_caps.dongle_caps.extendedCapValid = true; + } + + if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0) + link->dpcd_caps.dongle_caps.extendedCapValid = true; + } + + break; + } + } + } + + set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); + + { + struct dp_sink_hw_fw_revision dp_hw_fw_revision; + + core_link_read_dpcd( + link, + DP_BRANCH_REVISION_START, + (uint8_t *)&dp_hw_fw_revision, + sizeof(dp_hw_fw_revision)); + + link->dpcd_caps.branch_hw_revision = + dp_hw_fw_revision.ieee_hw_rev; + + memmove( + link->dpcd_caps.branch_fw_revision, + dp_hw_fw_revision.ieee_fw_rev, + sizeof(dp_hw_fw_revision.ieee_fw_rev)); + } + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && + link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { + union dp_dfp_cap_ext dfp_cap_ext; + memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext)); + core_link_read_dpcd( + link, + DP_DFP_CAPABILITY_EXTENSION_SUPPORT, + dfp_cap_ext.raw, + sizeof(dfp_cap_ext.raw)); + link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported; + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps = + dfp_cap_ext.fields.max_pixel_rate_in_mps[0] + + (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width = + dfp_cap_ext.fields.max_video_h_active_width[0] + + (dfp_cap_ext.fields.max_video_h_active_width[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height = + dfp_cap_ext.fields.max_video_v_active_height[0] + + (dfp_cap_ext.fields.max_video_v_active_height[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps = + dfp_cap_ext.fields.encoding_format_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps = + dfp_cap_ext.fields.rgb_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps = + dfp_cap_ext.fields.ycbcr444_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps = + dfp_cap_ext.fields.ycbcr422_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps = + dfp_cap_ext.fields.ycbcr420_color_depth_caps; + DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index); + DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? 
"true" : "false"); + DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps); + DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width); + DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height); + } +} + +static void apply_usbc_combo_phy_reset_wa(struct dc_link *link, + struct dc_link_settings *link_settings) +{ + /* Temporary Renoir-specific workaround PHY will sometimes be in bad + * state on hotplugging display from certain USB-C dongle, so add extra + * cycle of enabling and disabling the PHY before first link training. + */ + struct link_resource link_res = {0}; + enum clock_source_id dp_cs_id = get_clock_source_id(link); + + dp_enable_link_phy(link, &link_res, link->connector_signal, + dp_cs_id, link_settings); + dp_disable_link_phy(link, &link_res, link->connector_signal); +} + +static bool dp_overwrite_extended_receiver_cap(struct dc_link *link) +{ + uint8_t dpcd_data[16]; + uint32_t read_dpcd_retry_cnt = 3; + enum dc_status status = DC_ERROR_UNEXPECTED; + union dp_downstream_port_present ds_port = { 0 }; + union down_stream_port_count down_strm_port_count; + union edp_configuration_cap edp_config_cap; + + int i; + + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPCD_REV, + dpcd_data, + sizeof(dpcd_data)); + if (status == DC_OK) + break; + } + + link->dpcd_caps.dpcd_rev.raw = + dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; + + if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) + return false; + + ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - + DP_DPCD_REV]; + + get_active_converter_info(ds_port.byte, link); + + down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - + DP_DPCD_REV]; + + link->dpcd_caps.allow_invalid_MSA_timing_param = + down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; + + link->dpcd_caps.max_ln_count.raw = dpcd_data[ + DP_MAX_LANE_COUNT - DP_DPCD_REV]; + + link->dpcd_caps.max_down_spread.raw = dpcd_data[ + DP_MAX_DOWNSPREAD - DP_DPCD_REV]; + + link->reported_link_cap.lane_count = + link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; + link->reported_link_cap.link_rate = dpcd_data[ + DP_MAX_LINK_RATE - DP_DPCD_REV]; + link->reported_link_cap.link_spread = + link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; + + edp_config_cap.raw = dpcd_data[ + DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; + link->dpcd_caps.panel_mode_edp = + edp_config_cap.bits.ALT_SCRAMBLER_RESET; + link->dpcd_caps.dpcd_display_control_capable = + edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; + + return true; +} + +void dc_link_overwrite_extended_receiver_cap( + struct dc_link *link) +{ + dp_overwrite_extended_receiver_cap(link); +} + +void dpcd_set_source_specific_data(struct dc_link *link) +{ + if (!link->dc->vendor_signature.is_valid) { + enum dc_status result_write_min_hblank = DC_NOT_SUPPORTED; + struct dpcd_amd_signature amd_signature = {0}; + struct dpcd_amd_device_id amd_device_id = {0}; + + amd_device_id.device_id_byte1 = + (uint8_t)(link->ctx->asic_id.chip_id); + amd_device_id.device_id_byte2 = + (uint8_t)(link->ctx->asic_id.chip_id >> 8); + amd_device_id.dce_version = + (uint8_t)(link->ctx->dce_version); + amd_device_id.dal_version_byte1 = 0x0; // needed? where to get? + amd_device_id.dal_version_byte2 = 0x0; // needed? where to get? 
+ + core_link_read_dpcd(link, DP_SOURCE_OUI, + (uint8_t *)(&amd_signature), + sizeof(amd_signature)); + + if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) && + (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) && + (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) { + + amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0; + amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0; + amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A; + + core_link_write_dpcd(link, DP_SOURCE_OUI, + (uint8_t *)(&amd_signature), + sizeof(amd_signature)); + } + + core_link_write_dpcd(link, DP_SOURCE_OUI+0x03, + (uint8_t *)(&amd_device_id), + sizeof(amd_device_id)); + + if (link->ctx->dce_version >= DCN_VERSION_2_0 && + link->dc->caps.min_horizontal_blanking_period != 0) { + + uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period; + + result_write_min_hblank = core_link_write_dpcd(link, + DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size), + sizeof(hblank_size)); + } + DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, + WPP_BIT_FLAG_DC_DETECTION_DP_CAPS, + "result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'", + result_write_min_hblank, + link->link_index, + link->ctx->dce_version, + DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, + link->dc->caps.min_horizontal_blanking_period, + link->dpcd_caps.branch_dev_id, + link->dpcd_caps.branch_dev_name[0], + link->dpcd_caps.branch_dev_name[1], + link->dpcd_caps.branch_dev_name[2], + link->dpcd_caps.branch_dev_name[3], + link->dpcd_caps.branch_dev_name[4], + link->dpcd_caps.branch_dev_name[5]); + } else { + core_link_write_dpcd(link, DP_SOURCE_OUI, + link->dc->vendor_signature.data.raw, + sizeof(link->dc->vendor_signature.data.raw)); + } +} + +void dpcd_write_cable_id_to_dprx(struct dc_link *link) +{ + if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED || + link->dpcd_caps.cable_id.raw == 0 || + link->dprx_states.cable_id_written) + return; + + core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, + &link->dpcd_caps.cable_id.raw, + sizeof(link->dpcd_caps.cable_id.raw)); + + link->dprx_states.cable_id_written = 1; +} + +static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) +{ + union dmub_rb_cmd cmd; + + if (!link->ctx->dmub_srv || + link->ep_type != DISPLAY_ENDPOINT_PHY || + link->link_enc->features.flags.bits.DP_IS_USB_C == 0) + return false; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID; + cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); + cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( + link->dc, link->link_enc->transmitter); + if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) && + cmd.cable_id.header.ret_status == 1) { + cable_id->raw = cmd.cable_id.data.output_raw; + DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw); + } + return cmd.cable_id.header.ret_status == 1; +} + +static void retrieve_cable_id(struct dc_link *link) +{ + union dp_cable_id usbc_cable_id; + + link->dpcd_caps.cable_id.raw = 0; + core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, + &link->dpcd_caps.cable_id.raw, sizeof(uint8_t)); + + if (get_usbc_cable_id(link, &usbc_cable_id)) + link->dpcd_caps.cable_id = intersect_cable_id( + &link->dpcd_caps.cable_id, &usbc_cable_id); +} + +bool read_is_mst_supported(struct dc_link *link) +{ + bool mst = false; + enum dc_status st = DC_OK; + union dpcd_rev rev; + union mstm_cap cap; + + if 
(link->preferred_training_settings.mst_enable && + *link->preferred_training_settings.mst_enable == false) { + return false; + } + + rev.raw = 0; + cap.raw = 0; + + st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw, + sizeof(rev)); + + if (st == DC_OK && rev.raw >= DPCD_REV_12) { + + st = core_link_read_dpcd(link, DP_MSTM_CAP, + &cap.raw, sizeof(cap)); + if (st == DC_OK && cap.bits.MST_CAP == 1) + mst = true; + } + return mst; + +} + +/* Read additional sink caps defined in source specific DPCD area + * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP) + * TODO: Add FS caps and read from DP_SOURCE_SINK_FS_CAP as well + */ +static bool dpcd_read_sink_ext_caps(struct dc_link *link) +{ + uint8_t dpcd_data; + + if (!link) + return false; + + if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK) + return false; + + link->dpcd_sink_ext_caps.raw = dpcd_data; + return true; +} + +enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) +{ + uint8_t lttpr_dpcd_data[8]; + enum dc_status status; + bool is_lttpr_present; + + /* Logic to determine LTTPR support*/ + bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; + + if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support) + return DC_ERROR_UNEXPECTED; + + /* By reading LTTPR capability, RX assumes that we will enable + * LTTPR extended aux timeout if LTTPR is present. + */ + status = core_link_read_dpcd( + link, + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, + lttpr_dpcd_data, + sizeof(lttpr_dpcd_data)); + + link->dpcd_caps.lttpr_caps.revision.raw = + lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_link_rate = + lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.phy_repeater_cnt = + lttpr_dpcd_data[DP_PHY_REPEATER_CNT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_lane_count = + lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.mode = + lttpr_dpcd_data[DP_PHY_REPEATER_MODE - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_ext_timeout = + lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw = + lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw = + lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + /* If this chip cap is set, at least one retimer must exist in the chain + * Override count to 1 if we receive a known bad count (0 or an invalid value) */ + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) { + ASSERT(0); + link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80; + DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + } + + /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. 
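Illustrative aside, not part of the patch, and the encoding quoted here is taken from the DP LTTPR definition rather than from this file: DP_PHY_REPEATER_CNT is a one-hot count starting at 0x80 for one repeater and shifting right for each additional one, down to 0x01 for eight, which is why the workaround above forces 0x80 as "one repeater". A sketch of how dp_parse_lttpr_repeater_count() is expected to behave under that encoding:
    uint8_t count;
    count = dp_parse_lttpr_repeater_count(0x80);  /* 1 repeater */
    count = dp_parse_lttpr_repeater_count(0x40);  /* 2 repeaters */
    count = dp_parse_lttpr_repeater_count(0x55);  /* not a valid pattern, treated as 0 */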
*/ + is_lttpr_present = dp_is_lttpr_present(link); + + if (is_lttpr_present) + CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); + + DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present); + return status; +} + +static bool retrieve_link_cap(struct dc_link *link) +{ + /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, + * which means size 16 will be good for both of those DPCD register block reads + */ + uint8_t dpcd_data[16]; + /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. + */ + uint8_t dpcd_dprx_data = '\0'; + + struct dp_device_vendor_id sink_id; + union down_stream_port_count down_strm_port_count; + union edp_configuration_cap edp_config_cap; + union dp_downstream_port_present ds_port = { 0 }; + enum dc_status status = DC_ERROR_UNEXPECTED; + uint32_t read_dpcd_retry_cnt = 3; + int i; + struct dp_sink_hw_fw_revision dp_hw_fw_revision; + const uint32_t post_oui_delay = 30; // 30ms + + memset(dpcd_data, '\0', sizeof(dpcd_data)); + memset(&down_strm_port_count, + '\0', sizeof(union down_stream_port_count)); + memset(&edp_config_cap, '\0', + sizeof(union edp_configuration_cap)); + + /* if extended timeout is supported in hardware, + * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer + * CTS 4.2.1.1 regression introduced by CTS specs requirement update. + */ + try_to_configure_aux_timeout(link->ddc, + LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); + + status = dp_retrieve_lttpr_cap(link); + + if (status != DC_OK) { + status = wake_up_aux_channel(link); + if (status == DC_OK) + dp_retrieve_lttpr_cap(link); + else + return false; + } + + if (dp_is_lttpr_present(link)) + configure_lttpr_mode_transparent(link); + + /* Read DP tunneling information. */ + status = dpcd_get_tunneling_device_data(link); + + dpcd_set_source_specific_data(link); + /* Sink may need to configure internals based on vendor, so allow some + * time before proceeding with possibly vendor specific transactions + */ + msleep(post_oui_delay); + + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPCD_REV, + dpcd_data, + sizeof(dpcd_data)); + if (status == DC_OK) + break; + } + + + if (status != DC_OK) { + dm_error("%s: Read receiver caps dpcd data failed.\n", __func__); + return false; + } + + if (!dp_is_lttpr_present(link)) + try_to_configure_aux_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); + + + { + union training_aux_rd_interval aux_rd_interval; + + aux_rd_interval.raw = + dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; + + link->dpcd_caps.ext_receiver_cap_field_present = + aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1 ? 
true:false; + + if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) { + uint8_t ext_cap_data[16]; + + memset(ext_cap_data, '\0', sizeof(ext_cap_data)); + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DP13_DPCD_REV, + ext_cap_data, + sizeof(ext_cap_data)); + if (status == DC_OK) { + memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data)); + break; + } + } + if (status != DC_OK) + dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__); + } + } + + link->dpcd_caps.dpcd_rev.raw = + dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; + + if (link->dpcd_caps.ext_receiver_cap_field_present) { + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPRX_FEATURE_ENUMERATION_LIST, + &dpcd_dprx_data, + sizeof(dpcd_dprx_data)); + if (status == DC_OK) + break; + } + + link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data; + + if (status != DC_OK) + dm_error("%s: Read DPRX caps data failed.\n", __func__); + } + + else { + link->dpcd_caps.dprx_feature.raw = 0; + } + + + /* Error condition checking... + * It is impossible for Sink to report Max Lane Count = 0. + * It is possible for Sink to report Max Link Rate = 0, if it is + * an eDP device that is reporting specialized link rates in the + * SUPPORTED_LINK_RATE table. + */ + if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) + return false; + + ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - + DP_DPCD_REV]; + + read_dp_device_vendor_id(link); + + /* TODO - decouple raw mst capability from policy decision */ + link->dpcd_caps.is_mst_capable = read_is_mst_supported(link); + + get_active_converter_info(ds_port.byte, link); + + dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); + + down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - + DP_DPCD_REV]; + + link->dpcd_caps.allow_invalid_MSA_timing_param = + down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; + + link->dpcd_caps.max_ln_count.raw = dpcd_data[ + DP_MAX_LANE_COUNT - DP_DPCD_REV]; + + link->dpcd_caps.max_down_spread.raw = dpcd_data[ + DP_MAX_DOWNSPREAD - DP_DPCD_REV]; + + link->reported_link_cap.lane_count = + link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; + link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw( + dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]); + link->reported_link_cap.link_spread = + link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? 
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; + + edp_config_cap.raw = dpcd_data[ + DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; + link->dpcd_caps.panel_mode_edp = + edp_config_cap.bits.ALT_SCRAMBLER_RESET; + link->dpcd_caps.dpcd_display_control_capable = + edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; + link->dpcd_caps.channel_coding_cap.raw = + dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV]; + link->test_pattern_enabled = false; + link->compliance_test_state.raw = 0; + + /* read sink count */ + core_link_read_dpcd(link, + DP_SINK_COUNT, + &link->dpcd_caps.sink_count.raw, + sizeof(link->dpcd_caps.sink_count.raw)); + + /* read sink ieee oui */ + core_link_read_dpcd(link, + DP_SINK_OUI, + (uint8_t *)(&sink_id), + sizeof(sink_id)); + + link->dpcd_caps.sink_dev_id = + (sink_id.ieee_oui[0] << 16) + + (sink_id.ieee_oui[1] << 8) + + (sink_id.ieee_oui[2]); + + memmove( + link->dpcd_caps.sink_dev_id_str, + sink_id.ieee_device_id, + sizeof(sink_id.ieee_device_id)); + + core_link_read_dpcd( + link, + DP_SINK_HW_REVISION_START, + (uint8_t *)&dp_hw_fw_revision, + sizeof(dp_hw_fw_revision)); + + link->dpcd_caps.sink_hw_revision = + dp_hw_fw_revision.ieee_hw_rev; + + memmove( + link->dpcd_caps.sink_fw_revision, + dp_hw_fw_revision.ieee_fw_rev, + sizeof(dp_hw_fw_revision.ieee_fw_rev)); + + /* Quirk for Retina panels: wrong DP_MAX_LINK_RATE */ + { + uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 }; + uint8_t fwrev_mbp_2018[] = { 7, 4 }; + uint8_t fwrev_mbp_2018_vega[] = { 8, 4 }; + + /* We also check for the firmware revision as 16,1 models have an + * identical device id and are incorrectly quirked otherwise. + */ + if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && + !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018, + sizeof(str_mbp_2018)) && + (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018, + sizeof(fwrev_mbp_2018)) || + !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega, + sizeof(fwrev_mbp_2018_vega)))) { + link->reported_link_cap.link_rate = LINK_RATE_RBR2; + } + } + + memset(&link->dpcd_caps.dsc_caps, '\0', + sizeof(link->dpcd_caps.dsc_caps)); + memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); + /* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */ + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) { + status = core_link_read_dpcd( + link, + DP_FEC_CAPABILITY, + &link->dpcd_caps.fec_cap.raw, + sizeof(link->dpcd_caps.fec_cap.raw)); + status = core_link_read_dpcd( + link, + DP_DSC_SUPPORT, + link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, + sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); + if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { + status = core_link_read_dpcd( + link, + DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, + sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); + DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); + DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); + DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1); + DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH); + } + + /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode + * only if required. 
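Illustrative aside, not part of the patch: the sink IEEE OUI read above is packed big-endian into a single integer, which is the form the MacBook Pro 2018 quirk then compares against. A worked example with the OUI bytes that quirk looks for:
    uint8_t ieee_oui[3] = { 0x00, 0x10, 0xFA };
    uint32_t sink_dev_id = (ieee_oui[0] << 16) + (ieee_oui[1] << 8) + ieee_oui[2];
    /* sink_dev_id == 0x0010FA, the value tested together with the firmware
     * revision before clamping reported_link_cap.link_rate to LINK_RATE_RBR2 */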
+ */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around && + link->dpcd_caps.is_branch_dev && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 && + (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE || + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) { + /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA. + * Clear FEC and DSC capabilities as a work around if that is not the case. + */ + link->wa_flags.dpia_forced_tbt3_mode = true; + memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); + memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); + DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index); + } else + link->wa_flags.dpia_forced_tbt3_mode = false; + } + + if (!dpcd_read_sink_ext_caps(link)) + link->dpcd_sink_ext_caps.raw = 0; + + if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { + DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index); + + core_link_read_dpcd(link, + DP_128B132B_SUPPORTED_LINK_RATES, + &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw, + sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw)); + if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20) + link->reported_link_cap.link_rate = LINK_RATE_UHBR20; + else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5) + link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5; + else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10) + link->reported_link_cap.link_rate = LINK_RATE_UHBR10; + else + dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__); + DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index); + DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz", + link->reported_link_cap.link_rate / 100, + link->reported_link_cap.link_rate % 100); + + core_link_read_dpcd(link, + DP_SINK_VIDEO_FALLBACK_FORMATS, + &link->dpcd_caps.fallback_formats.raw, + sizeof(link->dpcd_caps.fallback_formats.raw)); + DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index); + if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support) + DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support) + DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support) + DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.raw == 0) { + DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported"); + link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1; + } + + core_link_read_dpcd(link, + DP_FEC_CAPABILITY_1, + &link->dpcd_caps.fec_cap1.raw, + sizeof(link->dpcd_caps.fec_cap1.raw)); + DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index); + if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE) + DC_LOG_DP2("\tFEC aggregated error counters are supported"); + } + + retrieve_cable_id(link); + dpcd_write_cable_id_to_dprx(link); + + /* Connectivity log: detection */ + CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); + + return true; +} + +bool detect_dp_sink_caps(struct dc_link *link) +{ + return 
retrieve_link_cap(link); +} + +void detect_edp_sink_caps(struct dc_link *link) +{ + uint8_t supported_link_rates[16]; + uint32_t entry; + uint32_t link_rate_in_khz; + enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; + uint8_t backlight_adj_cap; + uint8_t general_edp_cap; + + retrieve_link_cap(link); + link->dpcd_caps.edp_supported_link_rates_count = 0; + memset(supported_link_rates, 0, sizeof(supported_link_rates)); + + /* + * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. + * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && + (link->panel_config.ilr.optimize_edp_link_rate || + link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) { + // Read DPCD 00010h - 0001Fh 16 bytes at one shot + core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, + supported_link_rates, sizeof(supported_link_rates)); + + for (entry = 0; entry < 16; entry += 2) { + // DPCD register reports per-lane link rate = 16-bit link rate capability + // value X 200 kHz. Need multiplier to find link rate in kHz. + link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + + supported_link_rates[entry]) * 200; + + if (link_rate_in_khz != 0) { + link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz); + link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate; + link->dpcd_caps.edp_supported_link_rates_count++; + + if (link->reported_link_cap.link_rate < link_rate) + link->reported_link_cap.link_rate = link_rate; + } + } + } + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP, + &backlight_adj_cap, sizeof(backlight_adj_cap)); + + link->dpcd_caps.dynamic_backlight_capable_edp = + (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false; + + core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1, + &general_edp_cap, sizeof(general_edp_cap)); + + link->dpcd_caps.set_power_state_capable_edp = + (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false; + + dc_link_set_default_brightness_aux(link); + + core_link_read_dpcd(link, DP_EDP_DPCD_REV, + &link->dpcd_caps.edp_rev, + sizeof(link->dpcd_caps.edp_rev)); + /* + * PSR is only valid for eDP v1.3 or higher. + */ + if (link->dpcd_caps.edp_rev >= DP_EDP_13) { + core_link_read_dpcd(link, DP_PSR_SUPPORT, + &link->dpcd_caps.psr_info.psr_version, + sizeof(link->dpcd_caps.psr_info.psr_version)); + if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) + core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY, + &link->dpcd_caps.psr_info.force_psrsu_cap, + sizeof(link->dpcd_caps.psr_info.force_psrsu_cap)); + core_link_read_dpcd(link, DP_PSR_CAPS, + &link->dpcd_caps.psr_info.psr_dpcd_caps.raw, + sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw)); + if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) { + core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY, + &link->dpcd_caps.psr_info.psr2_su_y_granularity_cap, + sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)); + } + } + + /* + * ALPM is only valid for eDP v1.4 or higher. 
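Illustrative aside, a worked example rather than part of the patch: each entry of the eDP SUPPORTED_LINK_RATES table parsed above is a little-endian 16-bit value in units of 200 kHz per lane, so a table entry of 0xBC, 0x34 decodes to the 2.7 Gbps HBR rate:
    /* supported_link_rates[entry] == 0xBC, supported_link_rates[entry + 1] == 0x34 */
    uint32_t link_rate_in_khz = (0x34 * 0x100 + 0xBC) * 200;  /* 13500 * 200 = 2,700,000 kHz */
    /* 2,700,000 kHz per lane is 2.7 Gbps, i.e. the HBR link rate */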
+ */ + if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14) + core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP, + &link->dpcd_caps.alpm_caps.raw, + sizeof(link->dpcd_caps.alpm_caps.raw)); +} + +bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) +{ + struct link_encoder *link_enc = NULL; + + if (!max_link_enc_cap) { + DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__); + return false; + } + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + if (link_enc && link_enc->funcs->get_max_link_cap) { + link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap); + return true; + } + + DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__); + max_link_enc_cap->lane_count = 1; + max_link_enc_cap->link_rate = 6; + return false; +} + +const struct dc_link_settings *dc_link_get_link_cap( + const struct dc_link *link) +{ + if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN && + link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) + return &link->preferred_link_setting; + return &link->verified_link_cap; +} + +struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) +{ + struct dc_link_settings max_link_cap = {0}; + enum dc_link_rate lttpr_max_link_rate; + enum dc_link_rate cable_max_link_rate; + struct link_encoder *link_enc = NULL; + + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + /* get max link encoder capability */ + if (link_enc) + link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap); + + /* Lower link settings based on sink's link cap */ + if (link->reported_link_cap.lane_count < max_link_cap.lane_count) + max_link_cap.lane_count = + link->reported_link_cap.lane_count; + if (link->reported_link_cap.link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = + link->reported_link_cap.link_rate; + if (link->reported_link_cap.link_spread < + max_link_cap.link_spread) + max_link_cap.link_spread = + link->reported_link_cap.link_spread; + + /* Lower link settings based on cable attributes + * Cable ID is a DP2 feature to identify max certified link rate that + * a cable can carry. The cable identification method requires both + * cable and display hardware support. Since the specs comes late, it is + * anticipated that the first round of DP2 cables and displays may not + * be fully compatible to reliably return cable ID data. Therefore the + * decision of our cable id policy is that if the cable can return non + * zero cable id data, we will take cable's link rate capability into + * account. However if we get zero data, the cable link rate capability + * is considered inconclusive. In this case, we will not take cable's + * capability into account to avoid of over limiting hardware capability + * from users. The max overall link rate capability is still determined + * after actual dp pre-training. Cable id is considered as an auxiliary + * method of determining max link bandwidth capability. + */ + cable_max_link_rate = get_cable_max_link_rate(link); + + if (!link->dc->debug.ignore_cable_id && + cable_max_link_rate != LINK_RATE_UNKNOWN && + cable_max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = cable_max_link_rate; + + /* account for lttpr repeaters cap + * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). 
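Illustrative aside, not part of the patch: the max-link-cap derivation above is effectively a chain of clamps, starting from the link encoder's capability and lowering lane count and link rate by the sink's reported caps and, when the cable returns a usable ID, by the cable's rate. Conceptually, with encoder_cap and cable_rate as stand-in names for values the function derives:
    struct dc_link_settings cap = encoder_cap;               /* link_enc->funcs->get_max_link_cap() */
    cap.lane_count = min(cap.lane_count, link->reported_link_cap.lane_count);
    cap.link_rate = min(cap.link_rate, link->reported_link_cap.link_rate);
    if (!link->dc->debug.ignore_cable_id && cable_rate != LINK_RATE_UNKNOWN)
        cap.link_rate = min(cap.link_rate, cable_rate);       /* from get_cable_max_link_rate() */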
+ */ + if (dp_is_lttpr_present(link)) { + if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) + max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + lttpr_max_link_rate = get_lttpr_max_link_rate(link); + + if (lttpr_max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = lttpr_max_link_rate; + + DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n", + __func__, + max_link_cap.lane_count, + max_link_cap.link_rate); + } + + if (link_dp_get_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING && + link->dc->debug.disable_uhbr) + max_link_cap.link_rate = LINK_RATE_HIGH3; + + return max_link_cap; +} + +static bool dp_verify_link_cap( + struct dc_link *link, + struct dc_link_settings *known_limit_link_setting, + int *fail_count) +{ + struct dc_link_settings cur_link_settings = {0}; + struct dc_link_settings max_link_settings = *known_limit_link_setting; + bool success = false; + bool skip_video_pattern; + enum clock_source_id dp_cs_id = get_clock_source_id(link); + enum link_training_result status = LINK_TRAINING_SUCCESS; + union hpd_irq_data irq_data; + struct link_resource link_res; + + memset(&irq_data, 0, sizeof(irq_data)); + cur_link_settings = max_link_settings; + + /* Grant extended timeout request */ + if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) { + uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80; + + core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); + } + + do { + if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings)) + continue; + + skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW; + dp_enable_link_phy( + link, + &link_res, + link->connector_signal, + dp_cs_id, + &cur_link_settings); + + status = dp_perform_link_training( + link, + &link_res, + &cur_link_settings, + skip_video_pattern); + + if (status == LINK_TRAINING_SUCCESS) { + success = true; + udelay(1000); + if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK && + hpd_rx_irq_check_link_loss_status( + link, + &irq_data)) + (*fail_count)++; + + } else { + (*fail_count)++; + } + dp_trace_lt_total_count_increment(link, true); + dp_trace_lt_result_update(link, status, true); + dp_disable_link_phy(link, &link_res, link->connector_signal); + } while (!success && decide_fallback_link_setting(link, + &max_link_settings, &cur_link_settings, status)); + + link->verified_link_cap = success ? 
+ cur_link_settings : fail_safe_link_settings; + return success; +} + +bool dp_verify_link_cap_with_retries( + struct dc_link *link, + struct dc_link_settings *known_limit_link_setting, + int attempts) +{ + int i = 0; + bool success = false; + int fail_count = 0; + + dp_trace_detect_lt_init(link); + + if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && + link->dc->debug.usbc_combo_phy_reset_wa) + apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting); + + dp_trace_set_lt_start_timestamp(link, false); + for (i = 0; i < attempts; i++) { + enum dc_connection_type type = dc_connection_none; + + memset(&link->verified_link_cap, 0, + sizeof(struct dc_link_settings)); + if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) { + link->verified_link_cap = fail_safe_link_settings; + break; + } else if (dp_verify_link_cap(link, known_limit_link_setting, + &fail_count) && fail_count == 0) { + success = true; + break; + } + msleep(10); + } + + dp_trace_lt_fail_count_update(link, fail_count, true); + dp_trace_set_lt_end_timestamp(link, true); + + return success; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_capability.h new file mode 100644 index 000000000000..5500744d2e47 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_capability.h @@ -0,0 +1,66 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DP_CAPABILITY_H__ +#define __DC_LINK_DP_CAPABILITY_H__ + +#include "link.h" + +bool detect_dp_sink_caps(struct dc_link *link); + +void detect_edp_sink_caps(struct dc_link *link); + +struct dc_link_settings dp_get_max_link_cap(struct dc_link *link); + + +enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link); + +/* Convert PHY repeater count read from DPCD uint8_t. 
*/ +uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count); + +bool dp_is_lttpr_present(struct dc_link *link); + +bool is_dp_active_dongle(const struct dc_link *link); + +bool is_dp_branch_device(const struct dc_link *link); + +bool decide_edp_link_settings_with_dsc(struct dc_link *link, + struct dc_link_settings *link_setting, + uint32_t req_bw, + enum dc_link_rate max_link_rate); + +void dpcd_set_source_specific_data(struct dc_link *link); + +/*query dpcd for version and mst cap addresses*/ +bool read_is_mst_supported(struct dc_link *link); + +bool decide_fallback_link_setting( + struct dc_link *link, + struct dc_link_settings *max, + struct dc_link_settings *cur, + enum link_training_result training_result); + + +#endif /* __DC_LINK_DP_CAPABILITY_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia.c new file mode 100644 index 000000000000..6136db392548 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dc.h" +#include "inc/core_status.h" +#include "dc_link.h" +#include "dc_link_dp.h" +#include "dpcd_defs.h" + +#include "link_dp_dpia.h" +#include "link_hwss.h" +#include "dm_helpers.h" +#include "dmub/inc/dmub_cmd.h" +#include "link_dpcd.h" +#include "link_dp_training.h" +#include "dc_dmub_srv.h" + +#define DC_LOGGER \ + link->ctx->logger + +/** @note Can remove once DP tunneling registers in upstream include/drm/drm_dp_helper.h */ +/* DPCD DP Tunneling over USB4 */ +#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d +#define DP_IN_ADAPTER_INFO 0xe000e +#define DP_USB4_DRIVER_ID 0xe000f +#define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b + +enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) +{ + enum dc_status status = DC_OK; + uint8_t dpcd_dp_tun_data[3] = {0}; + uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0}; + uint8_t i = 0; + + status = core_link_read_dpcd( + link, + DP_TUNNELING_CAPABILITIES_SUPPORT, + dpcd_dp_tun_data, + sizeof(dpcd_dp_tun_data)); + + status = core_link_read_dpcd( + link, + DP_USB4_ROUTER_TOPOLOGY_ID, + dpcd_topology_data, + sizeof(dpcd_topology_data)); + + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = + dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; + link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = + dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; + link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = + dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT]; + + for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) + link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; + + return status; +} + +bool dc_link_dpia_query_hpd_status(struct dc_link *link) +{ + union dmub_rb_cmd cmd = {0}; + struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv; + bool is_hpd_high = false; + + /* prepare QUERY_HPD command */ + cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; + cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; + cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; + + /* Return HPD status reported by DMUB if query successfully executed. */ + if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) + is_hpd_high = cmd.query_hpd.data.result; + + DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", + __func__, + link->link_index, + link->link_id.enum_id - ENUM_ID_1, + cmd.query_hpd.data.status, + cmd.query_hpd.data.result); + + return is_hpd_high; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia.h new file mode 100644 index 000000000000..98935cc10bb7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DPIA_H__ +#define __DC_LINK_DPIA_H__ + +#include "link.h" + +/* Read tunneling device capability from DPCD and update link capability + * accordingly. + */ +enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link); + +/* Query hot plug status of USB4 DP tunnel. + * Returns true if HPD high. + */ +bool dc_link_dpia_query_hpd_status(struct dc_link *link); + + +#endif /* __DC_LINK_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h index 669e995f825f..58eb7b581093 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h @@ -26,13 +26,13 @@ #ifndef DC_INC_LINK_DP_DPIA_BW_H_ #define DC_INC_LINK_DP_DPIA_BW_H_ -// XXX: TODO: Re-add for Phase 2 -/* Number of Host Routers per motherboard is 2 and 2 DPIA per host router */ -#define MAX_HR_NUM 2 - -struct dc_host_router_bw_alloc { - int max_bw[MAX_HR_NUM]; // The Max BW that each Host Router has available to be shared btw DPIAs - int total_estimated_bw[MAX_HR_NUM]; // The Total Verified and available BW that Host Router has +/* + * Host Router BW type + */ +enum bw_type { + HOST_ROUTER_BW_ESTIMATED, + HOST_ROUTER_BW_ALLOCATED, + HOST_ROUTER_BW_INVALID, }; /* @@ -61,9 +61,40 @@ void set_usb4_req_bw_req(struct dc_link *link, int req_bw); * find out the result of allocating on CM and update structs accordingly * * @link: pointer to the dc_link struct instance + * @bw: Allocated or Estimated BW depending on the result + * @result: Response type + * + * return: none + */ +void get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result); + +/* + * Return the response_ready flag from dc_link struct + * + * @link: pointer to the dc_link struct instance + * + * return: response_ready flag from dc_link struct + */ +bool get_cm_response_ready_flag(struct dc_link *link); + +/* + * Get the Max Available BW or Max Estimated BW for each Host Router + * + * @link: pointer to the dc_link struct instance + * @type: HOST_ROUTER_BW_ESTIMATED or HOST_ROUTER_BW_ALLOCATED (enum bw_type) + * + * return: total Host Router BW for the requested type + */ +int get_host_router_total_bw(struct dc_link *link, uint8_t type); + +/* + * Cleanup function for when the dpia is unplugged to reset struct + * and perform any required clean up + * + * @link: pointer to the dc_link struct instance + * * return: none */ -void get_usb4_req_bw_resp(struct dc_link *link); +bool dpia_bw_alloc_unplug(struct dc_link *link); #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/link_dp_phy.c new file mode 100644 index 000000000000..afe3b21335c2 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_phy.c @@ -0,0 +1,145 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements basic dp phy functionality such as enable/disable phy + * output and set lane/drive settings. This file is responsible for maintaining + * and update software state representing current phy status such as current + * link settings. + */ + +#include "link_dp_phy.h" +#include "link_dpcd.h" +#include "link_dp_training.h" +#include "link_dp_capability.h" +#include "clk_mgr.h" +#include "resource.h" +#include "dc_link_dp.h" + +#define DC_LOGGER \ + link->ctx->logger + +void dc_link_dp_set_drive_settings( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + /* program ASIC PHY settings*/ + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, + lt_settings->dpcd_lane_settings); + + /* Notify DP sink the PHY settings from source */ + dpcd_set_lane_settings(link, lt_settings, DPRX); +} + +void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on) +{ + uint8_t state; + + state = on ? 
DP_POWER_STATE_D0 : DP_POWER_STATE_D3; + + if (link->sync_lt_in_progress) + return; + + core_link_write_dpcd(link, DP_SET_POWER, &state, + sizeof(state)); + +} + +void dp_enable_link_phy( + struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + link->cur_link_settings = *link_settings; + link->dc->hwss.enable_dp_link_output(link, link_res, signal, + clock_source, link_settings); + dc_link_dp_receiver_power_ctrl(link, true); +} + +void dp_disable_link_phy(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct dc *dc = link->ctx->dc; + + if (!link->wa_flags.dp_keep_receiver_powered) + dc_link_dp_receiver_power_ctrl(link, false); + + dc->hwss.disable_link_output(link, link_res, signal); + /* Clear current link setting.*/ + memset(&link->cur_link_settings, 0, + sizeof(link->cur_link_settings)); + + if (dc->clk_mgr->funcs->notify_link_rate_change) + dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); +} + +void dp_disable_link_phy_mst(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + /* MST disable link only when no stream use the link */ + if (link->mst_stream_alloc_table.stream_count > 0) + return; + + dp_disable_link_phy(link, link_res, signal); + + /* set the sink to SST mode after disabling the link */ + dp_enable_mst_on_sink(link, false); +} + +static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) +{ + return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == + offset); +} + +void dp_set_hw_lane_settings( + struct dc_link *link, + const struct link_resource *link_res, + const struct link_training_settings *link_settings, + uint32_t offset) +{ + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + + if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && + !is_immediate_downstream(link, offset)) + return; + + if (link_hwss->ext.set_dp_lane_settings) + link_hwss->ext.set_dp_lane_settings(link, link_res, + &link_settings->link_settings, + link_settings->hw_lane_settings); + + memmove(link->cur_lane_setting, + link_settings->hw_lane_settings, + sizeof(link->cur_lane_setting)); +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_phy.h new file mode 100644 index 000000000000..717e078fd564 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_phy.h @@ -0,0 +1,51 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DP_PHY_H__ +#define __DC_LINK_DP_PHY_H__ + +#include "link.h" +void dp_enable_link_phy( + struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings); + +void dp_disable_link_phy(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); + +void dp_disable_link_phy_mst(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); + +void dp_set_hw_lane_settings( + struct dc_link *link, + const struct link_resource *link_res, + const struct link_training_settings *link_settings, + uint32_t offset); + +#endif /* __DC_LINK_DP_PHY_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/link_dp_training.c new file mode 100644 index 000000000000..e49e0258a1bd --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_training.c @@ -0,0 +1,1700 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements all generic dp link training helper functions and top + * level generic training sequence. All variations of dp link training sequence + * should be called inside the top level training functions in this file to + * ensure the integrity of our overall training procedure across different types + * of link encoding and back end hardware. 
+ */ +#include "link_dp_training.h" +#include "link_dp_training_8b_10b.h" +#include "link_dp_training_128b_132b.h" +#include "link_dp_training_auxless.h" +#include "link_dp_training_dpia.h" +#include "link_dp_training_fixed_vs_pe_retimer.h" +#include "link_dpcd.h" +#include "link_dp_trace.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" +#include "dc_link_dp.h" +#include "atomfirmware.h" +#include "link_enc_cfg.h" +#include "resource.h" +#include "dm_helpers.h" + +#define DC_LOGGER \ + link->ctx->logger + +#define POST_LT_ADJ_REQ_LIMIT 6 +#define POST_LT_ADJ_REQ_TIMEOUT 200 + +void dp_log_training_result( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum link_training_result status) +{ + char *link_rate = "Unknown"; + char *lt_result = "Unknown"; + char *lt_spread = "Disabled"; + + switch (lt_settings->link_settings.link_rate) { + case LINK_RATE_LOW: + link_rate = "RBR"; + break; + case LINK_RATE_RATE_2: + link_rate = "R2"; + break; + case LINK_RATE_RATE_3: + link_rate = "R3"; + break; + case LINK_RATE_HIGH: + link_rate = "HBR"; + break; + case LINK_RATE_RBR2: + link_rate = "RBR2"; + break; + case LINK_RATE_RATE_6: + link_rate = "R6"; + break; + case LINK_RATE_HIGH2: + link_rate = "HBR2"; + break; + case LINK_RATE_HIGH3: + link_rate = "HBR3"; + break; + case LINK_RATE_UHBR10: + link_rate = "UHBR10"; + break; + case LINK_RATE_UHBR13_5: + link_rate = "UHBR13.5"; + break; + case LINK_RATE_UHBR20: + link_rate = "UHBR20"; + break; + default: + break; + } + + switch (status) { + case LINK_TRAINING_SUCCESS: + lt_result = "pass"; + break; + case LINK_TRAINING_CR_FAIL_LANE0: + lt_result = "CR failed lane0"; + break; + case LINK_TRAINING_CR_FAIL_LANE1: + lt_result = "CR failed lane1"; + break; + case LINK_TRAINING_CR_FAIL_LANE23: + lt_result = "CR failed lane23"; + break; + case LINK_TRAINING_EQ_FAIL_CR: + lt_result = "CR failed in EQ"; + break; + case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: + lt_result = "CR failed in EQ partially"; + break; + case LINK_TRAINING_EQ_FAIL_EQ: + lt_result = "EQ failed"; + break; + case LINK_TRAINING_LQA_FAIL: + lt_result = "LQA failed"; + break; + case LINK_TRAINING_LINK_LOSS: + lt_result = "Link loss"; + break; + case DP_128b_132b_LT_FAILED: + lt_result = "LT_FAILED received"; + break; + case DP_128b_132b_MAX_LOOP_COUNT_REACHED: + lt_result = "max loop count reached"; + break; + case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT: + lt_result = "channel EQ timeout"; + break; + case DP_128b_132b_CDS_DONE_TIMEOUT: + lt_result = "CDS timeout"; + break; + default: + break; + } + + switch (lt_settings->link_settings.link_spread) { + case LINK_SPREAD_DISABLED: + lt_spread = "Disabled"; + break; + case LINK_SPREAD_05_DOWNSPREAD_30KHZ: + lt_spread = "0.5% 30KHz"; + break; + case LINK_SPREAD_05_DOWNSPREAD_33KHZ: + lt_spread = "0.5% 33KHz"; + break; + default: + break; + } + + /* Connectivity log: link training */ + + /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */ + + CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s", + link_rate, + lt_settings->link_settings.lane_count, + lt_result, + lt_settings->hw_lane_settings[0].VOLTAGE_SWING, + lt_settings->hw_lane_settings[0].PRE_EMPHASIS, + lt_spread); +} + +uint8_t dp_initialize_scrambling_data_symbols( + struct dc_link *link, + enum dc_dp_training_pattern pattern) +{ + uint8_t disable_scrabled_data_symbols = 0; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + case DP_TRAINING_PATTERN_SEQUENCE_2: + case DP_TRAINING_PATTERN_SEQUENCE_3: + disable_scrabled_data_symbols = 1; + 
break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + case DP_128b_132b_TPS1: + case DP_128b_132b_TPS2: + disable_scrabled_data_symbols = 0; + break; + default: + ASSERT(0); + DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", + __func__, pattern); + break; + } + return disable_scrabled_data_symbols; +} + +enum dpcd_training_patterns + dp_training_pattern_to_dpcd_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern pattern) +{ + enum dpcd_training_patterns dpcd_tr_pattern = + DPCD_TRAINING_PATTERN_VIDEOIDLE; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_2: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2; + break; + case DP_TRAINING_PATTERN_SEQUENCE_3: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; + break; + case DP_128b_132b_TPS1: + dpcd_tr_pattern = DPCD_128b_132b_TPS1; + break; + case DP_128b_132b_TPS2: + dpcd_tr_pattern = DPCD_128b_132b_TPS2; + break; + case DP_128b_132b_TPS2_CDS: + dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS; + break; + case DP_TRAINING_PATTERN_VIDEOIDLE: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; + break; + default: + ASSERT(0); + DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", + __func__, pattern); + break; + } + + return dpcd_tr_pattern; +} + +static uint8_t get_nibble_at_index(const uint8_t *buf, + uint32_t index) +{ + uint8_t nibble; + nibble = buf[index / 2]; + + if (index % 2) + nibble >>= 4; + else + nibble &= 0x0F; + + return nibble; +} + +void dp_wait_for_training_aux_rd_interval( + struct dc_link *link, + uint32_t wait_in_micro_secs) +{ + if (wait_in_micro_secs > 1000) + msleep(wait_in_micro_secs/1000); + else + udelay(wait_in_micro_secs); + + DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", + __func__, + wait_in_micro_secs); +} + +/* maximum pre emphasis level allowed for each voltage swing level*/ +static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = { + PRE_EMPHASIS_LEVEL3, + PRE_EMPHASIS_LEVEL2, + PRE_EMPHASIS_LEVEL1, + PRE_EMPHASIS_DISABLED }; + +static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing( + enum dc_voltage_swing voltage) +{ + enum dc_pre_emphasis pre_emphasis; + pre_emphasis = PRE_EMPHASIS_MAX_LEVEL; + + if (voltage <= VOLTAGE_SWING_MAX_LEVEL) + pre_emphasis = voltage_swing_to_pre_emphasis[voltage]; + + return pre_emphasis; + +} + +static void maximize_lane_settings(const struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + uint32_t lane; + struct dc_lane_settings max_requested; + + max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING; + max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS; + max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET; + + /* Determine what the maximum of the requested settings are*/ + for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) { + if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING) + max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING; + + if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS) + max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS; + if (lane_settings[lane].FFE_PRESET.settings.level > + max_requested.FFE_PRESET.settings.level) + max_requested.FFE_PRESET.settings.level = + lane_settings[lane].FFE_PRESET.settings.level; + } + + /* make sure the requested settings are + * not higher than 
maximum settings*/ + if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL) + max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL; + + if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL) + max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL; + if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL) + max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL; + + /* make sure the pre-emphasis matches the voltage swing*/ + if (max_requested.PRE_EMPHASIS > + get_max_pre_emphasis_for_voltage_swing( + max_requested.VOLTAGE_SWING)) + max_requested.PRE_EMPHASIS = + get_max_pre_emphasis_for_voltage_swing( + max_requested.VOLTAGE_SWING); + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING; + lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS; + lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET; + } +} + +void dp_hw_to_dpcd_lane_settings( + const struct link_training_settings *lt_settings, + const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) +{ + uint8_t lane = 0; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING) { + dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = + (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING); + dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET = + (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS); + dpcd_lane_settings[lane].bits.MAX_SWING_REACHED = + (hw_lane_settings[lane].VOLTAGE_SWING == + VOLTAGE_SWING_MAX_LEVEL ? 1 : 0); + dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED = + (hw_lane_settings[lane].PRE_EMPHASIS == + PRE_EMPHASIS_MAX_LEVEL ? 
1 : 0); + } else if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_128b_132b_ENCODING) { + dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE = + hw_lane_settings[lane].FFE_PRESET.settings.level; + } + } +} + +uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) +{ + uint8_t link_rate = 0; + enum dp_link_encoding encoding = link_dp_get_encoding_format(link_settings); + + if (encoding == DP_128b_132b_ENCODING) + switch (link_settings->link_rate) { + case LINK_RATE_UHBR10: + link_rate = 0x1; + break; + case LINK_RATE_UHBR20: + link_rate = 0x2; + break; + case LINK_RATE_UHBR13_5: + link_rate = 0x4; + break; + default: + link_rate = 0; + break; + } + else if (encoding == DP_8b_10b_ENCODING) + link_rate = (uint8_t) link_settings->link_rate; + else + link_rate = 0; + + return link_rate; +} + +/* Only used for channel equalization */ +uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval) +{ + unsigned int aux_rd_interval_us = 400; + + switch (dpcd_aux_read_interval) { + case 0x01: + aux_rd_interval_us = 4000; + break; + case 0x02: + aux_rd_interval_us = 8000; + break; + case 0x03: + aux_rd_interval_us = 12000; + break; + case 0x04: + aux_rd_interval_us = 16000; + break; + case 0x05: + aux_rd_interval_us = 32000; + break; + case 0x06: + aux_rd_interval_us = 64000; + break; + default: + break; + } + + return aux_rd_interval_us; +} + +enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + enum link_training_result result = LINK_TRAINING_SUCCESS; + + if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0) + result = LINK_TRAINING_CR_FAIL_LANE0; + else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0) + result = LINK_TRAINING_CR_FAIL_LANE1; + else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0) + result = LINK_TRAINING_CR_FAIL_LANE23; + else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0) + result = LINK_TRAINING_CR_FAIL_LANE23; + return result; +} + +bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset) +{ + return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0); +} + +bool dp_is_max_vs_reached( + const struct link_training_settings *lt_settings) +{ + uint32_t lane; + for (lane = 0; lane < + (uint32_t)(lt_settings->link_settings.lane_count); + lane++) { + if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET + == VOLTAGE_SWING_MAX_LEVEL) + return true; + } + return false; + +} + +bool dp_is_cr_done(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + bool done = true; + uint32_t lane; + /*LANEx_CR_DONE bits All 1's?*/ + for (lane = 0; lane < (uint32_t)(ln_count); lane++) { + if (!dpcd_lane_status[lane].bits.CR_DONE_0) + done = false; + } + return done; + +} + +bool dp_is_ch_eq_done(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + bool done = true; + uint32_t lane; + for (lane = 0; lane < (uint32_t)(ln_count); lane++) + if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0) + done = false; + return done; +} + +bool dp_is_symbol_locked(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + bool locked = true; + uint32_t lane; + for (lane = 0; lane < (uint32_t)(ln_count); lane++) + if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0) + locked = false; + return locked; +} + +bool dp_is_interlane_aligned(union lane_align_status_updated align_status) +{ + return 
align_status.bits.INTERLANE_ALIGN_DONE == 1; +} + +enum link_training_result dp_check_link_loss_status( + struct dc_link *link, + const struct link_training_settings *link_training_setting) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + union lane_status lane_status; + uint8_t dpcd_buf[6] = {0}; + uint32_t lane; + + core_link_read_dpcd( + link, + DP_SINK_COUNT, + (uint8_t *)(dpcd_buf), + sizeof(dpcd_buf)); + + /*parse lane status*/ + for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { + /* + * check lanes status + */ + lane_status.raw = get_nibble_at_index(&dpcd_buf[2], lane); + + if (!lane_status.bits.CHANNEL_EQ_DONE_0 || + !lane_status.bits.CR_DONE_0 || + !lane_status.bits.SYMBOL_LOCKED_0) { + /* if one of the channel equalization, clock + * recovery or symbol lock is dropped + * consider it as (link has been + * dropped) dp sink status has changed + */ + status = LINK_TRAINING_LINK_LOSS; + break; + } + } + + return status; +} + +enum dc_status dp_get_lane_status_and_lane_adjust( + struct dc_link *link, + const struct link_training_settings *link_training_setting, + union lane_status ln_status[LANE_COUNT_DP_MAX], + union lane_align_status_updated *ln_align, + union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + uint32_t offset) +{ + unsigned int lane01_status_address = DP_LANE0_1_STATUS; + uint8_t lane_adjust_offset = 4; + unsigned int lane01_adjust_address; + uint8_t dpcd_buf[6] = {0}; + uint32_t lane; + enum dc_status status; + + if (is_repeater(link_training_setting, offset)) { + lane01_status_address = + DP_LANE0_1_STATUS_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + lane_adjust_offset = 3; + } + + status = core_link_read_dpcd( + link, + lane01_status_address, + (uint8_t *)(dpcd_buf), + sizeof(dpcd_buf)); + + if (status != DC_OK) { + DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X," + " keep current lane status and lane adjust unchanged", + __func__, + lane01_status_address); + return status; + } + + for (lane = 0; lane < + (uint32_t)(link_training_setting->link_settings.lane_count); + lane++) { + + ln_status[lane].raw = + get_nibble_at_index(&dpcd_buf[0], lane); + ln_adjust[lane].raw = + get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); + } + + ln_align->raw = dpcd_buf[2]; + + if (is_repeater(link_training_setting, offset)) { + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", + __func__, + offset, + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, dpcd_buf[1]); + + lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", + __func__, + offset, + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); + } else { + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", + __func__, + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, dpcd_buf[1]); + + lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; + + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", + __func__, + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); + } + + return status; +} + +static void override_lane_settings(const struct 
link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + uint32_t lane; + + if (lt_settings->voltage_swing == NULL && + lt_settings->pre_emphasis == NULL && + lt_settings->ffe_preset == NULL && + lt_settings->post_cursor2 == NULL) + + return; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + if (lt_settings->voltage_swing) + lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; + if (lt_settings->pre_emphasis) + lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis; + if (lt_settings->post_cursor2) + lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2; + if (lt_settings->ffe_preset) + lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset; + } +} + +void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override) +{ + if (!dp_is_lttpr_present(link)) + return; + + if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) { + *override = LTTPR_MODE_TRANSPARENT; + } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) { + *override = LTTPR_MODE_NON_TRANSPARENT; + } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) { + *override = LTTPR_MODE_NON_LTTPR; + } + DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override)); +} + +void override_training_settings( + struct dc_link *link, + const struct dc_link_training_overrides *overrides, + struct link_training_settings *lt_settings) +{ + uint32_t lane; + + /* Override link spread */ + if (!link->dp_ss_off && overrides->downspread != NULL) + lt_settings->link_settings.link_spread = *overrides->downspread ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ + : LINK_SPREAD_DISABLED; + + /* Override lane settings */ + if (overrides->voltage_swing != NULL) + lt_settings->voltage_swing = overrides->voltage_swing; + if (overrides->pre_emphasis != NULL) + lt_settings->pre_emphasis = overrides->pre_emphasis; + if (overrides->post_cursor2 != NULL) + lt_settings->post_cursor2 = overrides->post_cursor2; + if (overrides->ffe_preset != NULL) + lt_settings->ffe_preset = overrides->ffe_preset; + /* Override HW lane settings with BIOS forced values if present */ + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING; + lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS; + lt_settings->always_match_dpcd_with_hw_lane_settings = false; + } + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = + lt_settings->voltage_swing != NULL ? + *lt_settings->voltage_swing : + VOLTAGE_SWING_LEVEL0; + lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = + lt_settings->pre_emphasis != NULL ? + *lt_settings->pre_emphasis + : PRE_EMPHASIS_DISABLED; + lt_settings->hw_lane_settings[lane].POST_CURSOR2 = + lt_settings->post_cursor2 != NULL ? 
+ *lt_settings->post_cursor2 + : POST_CURSOR2_DISABLED; + } + + if (lt_settings->always_match_dpcd_with_hw_lane_settings) + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + + /* Override training timings */ + if (overrides->cr_pattern_time != NULL) + lt_settings->cr_pattern_time = *overrides->cr_pattern_time; + if (overrides->eq_pattern_time != NULL) + lt_settings->eq_pattern_time = *overrides->eq_pattern_time; + if (overrides->pattern_for_cr != NULL) + lt_settings->pattern_for_cr = *overrides->pattern_for_cr; + if (overrides->pattern_for_eq != NULL) + lt_settings->pattern_for_eq = *overrides->pattern_for_eq; + if (overrides->enhanced_framing != NULL) + lt_settings->enhanced_framing = *overrides->enhanced_framing; + if (link->preferred_training_settings.fec_enable != NULL) + lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable; + +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* Check DP tunnel LTTPR mode debug option. */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr) + lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR; + +#endif + dp_get_lttpr_mode_override(link, <_settings->lttpr_mode); + +} + +enum dc_dp_training_pattern decide_cr_training_pattern( + const struct dc_link_settings *link_settings) +{ + switch (link_dp_get_encoding_format(link_settings)) { + case DP_8b_10b_ENCODING: + default: + return DP_TRAINING_PATTERN_SEQUENCE_1; + case DP_128b_132b_ENCODING: + return DP_128b_132b_TPS1; + } +} + +enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, + const struct dc_link_settings *link_settings) +{ + struct link_encoder *link_enc; + struct encoder_feature_support *enc_caps; + struct dpcd_caps *rx_caps = &link->dpcd_caps; + enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + enc_caps = &link_enc->features; + + switch (link_dp_get_encoding_format(link_settings)) { + case DP_8b_10b_ENCODING: + if (enc_caps->flags.bits.IS_TPS4_CAPABLE && + rx_caps->max_down_spread.bits.TPS4_SUPPORTED) + pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + else if (enc_caps->flags.bits.IS_TPS3_CAPABLE && + rx_caps->max_ln_count.bits.TPS3_SUPPORTED) + pattern = DP_TRAINING_PATTERN_SEQUENCE_3; + else + pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + break; + case DP_128b_132b_ENCODING: + pattern = DP_128b_132b_TPS2; + break; + default: + pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + break; + } + return pattern; +} + +enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link, + struct dc_link_settings *link_setting) +{ + enum dp_link_encoding encoding = link_dp_get_encoding_format(link_setting); + + if (encoding == DP_8b_10b_ENCODING) + return dp_decide_8b_10b_lttpr_mode(link); + else if (encoding == DP_128b_132b_ENCODING) + return dp_decide_128b_132b_lttpr_mode(link); + + ASSERT(0); + return LTTPR_MODE_NON_LTTPR; +} + +void dp_decide_lane_settings( + const struct link_training_settings *lt_settings, + const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) +{ + uint32_t lane; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING) { + hw_lane_settings[lane].VOLTAGE_SWING = + (enum dc_voltage_swing)(ln_adjust[lane].bits. 
+ VOLTAGE_SWING_LANE); + hw_lane_settings[lane].PRE_EMPHASIS = + (enum dc_pre_emphasis)(ln_adjust[lane].bits. + PRE_EMPHASIS_LANE); + } else if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_128b_132b_ENCODING) { + hw_lane_settings[lane].FFE_PRESET.raw = + ln_adjust[lane].tx_ffe.PRESET_VALUE; + } + } + dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); + + if (lt_settings->disallow_per_lane_settings) { + /* we find the maximum of the requested settings across all lanes*/ + /* and set this maximum for all lanes*/ + maximize_lane_settings(lt_settings, hw_lane_settings); + override_lane_settings(lt_settings, hw_lane_settings); + + if (lt_settings->always_match_dpcd_with_hw_lane_settings) + dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); + } + +} + +void dp_decide_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings) +{ + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) + decide_8b_10b_training_settings(link, link_settings, lt_settings); + else if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) + decide_128b_132b_training_settings(link, link_settings, lt_settings); +} + + +enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) +{ + uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); + return core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); +} + +static enum dc_status configure_lttpr_mode_non_transparent( + struct dc_link *link, + const struct link_training_settings *lt_settings) +{ + /* aux timeout is already set to extended */ + /* RESET/SET lttpr mode to enable non transparent mode */ + uint8_t repeater_cnt; + uint32_t aux_interval_address; + uint8_t repeater_id; + enum dc_status result = DC_ERROR_UNEXPECTED; + uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + + enum dp_link_encoding encoding = link_dp_get_encoding_format(<_settings->link_settings); + + if (encoding == DP_8b_10b_ENCODING) { + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); + result = core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + + } + + if (result == DC_OK) { + link->dpcd_caps.lttpr_caps.mode = repeater_mode; + } + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); + + repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; + result = core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + + if (result == DC_OK) { + link->dpcd_caps.lttpr_caps.mode = repeater_mode; + } + + if (encoding == DP_8b_10b_ENCODING) { + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + /* Driver does not need to train the first hop. Skip DPCD read and clear + * AUX_RD_INTERVAL for DPTX-to-DPIA hop. 
+ */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0; + + for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { + aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); + core_link_read_dpcd( + link, + aux_interval_address, + (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], + sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); + link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; + } + } + } + + return result; +} + +enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings) +{ + enum dc_status status = DC_OK; + + if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) + status = configure_lttpr_mode_transparent(link); + + else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + status = configure_lttpr_mode_non_transparent(link, lt_settings); + + return status; +} + +void repeater_training_done(struct dc_link *link, uint32_t offset) +{ + union dpcd_training_pattern dpcd_pattern = {0}; + + const uint32_t dpcd_base_lt_offset = + DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + /* Set training not in progress*/ + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; + + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + &dpcd_pattern.raw, + 1); + + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +} + +static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding) +{ + uint8_t sink_status = 0; + uint8_t i; + + /* clear training pattern set */ + dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + + if (encoding == DP_128b_132b_ENCODING) { + /* poll for intra-hop disable */ + for (i = 0; i < 10; i++) { + if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && + (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) + break; + udelay(1000); + } + } +} + +enum dc_status dpcd_configure_channel_coding(struct dc_link *link, + struct link_training_settings *lt_settings) +{ + enum dp_link_encoding encoding = + link_dp_get_encoding_format( + <_settings->link_settings); + enum dc_status status; + + status = core_link_write_dpcd( + link, + DP_MAIN_LINK_CHANNEL_CODING_SET, + (uint8_t *) &encoding, + 1); + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n", + __func__, + DP_MAIN_LINK_CHANNEL_CODING_SET, + encoding); + + return status; +} + +void dpcd_set_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern training_pattern) +{ + union dpcd_training_pattern dpcd_pattern = {0}; + + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = + dp_training_pattern_to_dpcd_training_pattern( + link, training_pattern); + + core_link_write_dpcd( + link, + DP_TRAINING_PATTERN_SET, + &dpcd_pattern.raw, + 1); + + DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n", + __func__, + DP_TRAINING_PATTERN_SET, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +} + +enum dc_status dpcd_set_link_settings( + struct dc_link *link, + const struct link_training_settings *lt_settings) +{ + uint8_t rate; + enum dc_status status; + + union down_spread_ctrl downspread = {0}; + union lane_count_set lane_count_set = {0}; + + downspread.raw = (uint8_t) + (lt_settings->link_settings.link_spread); + + 
lane_count_set.bits.LANE_COUNT_SET = + lt_settings->link_settings.lane_count; + + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = + link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; + } + + status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, + &downspread.raw, sizeof(downspread)); + + status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, + &lane_count_set.raw, 1); + + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && + lt_settings->link_settings.use_link_rate_set == true) { + rate = 0; + /* WA for some MUX chips that will power down with eDP and lose supported + * link rate set for eDP 1.4. Source reads DPCD 0x010 again to ensure + * MUX chip gets link rate set back before link training. + */ + if (link->connector_signal == SIGNAL_TYPE_EDP) { + uint8_t supported_link_rates[16]; + + core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, + supported_link_rates, sizeof(supported_link_rates)); + } + status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + status = core_link_write_dpcd(link, DP_LINK_RATE_SET, + <_settings->link_settings.link_rate_set, 1); + } else { + rate = get_dpcd_link_rate(<_settings->link_settings); + + status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + } + + if (rate) { + DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", + __func__, + DP_LINK_BW_SET, + lt_settings->link_settings.link_rate, + DP_LANE_COUNT_SET, + lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, + DP_DOWNSPREAD_CTRL, + lt_settings->link_settings.link_spread); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n", + __func__, + DP_LINK_RATE_SET, + lt_settings->link_settings.link_rate_set, + DP_LANE_COUNT_SET, + lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, + DP_DOWNSPREAD_CTRL, + lt_settings->link_settings.link_spread); + } + + return status; +} + +enum dc_status dpcd_set_lane_settings( + struct dc_link *link, + const struct link_training_settings *link_training_setting, + uint32_t offset) +{ + unsigned int lane0_set_address; + enum dc_status status; + lane0_set_address = DP_TRAINING_LANE0_SET; + + if (is_repeater(link_training_setting, offset)) + lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + status = core_link_write_dpcd(link, + lane0_set_address, + (uint8_t *)(link_training_setting->dpcd_lane_settings), + link_training_setting->link_settings.lane_count); + + if (is_repeater(link_training_setting, offset)) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" + " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + offset, + lane0_set_address, + link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + lane0_set_address, + link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + 
link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + } + + return status; +} + +void dpcd_set_lt_pattern_and_lane_settings( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum dc_dp_training_pattern pattern, + uint32_t offset) +{ + uint32_t dpcd_base_lt_offset; + uint8_t dpcd_lt_buffer[5] = {0}; + union dpcd_training_pattern dpcd_pattern = {0}; + uint32_t size_in_bytes; + bool edp_workaround = false; /* TODO link_prop.INTERNAL */ + dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; + + if (is_repeater(lt_settings, offset)) + dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + /***************************************************************** + * DpcdAddress_TrainingPatternSet + *****************************************************************/ + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = + dp_training_pattern_to_dpcd_training_pattern(link, pattern); + + dpcd_pattern.v1_4.SCRAMBLING_DISABLE = + dp_initialize_scrambling_data_symbols(link, pattern); + + dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] + = dpcd_pattern.raw; + + if (is_repeater(lt_settings, offset)) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", + __func__, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } + + /* concatenate everything into one buffer*/ + size_in_bytes = lt_settings->link_settings.lane_count * + sizeof(lt_settings->dpcd_lane_settings[0]); + + // 0x00103 - 0x00102 + memmove( + &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], + lt_settings->dpcd_lane_settings, + size_in_bytes); + + if (is_repeater(lt_settings, offset)) { + if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_128b_132b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + else if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + } else { + if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_128b_132b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + else if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + 
lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + } + if (edp_workaround) { + /* for eDP write in 2 parts because the 5-byte burst is + * causing issues on some eDP panels (EPR#366724) + */ + core_link_write_dpcd( + link, + DP_TRAINING_PATTERN_SET, + &dpcd_pattern.raw, + sizeof(dpcd_pattern.raw)); + + core_link_write_dpcd( + link, + DP_TRAINING_LANE0_SET, + (uint8_t *)(lt_settings->dpcd_lane_settings), + size_in_bytes); + + } else if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_128b_132b_ENCODING) { + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + dpcd_lt_buffer, + sizeof(dpcd_lt_buffer)); + } else + /* write it all in (1 + number-of-lanes)-byte burst*/ + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + dpcd_lt_buffer, + size_in_bytes + sizeof(dpcd_pattern.raw)); +} + +void start_clock_recovery_pattern_early(struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset) +{ + DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n", + __func__); + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); + dp_set_hw_lane_settings(link, link_res, lt_settings, offset); + udelay(400); +} + +void dp_set_hw_test_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dp_test_pattern test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size) +{ + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + struct encoder_set_dp_phy_pattern_param pattern_param = {0}; + + pattern_param.dp_phy_pattern = test_pattern; + pattern_param.custom_pattern = custom_pattern; + pattern_param.custom_pattern_size = custom_pattern_size; + pattern_param.dp_panel_mode = dp_get_panel_mode(link); + + if (link_hwss->ext.set_dp_link_test_pattern) + link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param); +} + +bool dp_set_hw_training_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dc_dp_training_pattern pattern, + uint32_t offset) +{ + enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_2: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2; + break; + case DP_TRAINING_PATTERN_SEQUENCE_3: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; + break; + case DP_128b_132b_TPS1: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; + break; + case DP_128b_132b_TPS2: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; + break; + default: + break; + } + + dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0); + + return true; +} + +static bool perform_post_lt_adj_req_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum dc_lane_count lane_count = + lt_settings->link_settings.lane_count; + + uint32_t adj_req_count; + uint32_t adj_req_timer; + bool req_drv_setting_changed; + uint32_t lane; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + req_drv_setting_changed = false; + for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT; + adj_req_count++) { + + req_drv_setting_changed = 
false; + + for (adj_req_timer = 0; + adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT; + adj_req_timer++) { + + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + DPRX); + + if (dpcd_lane_status_updated.bits. + POST_LT_ADJ_REQ_IN_PROGRESS == 0) + return true; + + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) + return false; + + if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) || + !dp_is_symbol_locked(lane_count, dpcd_lane_status) || + !dp_is_interlane_aligned(dpcd_lane_status_updated)) + return false; + + for (lane = 0; lane < (uint32_t)(lane_count); lane++) { + + if (lt_settings-> + dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET != + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE || + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET != + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) { + + req_drv_setting_changed = true; + break; + } + } + + if (req_drv_setting_changed) { + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + + dc_link_dp_set_drive_settings(link, + link_res, + lt_settings); + break; + } + + msleep(1); + } + + if (!req_drv_setting_changed) { + DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n", + __func__); + + ASSERT(0); + return true; + } + } + DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n", + __func__); + + ASSERT(0); + return true; + +} + +static enum link_training_result dp_transition_to_video_idle( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + enum link_training_result status) +{ + union lane_count_set lane_count_set = {0}; + + /* 4. mainlink output idle pattern*/ + dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + + /* + * 5. post training adjust if required + * If the upstream DPTX and downstream DPRX both support TPS4, + * TPS4 must be used instead of POST_LT_ADJ_REQ. + */ + if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || + lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) { + /* delay 5ms after Main Link output idle pattern and then check + * DPCD 0202h. 
+ */ + if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) { + msleep(5); + status = dp_check_link_loss_status(link, lt_settings); + } + return status; + } + + if (status == LINK_TRAINING_SUCCESS && + perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false) + status = LINK_TRAINING_LQA_FAIL; + + lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + core_link_write_dpcd( + link, + DP_LANE_COUNT_SET, + &lane_count_set.raw, + sizeof(lane_count_set)); + + return status; +} + +enum link_training_result dp_perform_link_training( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + bool skip_video_pattern) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + struct link_training_settings lt_settings = {0}; + enum dp_link_encoding encoding = + link_dp_get_encoding_format(link_settings); + + /* decide training settings */ + dp_decide_training_settings( + link, + link_settings, + <_settings); + + override_training_settings( + link, + &link->preferred_training_settings, + <_settings); + + /* reset previous training states */ + dpcd_exit_training_mode(link, encoding); + + /* configure link prior to entering training mode */ + dpcd_configure_lttpr_mode(link, <_settings); + dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready); + dpcd_configure_channel_coding(link, <_settings); + + /* enter training mode: + * Per DP specs starting from here, DPTX device shall not issue + * Non-LT AUX transactions inside training mode. + */ + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING) + status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, <_settings); + else if (encoding == DP_8b_10b_ENCODING) + status = dp_perform_8b_10b_link_training(link, link_res, <_settings); + else if (encoding == DP_128b_132b_ENCODING) + status = dp_perform_128b_132b_link_training(link, link_res, <_settings); + else + ASSERT(0); + + /* exit training mode */ + dpcd_exit_training_mode(link, encoding); + + /* switch to video idle */ + if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) + status = dp_transition_to_video_idle(link, + link_res, + <_settings, + status); + + /* dump debug data */ + dp_log_training_result(link, <_settings, status); + if (status != LINK_TRAINING_SUCCESS) + link->ctx->dc->debug_data.ltFailCount++; + return status; +} + +bool perform_link_training_with_retries( + const struct dc_link_settings *link_setting, + bool skip_video_pattern, + int attempts, + struct pipe_ctx *pipe_ctx, + enum signal_type signal, + bool do_fallback) +{ + int j; + uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + enum dp_panel_mode panel_mode = dp_get_panel_mode(link); + enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; + struct dc_link_settings cur_link_settings = *link_setting; + struct dc_link_settings max_link_settings = *link_setting; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + int fail_count = 0; + bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */ + bool is_link_bw_min = /* RBR x 1 */ + (cur_link_settings.link_rate <= LINK_RATE_LOW) && + (cur_link_settings.lane_count <= LANE_COUNT_ONE); + + dp_trace_commit_lt_init(link); + + 
+ if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) + /* We need to do this before the link training to ensure the idle + * pattern in SST mode will be sent right after the link training + */ + link_hwss->setup_stream_encoder(pipe_ctx); + + dp_trace_set_lt_start_timestamp(link, false); + j = 0; + while (j < attempts && fail_count < (attempts * 10)) { + + DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x lane(%d)\n", + __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate, + cur_link_settings.lane_count); + + dp_enable_link_phy( + link, + &pipe_ctx->link_res, + signal, + pipe_ctx->clock_source->id, + &cur_link_settings); + + if (stream->sink_patches.dppowerup_delay > 0) { + int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; + + msleep(delay_dp_power_up_in_ms); + } + +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (panel_mode == DP_PANEL_MODE_EDP) { + struct cp_psp *cp_psp = &stream->ctx->cp_psp; + + if (cp_psp && cp_psp->funcs.enable_assr) { + /* ASSR is bound to fail with unsigned PSP + * verstage used during devlopment phase. + * Report and continue with eDP panel mode to + * perform eDP link training with right settings + */ + bool result; + result = cp_psp->funcs.enable_assr(cp_psp->handle, link); + } + } +#endif + + dp_set_panel_mode(link, panel_mode); + + if (link->aux_access_disabled) { + dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings); + return true; + } else { + /** @todo Consolidate USB4 DP and DPx.x training. */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + status = dc_link_dpia_perform_link_training( + link, + &pipe_ctx->link_res, + &cur_link_settings, + skip_video_pattern); + + /* Transmit idle pattern once training successful. */ + if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) { + dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + // Update verified link settings to current one + // Because DPIA LT might fallback to lower link setting. + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + link->verified_link_cap.link_rate = link->cur_link_settings.link_rate; + link->verified_link_cap.lane_count = link->cur_link_settings.lane_count; + dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link); + } + } + } else { + status = dp_perform_link_training( + link, + &pipe_ctx->link_res, + &cur_link_settings, + skip_video_pattern); + } + + dp_trace_lt_total_count_increment(link, false); + dp_trace_lt_result_update(link, status, false); + dp_trace_set_lt_end_timestamp(link, false); + if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) + return true; + } + + fail_count++; + dp_trace_lt_fail_count_update(link, fail_count, false); + if (link->ep_type == DISPLAY_ENDPOINT_PHY) { + /* latest link training still fail or link training is aborted + * skip delay and keep PHY on + */ + if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT)) + break; + } + + DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) : fail reason:(%d)\n", + __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate, + cur_link_settings.lane_count, status); + + dp_disable_link_phy(link, &pipe_ctx->link_res, signal); + + /* Abort link training if failure due to sink being unplugged. 
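+		 * The unplug case is confirmed below by re-running sink detection
+		 * and only breaking out of the retry loop when it reports
+		 * dc_connection_none.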
*/ + if (status == LINK_TRAINING_ABORT) { + enum dc_connection_type type = dc_connection_none; + + dc_link_detect_sink(link, &type); + if (type == dc_connection_none) { + DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__); + break; + } + } + + /* Try to train again at original settings if: + * - not falling back between training attempts; + * - aborted previous attempt due to reasons other than sink unplug; + * - successfully trained but at a link rate lower than that required by stream; + * - reached minimum link bandwidth. + */ + if (!do_fallback || (status == LINK_TRAINING_ABORT) || + (status == LINK_TRAINING_SUCCESS && is_link_bw_low) || + is_link_bw_min) { + j++; + cur_link_settings = *link_setting; + delay_between_attempts += LINK_TRAINING_RETRY_DELAY; + is_link_bw_low = false; + is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) && + (cur_link_settings.lane_count <= LANE_COUNT_ONE); + + } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */ + uint32_t req_bw; + uint32_t link_bw; + + decide_fallback_link_setting(link, &max_link_settings, + &cur_link_settings, status); + /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to + * minimum link bandwidth. + */ + req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); + link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings); + is_link_bw_low = (req_bw > link_bw); + is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && + (cur_link_settings.lane_count <= LANE_COUNT_ONE)); + + if (is_link_bw_low) + DC_LOG_WARNING( + "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", + __func__, link->link_index, req_bw, link_bw); + } + + msleep(delay_between_attempts); + } + + return false; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_training.h new file mode 100644 index 000000000000..376d370e3bbb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_training.h @@ -0,0 +1,179 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_H__ +#define __DC_LINK_DP_TRAINING_H__ +#include "link.h" + +bool perform_link_training_with_retries( + const struct dc_link_settings *link_setting, + bool skip_video_pattern, + int attempts, + struct pipe_ctx *pipe_ctx, + enum signal_type signal, + bool do_fallback); + +enum link_training_result dp_perform_link_training( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + bool skip_video_pattern); + +bool dp_set_hw_training_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dc_dp_training_pattern pattern, + uint32_t offset); + +void dp_set_hw_test_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dp_test_pattern test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size); + +void dpcd_set_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern training_pattern); + +/* Write DPCD drive settings. */ +enum dc_status dpcd_set_lane_settings( + struct dc_link *link, + const struct link_training_settings *link_training_setting, + uint32_t offset); + +/* Write DPCD link configuration data. */ +enum dc_status dpcd_set_link_settings( + struct dc_link *link, + const struct link_training_settings *lt_settings); + +void dpcd_set_lt_pattern_and_lane_settings( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum dc_dp_training_pattern pattern, + uint32_t offset); + +/* Read training status and adjustment requests from DPCD. */ +enum dc_status dp_get_lane_status_and_lane_adjust( + struct dc_link *link, + const struct link_training_settings *link_training_setting, + union lane_status ln_status[LANE_COUNT_DP_MAX], + union lane_align_status_updated *ln_align, + union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + uint32_t offset); + +enum dc_status dpcd_configure_lttpr_mode( + struct dc_link *link, + struct link_training_settings *lt_settings); + +enum dc_status configure_lttpr_mode_transparent(struct dc_link *link); + +enum dc_status dpcd_configure_channel_coding( + struct dc_link *link, + struct link_training_settings *lt_settings); + +void repeater_training_done(struct dc_link *link, uint32_t offset); + +void start_clock_recovery_pattern_early(struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset); + +void dp_decide_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings); + +void dp_decide_lane_settings( + const struct link_training_settings *lt_settings, + const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]); + +enum dc_dp_training_pattern decide_cr_training_pattern( + const struct dc_link_settings *link_settings); + +enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, + const struct dc_link_settings *link_settings); + +void dp_get_lttpr_mode_override(struct dc_link *link, + enum lttpr_mode *override); + +void override_training_settings( + struct dc_link *link, + const struct dc_link_training_overrides *overrides, + struct link_training_settings *lt_settings); + +/* Check DPCD training status registers to detect link loss. 
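+ * Typically implemented by re-reading the DPCD sink count / lane status
+ * block and reporting link loss when CR_DONE, CHANNEL_EQ_DONE or
+ * SYMBOL_LOCKED has dropped on any active lane.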
*/ +enum link_training_result dp_check_link_loss_status( + struct dc_link *link, + const struct link_training_settings *link_training_setting); + +bool dp_is_cr_done(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status); + +bool dp_is_ch_eq_done(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status); +bool dp_is_symbol_locked(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status); +bool dp_is_interlane_aligned(union lane_align_status_updated align_status); + +bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset); + +bool dp_is_max_vs_reached( + const struct link_training_settings *lt_settings); + +uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings); + +enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status); + +void dp_hw_to_dpcd_lane_settings( + const struct link_training_settings *lt_settings, + const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]); + +void dp_wait_for_training_aux_rd_interval( + struct dc_link *link, + uint32_t wait_in_micro_secs); + +enum dpcd_training_patterns + dp_training_pattern_to_dpcd_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern pattern); + +uint8_t dp_initialize_scrambling_data_symbols( + struct dc_link *link, + enum dc_dp_training_pattern pattern); + +void dp_log_training_result( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum link_training_result status); + +uint32_t dp_translate_training_aux_read_interval( + uint32_t dpcd_aux_read_interval); +#endif /* __DC_LINK_DP_TRAINING_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_128b_132b.c new file mode 100644 index 000000000000..bfabebed5868 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_128b_132b.c @@ -0,0 +1,260 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements dp 128b/132b link training software policies and + * sequences. 
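+ *
+ * The sequence runs in two phases: channel equalization (128b/132b TPS1 to
+ * start, then TPS2 with per-lane TX_FFE_PRESET adjustments, polled at the
+ * DPCD-reported AUX read interval) followed by CDS using TPS2_CDS. As an
+ * example of the interval math in dpcd_128b_132b_get_aux_rd_interval(),
+ * a readback of VALUE = 3 with UNIT = 1 (1 ms units) yields
+ * (3 + 1) * 1 * 1000 = 4000 us between status polls.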
+ */ +#include "link_dp_training_128b_132b.h" +#include "link_dp_training_8b_10b.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" +#include "dc_link_dp.h" + +#define DC_LOGGER \ + link->ctx->logger + +static enum dc_status dpcd_128b_132b_set_lane_settings( + struct dc_link *link, + const struct link_training_settings *link_training_setting) +{ + enum dc_status status = core_link_write_dpcd(link, + DP_TRAINING_LANE0_SET, + (uint8_t *)(link_training_setting->dpcd_lane_settings), + sizeof(link_training_setting->dpcd_lane_settings)); + + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + DP_TRAINING_LANE0_SET, + link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + return status; +} + +static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link, + uint32_t *interval_in_us) +{ + union dp_128b_132b_training_aux_rd_interval dpcd_interval; + uint32_t interval_unit = 0; + + dpcd_interval.raw = 0; + core_link_read_dpcd(link, DP_128B132B_TRAINING_AUX_RD_INTERVAL, + &dpcd_interval.raw, sizeof(dpcd_interval.raw)); + interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */ + /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) * + * INTERVAL_UNIT. The maximum is 256 ms + */ + *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000; +} + +static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + uint8_t loop_count; + uint32_t aux_rd_interval = 0; + uint32_t wait_time = 0; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + enum dc_status status = DC_OK; + enum link_training_result result = LINK_TRAINING_SUCCESS; + + /* Transmit 128b/132b_TPS1 over Main-Link */ + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX); + + /* Set TRAINING_PATTERN_SET to 01h */ + dpcd_set_training_pattern(link, lt_settings->pattern_for_cr); + + /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */ + dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); + dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX); + + /* Set loop counter to start from 1 */ + loop_count = 1; + + /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */ + dpcd_set_lt_pattern_and_lane_settings(link, lt_settings, + lt_settings->pattern_for_eq, DPRX); + + /* poll for channel EQ done */ + while (result == LINK_TRAINING_SUCCESS) { + dp_wait_for_training_aux_rd_interval(link, aux_rd_interval); + wait_time += aux_rd_interval; + status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + } else if 
(dp_is_ch_eq_done(lt_settings->link_settings.lane_count, + dpcd_lane_status)) { + /* pass */ + break; + } else if (loop_count >= lt_settings->eq_loop_count_limit) { + result = DP_128b_132b_MAX_LOOP_COUNT_REACHED; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + result = DP_128b_132b_LT_FAILED; + } else { + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + dpcd_128b_132b_set_lane_settings(link, lt_settings); + } + loop_count++; + } + + /* poll for EQ interlane align done */ + while (result == LINK_TRAINING_SUCCESS) { + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + } else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) { + /* pass */ + break; + } else if (wait_time >= lt_settings->eq_wait_time_limit) { + result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + result = DP_128b_132b_LT_FAILED; + } else { + dp_wait_for_training_aux_rd_interval(link, + lt_settings->eq_pattern_time); + wait_time += lt_settings->eq_pattern_time; + status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + } + } + + return result; +} + +static enum link_training_result dp_perform_128b_132b_cds_done_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + /* Assumption: assume hardware has transmitted eq pattern */ + enum dc_status status = DC_OK; + enum link_training_result result = LINK_TRAINING_SUCCESS; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + uint32_t wait_time = 0; + + /* initiate CDS done sequence */ + dpcd_set_training_pattern(link, lt_settings->pattern_for_cds); + + /* poll for CDS interlane align done and symbol lock */ + while (result == LINK_TRAINING_SUCCESS) { + dp_wait_for_training_aux_rd_interval(link, + lt_settings->cds_pattern_time); + wait_time += lt_settings->cds_pattern_time; + status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + } else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) && + dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) { + /* pass */ + break; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + result = DP_128b_132b_LT_FAILED; + } else if (wait_time >= lt_settings->cds_wait_time_limit) { + result = DP_128b_132b_CDS_DONE_TIMEOUT; + } + } + + return result; +} + +enum link_training_result dp_perform_128b_132b_link_training( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum link_training_result result = LINK_TRAINING_SUCCESS; + + /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */ + if (link->dc->debug.legacy_dp2_lt) { + struct link_training_settings legacy_settings; + + decide_8b_10b_training_settings(link, + <_settings->link_settings, + &legacy_settings); + return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings); + } + + dpcd_set_link_settings(link, lt_settings); + + if (result == LINK_TRAINING_SUCCESS) + result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings); + + if (result == LINK_TRAINING_SUCCESS) + result = 
dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings); + + return result; +} + +void decide_128b_132b_training_settings(struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings) +{ + memset(lt_settings, 0, sizeof(*lt_settings)); + + lt_settings->link_settings = *link_settings; + /* TODO: should decide link spread when populating link_settings */ + lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED : + LINK_SPREAD_05_DOWNSPREAD_30KHZ; + + lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings); + lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings); + lt_settings->eq_pattern_time = 2500; + lt_settings->eq_wait_time_limit = 400000; + lt_settings->eq_loop_count_limit = 20; + lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS; + lt_settings->cds_pattern_time = 2500; + lt_settings->cds_wait_time_limit = (dp_parse_lttpr_repeater_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000; + lt_settings->disallow_per_lane_settings = true; + lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link); + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +} + +enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link) +{ + enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR; + + if (dp_is_lttpr_present(link)) + mode = LTTPR_MODE_NON_TRANSPARENT; + + DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode); + return mode; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_training_128b_132b.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_128b_132b.h new file mode 100644 index 000000000000..2147f24efc8b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_128b_132b.h @@ -0,0 +1,42 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_128B_132B_H__ +#define __DC_LINK_DP_TRAINING_128B_132B_H__ +#include "link_dp_training.h" + +enum link_training_result dp_perform_128b_132b_link_training( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings); + +void decide_128b_132b_training_settings(struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings); + +enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link); + +#endif /* __DC_LINK_DP_TRAINING_128B_132B_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_8b_10b.c new file mode 100644 index 000000000000..ec8b619d51c5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_8b_10b.c @@ -0,0 +1,415 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements dp 8b/10b link training software policies and + * sequences. 
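+ *
+ * Training runs clock recovery followed by channel equalization, repeated
+ * per LTTPR hop in non-transparent mode and finally against DPRX. The CR
+ * poll interval comes from DP_TRAINING_AUX_RD_INTERVAL (value * 4 ms, or
+ * 100 us when the field reads zero), e.g. a readback of 2 means 8000 us
+ * between status reads.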
+ */ +#include "link_dp_training_8b_10b.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" +#include "dc_link_dp.h" + +#define DC_LOGGER \ + link->ctx->logger + +static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, + const struct dc_link_settings *link_settings) +{ + union training_aux_rd_interval training_rd_interval; + uint32_t wait_in_micro_secs = 100; + + memset(&training_rd_interval, 0, sizeof(training_rd_interval)); + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && + link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) + wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + } + return wait_in_micro_secs; +} + +static uint32_t get_eq_training_aux_rd_interval( + struct dc_link *link, + const struct dc_link_settings *link_settings) +{ + union training_aux_rd_interval training_rd_interval; + + memset(&training_rd_interval, 0, sizeof(training_rd_interval)); + if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + core_link_read_dpcd( + link, + DP_128B132B_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && + link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + } + + switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) { + case 0: return 400; + case 1: return 4000; + case 2: return 8000; + case 3: return 12000; + case 4: return 16000; + case 5: return 32000; + case 6: return 64000; + default: return 400; + } +} + +void decide_8b_10b_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_setting, + struct link_training_settings *lt_settings) +{ + memset(lt_settings, '\0', sizeof(struct link_training_settings)); + + /* Initialize link settings */ + lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set; + lt_settings->link_settings.link_rate_set = link_setting->link_rate_set; + lt_settings->link_settings.link_rate = link_setting->link_rate; + lt_settings->link_settings.lane_count = link_setting->lane_count; + /* TODO hard coded to SS for now + * lt_settings.link_settings.link_spread = + * dal_display_path_is_ss_supported( + * path_mode->display_path) ? + * LINK_SPREAD_05_DOWNSPREAD_30KHZ : + * LINK_SPREAD_DISABLED; + */ + lt_settings->link_settings.link_spread = link->dp_ss_off ? 
+ LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ; + lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting); + lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting); + lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting); + lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); + lt_settings->enhanced_framing = 1; + lt_settings->should_set_fec_ready = true; + lt_settings->disallow_per_lane_settings = true; + lt_settings->always_match_dpcd_with_hw_lane_settings = true; + lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link); + dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +} + +enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link) +{ + bool is_lttpr_present = dp_is_lttpr_present(link); + bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable; + bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware; + + if (!is_lttpr_present) + return LTTPR_MODE_NON_LTTPR; + + if (vbios_lttpr_aware) { + if (vbios_lttpr_force_non_transparent) { + DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); + return LTTPR_MODE_NON_TRANSPARENT; + } else { + DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); + return LTTPR_MODE_TRANSPARENT; + } + } + + if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A && + link->dc->caps.extended_aux_timeout_support) { + DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n"); + return LTTPR_MODE_NON_TRANSPARENT; + } + + DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n"); + return LTTPR_MODE_NON_LTTPR; +} + +enum link_training_result perform_8b_10b_clock_recovery_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset) +{ + uint32_t retries_cr; + uint32_t retry_count; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; + union lane_align_status_updated dpcd_lane_status_updated; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + retries_cr = 0; + retry_count = 0; + + memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); + memset(&dpcd_lane_status_updated, '\0', + sizeof(dpcd_lane_status_updated)); + + if (!link->ctx->dc->work_arounds.lt_early_cr_pattern) + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); + + /* najeeb - The synaptics MST hub can put the LT in + * infinite loop by switching the VS + */ + /* between level 0 and level 1 continuously, here + * we try for CR lock for LinkTrainingMaxCRRetry count*/ + while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + + + /* 1. call HWSS to set lane settings*/ + dp_set_hw_lane_settings( + link, + link_res, + lt_settings, + offset); + + /* 2. update DPCD of the receiver*/ + if (!retry_count) + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration.*/ + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + lt_settings->pattern_for_cr, + offset); + else + dpcd_set_lane_settings( + link, + lt_settings, + offset); + + /* 3. 
wait receiver to lock-on*/ + wait_time_microsec = lt_settings->cr_pattern_time; + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. Read lane status and requested drive + * settings as set by the sink + */ + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + offset); + + /* 5. check CR done*/ + if (dp_is_cr_done(lane_count, dpcd_lane_status)) + return LINK_TRAINING_SUCCESS; + + /* 6. max VS reached*/ + if ((link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING) && + dp_is_max_vs_reached(lt_settings)) + break; + + /* 7. same lane settings*/ + /* Note: settings are the same for all lanes, + * so comparing first lane is sufficient*/ + if ((link_dp_get_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) && + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + retries_cr++; + else if ((link_dp_get_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) && + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE == + dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE) + retries_cr++; + else + retries_cr = 0; + + /* 8. update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + retry_count++; + } + + if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { + ASSERT(0); + DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue", + __func__, + LINK_TRAINING_MAX_CR_RETRY); + + } + + return dp_get_cr_failure(lane_count, dpcd_lane_status); +} + +enum link_training_result perform_8b_10b_channel_equalization_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset) +{ + enum dc_dp_training_pattern tr_pattern; + uint32_t retries_ch_eq; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + /* Note: also check that TPS4 is a supported feature*/ + tr_pattern = lt_settings->pattern_for_eq; + + if (is_repeater(lt_settings, offset) && link_dp_get_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) + tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + + dp_set_hw_training_pattern(link, link_res, tr_pattern, offset); + + for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; + retries_ch_eq++) { + + dp_set_hw_lane_settings(link, link_res, lt_settings, offset); + + /* 2. update DPCD*/ + if (!retries_ch_eq) + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration + */ + + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + tr_pattern, offset); + else + dpcd_set_lane_settings(link, lt_settings, offset); + + /* 3. wait for receiver to lock-on*/ + wait_time_microsec = lt_settings->eq_pattern_time; + + if (is_repeater(lt_settings, offset)) + wait_time_microsec = + dp_translate_training_aux_read_interval( + link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. 
Read lane status and requested + * drive settings as set by the sink*/ + + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + offset); + + /* 5. check CR done*/ + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) + return dpcd_lane_status[0].bits.CR_DONE_0 ? + LINK_TRAINING_EQ_FAIL_CR_PARTIAL : + LINK_TRAINING_EQ_FAIL_CR; + + /* 6. check CHEQ done*/ + if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && + dp_is_symbol_locked(lane_count, dpcd_lane_status) && + dp_is_interlane_aligned(dpcd_lane_status_updated)) + return LINK_TRAINING_SUCCESS; + + /* 7. update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + } + + return LINK_TRAINING_EQ_FAIL_EQ; + +} + +enum link_training_result dp_perform_8b_10b_link_training( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + + uint8_t repeater_cnt; + uint8_t repeater_id; + uint8_t lane = 0; + + if (link->ctx->dc->work_arounds.lt_early_cr_pattern) + start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); + + /* 1. set link rate, lane count and spread. */ + dpcd_set_link_settings(link, lt_settings); + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + + /* 2. perform link training (set link training done + * to false is done as well) + */ + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); + repeater_id--) { + status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) { + repeater_training_done(link, repeater_id); + break; + } + + status = perform_8b_10b_channel_equalization_sequence(link, + link_res, + lt_settings, + repeater_id); + + repeater_training_done(link, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) + break; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lt_settings->dpcd_lane_settings[lane].raw = 0; + lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; + lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; + } + } + } + + if (status == LINK_TRAINING_SUCCESS) { + status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX); + if (status == LINK_TRAINING_SUCCESS) { + status = perform_8b_10b_channel_equalization_sequence(link, + link_res, + lt_settings, + DPRX); + } + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_training_8b_10b.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_8b_10b.h new file mode 100644 index 000000000000..d26de15ce954 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_8b_10b.h @@ -0,0 +1,61 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_8B_10B_H__ +#define __DC_LINK_DP_TRAINING_8B_10B_H__ +#include "link_dp_training.h" + +/* to avoid infinite loop where-in the receiver + * switches between different VS + */ +#define LINK_TRAINING_MAX_CR_RETRY 100 +#define LINK_TRAINING_MAX_RETRY_COUNT 5 + +enum link_training_result dp_perform_8b_10b_link_training( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings); + +enum link_training_result perform_8b_10b_clock_recovery_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset); + +enum link_training_result perform_8b_10b_channel_equalization_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset); + +enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link); + +void decide_8b_10b_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_setting, + struct link_training_settings *lt_settings); + +#endif /* __DC_LINK_DP_TRAINING_8B_10B_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_training_auxless.c b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_auxless.c new file mode 100644 index 000000000000..f84b6ea53e8b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_auxless.c @@ -0,0 +1,80 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * + */ +#include "link_dp_training_auxless.h" +#include "link_dp_phy.h" +#include "dc_link_dp.h" +#define DC_LOGGER \ + link->ctx->logger +bool dc_link_dp_perform_link_training_skip_aux( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_setting) +{ + struct link_training_settings lt_settings = {0}; + + dp_decide_training_settings( + link, + link_setting, + <_settings); + override_training_settings( + link, + &link->preferred_training_settings, + <_settings); + + /* 1. Perform_clock_recovery_sequence. */ + + /* transmit training pattern for clock recovery */ + dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX); + + /* call HWSS to set lane settings*/ + dp_set_hw_lane_settings(link, link_res, <_settings, DPRX); + + /* wait receiver to lock-on*/ + dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time); + + /* 2. Perform_channel_equalization_sequence. */ + + /* transmit training pattern for channel equalization. */ + dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX); + + /* call HWSS to set lane settings*/ + dp_set_hw_lane_settings(link, link_res, <_settings, DPRX); + + /* wait receiver to lock-on. */ + dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time); + + /* 3. Perform_link_training_int. */ + + /* Mainlink output idle pattern. */ + dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + + dp_log_training_result(link, <_settings, LINK_TRAINING_SUCCESS); + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_training_auxless.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_auxless.h new file mode 100644 index 000000000000..413999cd03c4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_auxless.h @@ -0,0 +1,35 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_AUXLESS_H__ +#define __DC_LINK_DP_TRAINING_AUXLESS_H__ +#include "link_dp_training.h" + +bool dc_link_dp_perform_link_training_skip_aux( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_setting); +#endif /* __DC_LINK_DP_TRAINING_AUXLESS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_dpia.c index d130d58ac08e..cf47db1c2141 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_dpia.c @@ -1,6 +1,5 @@ -// SPDX-License-Identifier: MIT /* - * Copyright 2021 Advanced Micro Devices, Inc. + * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,76 +23,70 @@ * */ +/* FILE POLICY AND INTENDED USAGE: + * This module implements functionality for training DPIA links. + */ +#include "link_dp_training_dpia.h" #include "dc.h" -#include "dc_link_dpia.h" #include "inc/core_status.h" #include "dc_link.h" #include "dc_link_dp.h" #include "dpcd_defs.h" + +#include "link_dp_dpia.h" #include "link_hwss.h" #include "dm_helpers.h" #include "dmub/inc/dmub_cmd.h" -#include "inc/link_dpcd.h" +#include "link_dpcd.h" +#include "link_dp_training_8b_10b.h" +#include "link_dp_capability.h" #include "dc_dmub_srv.h" - #define DC_LOGGER \ link->ctx->logger -enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) -{ - enum dc_status status = DC_OK; - uint8_t dpcd_dp_tun_data[3] = {0}; - uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0}; - uint8_t i = 0; - - status = core_link_read_dpcd(link, - DP_TUNNELING_CAPABILITIES_SUPPORT, - dpcd_dp_tun_data, - sizeof(dpcd_dp_tun_data)); - - status = core_link_read_dpcd(link, - DP_USB4_ROUTER_TOPOLOGY_ID, - dpcd_topology_data, - sizeof(dpcd_topology_data)); - - link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = - dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - - DP_TUNNELING_CAPABILITIES_SUPPORT]; - link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = - dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; - link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = - dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT]; - - for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) - link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; - - return status; -} - -bool dc_link_dpia_query_hpd_status(struct dc_link *link) -{ - union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv; - bool is_hpd_high = false; - - /* prepare QUERY_HPD command */ - cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; - cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; - cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; - - /* Return HPD status reported by DMUB if query successfully executed. */ - if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) - is_hpd_high = cmd.query_hpd.data.result; +/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */ +#define DPIA_CLK_SYNC_DELAY 16000 + +/* Extend interval between training status checks for manual testing. */ +#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000 + +/* SET_CONFIG message types sent by driver. 
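+ * These are not DPCD writes; core_link_send_set_config() hands them to DMUB
+ * via dm_helpers_dmub_set_config_sync(), which is expected to deliver them to
+ * the tunnel's DP adapters over the USB4 sideband.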
*/ +enum dpia_set_config_type { + DPIA_SET_CFG_SET_LINK = 0x01, + DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05, + DPIA_SET_CFG_SET_TRAINING = 0x18, + DPIA_SET_CFG_SET_VSPE = 0x19 +}; + +/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */ +enum dpia_set_config_ts { + DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */ + DPIA_TS_TPS1 = 0x01, + DPIA_TS_TPS2 = 0x02, + DPIA_TS_TPS3 = 0x03, + DPIA_TS_TPS4 = 0x07, + DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */ +}; + +/* SET_CONFIG message data associated with messages sent by driver. */ +union dpia_set_config_data { + struct { + uint8_t mode : 1; + uint8_t reserved : 7; + } set_link; + struct { + uint8_t stage; + } set_training; + struct { + uint8_t swing : 2; + uint8_t max_swing_reached : 1; + uint8_t pre_emph : 2; + uint8_t max_pre_emph_reached : 1; + uint8_t reserved : 2; + } set_vspe; + uint8_t raw; +}; - DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", - __func__, - link->link_index, - link->link_id.enum_id - ENUM_ID_1, - cmd.query_hpd.data.status, - cmd.query_hpd.data.result); - - return is_hpd_high; -} /* Configure link as prescribed in link_setting; set LTTPR mode; and * Initialize link training settings. @@ -113,11 +106,12 @@ static enum link_training_result dpia_configure_link( bool fec_enable; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n", - __func__, - link->link_id.enum_id - ENUM_ID_1, - lt_settings->lttpr_mode); + __func__, + link->link_id.enum_id - ENUM_ID_1, + lt_settings->lttpr_mode); - dp_decide_training_settings(link, + dp_decide_training_settings( + link, link_setting, lt_settings); @@ -137,7 +131,7 @@ static enum link_training_result dpia_configure_link( if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; - if (link->preferred_training_settings.fec_enable) + if (link->preferred_training_settings.fec_enable != NULL) fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; @@ -148,7 +142,8 @@ static enum link_training_result dpia_configure_link( return LINK_TRAINING_SUCCESS; } -static enum dc_status core_link_send_set_config(struct dc_link *link, +static enum dc_status core_link_send_set_config( + struct dc_link *link, uint8_t msg_type, uint8_t msg_data) { @@ -160,8 +155,8 @@ static enum dc_status core_link_send_set_config(struct dc_link *link, payload.msg_data = msg_data; if (!link->ddc->ddc_pin && !link->aux_access_disabled && - (dm_helpers_dmub_set_config_sync(link->ctx, link, - &payload, &set_config_result) == -1)) { + (dm_helpers_dmub_set_config_sync(link->ctx, + link, &payload, &set_config_result) == -1)) { return DC_ERROR_UNEXPECTED; } @@ -170,7 +165,8 @@ static enum dc_status core_link_send_set_config(struct dc_link *link, } /* Build SET_CONFIG message data payload for specified message type. */ -static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type, +static uint8_t dpia_build_set_config_data( + enum dpia_set_config_type type, struct dc_link *link, struct link_training_settings *lt_settings) { @@ -189,11 +185,9 @@ static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type, data.set_vspe.swing = lt_settings->hw_lane_settings[0].VOLTAGE_SWING; data.set_vspe.pre_emph = lt_settings->hw_lane_settings[0].PRE_EMPHASIS; data.set_vspe.max_swing_reached = - lt_settings->hw_lane_settings[0].VOLTAGE_SWING == - VOLTAGE_SWING_MAX_LEVEL ? 1 : 0; + lt_settings->hw_lane_settings[0].VOLTAGE_SWING == VOLTAGE_SWING_MAX_LEVEL ? 
1 : 0; data.set_vspe.max_pre_emph_reached = - lt_settings->hw_lane_settings[0].PRE_EMPHASIS == - PRE_EMPHASIS_MAX_LEVEL ? 1 : 0; + lt_settings->hw_lane_settings[0].PRE_EMPHASIS == PRE_EMPHASIS_MAX_LEVEL ? 1 : 0; break; default: ASSERT(false); /* Message type not supported by helper function. */ @@ -235,7 +229,8 @@ static enum dc_status convert_trng_ptn_to_trng_stg(enum dc_dp_training_pattern t } /* Write training pattern to DPCD. */ -static enum dc_status dpcd_set_lt_pattern(struct dc_link *link, +static enum dc_status dpcd_set_lt_pattern( + struct dc_link *link, enum dc_dp_training_pattern pattern, uint32_t hop) { @@ -249,28 +244,29 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link, /* DpcdAddress_TrainingPatternSet */ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = - dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); + dp_training_pattern_to_dpcd_training_pattern(link, pattern); dpcd_pattern.v1_4.SCRAMBLING_DISABLE = - dc_dp_initialize_scrambling_data_symbols(link, pattern); + dp_initialize_scrambling_data_symbols(link, pattern); if (hop != DPRX) { DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", - __func__, - hop, - dpcd_tps_offset, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + __func__, + hop, + dpcd_tps_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } else { DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", - __func__, - dpcd_tps_offset, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + __func__, + dpcd_tps_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } - status = core_link_write_dpcd(link, - dpcd_tps_offset, - &dpcd_pattern.raw, - sizeof(dpcd_pattern.raw)); + status = core_link_write_dpcd( + link, + dpcd_tps_offset, + &dpcd_pattern.raw, + sizeof(dpcd_pattern.raw)); return status; } @@ -284,7 +280,7 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link, * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. */ static enum link_training_result dpia_training_cr_non_transparent( struct dc_link *link, @@ -297,8 +293,7 @@ static enum link_training_result dpia_training_cr_non_transparent( enum dc_status status; uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */ uint32_t retry_count = 0; - /* From DP spec, CR read interval is always 100us. */ - uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; + uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. */ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; union lane_align_status_updated dpcd_lane_status_updated = {0}; @@ -306,7 +301,7 @@ static enum link_training_result dpia_training_cr_non_transparent( uint8_t set_cfg_data; enum dpia_set_config_ts ts; - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); /* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery. * Fix inherited from perform_clock_recovery_sequence() - @@ -316,17 +311,20 @@ static enum link_training_result dpia_training_cr_non_transparent( * continuously. 
*/ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && - (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + /* DPTX-to-DPIA */ if (hop == repeater_cnt) { /* Send SET_CONFIG(SET_LINK:LC,LR,LTTPR) to notify DPOA that * non-transparent link training has started. * This also enables the transmission of clk_sync packets. */ - set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_LINK, + set_cfg_data = dpia_build_set_config_data( + DPIA_SET_CFG_SET_LINK, link, lt_settings); - status = core_link_send_set_config(link, + status = core_link_send_set_config( + link, DPIA_SET_CFG_SET_LINK, set_cfg_data); /* CR for this hop is considered successful as long as @@ -347,6 +345,14 @@ static enum link_training_result dpia_training_cr_non_transparent( result = LINK_TRAINING_ABORT; break; } + status = core_link_send_set_config( + link, + DPIA_SET_CFG_SET_TRAINING, + ts); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, hop); if (status != DC_OK) { result = LINK_TRAINING_ABORT; @@ -358,10 +364,12 @@ static enum link_training_result dpia_training_cr_non_transparent( * drive settings for hops immediately downstream. */ if (hop == repeater_cnt - 1) { - set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE, + set_cfg_data = dpia_build_set_config_data( + DPIA_SET_CFG_SET_VSPE, link, lt_settings); - status = core_link_send_set_config(link, + status = core_link_send_set_config( + link, DPIA_SET_CFG_SET_VSPE, set_cfg_data); if (status != DC_OK) { @@ -468,7 +476,8 @@ static enum link_training_result dpia_training_cr_transparent( * continuously. */ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && - (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + /* Write TPS1 (not VS or PE) to DPCD to start CR phase. * DPIA sends SET_CONFIG(SET_LINK) to notify DPOA to * start link training. @@ -529,8 +538,7 @@ static enum link_training_result dpia_training_cr_transparent( if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; - DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n" - " -hop(%d)\n - result(%d)\n - retries(%d)\n", + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n -hop(%d)\n - result(%d)\n - retries(%d)\n", __func__, link->link_id.enum_id - ENUM_ID_1, DPRX, @@ -545,7 +553,7 @@ static enum link_training_result dpia_training_cr_transparent( * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. */ static enum link_training_result dpia_training_cr_phase( struct dc_link *link, @@ -564,7 +572,8 @@ static enum link_training_result dpia_training_cr_phase( } /* Return status read interval during equalization phase. */ -static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link, +static uint32_t dpia_get_eq_aux_rd_interval( + const struct dc_link *link, const struct link_training_settings *lt_settings, uint32_t hop) { @@ -590,12 +599,11 @@ static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link, * - TPSx is transmitted for any hops downstream of DPOA. * - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA. * - EQ for the first hop (DPTX-to-DPIA) is assumed to be successful. - * - DPRX EQ only reported successful when both DPRX and DPIA requirements - * (clk sync packets sent) fulfilled. 
+ * - DPRX EQ only reported successful when both DPRX and DPIA requirements (clk sync packets sent) fulfilled. * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. */ static enum link_training_result dpia_training_eq_non_transparent( struct dc_link *link, @@ -624,9 +632,10 @@ static enum link_training_result dpia_training_eq_non_transparent( else tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) { + /* DPTX-to-DPIA equalization always successful. */ if (hop == repeater_cnt) { result = LINK_TRAINING_SUCCESS; @@ -640,7 +649,8 @@ static enum link_training_result dpia_training_eq_non_transparent( result = LINK_TRAINING_ABORT; break; } - status = core_link_send_set_config(link, + status = core_link_send_set_config( + link, DPIA_SET_CFG_SET_TRAINING, ts); if (status != DC_OK) { @@ -658,12 +668,14 @@ static enum link_training_result dpia_training_eq_non_transparent( * drive settings for hop immediately downstream. */ if (hop == repeater_cnt - 1) { - set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE, - link, - lt_settings); - status = core_link_send_set_config(link, - DPIA_SET_CFG_SET_VSPE, - set_cfg_data); + set_cfg_data = dpia_build_set_config_data( + DPIA_SET_CFG_SET_VSPE, + link, + lt_settings); + status = core_link_send_set_config( + link, + DPIA_SET_CFG_SET_VSPE, + set_cfg_data); if (status != DC_OK) { result = LINK_TRAINING_ABORT; break; @@ -679,7 +691,7 @@ static enum link_training_result dpia_training_eq_non_transparent( * ensure clock sync packets have been sent. */ if (hop == DPRX && retries_eq == 1) - wait_time_microsec = max(wait_time_microsec, (uint32_t)DPIA_CLK_SYNC_DELAY); + wait_time_microsec = max(wait_time_microsec, (uint32_t) DPIA_CLK_SYNC_DELAY); else wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, hop); @@ -705,8 +717,8 @@ static enum link_training_result dpia_training_eq_non_transparent( } if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && - dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) && - dp_is_interlane_aligned(dpcd_lane_status_updated)) { + dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) && + dp_is_interlane_aligned(dpcd_lane_status_updated)) { result = LINK_TRAINING_SUCCESS; break; } @@ -741,7 +753,7 @@ static enum link_training_result dpia_training_eq_non_transparent( * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. 
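A minimal sketch of how the non-transparent EQ loop above picks its status-read interval: on the second DPRX attempt the wait is stretched to at least DPIA_CLK_SYNC_DELAY so the DPOA has time to send clock-sync packets, otherwise the per-hop AUX read interval from dpia_get_eq_aux_rd_interval is used. The ex_ names are placeholders and the delay constant is an assumed value, not taken from the patch.

/* Editorial sketch, not part of the patch: EQ wait-time selection. */
#include <stdint.h>

#define EX_DPIA_CLK_SYNC_DELAY_US 16000 /* placeholder value for the sketch */

static uint32_t ex_eq_wait_us(uint32_t hop_rd_interval_us,
			      int hop_is_dprx, uint32_t retries_eq)
{
	if (hop_is_dprx && retries_eq == 1)
		return hop_rd_interval_us > EX_DPIA_CLK_SYNC_DELAY_US ?
		       hop_rd_interval_us : EX_DPIA_CLK_SYNC_DELAY_US;

	return hop_rd_interval_us;
}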
*/ static enum link_training_result dpia_training_eq_transparent( struct dc_link *link, @@ -761,6 +773,7 @@ static enum link_training_result dpia_training_eq_transparent( wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, DPRX); for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) { + if (retries_eq == 0) { status = dpcd_set_lt_pattern(link, tr_pattern, DPRX); if (status != DC_OK) { @@ -810,8 +823,7 @@ static enum link_training_result dpia_training_eq_transparent( if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; - DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n" - " - hop(%d)\n - result(%d)\n - retries(%d)\n", + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n - hop(%d)\n - result(%d)\n - retries(%d)\n", __func__, link->link_id.enum_id - ENUM_ID_1, DPRX, @@ -826,7 +838,7 @@ static enum link_training_result dpia_training_eq_transparent( * * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. */ static enum link_training_result dpia_training_eq_phase( struct dc_link *link, @@ -845,7 +857,9 @@ static enum link_training_result dpia_training_eq_phase( } /* End training of specified hop in display path. */ -static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop) +static enum dc_status dpcd_clear_lt_pattern( + struct dc_link *link, + uint32_t hop) { union dpcd_training_pattern dpcd_pattern = {0}; uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET; @@ -855,7 +869,8 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop) dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1)); - status = core_link_write_dpcd(link, + status = core_link_write_dpcd( + link, dpcd_tps_offset, &dpcd_pattern.raw, sizeof(dpcd_pattern.raw)); @@ -873,9 +888,10 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop) * (DPTX-to-DPIA) and last hop (DPRX). * * @param link DPIA link being trained. - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. */ -static enum link_training_result dpia_training_end(struct dc_link *link, +static enum link_training_result dpia_training_end( + struct dc_link *link, struct link_training_settings *lt_settings, uint32_t hop) { @@ -884,13 +900,15 @@ static enum link_training_result dpia_training_end(struct dc_link *link, enum dc_status status; if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); if (hop == repeater_cnt) { /* DPTX-to-DPIA */ /* Send SET_CONFIG(SET_TRAINING:0xff) to notify DPOA that * DPTX-to-DPIA hop trained. No DPCD write needed for first hop. */ - status = core_link_send_set_config(link, + status = core_link_send_set_config( + link, DPIA_SET_CFG_SET_TRAINING, DPIA_TS_UFP_DONE); if (status != DC_OK) @@ -904,7 +922,8 @@ static enum link_training_result dpia_training_end(struct dc_link *link, /* Notify DPOA that non-transparent link training of DPRX done. 
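A minimal sketch of the two ways dpia_training_end above finishes a hop: in non-transparent LTTPR mode the DPOA is told via SET_CONFIG(SET_TRAINING) messages (UFP_DONE for the DPTX-to-DPIA hop, DPRX_DONE once the sink is trained), while transparent or non-LTTPR operation simply writes 0x0 to the hop's TRAINING_PATTERN_SET. All ex_ names and the numeric SET_TRAINING values are placeholders/assumptions for this sketch.

/* Editorial sketch, not part of the patch: end-of-training dispatch. */
#include <stdbool.h>
#include <stdint.h>

enum ex_lttpr_mode { EX_LTTPR_MODE_TRANSPARENT, EX_LTTPR_MODE_NON_TRANSPARENT };

struct ex_end_ops {
	bool (*send_set_training)(uint8_t ts);  /* SET_CONFIG(SET_TRAINING) */
	bool (*clear_lt_pattern)(uint32_t hop); /* write 0x0 to the hop's TPS reg */
};

static bool ex_training_end(enum ex_lttpr_mode mode, uint32_t hop,
			    uint32_t repeater_cnt, const struct ex_end_ops *ops)
{
	if (mode == EX_LTTPR_MODE_NON_TRANSPARENT) {
		if (hop == repeater_cnt)           /* DPTX-to-DPIA hop, no DPCD write */
			return ops->send_set_training(0xff /* UFP_DONE, assumed */);
		if (hop == 0)                      /* DPRX */
			return ops->clear_lt_pattern(hop) &&
			       ops->send_set_training(0x0 /* DPRX_DONE, assumed */);
		return ops->clear_lt_pattern(hop); /* intermediate repeater */
	}
	/* transparent or no LTTPR: just clear the pattern */
	return ops->clear_lt_pattern(hop);
}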
*/ if (hop == DPRX && result != LINK_TRAINING_ABORT) { - status = core_link_send_set_config(link, + status = core_link_send_set_config( + link, DPIA_SET_CFG_SET_TRAINING, DPIA_TS_DPRX_DONE); if (status != DC_OK) @@ -912,18 +931,20 @@ static enum link_training_result dpia_training_end(struct dc_link *link, } } else { /* non-LTTPR or transparent LTTPR. */ + /* Write 0x0 to TRAINING_PATTERN_SET */ status = dpcd_clear_lt_pattern(link, hop); if (status != DC_OK) result = LINK_TRAINING_ABORT; + } DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) end\n - hop(%d)\n - result(%d)\n - LTTPR mode(%d)\n", - __func__, - link->link_id.enum_id - ENUM_ID_1, - hop, - result, - lt_settings->lttpr_mode); + __func__, + link->link_id.enum_id - ENUM_ID_1, + hop, + result, + lt_settings->lttpr_mode); return result; } @@ -933,20 +954,21 @@ static enum link_training_result dpia_training_end(struct dc_link *link, * - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0. * * @param link DPIA link being trained. - * @param hop The Hop in display path. DPRX = 0. + * @param hop Hop in display path. DPRX = 0. */ -static void dpia_training_abort(struct dc_link *link, - struct link_training_settings *lt_settings, - uint32_t hop) +static void dpia_training_abort( + struct dc_link *link, + struct link_training_settings *lt_settings, + uint32_t hop) { uint8_t data = 0; uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n", - __func__, - link->link_id.enum_id - ENUM_ID_1, - lt_settings->lttpr_mode, - link->is_hpd_pending); + __func__, + link->link_id.enum_id - ENUM_ID_1, + lt_settings->lttpr_mode, + link->is_hpd_pending); /* Abandon clean-up if sink unplugged. */ if (link->is_hpd_pending) @@ -975,7 +997,7 @@ enum link_training_result dc_link_dpia_perform_link_training( struct dc_link_settings link_settings = *link_setting; // non-const copy to pass in - lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link_settings); + lt_settings.lttpr_mode = dc_link_decide_lttpr_mode(link, &link_settings); /* Configure link as prescribed in link_setting and set LTTPR mode. */ result = dpia_configure_link(link, link_res, link_setting, <_settings); @@ -983,7 +1005,7 @@ enum link_training_result dc_link_dpia_perform_link_training( return result; if (lt_settings.lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) - repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); /* Train each hop in turn starting with the one closest to DPTX. * In transparent or non-LTTPR mode, train only the final hop (DPRX). @@ -1014,10 +1036,10 @@ enum link_training_result dc_link_dpia_perform_link_training( msleep(5); if (!link->is_automated) result = dp_check_link_loss_status(link, <_settings); - } else if (result == LINK_TRAINING_ABORT) { + } else if (result == LINK_TRAINING_ABORT) dpia_training_abort(link, <_settings, repeater_id); - } else { + else dpia_training_end(link, <_settings, repeater_id); - } + return result; } diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_training_dpia.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_dpia.h new file mode 100644 index 000000000000..0150f2916421 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_dpia.h @@ -0,0 +1,41 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
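A minimal sketch of the overall shape of dc_link_dpia_perform_link_training above: hops are trained one at a time, starting at the hop closest to DPTX (repeater_cnt) and ending with DPRX (hop 0), each hop running clock recovery then equalization, with the sequence stopping at the first failing hop. In transparent or non-LTTPR mode repeater_cnt is 0, so only DPRX is trained. The ex_ types and callbacks are placeholders for this sketch.

/* Editorial sketch, not part of the patch: hop-by-hop training loop. */
#include <stdint.h>

enum ex_lt_result { EX_LT_SUCCESS = 0, EX_LT_FAIL, EX_LT_ABORT };

struct ex_hop_ops {
	enum ex_lt_result (*cr_phase)(uint32_t hop);
	enum ex_lt_result (*eq_phase)(uint32_t hop);
};

static enum ex_lt_result ex_train_all_hops(uint32_t repeater_cnt,
					   const struct ex_hop_ops *ops)
{
	enum ex_lt_result result = EX_LT_SUCCESS;
	int hop;

	for (hop = (int)repeater_cnt; hop >= 0; hop--) {
		result = ops->cr_phase((uint32_t)hop);
		if (result != EX_LT_SUCCESS)
			break;
		result = ops->eq_phase((uint32_t)hop);
		if (result != EX_LT_SUCCESS)
			break;
	}
	return result;
}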
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_DPIA_H__ +#define __DC_LINK_DP_TRAINING_DPIA_H__ +#include "link_dp_training.h" + +/* Train DP tunneling link for USB4 DPIA display endpoint. + * DPIA equivalent of dc_link_dp_perfrorm_link_training. + * Aborts link training upon detection of sink unplug. + */ +enum link_training_result dc_link_dpia_perform_link_training( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_setting, + bool skip_video_pattern); + +#endif /* __DC_LINK_DP_TRAINING_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_fixed_vs_pe_retimer.c new file mode 100644 index 000000000000..860b5eea89aa --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_fixed_vs_pe_retimer.c @@ -0,0 +1,580 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements 8b/10b link training specially modified to support an + * embedded retimer chip. This retimer chip is referred as fixed vs pe retimer. + * Unlike native dp connection this chip requires a modified link training + * protocol based on 8b/10b link training. 
Since this is a non standard sequence + * and we must support this hardware, we decided to isolate it in its own + * training sequence inside its own file. + */ +#include "link_dp_training_fixed_vs_pe_retimer.h" +#include "link_dp_training_8b_10b.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" +#include "dc_link_dp.h" + +#define DC_LOGGER \ + link->ctx->logger + +void dp_fixed_vs_pe_read_lane_adjust( + struct dc_link *link, + union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) +{ + const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; + const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; + const uint8_t offset = dp_parse_lttpr_repeater_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + uint32_t vendor_lttpr_write_address = 0xF004F; + uint32_t vendor_lttpr_read_address = 0xF0053; + uint8_t dprx_vs = 0; + uint8_t dprx_pe = 0; + uint8_t lane; + + if (offset != 0xFF) { + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + vendor_lttpr_read_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + } + + /* W/A to read lane settings requested by DPRX */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_read_dpcd( + link, + vendor_lttpr_read_address, + &dprx_vs, + 1); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); + core_link_read_dpcd( + link, + vendor_lttpr_read_address, + &dprx_pe, + 1); + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3; + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3; + } +} + + +void dp_fixed_vs_pe_set_retimer_lane_settings( + struct dc_link *link, + const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], + uint8_t lane_count) +{ + const uint8_t offset = dp_parse_lttpr_repeater_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; + uint32_t vendor_lttpr_write_address = 0xF004F; + uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; + uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; + uint8_t lane = 0; + + if (offset != 0xFF) { + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + } + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Force LTTPR to output desired VS and PE */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_reset[0], + sizeof(vendor_lttpr_write_data_reset)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); +} + +static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + uint8_t lane = 0; + uint8_t toggle_rate = 0x6; 
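A minimal sketch of the bit packing used by the vendor-specific retimer helpers above: per-lane voltage swing and pre-emphasis are carried as 2-bit fields, lane 0 in bits 1:0, lane 1 in bits 3:2, and so on, packed into the last byte of the vendor write payload and unpacked the same way after the read-back workaround. The ex_ names are placeholders for this sketch.

/* Editorial sketch, not part of the patch: 2-bit-per-lane pack/unpack. */
#include <stdint.h>

#define EX_LANE_COUNT_DP_MAX 4

static uint8_t ex_pack_lane_fields(const uint8_t vals[EX_LANE_COUNT_DP_MAX],
				   uint8_t lane_count)
{
	uint8_t packed = 0;
	uint8_t lane;

	for (lane = 0; lane < lane_count; lane++)
		packed |= (vals[lane] & 0x3) << (2 * lane);
	return packed;
}

static void ex_unpack_lane_fields(uint8_t packed,
				  uint8_t vals[EX_LANE_COUNT_DP_MAX])
{
	uint8_t lane;

	for (lane = 0; lane < EX_LANE_COUNT_DP_MAX; lane++)
		vals[lane] = (packed >> (2 * lane)) & 0x3;
}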
+ uint8_t target_rate = 0x6; + bool apply_toggle_rate_wa = false; + uint8_t repeater_cnt; + uint8_t repeater_id; + + /* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */ + if (lt_settings->cr_pattern_time < 16000) + lt_settings->cr_pattern_time = 16000; + + /* Fixed VS/PE specific: Toggle link rate */ + apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate); + target_rate = get_dpcd_link_rate(<_settings->link_settings); + toggle_rate = (target_rate == 0x6) ? 0xA : 0x6; + + if (apply_toggle_rate_wa) + lt_settings->link_settings.link_rate = toggle_rate; + + if (link->ctx->dc->work_arounds.lt_early_cr_pattern) + start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); + + /* 1. set link rate, lane count and spread. */ + dpcd_set_link_settings(link, lt_settings); + + /* Fixed VS/PE specific: Toggle link rate back*/ + if (apply_toggle_rate_wa) { + core_link_write_dpcd( + link, + DP_LINK_BW_SET, + &target_rate, + 1); + } + + link->vendor_specific_lttpr_link_rate_wa = target_rate; + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + + /* 2. perform link training (set link training done + * to false is done as well) + */ + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); + repeater_id--) { + status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) { + repeater_training_done(link, repeater_id); + break; + } + + status = perform_8b_10b_channel_equalization_sequence(link, + link_res, + lt_settings, + repeater_id); + + repeater_training_done(link, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) + break; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lt_settings->dpcd_lane_settings[lane].raw = 0; + lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; + lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; + } + } + } + + if (status == LINK_TRAINING_SUCCESS) { + status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX); + if (status == LINK_TRAINING_SUCCESS) { + status = perform_8b_10b_channel_equalization_sequence(link, + link_res, + lt_settings, + DPRX); + } + } + + return status; +} + + +enum link_training_result dp_perform_fixed_vs_pe_training_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; + const uint8_t offset = dp_parse_lttpr_repeater_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; + const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; + uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; + uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; + uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; + uint32_t vendor_lttpr_write_address = 0xF004F; + enum link_training_result status = LINK_TRAINING_SUCCESS; + uint8_t lane = 0; + union down_spread_ctrl downspread = {0}; + union lane_count_set lane_count_set = {0}; + uint8_t toggle_rate; + uint8_t rate; + + /* Only 8b/10b is supported */ + ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING); + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + status = 
perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); + return status; + } + + if (offset != 0xFF) { + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + /* Certain display and cable configuration require extra delay */ + if (offset > 2) + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + } + + /* Vendor specific: Reset lane settings */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_reset[0], + sizeof(vendor_lttpr_write_data_reset)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); + + /* Vendor specific: Enable intercept */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_intercept_en[0], + sizeof(vendor_lttpr_write_data_intercept_en)); + + /* 1. set link rate, lane count and spread. */ + + downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); + + lane_count_set.bits.LANE_COUNT_SET = + lt_settings->link_settings.lane_count; + + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + + if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = + link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; + } + + core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, + &downspread.raw, sizeof(downspread)); + + core_link_write_dpcd(link, DP_LANE_COUNT_SET, + &lane_count_set.raw, 1); + + rate = get_dpcd_link_rate(<_settings->link_settings); + + /* Vendor specific: Toggle link rate */ + toggle_rate = (rate == 0x6) ? 0xA : 0x6; + + if (link->vendor_specific_lttpr_link_rate_wa == rate) { + core_link_write_dpcd( + link, + DP_LINK_BW_SET, + &toggle_rate, + 1); + } + + link->vendor_specific_lttpr_link_rate_wa = rate; + + core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + + DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", + __func__, + DP_LINK_BW_SET, + lt_settings->link_settings.link_rate, + DP_LANE_COUNT_SET, + lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, + DP_DOWNSPREAD_CTRL, + lt_settings->link_settings.link_spread); + + /* 2. Perform link training */ + + /* Perform Clock Recovery Sequence */ + if (status == LINK_TRAINING_SUCCESS) { + const uint8_t max_vendor_dpcd_retries = 10; + uint32_t retries_cr; + uint32_t retry_count; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; + union lane_align_status_updated dpcd_lane_status_updated; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + enum dc_status dpcd_status = DC_OK; + uint8_t i = 0; + + retries_cr = 0; + retry_count = 0; + + memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); + memset(&dpcd_lane_status_updated, '\0', + sizeof(dpcd_lane_status_updated)); + + while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + + + /* 1. call HWSS to set lane settings */ + dp_set_hw_lane_settings( + link, + link_res, + lt_settings, + 0); + + /* 2. 
update DPCD of the receiver */ + if (!retry_count) { + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration. + */ + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + lt_settings->pattern_for_cr, + 0); + /* Vendor specific: Disable intercept */ + for (i = 0; i < max_vendor_dpcd_retries; i++) { + msleep(pre_disable_intercept_delay_ms); + dpcd_status = core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_intercept_dis[0], + sizeof(vendor_lttpr_write_data_intercept_dis)); + + if (dpcd_status == DC_OK) + break; + + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_intercept_en[0], + sizeof(vendor_lttpr_write_data_intercept_en)); + } + } else { + vendor_lttpr_write_data_vs[3] = 0; + vendor_lttpr_write_data_pe[3] = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Vendor specific: Update VS and PE to DPRX requested value */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); + + dpcd_set_lane_settings( + link, + lt_settings, + 0); + } + + /* 3. wait receiver to lock-on*/ + wait_time_microsec = lt_settings->cr_pattern_time; + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. Read lane status and requested drive + * settings as set by the sink + */ + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + 0); + + /* 5. check CR done*/ + if (dp_is_cr_done(lane_count, dpcd_lane_status)) { + status = LINK_TRAINING_SUCCESS; + break; + } + + /* 6. max VS reached*/ + if (dp_is_max_vs_reached(lt_settings)) + break; + + /* 7. same lane settings */ + /* Note: settings are the same for all lanes, + * so comparing first lane is sufficient + */ + if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + retries_cr++; + else + retries_cr = 0; + + /* 8. update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + retry_count++; + } + + if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { + ASSERT(0); + DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", + __func__, + LINK_TRAINING_MAX_CR_RETRY); + + } + + status = dp_get_cr_failure(lane_count, dpcd_lane_status); + } + + /* Perform Channel EQ Sequence */ + if (status == LINK_TRAINING_SUCCESS) { + enum dc_dp_training_pattern tr_pattern; + uint32_t retries_ch_eq; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + /* Note: also check that TPS4 is a supported feature*/ + tr_pattern = lt_settings->pattern_for_eq; + + dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); + + status = LINK_TRAINING_EQ_FAIL_EQ; + + for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; + retries_ch_eq++) { + + dp_set_hw_lane_settings(link, link_res, lt_settings, 0); + + vendor_lttpr_write_data_vs[3] = 0; + vendor_lttpr_write_data_pe[3] = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Vendor specific: Update VS and PE to DPRX requested value */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); + + /* 2. update DPCD*/ + if (!retries_ch_eq) + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration + */ + + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + tr_pattern, 0); + else + dpcd_set_lane_settings(link, lt_settings, 0); + + /* 3. wait for receiver to lock-on*/ + wait_time_microsec = lt_settings->eq_pattern_time; + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. Read lane status and requested + * drive settings as set by the sink + */ + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + 0); + + /* 5. check CR done*/ + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { + status = LINK_TRAINING_EQ_FAIL_CR; + break; + } + + /* 6. check CHEQ done*/ + if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && + dp_is_symbol_locked(lane_count, dpcd_lane_status) && + dp_is_interlane_aligned(dpcd_lane_status_updated)) { + status = LINK_TRAINING_SUCCESS; + break; + } + + /* 7. update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + } + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_training_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_fixed_vs_pe_retimer.h new file mode 100644 index 000000000000..e61970e27661 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_training_fixed_vs_pe_retimer.h @@ -0,0 +1,45 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
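A minimal sketch of the composite pass condition used by the channel-EQ loops above: success requires clock recovery to still be locked, channel EQ and symbol lock reported on every active lane, and inter-lane alignment set. The flags are simplified into booleans here (the real code reads them from DPCD lane status), and a lost CR actually maps to a distinct EQ_FAIL_CR result rather than the plain failure shown; ex_ names are placeholders.

/* Editorial sketch, not part of the patch: EQ pass criteria. */
#include <stdbool.h>
#include <stdint.h>

struct ex_lane_status {
	bool cr_done;
	bool channel_eq_done;
	bool symbol_locked;
};

static bool ex_eq_passed(const struct ex_lane_status *lanes, uint8_t lane_count,
			 bool interlane_aligned)
{
	uint8_t lane;

	for (lane = 0; lane < lane_count; lane++) {
		if (!lanes[lane].cr_done)
			return false; /* CR lost: real code fails with EQ_FAIL_CR */
		if (!lanes[lane].channel_eq_done || !lanes[lane].symbol_locked)
			return false;
	}
	return interlane_aligned;
}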
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ +#define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ +#include "link_dp_training.h" + +enum link_training_result dp_perform_fixed_vs_pe_training_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings); + +void dp_fixed_vs_pe_set_retimer_lane_settings( + struct dc_link *link, + const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], + uint8_t lane_count); + +void dp_fixed_vs_pe_read_lane_adjust( + struct dc_link *link, + union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]); + +#endif /* __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/link_dpcd.c index af110bf9470f..5c9a30211c10 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpcd.c @@ -23,11 +23,14 @@ * */ -#include <inc/core_status.h> -#include <dc_link.h> -#include <inc/link_hwss.h> -#include <inc/link_dpcd.h> -#include <dc_dp_types.h> +/* FILE POLICY AND INTENDED USAGE: + * + * This file implements basic dpcd read/write functionality. It also does basic + * dpcd range check to ensure that every dpcd request is compliant with specs + * range requirements. + */ + +#include "link_dpcd.h" #include <drm/display/drm_dp_helper.h> #include "dm_helpers.h" diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/link/link_dpcd.h index d561f86d503c..08d787a1e451 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpcd.h @@ -25,9 +25,8 @@ #ifndef __LINK_DPCD_H__ #define __LINK_DPCD_H__ -#include <inc/core_status.h> -#include <dc_link.h> -#include <dc_link_dp.h> +#include "link.h" +#include "dpcd_defs.h" enum dc_status core_link_read_dpcd( struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/link_hpd.c new file mode 100644 index 000000000000..5f39dfe06e9a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_hpd.c @@ -0,0 +1,240 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
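A minimal sketch of one chore a DPCD read/write layer like link_dpcd.c typically handles: splitting large requests into AUX-sized transactions, since a native AUX transfer carries at most 16 bytes of data. This is illustrative only, assuming a placeholder aux_read callback; the real file additionally validates address ranges against the DPCD layout, which is not reproduced here.

/* Editorial sketch, not part of the patch: chunked DPCD read. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EX_AUX_MAX_PAYLOAD 16

static bool ex_read_dpcd(uint32_t address, uint8_t *data, size_t size,
			 bool (*aux_read)(uint32_t addr, uint8_t *buf, size_t len))
{
	size_t done = 0;

	while (done < size) {
		size_t chunk = size - done;

		if (chunk > EX_AUX_MAX_PAYLOAD)
			chunk = EX_AUX_MAX_PAYLOAD;
		if (!aux_read(address + done, data + done, chunk))
			return false;
		done += chunk;
	}
	return true;
}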
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * + * This file implements functions that manage basic HPD components such as gpio. + * It also provides wrapper functions to execute HPD related programming. This + * file only manages basic HPD functionality. It doesn't manage detection or + * feature or signal specific HPD behaviors. + */ +#include "link_hpd.h" +#include "gpio_service_interface.h" + +bool dc_link_get_hpd_state(struct dc_link *dc_link) +{ + uint32_t state; + + dal_gpio_lock_pin(dc_link->hpd_gpio); + dal_gpio_get_value(dc_link->hpd_gpio, &state); + dal_gpio_unlock_pin(dc_link->hpd_gpio); + + return state; +} + +void dc_link_enable_hpd(const struct dc_link *link) +{ + struct link_encoder *encoder = link->link_enc; + + if (encoder != NULL && encoder->funcs->enable_hpd != NULL) + encoder->funcs->enable_hpd(encoder); +} + +void dc_link_disable_hpd(const struct dc_link *link) +{ + struct link_encoder *encoder = link->link_enc; + + if (encoder != NULL && encoder->funcs->enable_hpd != NULL) + encoder->funcs->disable_hpd(encoder); +} + +void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) +{ + struct gpio *hpd; + + if (enable) { + link->is_hpd_filter_disabled = false; + program_hpd_filter(link); + } else { + link->is_hpd_filter_disabled = true; + /* Obtain HPD handle */ + hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); + + if (!hpd) + return; + + /* Setup HPD filtering */ + if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { + struct gpio_hpd_config config; + + config.delay_on_connect = 0; + config.delay_on_disconnect = 0; + + dal_irq_setup_hpd_filter(hpd, &config); + + dal_gpio_close(hpd); + } else { + ASSERT_CRITICAL(false); + } + /* Release HPD handle */ + dal_gpio_destroy_irq(&hpd); + } +} + +struct gpio *link_get_hpd_gpio(struct dc_bios *dcb, + struct graphics_object_id link_id, + struct gpio_service *gpio_service) +{ + enum bp_result bp_result; + struct graphics_object_hpd_info hpd_info; + struct gpio_pin_info pin_info; + + if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK) + return NULL; + + bp_result = dcb->funcs->get_gpio_pin_info(dcb, + hpd_info.hpd_int_gpio_uid, &pin_info); + + if (bp_result != BP_RESULT_OK) { + ASSERT(bp_result == BP_RESULT_NORECORD); + return NULL; + } + + return dal_gpio_service_create_irq(gpio_service, + pin_info.offset, + pin_info.mask); +} + +bool query_hpd_status(struct 
dc_link *link, uint32_t *is_hpd_high) +{ + struct gpio *hpd_pin = link_get_hpd_gpio( + link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + if (!hpd_pin) + return false; + + dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT); + dal_gpio_get_value(hpd_pin, is_hpd_high); + dal_gpio_close(hpd_pin); + dal_gpio_destroy_irq(&hpd_pin); + return true; +} + +enum hpd_source_id get_hpd_line(struct dc_link *link) +{ + struct gpio *hpd; + enum hpd_source_id hpd_id; + + hpd_id = HPD_SOURCEID_UNKNOWN; + + hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + + if (hpd) { + switch (dal_irq_get_source(hpd)) { + case DC_IRQ_SOURCE_HPD1: + hpd_id = HPD_SOURCEID1; + break; + case DC_IRQ_SOURCE_HPD2: + hpd_id = HPD_SOURCEID2; + break; + case DC_IRQ_SOURCE_HPD3: + hpd_id = HPD_SOURCEID3; + break; + case DC_IRQ_SOURCE_HPD4: + hpd_id = HPD_SOURCEID4; + break; + case DC_IRQ_SOURCE_HPD5: + hpd_id = HPD_SOURCEID5; + break; + case DC_IRQ_SOURCE_HPD6: + hpd_id = HPD_SOURCEID6; + break; + default: + BREAK_TO_DEBUGGER(); + break; + } + + dal_gpio_destroy_irq(&hpd); + } + + return hpd_id; +} + +bool program_hpd_filter(const struct dc_link *link) +{ + bool result = false; + struct gpio *hpd; + int delay_on_connect_in_ms = 0; + int delay_on_disconnect_in_ms = 0; + + if (link->is_hpd_filter_disabled) + return false; + /* Verify feature is supported */ + switch (link->connector_signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + /* Program hpd filter */ + delay_on_connect_in_ms = 500; + delay_on_disconnect_in_ms = 100; + break; + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + /* Program hpd filter to allow DP signal to settle */ + /* 500: not able to detect MST <-> SST switch as HPD is low for + * only 100ms on DELL U2413 + * 0: some passive dongle still show aux mode instead of i2c + * 20-50: not enough to hide bouncing HPD with passive dongle. + * also see intermittent i2c read issues. + */ + delay_on_connect_in_ms = 80; + delay_on_disconnect_in_ms = 0; + break; + case SIGNAL_TYPE_LVDS: + case SIGNAL_TYPE_EDP: + default: + /* Don't program hpd filter */ + return false; + } + + /* Obtain HPD handle */ + hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + + if (!hpd) + return result; + + /* Setup HPD filtering */ + if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { + struct gpio_hpd_config config; + + config.delay_on_connect = delay_on_connect_in_ms; + config.delay_on_disconnect = delay_on_disconnect_in_ms; + + dal_irq_setup_hpd_filter(hpd, &config); + + dal_gpio_close(hpd); + + result = true; + } else { + ASSERT_CRITICAL(false); + } + + /* Release HPD handle */ + dal_gpio_destroy_irq(&hpd); + + return result; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/link_hpd.h new file mode 100644 index 000000000000..3d122def0c88 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_hpd.h @@ -0,0 +1,47 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
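A minimal sketch restating the per-connector HPD debounce choices made by program_hpd_filter above: DVI and HDMI get 500 ms on connect and 100 ms on disconnect, DP and DP-MST get 80 ms / 0 ms (long enough to hide passive-dongle bounce, short enough to catch MST/SST switches), and LVDS, eDP and anything else leave HPD unfiltered. The ex_ enum and helper are placeholders for this sketch.

/* Editorial sketch, not part of the patch: HPD filter delay selection. */
#include <stdbool.h>

enum ex_signal { EX_SIGNAL_DVI, EX_SIGNAL_HDMI, EX_SIGNAL_DP, EX_SIGNAL_EDP, EX_SIGNAL_OTHER };

static bool ex_hpd_filter_delays(enum ex_signal sig, int *connect_ms, int *disconnect_ms)
{
	switch (sig) {
	case EX_SIGNAL_DVI:
	case EX_SIGNAL_HDMI:
		*connect_ms = 500;
		*disconnect_ms = 100;
		return true;
	case EX_SIGNAL_DP:
		*connect_ms = 80; /* settle time; see rationale in the code above */
		*disconnect_ms = 0;
		return true;
	default:
		return false; /* LVDS, eDP, unknown: don't program the filter */
	}
}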
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_HPD_H__ +#define __DC_LINK_HPD_H__ +#include "link.h" + +enum hpd_source_id get_hpd_line(struct dc_link *link); +/* + * Function: program_hpd_filter + * + * @brief + * Programs HPD filter on associated HPD line to default values. + * + * @return + * true on success, false otherwise + */ +bool program_hpd_filter(const struct dc_link *link); +/* Query hot plug status of USB4 DP tunnel. + * Returns true if HPD high. + */ +bool dpia_query_hpd_status(struct dc_link *link); +bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high); +#endif /* __DC_LINK_HPD_H__ */ diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index c8274967de94..a391b939d709 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -131,6 +131,17 @@ enum dmub_notification_type { }; /** + * DPIA NOTIFICATION Response Type + */ +enum dpia_notify_bw_alloc_status { + + DPIA_BW_REQ_FAILED = 0, + DPIA_BW_REQ_SUCCESS, + DPIA_EST_BW_CHANGED, + DPIA_BW_ALLOC_CAPS_CHANGED +}; + +/** * struct dmub_region - dmub hw memory region * @base: base address for region, must be 256 byte aligned * @top: top address for region @@ -465,7 +476,10 @@ struct dmub_notification { struct aux_reply_data aux_reply; enum dp_hpd_status hpd_status; enum set_config_status sc_status; - struct dpia_notification_reply_data bw_alloc_reply; + /** + * DPIA notification command. + */ + struct dmub_rb_cmd_dpia_notification dpia_notification; }; }; diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 4dcd82d19ccf..06c553b61322 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -770,7 +770,10 @@ enum dmub_out_cmd_type { * Command type used for SET_CONFIG Reply notification */ DMUB_OUT_CMD__SET_CONFIG_REPLY = 3, - DMUB_OUT_CMD__DPIA_NOTIFICATION = 5 + /** + * Command type used for USB4 DPIA notification + */ + DMUB_OUT_CMD__DPIA_NOTIFICATION = 5, }; /* DMUB_CMD__DPIA command sub-types. */ @@ -780,6 +783,11 @@ enum dmub_cmd_dpia_type { DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2, }; +/* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. 
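A minimal sketch of how a consumer might act on the dpia_notify_bw_alloc_status values introduced above. The reactions and callbacks are placeholders invented for this sketch; the actual response policy lives in the DM/BW-allocation layer and is not part of this patch.

/* Editorial sketch, not part of the patch: acting on a DPIA BW notification. */
#include <stdint.h>

enum ex_bw_status {
	EX_BW_REQ_FAILED = 0,
	EX_BW_REQ_SUCCESS,
	EX_EST_BW_CHANGED,
	EX_BW_ALLOC_CAPS_CHANGED
};

static void ex_handle_bw_alloc_notify(enum ex_bw_status status,
				      uint8_t allocated_bw, uint8_t estimated_bw,
				      void (*retry_lower)(void),
				      void (*commit)(uint8_t bw),
				      void (*reevaluate)(uint8_t bw))
{
	switch (status) {
	case EX_BW_REQ_SUCCESS:
		commit(allocated_bw);     /* CM granted the requested BW */
		break;
	case EX_BW_REQ_FAILED:
		retry_lower();            /* ask again with a smaller request */
		break;
	case EX_EST_BW_CHANGED:
		reevaluate(estimated_bw); /* available tunnel BW changed */
		break;
	case EX_BW_ALLOC_CAPS_CHANGED:
	default:
		reevaluate(estimated_bw); /* re-read allocation capabilities */
		break;
	}
}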
*/ +enum dmub_cmd_dpia_notification_type { + DPIA_NOTIFY__BW_ALLOCATION = 0, +}; + #pragma pack(push, 1) /** @@ -1518,84 +1526,6 @@ struct dp_hpd_data { }; /** - * DPIA NOTIFICATION Response Type - */ -enum dpia_notify_bw_alloc_status { - - DPIA_BW_REQ_FAILED = 0, - DPIA_BW_REQ_SUCCESS, - DPIA_EST_BW_CHANGED, - DPIA_BW_ALLOC_CAPS_CHANGED -}; - -/* DMUB_OUT_CMD__DPIA_NOTIFY Reply command - OutBox Cmd */ -/** - * Data passed to driver from FW in a DMUB_OUT_CMD__DPIA_NOTIFY command. - */ -struct dpia_notification_reply_data { - uint8_t allocated_bw; - uint8_t estimated_bw; -}; - -struct dpia_notification_common { - bool shared; -}; - -struct dpia_bw_allocation_notify_data { - union { - struct { - uint16_t cm_bw_alloc_support: 1; /**< USB4 CM BW Allocation mode support */ - uint16_t bw_request_failed: 1; /**< BW_Request_Failed */ - uint16_t bw_request_succeeded: 1; /**< BW_Request_Succeeded */ - uint16_t est_bw_changed: 1; /**< Estimated_BW changed */ - uint16_t bw_alloc_cap_changed: 1; /**< BW_Allocation_Capabiity_Changed */ - uint16_t reserved: 11; - } bits; - uint16_t flags; - }; - uint8_t cm_id; /**< CM ID */ - uint8_t group_id; /**< Group ID */ - uint8_t granularity; /**< BW Allocation Granularity */ - uint8_t estimated_bw; /**< Estimated_BW */ - uint8_t allocated_bw; /**< Allocated_BW */ - uint8_t reserved; -}; - -union dpia_notification_data { - struct dpia_notification_common common_data; - struct dpia_bw_allocation_notify_data dpia_bw_alloc; /**< Used for DPIA BW Allocation mode notification */ -}; - -enum dmub_cmd_dpia_notification_type { - DPIA_NOTIFY__BW_ALLOCATION = 0, -}; - -struct dpia_notification_header { - uint8_t instance; /**< DPIA Instance */ - uint8_t reserved[3]; - enum dmub_cmd_dpia_notification_type type; /**< DPIA notification type */ -}; - -struct dpia_notification_payload { - struct dpia_notification_header header; - union dpia_notification_data data; /**< DPIA notification data */ -}; - -/** - * Definition of a DMUB_OUT_CMD__DPIA_NOTIFY command. - */ -struct dmub_rb_cmd_dpia_notification { - /** - * Command header. - */ - struct dmub_cmd_header header; /**< DPIA notification header */ - /** - * Data passed to driver from FW in a DMUB_OUT_CMD__DPIA_NOTIFY command. - */ - struct dpia_notification_payload payload; /**< DPIA notification payload */ -}; - -/** * Definition of a DMUB_OUT_CMD__DP_HPD_NOTIFY command. 
*/ struct dmub_rb_cmd_dp_hpd_notify { @@ -1637,6 +1567,79 @@ struct dmub_rb_cmd_dp_set_config_reply { }; /** + * Definition of a DPIA notification header + */ +struct dpia_notification_header { + uint8_t instance; /**< DPIA Instance */ + uint8_t reserved[3]; + enum dmub_cmd_dpia_notification_type type; /**< DPIA notification type */ +}; + +/** + * Definition of the common data struct of DPIA notification + */ +struct dpia_notification_common { + uint8_t cmd_buffer[DMUB_RB_CMD_SIZE - sizeof(struct dmub_cmd_header) + - sizeof(struct dpia_notification_header)]; +}; + +/** + * Definition of a DPIA notification data + */ +struct dpia_bw_allocation_notify_data { + union { + struct { + uint16_t cm_bw_alloc_support: 1; /**< USB4 CM BW Allocation mode support */ + uint16_t bw_request_failed: 1; /**< BW_Request_Failed */ + uint16_t bw_request_succeeded: 1; /**< BW_Request_Succeeded */ + uint16_t est_bw_changed: 1; /**< Estimated_BW changed */ + uint16_t bw_alloc_cap_changed: 1; /**< BW_Allocation_Capabiity_Changed */ + uint16_t reserved: 11; /**< Reserved */ + } bits; + + uint16_t flags; + }; + + uint8_t cm_id; /**< CM ID */ + uint8_t group_id; /**< Group ID */ + uint8_t granularity; /**< BW Allocation Granularity */ + uint8_t estimated_bw; /**< Estimated_BW */ + uint8_t allocated_bw; /**< Allocated_BW */ + uint8_t reserved; +}; + +/** + * union dpia_notify_data_type - DPIA Notification in Outbox command + */ +union dpia_notification_data { + /** + * DPIA Notification for common data struct + */ + struct dpia_notification_common common_data; + + /** + * DPIA Notification for DP BW Allocation support + */ + struct dpia_bw_allocation_notify_data dpia_bw_alloc; +}; + +/** + * Definition of a DPIA notification payload + */ +struct dpia_notification_payload { + struct dpia_notification_header header; + union dpia_notification_data data; /**< DPIA notification payload data */ +}; + +/** + * Definition of a DMUB_OUT_CMD__DPIA_NOTIFICATION command. + */ +struct dmub_rb_cmd_dpia_notification { + struct dmub_cmd_header header; /**< DPIA notification header */ + struct dpia_notification_payload payload; /**< DPIA notification payload */ +}; + +/** * Data passed from driver to FW in a DMUB_CMD__QUERY_HPD_STATE command. */ struct dmub_cmd_hpd_state_query_data { @@ -3108,7 +3111,8 @@ struct dmub_rb_cmd_panel_cntl { */ struct dmub_cmd_lvtma_control_data { uint8_t uc_pwr_action; /**< LVTMA_ACTION */ - uint8_t reserved_0[3]; /**< For future use */ + uint8_t bypass_panel_control_wait; + uint8_t reserved_0[2]; uint8_t panel_inst; /**< LVTMA control instance */ uint8_t reserved_1[3]; /**< For future use */ }; @@ -3502,9 +3506,9 @@ union dmub_rb_out_cmd { */ struct dmub_rb_cmd_dp_set_config_reply set_config_reply; /** - * BW ALLOCATION notification command. + * DPIA notification command. 
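A minimal sketch of the sizing invariant behind the new dpia_notification_common above: its cmd_buffer is sized as DMUB_RB_CMD_SIZE minus the two headers so the whole outbox command pads out to one fixed ring-buffer slot. The layout below is simplified and the 64-byte DMUB_RB_CMD_SIZE is an assumption for this standalone example (C11 static_assert).

/* Editorial sketch, not part of the patch: compile-time slot-size check. */
#include <assert.h>
#include <stdint.h>

#define EX_DMUB_RB_CMD_SIZE 64 /* assumed ring-buffer command size */

struct ex_cmd_header { uint32_t raw; };                     /* stand-in for dmub_cmd_header */
struct ex_notif_header { uint8_t instance, rsvd[3]; uint32_t type; };

struct ex_notif_common {
	uint8_t cmd_buffer[EX_DMUB_RB_CMD_SIZE - sizeof(struct ex_cmd_header)
			   - sizeof(struct ex_notif_header)];
};

struct ex_rb_cmd_dpia_notification {
	struct ex_cmd_header header;
	struct ex_notif_header notif_header;
	struct ex_notif_common common; /* largest union member pads the command */
};

static_assert(sizeof(struct ex_rb_cmd_dpia_notification) == EX_DMUB_RB_CMD_SIZE,
	      "DPIA notification must occupy exactly one DMUB ring buffer slot");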
*/ - struct dmub_rb_cmd_dpia_notification dpia_notify; + struct dmub_rb_cmd_dpia_notification dpia_notification; }; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c index 55a534ec0794..74189102eaec 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c @@ -94,23 +94,23 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub, break; case DMUB_OUT_CMD__DPIA_NOTIFICATION: notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION; - notify->link_index = cmd.dpia_notify.payload.header.instance; + notify->link_index = cmd.dpia_notification.payload.header.instance; - if (cmd.dpia_notify.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) { + if (cmd.dpia_notification.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) { - if (cmd.dpia_notify.payload.data.dpia_bw_alloc.bits.bw_request_failed) { + notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw = + cmd.dpia_notification.payload.data.dpia_bw_alloc.estimated_bw; + notify->dpia_notification.payload.data.dpia_bw_alloc.allocated_bw = + cmd.dpia_notification.payload.data.dpia_bw_alloc.allocated_bw; + + if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_failed) notify->result = DPIA_BW_REQ_FAILED; - } else if (cmd.dpia_notify.payload.data.dpia_bw_alloc.bits.bw_request_succeeded) { + else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_succeeded) notify->result = DPIA_BW_REQ_SUCCESS; - notify->bw_alloc_reply.allocated_bw = - cmd.dpia_notify.payload.data.dpia_bw_alloc.allocated_bw; - } else if (cmd.dpia_notify.payload.data.dpia_bw_alloc.bits.est_bw_changed) { + else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.est_bw_changed) notify->result = DPIA_EST_BW_CHANGED; - notify->bw_alloc_reply.estimated_bw = - cmd.dpia_notify.payload.data.dpia_bw_alloc.estimated_bw; - } else if (cmd.dpia_notify.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed) { + else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed) notify->result = DPIA_BW_ALLOC_CAPS_CHANGED; - } } break; default: diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index a7ba5bd8dc16..3610f71891a3 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -133,6 +133,11 @@ static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5}; static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u"; +/*Travis*/ +static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT"; +/*Nutmeg*/ +static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA"; + /*MST Dock*/ static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index c2e00f7b8381..315da61ee897 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -616,7 +616,8 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr, } static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr, - struct dc_info_packet *infopacket) + struct dc_info_packet *infopacket, + bool freesync_on_desktop) { unsigned int min_refresh; unsigned int max_refresh; @@ -649,9 +650,15 @@ static void build_vrr_infopacket_data_v3(const struct 
mod_vrr_params *vrr, infopacket->sb[6] |= 0x02; /* PB6 = [Bit 2 = FreeSync Active] */ - if (vrr->state == VRR_STATE_ACTIVE_VARIABLE || + if (freesync_on_desktop) { + if (vrr->state != VRR_STATE_DISABLED && + vrr->state != VRR_STATE_UNSUPPORTED) + infopacket->sb[6] |= 0x04; + } else { + if (vrr->state == VRR_STATE_ACTIVE_VARIABLE || vrr->state == VRR_STATE_ACTIVE_FIXED) - infopacket->sb[6] |= 0x04; + infopacket->sb[6] |= 0x04; + } min_refresh = (vrr->min_refresh_in_uhz + 500000) / 1000000; max_refresh = (vrr->max_refresh_in_uhz + 500000) / 1000000; @@ -898,52 +905,20 @@ static void build_vrr_infopacket_v2(enum signal_type signal, infopacket->valid = true; } -#ifndef TRIM_FSFT -static void build_vrr_infopacket_fast_transport_data( - bool ftActive, - unsigned int ftOutputRate, - struct dc_info_packet *infopacket) -{ - /* PB9 : bit7 - fast transport Active*/ - unsigned char activeBit = (ftActive) ? 1 << 7 : 0; - - infopacket->sb[1] &= ~activeBit; //clear bit - infopacket->sb[1] |= activeBit; //set bit - - /* PB13 : Target Output Pixel Rate [kHz] - bits 7:0 */ - infopacket->sb[13] = ftOutputRate & 0xFF; - - /* PB14 : Target Output Pixel Rate [kHz] - bits 15:8 */ - infopacket->sb[14] = (ftOutputRate >> 8) & 0xFF; - - /* PB15 : Target Output Pixel Rate [kHz] - bits 23:16 */ - infopacket->sb[15] = (ftOutputRate >> 16) & 0xFF; - -} -#endif static void build_vrr_infopacket_v3(enum signal_type signal, const struct mod_vrr_params *vrr, -#ifndef TRIM_FSFT - bool ftActive, unsigned int ftOutputRate, -#endif enum color_transfer_func app_tf, - struct dc_info_packet *infopacket) + struct dc_info_packet *infopacket, + bool freesync_on_desktop) { unsigned int payload_size = 0; build_vrr_infopacket_header_v3(signal, infopacket, &payload_size); - build_vrr_infopacket_data_v3(vrr, infopacket); + build_vrr_infopacket_data_v3(vrr, infopacket, freesync_on_desktop); build_vrr_infopacket_fs2_data(app_tf, infopacket); -#ifndef TRIM_FSFT - build_vrr_infopacket_fast_transport_data( - ftActive, - ftOutputRate, - infopacket); -#endif - build_vrr_infopacket_checksum(&payload_size, infopacket); infopacket->valid = true; @@ -985,18 +960,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, switch (packet_type) { case PACKET_TYPE_FS_V3: -#ifndef TRIM_FSFT - // always populate with pixel rate. - build_vrr_infopacket_v3( - stream->signal, vrr, - stream->timing.flags.FAST_TRANSPORT, - (stream->timing.flags.FAST_TRANSPORT) ? 
- stream->timing.fast_transport_output_rate_100hz : - stream->timing.pix_clk_100hz, - app_tf, infopacket); -#else - build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket); -#endif + build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop); break; case PACKET_TYPE_FS_V2: build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop); diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h index 3b95a59b196c..56e00252bff8 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h @@ -3593,6 +3593,14 @@ #define regGCL2TLB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +// addressBlock: gc_rlcsdec +// base address: 0x3b980 +#define regRLC_RLCS_FED_STATUS_0 0x4eff +#define regRLC_RLCS_FED_STATUS_0_BASE_IDX 1 +#define regRLC_RLCS_FED_STATUS_1 0x4f00 +#define regRLC_RLCS_FED_STATUS_1_BASE_IDX 1 + + // addressBlock: gc_gcvml2pspdec // base address: 0x3f900 #define regGCUTCL2_TRANSLATION_BYPASS_BY_VMID 0x5e41 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h index ae3ef8a9e702..658e88a8e2ac 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h @@ -37642,6 +37642,56 @@ #define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL #define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L #define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L +//RLC_RLCS_FED_STATUS_0 +#define RLC_RLCS_FED_STATUS_0__RLC_FED_ERR__SHIFT 0x0 +#define RLC_RLCS_FED_STATUS_0__UTCL2_FED_ERR__SHIFT 0x1 +#define RLC_RLCS_FED_STATUS_0__GE_FED_ERR__SHIFT 0x2 +#define RLC_RLCS_FED_STATUS_0__CPC_FED_ERR__SHIFT 0x3 +#define RLC_RLCS_FED_STATUS_0__CPF_FED_ERR__SHIFT 0x4 +#define RLC_RLCS_FED_STATUS_0__CPG_FED_ERR__SHIFT 0x5 +#define RLC_RLCS_FED_STATUS_0__SDMA0_FED_ERR__SHIFT 0x6 +#define RLC_RLCS_FED_STATUS_0__SDMA1_FED_ERR__SHIFT 0x7 +#define RLC_RLCS_FED_STATUS_0__RLC_FED_ERR_MASK 0x00000001L +#define RLC_RLCS_FED_STATUS_0__UTCL2_FED_ERR_MASK 0x00000002L +#define RLC_RLCS_FED_STATUS_0__GE_FED_ERR_MASK 0x00000004L +#define RLC_RLCS_FED_STATUS_0__CPC_FED_ERR_MASK 0x00000008L +#define RLC_RLCS_FED_STATUS_0__CPF_FED_ERR_MASK 0x00000010L +#define RLC_RLCS_FED_STATUS_0__CPG_FED_ERR_MASK 0x00000020L +#define RLC_RLCS_FED_STATUS_0__SDMA0_FED_ERR_MASK 0x00000040L +#define RLC_RLCS_FED_STATUS_0__SDMA1_FED_ERR_MASK 0x00000080L +//RLC_RLCS_FED_STATUS_1 +#define RLC_RLCS_FED_STATUS_1__GL2C0_FED_ERR__SHIFT 0x0 +#define RLC_RLCS_FED_STATUS_1__GL2C1_FED_ERR__SHIFT 0x1 +#define RLC_RLCS_FED_STATUS_1__GL2C2_FED_ERR__SHIFT 0x2 +#define RLC_RLCS_FED_STATUS_1__GL2C3_FED_ERR__SHIFT 0x3 +#define RLC_RLCS_FED_STATUS_1__GL2C4_FED_ERR__SHIFT 0x4 +#define RLC_RLCS_FED_STATUS_1__GL2C5_FED_ERR__SHIFT 0x5 +#define RLC_RLCS_FED_STATUS_1__GL2C6_FED_ERR__SHIFT 0x6 +#define RLC_RLCS_FED_STATUS_1__GL2C7_FED_ERR__SHIFT 0x7 +#define RLC_RLCS_FED_STATUS_1__GL2C8_FED_ERR__SHIFT 0x8 +#define RLC_RLCS_FED_STATUS_1__GL2C9_FED_ERR__SHIFT 0x9 +#define RLC_RLCS_FED_STATUS_1__GL2C10_FED_ERR__SHIFT 0xa +#define RLC_RLCS_FED_STATUS_1__GL2C11_FED_ERR__SHIFT 0xb +#define RLC_RLCS_FED_STATUS_1__GL2C12_FED_ERR__SHIFT 0xc +#define RLC_RLCS_FED_STATUS_1__GL2C13_FED_ERR__SHIFT 0xd +#define RLC_RLCS_FED_STATUS_1__GL2C14_FED_ERR__SHIFT 0xe +#define 
RLC_RLCS_FED_STATUS_1__GL2C15_FED_ERR__SHIFT 0xf +#define RLC_RLCS_FED_STATUS_1__GL2C0_FED_ERR_MASK 0x00000001L +#define RLC_RLCS_FED_STATUS_1__GL2C1_FED_ERR_MASK 0x00000002L +#define RLC_RLCS_FED_STATUS_1__GL2C2_FED_ERR_MASK 0x00000004L +#define RLC_RLCS_FED_STATUS_1__GL2C3_FED_ERR_MASK 0x00000008L +#define RLC_RLCS_FED_STATUS_1__GL2C4_FED_ERR_MASK 0x00000010L +#define RLC_RLCS_FED_STATUS_1__GL2C5_FED_ERR_MASK 0x00000020L +#define RLC_RLCS_FED_STATUS_1__GL2C6_FED_ERR_MASK 0x00000040L +#define RLC_RLCS_FED_STATUS_1__GL2C7_FED_ERR_MASK 0x00000080L +#define RLC_RLCS_FED_STATUS_1__GL2C8_FED_ERR_MASK 0x00000100L +#define RLC_RLCS_FED_STATUS_1__GL2C9_FED_ERR_MASK 0x00000200L +#define RLC_RLCS_FED_STATUS_1__GL2C10_FED_ERR_MASK 0x00000400L +#define RLC_RLCS_FED_STATUS_1__GL2C11_FED_ERR_MASK 0x00000800L +#define RLC_RLCS_FED_STATUS_1__GL2C12_FED_ERR_MASK 0x00001000L +#define RLC_RLCS_FED_STATUS_1__GL2C13_FED_ERR_MASK 0x00002000L +#define RLC_RLCS_FED_STATUS_1__GL2C14_FED_ERR_MASK 0x00004000L +#define RLC_RLCS_FED_STATUS_1__GL2C15_FED_ERR_MASK 0x00008000L //RLC_CGTT_MGCG_OVERRIDE #define RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE__SHIFT 0x0 #define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1 diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h new file mode 100644 index 000000000000..c6c0cf1376a6 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h @@ -0,0 +1,87 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _xgmi_6_1_0_SH_MASK_HEADER +#define _xgmi_6_1_0_SH_MASK_HEADER + +//PCS_XGMI3X16_PCS_ERROR_STATUS +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlAckErr__SHIFT 0x2 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoUnderflowErr__SHIFT 0x3 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoOverflowErr__SHIFT 0x4 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxVcidDataErr__SHIFT 0x7 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd +#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlCRCErr__SHIFT 0xe +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf +#define PCS_XGMI3X16_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayAttemptErr__SHIFT 0x16 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__SyncHdrErr__SHIFT 0x17 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxReplayTimeoutErr__SHIFT 0x18 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxReplayTimeoutErr__SHIFT 0x19 +#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubTxTimeoutErr__SHIFT 0x1a +#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubRxTimeoutErr__SHIFT 0x1b +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxCMDPktErr__SHIFT 0x1c +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlAckErr_MASK 0x00000004L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoUnderflowErr_MASK 0x00000008L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoOverflowErr_MASK 0x00000010L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxVcidDataErr_MASK 0x00000080L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlCRCErr_MASK 0x00004000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L +#define 
PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayAttemptErr_MASK 0x00400000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__SyncHdrErr_MASK 0x00800000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxReplayTimeoutErr_MASK 0x01000000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxReplayTimeoutErr_MASK 0x02000000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubTxTimeoutErr_MASK 0x04000000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubRxTimeoutErr_MASK 0x08000000L +#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxCMDPktErr_MASK 0x10000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h index 9e8ed9f4bb15..3a4670bc4449 100644 --- a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h +++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h @@ -49,6 +49,8 @@ #define GFX_11_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout) #define GFX_11_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout +#define GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning) + #define GFX_11_0_0__SRCID__CP_GENERIC_INT 177 // 0xB1 CP_GENERIC int #define GFX_11_0_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 // 0xB4 PM4 Pkt Rsvd Bits Error #define GFX_11_0_0__SRCID__CP_EOP_INTERRUPT 181 // 0xB5 End-of-Pipe Interrupt diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 236657eece47..76b9ec64ca50 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3059,7 +3059,7 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, * * hwmon interfaces for GPU power: * - * - power1_average: average power used by the GPU in microWatts + * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU. 
* * - power1_cap_min: minimum cap supported in microWatts * diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index a2e1f6ed525f..89fc32318d80 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -4202,7 +4202,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to freeze SCLK DPM when DPM is disabled", ); @@ -4259,7 +4259,7 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels( } if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) { result = smum_populate_all_graphic_levels(hwmgr); PP_ASSERT_WITH_CODE((0 == result), "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", @@ -4267,7 +4267,7 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels( } if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) { /*populate MCLK dpm table to SMU7 */ result = smum_populate_all_memory_levels(hwmgr); PP_ASSERT_WITH_CODE((0 == result), @@ -4358,7 +4358,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to Unfreeze SCLK DPM when DPM is disabled", diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c index 5c3f42d97f69..4bc8db1be738 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c @@ -2202,7 +2202,7 @@ static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK)) return ci_program_memory_timing_parameters(hwmgr); return 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c index 03df35dee8ba..060fc140c574 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c @@ -2165,7 +2165,7 @@ static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK)) return iceland_program_memory_timing_parameters(hwmgr); return 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c index 04b561f5d932..acbe41174d7e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c @@ -2554,7 +2554,7 @@ static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct 
smu7_hwmgr *)(hwmgr->backend); if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK)) return tonga_program_memory_timing_parameters(hwmgr); return 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 85e22210963f..5cdc07165480 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -1171,6 +1171,7 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu, int ret = 0; uint32_t apu_percent = 0; uint32_t dgpu_percent = 0; + struct amdgpu_device *adev = smu->adev; ret = smu_cmn_get_metrics_table(smu, @@ -1196,7 +1197,11 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu, *value = metrics->AverageUvdActivity / 100; break; case METRICS_AVERAGE_SOCKETPOWER: - *value = (metrics->CurrentSocketPower << 8) / 1000; + if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) || + ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200))) + *value = metrics->CurrentSocketPower << 8; + else + *value = (metrics->CurrentSocketPower << 8) / 1000; break; case METRICS_TEMPERATURE_EDGE: *value = (metrics->GfxTemperature / 100) * diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 969e5f965540..d0cdc578344d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1904,15 +1904,51 @@ static int smu_v13_0_0_set_df_cstate(struct smu_context *smu, NULL); } +static void smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu, + uint32_t supported_version, + uint32_t *param) +{ + uint32_t smu_version; + struct amdgpu_device *adev = smu->adev; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + smu_cmn_get_smc_version(smu, NULL, &smu_version); + + if ((smu_version >= supported_version) && + ras && atomic_read(&ras->in_recovery)) + /* Set RAS fatal error reset flag */ + *param = 1 << 16; + else + *param = 0; +} + static int smu_v13_0_0_mode1_reset(struct smu_context *smu) { int ret; + uint32_t param; struct amdgpu_device *adev = smu->adev; - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) - ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset); - else + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(13, 0, 0): + /* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */ + smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, &param); + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_Mode1Reset, param, NULL); + break; + + case IP_VERSION(13, 0, 10): + /* SMU 13_0_10 PMFW supports RAS fatal error reset from 80.28 */ + smu_v13_0_0_set_mode1_reset_param(smu, 0x00501c00, &param); + + ret = smu_cmn_send_debug_smc_msg_with_param(smu, + DEBUGSMC_MSG_Mode1Reset, param); + break; + + default: ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); + break; + } if (!ret) msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 768b6e7dbd77..d5abafc5a682 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -404,6 +404,12 @@ int smu_cmn_send_debug_smc_msg(struct smu_context *smu, return __smu_cmn_send_debug_msg(smu, msg, 0); } +int smu_cmn_send_debug_smc_msg_with_param(struct smu_context
*smu, + uint32_t msg, uint32_t param) +{ + return __smu_cmn_send_debug_msg(smu, msg, param); +} + int smu_cmn_to_asic_specific_index(struct smu_context *smu, enum smu_cmn2asic_mapping_type type, uint32_t index) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index f82cf76dd3a4..d7cd358a53bd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -45,6 +45,9 @@ int smu_cmn_send_smc_msg(struct smu_context *smu, int smu_cmn_send_debug_smc_msg(struct smu_context *smu, uint32_t msg); +int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu, + uint32_t msg, uint32_t param); + int smu_cmn_wait_for_response(struct smu_context *smu); int smu_cmn_to_asic_specific_index(struct smu_context *smu, diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index fe7f871e3080..973af6d06626 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -1053,7 +1053,8 @@ struct drm_amdgpu_info_device { __u32 enabled_rb_pipes_mask; __u32 num_rb_pipes; __u32 num_hw_gfx_contexts; - __u32 _pad; + /* PCIe version (the smaller of the GPU and the CPU/motherboard) */ + __u32 pcie_gen; __u64 ids_flags; /** Starting virtual address for UMDs. */ __u64 virtual_address_offset; @@ -1100,7 +1101,8 @@ struct drm_amdgpu_info_device { __u32 gs_prim_buffer_depth; /* max gs wavefront per vgt*/ __u32 max_gs_waves_per_vgt; - __u32 _pad1; + /* PCIe number of lanes (the smaller of the GPU and the CPU/motherboard) */ + __u32 pcie_num_lanes; /* always on cu bitmap */ __u32 cu_ao_bitmap[4][4]; /** Starting high virtual address for UMDs. */ |
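
Note on the new gc_11_0_3 RLC_RLCS_FED_STATUS_0/1 registers above (and likewise the xgmi_6_1_0 PCS_XGMI3X16_PCS_ERROR_STATUS fields): they use the usual one-bit-per-source shift/mask convention, so a raw status word is decoded by AND-ing it with the *_MASK values. A minimal decode sketch follows; the mask values are copied from the header hunk above, but the standalone program and the example status word are illustrative only, not driver code.

/* Sketch: decode a raw RLC_RLCS_FED_STATUS_0 word with the new masks.
 * In the driver the word would come from regRLC_RLCS_FED_STATUS_0;
 * here it is just an example value. */
#include <stdint.h>
#include <stdio.h>

#define RLC_RLCS_FED_STATUS_0__RLC_FED_ERR_MASK   0x00000001u
#define RLC_RLCS_FED_STATUS_0__CPC_FED_ERR_MASK   0x00000008u
#define RLC_RLCS_FED_STATUS_0__SDMA0_FED_ERR_MASK 0x00000040u
#define RLC_RLCS_FED_STATUS_0__SDMA1_FED_ERR_MASK 0x00000080u

static void decode_fed_status_0(uint32_t status)
{
	if (status & RLC_RLCS_FED_STATUS_0__RLC_FED_ERR_MASK)
		printf("RLC reported a fatal error detected (FED)\n");
	if (status & RLC_RLCS_FED_STATUS_0__CPC_FED_ERR_MASK)
		printf("CPC reported a FED\n");
	if (status & RLC_RLCS_FED_STATUS_0__SDMA0_FED_ERR_MASK)
		printf("SDMA0 reported a FED\n");
	if (status & RLC_RLCS_FED_STATUS_0__SDMA1_FED_ERR_MASK)
		printf("SDMA1 reported a FED\n");
}

int main(void)
{
	decode_fed_status_0(0x00000041u);	/* RLC + SDMA0, example only */
	return 0;
}

RLC_RLCS_FED_STATUS_1 decodes the same way, with one bit per GL2C instance, and the new GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT source is what delivers these FED events to the driver.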
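
The amdgpu_pm.c hunk above clarifies that power1_average reports SoC power in microwatts, including the CPU on APUs. A hedged userspace sketch that reads the attribute and prints watts; the hwmon0 path is only an example, since the actual hwmonN index varies per system.

/* Sketch: read power1_average (microwatts) and print watts.
 * The sysfs path below is an example, not a fixed location. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/hwmon/hwmon0/power1_average"; /* example */
	FILE *f = fopen(path, "r");
	unsigned long long uw = 0;

	if (!f || fscanf(f, "%llu", &uw) != 1) {
		if (f)
			fclose(f);
		return 1;
	}
	fclose(f);

	/* On APUs this figure includes the CPU, per the comment above. */
	printf("average power: %llu.%06llu W\n", uw / 1000000, uw % 1000000);
	return 0;
}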
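
The smu7/ci/iceland/tonga hunks above replace '+' with '|' when combining DPMTABLE_* flags. Because the flags are, in the current definitions, distinct single-bit values, the arithmetic sum happened to yield the same mask, but addition is fragile: it misbehaves if a flag is ever contributed twice or the expression is extended, while OR is idempotent and states the intent. A standalone sketch, with flag values assumed to mirror the driver's power-of-two layout (they are not quoted in the hunks above):

/* Sketch: why '|' is preferred over '+' for flag masks.
 * The flag values below are assumptions mirroring the smu7 layout. */
#include <stdio.h>

#define DPMTABLE_OD_UPDATE_SCLK 0x00000001u
#define DPMTABLE_UPDATE_SCLK    0x00000004u

int main(void)
{
	unsigned int need_update = DPMTABLE_UPDATE_SCLK;

	/* Membership test against an OR-ed mask: always correct. */
	if (need_update & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))
		printf("SCLK DPM table needs updating\n");

	/* (A | A) == A, but (A + A) carries into an unrelated higher bit,
	 * which is why arithmetic addition is a latent bug in mask code. */
	return 0;
}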
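
In the renoir_ppt.c hunk, METRICS_AVERAGE_SOCKETPOWER is kept in 8.8 fixed-point watts. Older PMFW reports CurrentSocketPower in milliwatts, so the value is shifted left by 8 and divided by 1000; PMFW at or above the listed versions already reports watts, so only the shift is needed. As a worked example, a 15 W reading arrives as 15 from new firmware (15 << 8 = 3840) or as 15000 from old firmware ((15000 << 8) / 1000 = 3840), so both paths encode the same fixed-point value.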
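
The amdgpu_drm.h hunk above repurposes the former _pad/_pad1 fields of struct drm_amdgpu_info_device as pcie_gen and pcie_num_lanes (hence the KMS minor bump to 3.51). A hedged userspace sketch using libdrm_amdgpu (built against the libdrm_amdgpu pkg-config package); the render-node path is an example, and the fields should only be trusted when the reported DRM minor is at least 51, since older kernels return them as zeroed padding.

/* Sketch: query the new PCIe gen/lane fields via AMDGPU_INFO_DEV_INFO.
 * /dev/dri/renderD128 is an example node; pick the right one per system. */
#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);
	uint32_t major, minor;
	amdgpu_device_handle dev;
	struct drm_amdgpu_info_device info = {0};

	if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
		return 1;

	/* The fields are only meaningful from amdgpu DRM version 3.51 on. */
	if (minor >= 51 &&
	    !amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(info), &info))
		printf("PCIe gen %u, x%u lanes\n",
		       info.pcie_gen, info.pcie_num_lanes);

	amdgpu_device_deinitialize(dev);
	close(fd);
	return 0;
}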