From 112ed2d31a46f4704085ad925435b77e62b8abee Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Wed, 24 Apr 2019 18:48:39 +0100
Subject: drm/i915: Move GraphicsTechnology files under gt/

Start partitioning off the code that talks to the hardware (GT) from the
uapi layers and move the device-facing code under gt/

One casualty is s/intel_ringbuffer.h/intel_engine.h/ with the plan to
subdivide that header and body further (and split out the submission
code from the ringbuffer and logical context handling). This patch aims
to be simple motion so that git can fix up in-flight patches with little
mess.

Signed-off-by: Chris Wilson
Acked-by: Joonas Lahtinen
Acked-by: Jani Nikula
Acked-by: Rodrigo Vivi
Link: https://patchwork.freedesktop.org/patch/msgid/20190424174839.7141-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/gt/intel_workarounds.c | 1402 +++++++++++++++++++++++++++
 1 file changed, 1402 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_workarounds.c
(limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c')

diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
new file mode 100644
index 000000000000..f46ed0e2f07c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -0,0 +1,1402 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_workarounds.h"
+
+/**
+ * DOC: Hardware workarounds
+ *
+ * This file is intended as a central place to implement most [1]_ of the
+ * required workarounds for hardware to work as originally intended. They fall
+ * in five basic categories depending on how/when they are applied:
+ *
+ * - Workarounds that touch registers that are saved/restored to/from the HW
+ *   context image. The list is emitted (via Load Register Immediate commands)
+ *   every time a new context is created.
+ * - GT workarounds. The list of these WAs is applied whenever these registers
+ *   revert to default values (on GPU reset, suspend/resume [2]_, etc.).
+ * - Display workarounds. The list is applied during display clock-gating
+ *   initialization.
+ * - Workarounds that whitelist a privileged register, so that UMDs can manage
+ *   them directly. This is just a special case of a MMIO workaround (as we
+ *   write the list of these to-be-whitelisted registers to some special HW
+ *   registers).
+ * - Workaround batchbuffers, which get executed automatically by the hardware
+ *   on every HW context restore.
+ *
+ * .. [1] Please notice that there are other WAs that, due to their nature,
+ *    cannot be applied from a central place. Those are peppered around the rest
+ *    of the code, as needed.
+ *
+ * .. [2] Technically, some registers are power-context saved & restored, so
+ *    they survive a suspend/resume. In practice, writing them again is not too
+ *    costly and simplifies things. We can revisit this in the future.
+ *
+ * Layout
+ * ''''''
+ *
+ * Keep things in this file ordered by WA type, as per the above (context, GT,
+ * display, register whitelist, batchbuffer). Then, inside each type, keep the
+ * following order:
+ *
+ * - Infrastructure functions and macros
+ * - WAs per platform in standard gen/chrono order
+ * - Public functions to init or apply the given workaround type.
+ */
+
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
+{
+	wal->name = name;
+}
+
+#define WA_LIST_CHUNK (1 << 4)
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+	/* Trim unused entries.
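+	 * The list grows in power-of-two chunks of WA_LIST_CHUNK entries
+	 * (see _wa_add() below); once the list is sealed, kmemdup() it
+	 * down to the exact count so that, e.g., 17 recorded workarounds
+	 * no longer pin a 32-entry allocation.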
*/ + if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) { + struct i915_wa *list = kmemdup(wal->list, + wal->count * sizeof(*list), + GFP_KERNEL); + + if (list) { + kfree(wal->list); + wal->list = list; + } + } + + if (!wal->count) + return; + + DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n", + wal->wa_count, wal->name); +} + +static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) +{ + unsigned int addr = i915_mmio_reg_offset(wa->reg); + unsigned int start = 0, end = wal->count; + const unsigned int grow = WA_LIST_CHUNK; + struct i915_wa *wa_; + + GEM_BUG_ON(!is_power_of_2(grow)); + + if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */ + struct i915_wa *list; + + list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa), + GFP_KERNEL); + if (!list) { + DRM_ERROR("No space for workaround init!\n"); + return; + } + + if (wal->list) + memcpy(list, wal->list, sizeof(*wa) * wal->count); + + wal->list = list; + } + + while (start < end) { + unsigned int mid = start + (end - start) / 2; + + if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) { + start = mid + 1; + } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) { + end = mid; + } else { + wa_ = &wal->list[mid]; + + if ((wa->mask & ~wa_->mask) == 0) { + DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n", + i915_mmio_reg_offset(wa_->reg), + wa_->mask, wa_->val); + + wa_->val &= ~wa->mask; + } + + wal->wa_count++; + wa_->val |= wa->val; + wa_->mask |= wa->mask; + wa_->read |= wa->read; + return; + } + } + + wal->wa_count++; + wa_ = &wal->list[wal->count++]; + *wa_ = *wa; + + while (wa_-- > wal->list) { + GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) == + i915_mmio_reg_offset(wa_[1].reg)); + if (i915_mmio_reg_offset(wa_[1].reg) > + i915_mmio_reg_offset(wa_[0].reg)) + break; + + swap(wa_[1], wa_[0]); + } +} + +static void +wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, + u32 val) +{ + struct i915_wa wa = { + .reg = reg, + .mask = mask, + .val = val, + .read = mask, + }; + + _wa_add(wal, &wa); +} + +static void +wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +{ + wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val)); +} + +static void +wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +{ + wa_write_masked_or(wal, reg, ~0, val); +} + +static void +wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +{ + wa_write_masked_or(wal, reg, val, val); +} + +static void +ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val) +{ + struct i915_wa wa = { + .reg = reg, + .mask = mask, + .val = val, + /* Bonkers HW, skip verifying */ + }; + + _wa_add(wal, &wa); +} + +#define WA_SET_BIT_MASKED(addr, mask) \ + wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask)) + +#define WA_CLR_BIT_MASKED(addr, mask) \ + wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask)) + +#define WA_SET_FIELD_MASKED(addr, mask, value) \ + wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value))) + +static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine) +{ + struct i915_wa_list *wal = &engine->ctx_wa_list; + + WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); + + /* WaDisableAsyncFlipPerfMode:bdw,chv */ + WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE); + + /* WaDisablePartialInstShootdown:bdw,chv */ + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); + + /* Use Force Non-Coherent whenever executing a 3D context. 
This is a
+	 * workaround for a possible hang in the unlikely event a TLB
+	 * invalidation occurs during a PSD flush.
+	 */
+	/* WaForceEnableNonCoherent:bdw,chv */
+	/* WaHdcDisableFetchWhenMasked:bdw,chv */
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+			  HDC_FORCE_NON_COHERENT);
+
+	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
+	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
+	 *  polygons in the same 8x4 pixel/sample area to be processed without
+	 *  stalling waiting for the earlier ones to write to Hierarchical Z
+	 *  buffer."
+	 *
+	 * This optimization is off by default for BDW and CHV; turn it on.
+	 */
+	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+
+	/* Wa4x4STCOptimizationDisable:bdw,chv */
+	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+			    GEN6_WIZ_HASHING_MASK,
+			    GEN6_WIZ_HASHING_16x4);
+}
+
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+	gen8_ctx_workarounds_init(engine);
+
+	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+	/* WaDisableDopClockGating:bdw
+	 *
+	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
+	 * to disable EUTC clock gating.
+	 */
+	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+			  DOP_CLOCK_GATING_DISABLE);
+
+	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+			  GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  /* WaForceContextSaveRestoreNonCoherent:bdw */
+			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+			  (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+}
+
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
+{
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+	gen8_ctx_workarounds_init(engine);
+
+	/* WaDisableThreadStallDopClockGating:chv */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+	/* Improve HiZ throughput on CHV. */
+	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+}
+
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+	if (HAS_LLC(i915)) {
+		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
+		 *
+		 * Must match Display Engine. See
+		 * WaCompressedResourceDisplayNewHashMode.
+ */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN9_PBE_COMPRESSED_HASH_SELECTION); + WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, + GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR); + } + + /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */ + /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */ + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + FLOW_CONTROL_ENABLE | + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); + + /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ + if (!IS_COFFEELAKE(i915)) + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, + GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); + + /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */ + /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */ + WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, + GEN9_ENABLE_YV12_BUGFIX | + GEN9_ENABLE_GPGPU_PREEMPTION); + + /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */ + /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */ + WA_SET_BIT_MASKED(CACHE_MODE_1, + GEN8_4x4_STC_OPTIMIZATION_DISABLE | + GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE); + + /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */ + WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, + GEN9_CCS_TLB_PREFETCH_ENABLE); + + /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | + HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE); + + /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are + * both tied to WaForceContextSaveRestoreNonCoherent + * in some hsds for skl. We keep the tie for all gen9. The + * documentation is a bit hazy and so we want to get common behaviour, + * even though there is no clear evidence we would need both on kbl/bxt. + * This area has been source of system hangs so we play it safe + * and mimic the skl regardless of what bspec says. + * + * Use Force Non-Coherent whenever executing a 3D context. This + * is a workaround for a possible hang in the unlikely event + * a TLB invalidation occurs during a PSD flush. + */ + + /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FORCE_NON_COHERENT); + + /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */ + if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, + GEN8_SAMPLER_POWER_BYPASS_DIS); + + /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */ + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); + + /* + * Supporting preemption with fine-granularity requires changes in the + * batch buffer programming. Since we can't break old userspace, we + * need to set our default preemption level to safe value. Userspace is + * still able to use more fine-grained preemption levels, since in + * WaEnablePreemptionGranularityControlByUMD we're whitelisting the + * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are + * not real HW workarounds, but merely a way to start using preemption + * while maintaining old contract with userspace. 
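+	 *
+	 * As an illustrative aside (convention recap, not from the bspec):
+	 * the masked writes below rely on bits 31:16 of the written dword
+	 * selecting which of bits 15:0 actually latch, roughly
+	 *
+	 *   write = (mask << 16) | (value & mask);
+	 *
+	 * which is what lets WA_SET_FIELD_MASKED() update the GPGPU
+	 * preemption level without a read-modify-write cycle.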
+ */ + + /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */ + WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL); + + /* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); + + /* WaClearHIZ_WM_CHICKEN3:bxt,glk */ + if (IS_GEN9_LP(i915)) + WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ); +} + +static void skl_tune_iz_hashing(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->ctx_wa_list; + u8 vals[3] = { 0, 0, 0 }; + unsigned int i; + + for (i = 0; i < 3; i++) { + u8 ss; + + /* + * Only consider slices where one, and only one, subslice has 7 + * EUs + */ + if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i])) + continue; + + /* + * subslice_7eu[i] != 0 (because of the check above) and + * ss_max == 4 (maximum number of subslices possible per slice) + * + * -> 0 <= ss <= 3; + */ + ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1; + vals[i] = 3 - ss; + } + + if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0) + return; + + /* Tune IZ hashing. See intel_device_info_runtime_init() */ + WA_SET_FIELD_MASKED(GEN7_GT_MODE, + GEN9_IZ_HASHING_MASK(2) | + GEN9_IZ_HASHING_MASK(1) | + GEN9_IZ_HASHING_MASK(0), + GEN9_IZ_HASHING(2, vals[2]) | + GEN9_IZ_HASHING(1, vals[1]) | + GEN9_IZ_HASHING(0, vals[0])); +} + +static void skl_ctx_workarounds_init(struct intel_engine_cs *engine) +{ + gen9_ctx_workarounds_init(engine); + skl_tune_iz_hashing(engine); +} + +static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine) +{ + struct i915_wa_list *wal = &engine->ctx_wa_list; + + gen9_ctx_workarounds_init(engine); + + /* WaDisableThreadStallDopClockGating:bxt */ + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + STALL_DOP_GATING_DISABLE); + + /* WaToEnableHwFixForPushConstHWBug:bxt */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); +} + +static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->ctx_wa_list; + + gen9_ctx_workarounds_init(engine); + + /* WaToEnableHwFixForPushConstHWBug:kbl */ + if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER)) + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + + /* WaDisableSbeCacheDispatchPortSharing:kbl */ + WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1, + GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); +} + +static void glk_ctx_workarounds_init(struct intel_engine_cs *engine) +{ + struct i915_wa_list *wal = &engine->ctx_wa_list; + + gen9_ctx_workarounds_init(engine); + + /* WaToEnableHwFixForPushConstHWBug:glk */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); +} + +static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine) +{ + struct i915_wa_list *wal = &engine->ctx_wa_list; + + gen9_ctx_workarounds_init(engine); + + /* WaToEnableHwFixForPushConstHWBug:cfl */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + + /* WaDisableSbeCacheDispatchPortSharing:cfl */ + WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1, + GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); +} + +static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->ctx_wa_list; + + /* WaForceContextSaveRestoreNonCoherent:cnl */ + 
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0, + HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT); + + /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */ + if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0)) + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5); + + /* WaDisableReplayBufferBankArbitrationOptimization:cnl */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + + /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */ + if (IS_CNL_REVID(i915, 0, CNL_REVID_B0)) + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE); + + /* WaPushConstantDereferenceHoldDisable:cnl */ + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE); + + /* FtrEnableFastAnisoL1BankingFix:cnl */ + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX); + + /* WaDisable3DMidCmdPreemption:cnl */ + WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL); + + /* WaDisableGPGPUMidCmdPreemption:cnl */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); + + /* WaDisableEarlyEOT:cnl */ + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT); +} + +static void icl_ctx_workarounds_init(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->ctx_wa_list; + + /* Wa_1604370585:icl (pre-prod) + * Formerly known as WaPushConstantDereferenceHoldDisable + */ + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, + PUSH_CONSTANT_DEREF_DISABLE); + + /* WaForceEnableNonCoherent:icl + * This is not the same workaround as in early Gen9 platforms, where + * lacking this could cause system hangs, but coherency performance + * overhead is high and only a few compute workloads really need it + * (the register is whitelisted in hardware now, so UMDs can opt in + * for coherency if they have a good reason). 
+ */ + WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT); + + /* Wa_2006611047:icl (pre-prod) + * Formerly known as WaDisableImprovedTdlClkGating + */ + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, + GEN11_TDL_CLOCK_GATING_FIX_DISABLE); + + /* WaEnableStateCacheRedirectToCS:icl */ + WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1, + GEN11_STATE_CACHE_REDIRECT_TO_CS); + + /* Wa_2006665173:icl (pre-prod) */ + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) + WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, + GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC); + + /* WaEnableFloatBlendOptimization:icl */ + wa_write_masked_or(wal, + GEN10_CACHE_MODE_SS, + 0, /* write-only, so skip validation */ + _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE)); + + /* WaDisableGPGPUMidThreadPreemption:icl */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL); +} + +void intel_engine_init_ctx_wa(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->ctx_wa_list; + + wa_init_start(wal, "context"); + + if (IS_GEN(i915, 11)) + icl_ctx_workarounds_init(engine); + else if (IS_CANNONLAKE(i915)) + cnl_ctx_workarounds_init(engine); + else if (IS_COFFEELAKE(i915)) + cfl_ctx_workarounds_init(engine); + else if (IS_GEMINILAKE(i915)) + glk_ctx_workarounds_init(engine); + else if (IS_KABYLAKE(i915)) + kbl_ctx_workarounds_init(engine); + else if (IS_BROXTON(i915)) + bxt_ctx_workarounds_init(engine); + else if (IS_SKYLAKE(i915)) + skl_ctx_workarounds_init(engine); + else if (IS_CHERRYVIEW(i915)) + chv_ctx_workarounds_init(engine); + else if (IS_BROADWELL(i915)) + bdw_ctx_workarounds_init(engine); + else if (INTEL_GEN(i915) < 8) + return; + else + MISSING_CASE(INTEL_GEN(i915)); + + wa_init_finish(wal); +} + +int intel_engine_emit_ctx_wa(struct i915_request *rq) +{ + struct i915_wa_list *wal = &rq->engine->ctx_wa_list; + struct i915_wa *wa; + unsigned int i; + u32 *cs; + int ret; + + if (wal->count == 0) + return 0; + + ret = rq->engine->emit_flush(rq, EMIT_BARRIER); + if (ret) + return ret; + + cs = intel_ring_begin(rq, (wal->count * 2 + 2)); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(wal->count); + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + *cs++ = i915_mmio_reg_offset(wa->reg); + *cs++ = wa->val; + } + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + + ret = rq->engine->emit_flush(rq, EMIT_BARRIER); + if (ret) + return ret; + + return 0; +} + +static void +gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + /* WaDisableKillLogic:bxt,skl,kbl */ + if (!IS_COFFEELAKE(i915)) + wa_write_or(wal, + GAM_ECOCHK, + ECOCHK_DIS_TLB); + + if (HAS_LLC(i915)) { + /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl + * + * Must match Display Engine. See + * WaCompressedResourceDisplayNewHashMode. 
+ */ + wa_write_or(wal, + MMCD_MISC_CTRL, + MMCD_PCLA | MMCD_HOTSPOT_EN); + } + + /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */ + wa_write_or(wal, + GAM_ECOCHK, + BDW_DISABLE_HDC_INVALIDATION); +} + +static void +skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen9_gt_workarounds_init(i915, wal); + + /* WaDisableGafsUnitClkGating:skl */ + wa_write_or(wal, + GEN7_UCGCTL4, + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + + /* WaInPlaceDecompressionHang:skl */ + if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER)) + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); +} + +static void +bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen9_gt_workarounds_init(i915, wal); + + /* WaInPlaceDecompressionHang:bxt */ + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); +} + +static void +kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen9_gt_workarounds_init(i915, wal); + + /* WaDisableDynamicCreditSharing:kbl */ + if (IS_KBL_REVID(i915, 0, KBL_REVID_B0)) + wa_write_or(wal, + GAMT_CHKN_BIT_REG, + GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); + + /* WaDisableGafsUnitClkGating:kbl */ + wa_write_or(wal, + GEN7_UCGCTL4, + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + + /* WaInPlaceDecompressionHang:kbl */ + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); +} + +static void +glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen9_gt_workarounds_init(i915, wal); +} + +static void +cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen9_gt_workarounds_init(i915, wal); + + /* WaDisableGafsUnitClkGating:cfl */ + wa_write_or(wal, + GEN7_UCGCTL4, + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + + /* WaInPlaceDecompressionHang:cfl */ + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); +} + +static void +wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + u32 mcr_slice_subslice_mask; + + /* + * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl + * L3Banks could be fused off in single slice scenario. If that is + * the case, we might need to program MCR select to a valid L3Bank + * by default, to make sure we correctly read certain registers + * later on (in the range 0xB100 - 0xB3FF). + * This might be incompatible with + * WaProgramMgsrForCorrectSliceSpecificMmioReads. + * Fortunately, this should not happen in production hardware, so + * we only assert that this is the case (instead of implementing + * something more complex that requires checking the range of every + * MMIO read). 
+	 */
+	if (INTEL_GEN(i915) >= 10 &&
+	    is_power_of_2(sseu->slice_mask)) {
+		/*
+		 * Read FUSE3 for the enabled L3 bank IDs; if a bank matches
+		 * an enabled subslice, there is no need to redirect the MCR
+		 * packet.
+		 */
+		u32 slice = fls(sseu->slice_mask);
+		u32 fuse3 =
+			intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
+		u8 ss_mask = sseu->subslice_mask[slice];
+
+		u8 enabled_mask = (ss_mask | ss_mask >>
+				   GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
+		u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;
+
+		/*
+		 * Production silicon should have matched L3Bank and
+		 * subslice enabled
+		 */
+		WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
+	}
+
+	if (INTEL_GEN(i915) >= 11)
+		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
+					  GEN11_MCR_SUBSLICE_MASK;
+	else
+		mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
+					  GEN8_MCR_SUBSLICE_MASK;
+	/*
+	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
+	 * Before any MMIO read into slice/subslice specific registers, MCR
+	 * packet control register needs to be programmed to point to any
+	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
+	 * This means each subsequent MMIO read will be forwarded to a
+	 * specific s/ss combination, but this is OK since these registers
+	 * are consistent across s/ss in almost all cases. On the rare
+	 * occasions, such as INSTDONE, where this value is dependent
+	 * on s/ss combo, the read should be done with read_subslice_reg.
+	 */
+	wa_write_masked_or(wal,
+			   GEN8_MCR_SELECTOR,
+			   mcr_slice_subslice_mask,
+			   intel_calculate_mcr_s_ss_select(i915));
+}
+
+static void
+cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+	wa_init_mcr(i915, wal);
+
+	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
+	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+		wa_write_or(wal,
+			    GAMT_CHKN_BIT_REG,
+			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+
+	/* WaInPlaceDecompressionHang:cnl */
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+}
+
+static void
+icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+	wa_init_mcr(i915, wal);
+
+	/* WaInPlaceDecompressionHang:icl */
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+	/* WaModifyGamTlbPartitioning:icl */
+	wa_write_masked_or(wal,
+			   GEN11_GACB_PERF_CTRL,
+			   GEN11_HASH_CTRL_MASK,
+			   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
+
+	/* Wa_1405766107:icl
+	 * Formerly known as WaCL2SFHalfMaxAlloc
+	 */
+	wa_write_or(wal,
+		    GEN11_LSN_UNSLCVC,
+		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+
+	/* Wa_220166154:icl
+	 * Formerly known as WaDisCtxReload
+	 */
+	wa_write_or(wal,
+		    GEN8_GAMW_ECO_DEV_RW_IA,
+		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+
+	/* Wa_1405779004:icl (pre-prod) */
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+		wa_write_or(wal,
+			    SLICE_UNIT_LEVEL_CLKGATE,
+			    MSCUNIT_CLKGATE_DIS);
+
+	/* Wa_1406680159:icl */
+	wa_write_or(wal,
+		    SUBSLICE_UNIT_LEVEL_CLKGATE,
+		    GWUNIT_CLKGATE_DIS);
+
+	/* Wa_1406838659:icl (pre-prod) */
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+		wa_write_or(wal,
+			    INF_UNIT_LEVEL_CLKGATE,
+			    CGPSF_CLKGATE_DIS);
+
+	/* Wa_1406463099:icl
+	 * Formerly known as WaGamTlbPendError
+	 */
+	wa_write_or(wal,
+		    GAMT_CHKN_BIT_REG,
+		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
+}
+
+static void
+gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+	if (IS_GEN(i915, 11))
+		icl_gt_workarounds_init(i915, wal);
+	else if (IS_CANNONLAKE(i915))
+		cnl_gt_workarounds_init(i915, wal);
+ else if (IS_COFFEELAKE(i915)) + cfl_gt_workarounds_init(i915, wal); + else if (IS_GEMINILAKE(i915)) + glk_gt_workarounds_init(i915, wal); + else if (IS_KABYLAKE(i915)) + kbl_gt_workarounds_init(i915, wal); + else if (IS_BROXTON(i915)) + bxt_gt_workarounds_init(i915, wal); + else if (IS_SKYLAKE(i915)) + skl_gt_workarounds_init(i915, wal); + else if (INTEL_GEN(i915) <= 8) + return; + else + MISSING_CASE(INTEL_GEN(i915)); +} + +void intel_gt_init_workarounds(struct drm_i915_private *i915) +{ + struct i915_wa_list *wal = &i915->gt_wa_list; + + wa_init_start(wal, "GT"); + gt_init_workarounds(i915, wal); + wa_init_finish(wal); +} + +static enum forcewake_domains +wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal) +{ + enum forcewake_domains fw = 0; + struct i915_wa *wa; + unsigned int i; + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + fw |= intel_uncore_forcewake_for_reg(uncore, + wa->reg, + FW_REG_READ | + FW_REG_WRITE); + + return fw; +} + +static bool +wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from) +{ + if ((cur ^ wa->val) & wa->read) { + DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n", + name, from, i915_mmio_reg_offset(wa->reg), + cur, cur & wa->read, + wa->val, wa->mask); + + return false; + } + + return true; +} + +static void +wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal) +{ + enum forcewake_domains fw; + unsigned long flags; + struct i915_wa *wa; + unsigned int i; + + if (!wal->count) + return; + + fw = wal_get_fw_for_rmw(uncore, wal); + + spin_lock_irqsave(&uncore->lock, flags); + intel_uncore_forcewake_get__locked(uncore, fw); + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val); + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + wa_verify(wa, + intel_uncore_read_fw(uncore, wa->reg), + wal->name, "application"); + } + + intel_uncore_forcewake_put__locked(uncore, fw); + spin_unlock_irqrestore(&uncore->lock, flags); +} + +void intel_gt_apply_workarounds(struct drm_i915_private *i915) +{ + wa_list_apply(&i915->uncore, &i915->gt_wa_list); +} + +static bool wa_list_verify(struct intel_uncore *uncore, + const struct i915_wa_list *wal, + const char *from) +{ + struct i915_wa *wa; + unsigned int i; + bool ok = true; + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + ok &= wa_verify(wa, + intel_uncore_read(uncore, wa->reg), + wal->name, from); + + return ok; +} + +bool intel_gt_verify_workarounds(struct drm_i915_private *i915, + const char *from) +{ + return wa_list_verify(&i915->uncore, &i915->gt_wa_list, from); +} + +static void +whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg) +{ + struct i915_wa wa = { + .reg = reg + }; + + if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS)) + return; + + _wa_add(wal, &wa); +} + +static void gen9_whitelist_build(struct i915_wa_list *w) +{ + /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ + whitelist_reg(w, GEN9_CTX_PREEMPT_REG); + + /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */ + whitelist_reg(w, GEN8_CS_CHICKEN1); + + /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */ + whitelist_reg(w, GEN8_HDC_CHICKEN1); +} + +static void skl_whitelist_build(struct i915_wa_list *w) +{ + gen9_whitelist_build(w); + + /* WaDisableLSQCROPERFforOCL:skl */ + whitelist_reg(w, GEN8_L3SQCREG4); +} + +static void bxt_whitelist_build(struct i915_wa_list *w) +{ + gen9_whitelist_build(w); +} + +static void 
kbl_whitelist_build(struct i915_wa_list *w) +{ + gen9_whitelist_build(w); + + /* WaDisableLSQCROPERFforOCL:kbl */ + whitelist_reg(w, GEN8_L3SQCREG4); +} + +static void glk_whitelist_build(struct i915_wa_list *w) +{ + gen9_whitelist_build(w); + + /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */ + whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); +} + +static void cfl_whitelist_build(struct i915_wa_list *w) +{ + gen9_whitelist_build(w); +} + +static void cnl_whitelist_build(struct i915_wa_list *w) +{ + /* WaEnablePreemptionGranularityControlByUMD:cnl */ + whitelist_reg(w, GEN8_CS_CHICKEN1); +} + +static void icl_whitelist_build(struct i915_wa_list *w) +{ + /* WaAllowUMDToModifyHalfSliceChicken7:icl */ + whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7); + + /* WaAllowUMDToModifySamplerMode:icl */ + whitelist_reg(w, GEN10_SAMPLER_MODE); +} + +void intel_engine_init_whitelist(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *w = &engine->whitelist; + + GEM_BUG_ON(engine->id != RCS0); + + wa_init_start(w, "whitelist"); + + if (IS_GEN(i915, 11)) + icl_whitelist_build(w); + else if (IS_CANNONLAKE(i915)) + cnl_whitelist_build(w); + else if (IS_COFFEELAKE(i915)) + cfl_whitelist_build(w); + else if (IS_GEMINILAKE(i915)) + glk_whitelist_build(w); + else if (IS_KABYLAKE(i915)) + kbl_whitelist_build(w); + else if (IS_BROXTON(i915)) + bxt_whitelist_build(w); + else if (IS_SKYLAKE(i915)) + skl_whitelist_build(w); + else if (INTEL_GEN(i915) <= 8) + return; + else + MISSING_CASE(INTEL_GEN(i915)); + + wa_init_finish(w); +} + +void intel_engine_apply_whitelist(struct intel_engine_cs *engine) +{ + const struct i915_wa_list *wal = &engine->whitelist; + struct intel_uncore *uncore = engine->uncore; + const u32 base = engine->mmio_base; + struct i915_wa *wa; + unsigned int i; + + if (!wal->count) + return; + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + intel_uncore_write(uncore, + RING_FORCE_TO_NONPRIV(base, i), + i915_mmio_reg_offset(wa->reg)); + + /* And clear the rest just in case of garbage */ + for (; i < RING_MAX_NONPRIV_SLOTS; i++) + intel_uncore_write(uncore, + RING_FORCE_TO_NONPRIV(base, i), + i915_mmio_reg_offset(RING_NOPID(base))); +} + +static void +rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) +{ + struct drm_i915_private *i915 = engine->i915; + + if (IS_GEN(i915, 11)) { + /* This is not an Wa. 
Enable for better image quality */ + wa_masked_en(wal, + _3D_CHICKEN3, + _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE); + + /* WaPipelineFlushCoherentLines:icl */ + ignore_wa_write_or(wal, + GEN8_L3SQCREG4, + GEN8_LQSC_FLUSH_COHERENT_LINES, + GEN8_LQSC_FLUSH_COHERENT_LINES); + + /* + * Wa_1405543622:icl + * Formerly known as WaGAPZPriorityScheme + */ + wa_write_or(wal, + GEN8_GARBCNTL, + GEN11_ARBITRATION_PRIO_ORDER_MASK); + + /* + * Wa_1604223664:icl + * Formerly known as WaL3BankAddressHashing + */ + wa_write_masked_or(wal, + GEN8_GARBCNTL, + GEN11_HASH_CTRL_EXCL_MASK, + GEN11_HASH_CTRL_EXCL_BIT0); + wa_write_masked_or(wal, + GEN11_GLBLINVL, + GEN11_BANK_HASH_ADDR_EXCL_MASK, + GEN11_BANK_HASH_ADDR_EXCL_BIT0); + + /* + * Wa_1405733216:icl + * Formerly known as WaDisableCleanEvicts + */ + ignore_wa_write_or(wal, + GEN8_L3SQCREG4, + GEN11_LQSC_CLEAN_EVICT_DISABLE, + GEN11_LQSC_CLEAN_EVICT_DISABLE); + + /* WaForwardProgressSoftReset:icl */ + wa_write_or(wal, + GEN10_SCRATCH_LNCF2, + PMFLUSHDONE_LNICRSDROP | + PMFLUSH_GAPL3UNBLOCK | + PMFLUSHDONE_LNEBLK); + + /* Wa_1406609255:icl (pre-prod) */ + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) + wa_write_or(wal, + GEN7_SARCHKMD, + GEN7_DISABLE_DEMAND_PREFETCH | + GEN7_DISABLE_SAMPLER_PREFETCH); + } + + if (IS_GEN_RANGE(i915, 9, 11)) { + /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */ + wa_masked_en(wal, + GEN7_FF_SLICE_CS_CHICKEN1, + GEN9_FFSC_PERCTX_PREEMPT_CTRL); + } + + if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) { + /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */ + wa_write_or(wal, + GEN8_GARBCNTL, + GEN9_GAPS_TSV_CREDIT_DISABLE); + } + + if (IS_BROXTON(i915)) { + /* WaDisablePooledEuLoadBalancingFix:bxt */ + wa_masked_en(wal, + FF_SLICE_CS_CHICKEN2, + GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); + } + + if (IS_GEN(i915, 9)) { + /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ + wa_masked_en(wal, + GEN9_CSFE_CHICKEN1_RCS, + GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE); + + /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ + wa_write_or(wal, + BDW_SCRATCH1, + GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); + + /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ + if (IS_GEN9_LP(i915)) + wa_write_masked_or(wal, + GEN8_L3SQCREG1, + L3_PRIO_CREDITS_MASK, + L3_GENERAL_PRIO_CREDITS(62) | + L3_HIGH_PRIO_CREDITS(2)); + + /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ + wa_write_or(wal, + GEN8_L3SQCREG4, + GEN8_LQSC_FLUSH_COHERENT_LINES); + } +} + +static void +xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) +{ + struct drm_i915_private *i915 = engine->i915; + + /* WaKBLVECSSemaphoreWaitPoll:kbl */ + if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) { + wa_write(wal, + RING_SEMA_WAIT_POLL(engine->mmio_base), + 1); + } +} + +static void +engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal) +{ + if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8)) + return; + + if (engine->id == RCS0) + rcs_engine_wa_init(engine, wal); + else + xcs_engine_wa_init(engine, wal); +} + +void intel_engine_init_workarounds(struct intel_engine_cs *engine) +{ + struct i915_wa_list *wal = &engine->wa_list; + + if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8)) + return; + + wa_init_start(wal, engine->name); + engine_init_workarounds(engine, wal); + wa_init_finish(wal); +} + +void intel_engine_apply_workarounds(struct intel_engine_cs *engine) +{ + wa_list_apply(engine->uncore, &engine->wa_list); +} + +static struct i915_vma * +create_scratch(struct i915_address_space *vm, 
int count) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + unsigned int size; + int err; + + size = round_up(count * sizeof(u32), PAGE_SIZE); + obj = i915_gem_object_create_internal(vm->i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, + i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER); + if (err) + goto err_obj; + + return vma; + +err_obj: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static int +wa_list_srm(struct i915_request *rq, + const struct i915_wa_list *wal, + struct i915_vma *vma) +{ + const struct i915_wa *wa; + unsigned int i; + u32 srm, *cs; + + srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; + if (INTEL_GEN(rq->i915) >= 8) + srm++; + + cs = intel_ring_begin(rq, 4 * wal->count); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + *cs++ = srm; + *cs++ = i915_mmio_reg_offset(wa->reg); + *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i; + *cs++ = 0; + } + intel_ring_advance(rq, cs); + + return 0; +} + +static int engine_wa_list_verify(struct intel_engine_cs *engine, + const struct i915_wa_list * const wal, + const char *from) +{ + const struct i915_wa *wa; + struct i915_request *rq; + struct i915_vma *vma; + unsigned int i; + u32 *results; + int err; + + if (!wal->count) + return 0; + + vma = create_scratch(&engine->i915->ggtt.vm, wal->count); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + rq = i915_request_alloc(engine, engine->kernel_context->gem_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_vma; + } + + err = wa_list_srm(rq, wal, vma); + if (err) + goto err_vma; + + i915_request_add(rq); + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { + err = -ETIME; + goto err_vma; + } + + results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(results)) { + err = PTR_ERR(results); + goto err_vma; + } + + err = 0; + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + if (!wa_verify(wa, results[i], wal->name, from)) + err = -ENXIO; + + i915_gem_object_unpin_map(vma->obj); + +err_vma: + i915_vma_unpin(vma); + i915_vma_put(vma); + return err; +} + +int intel_engine_verify_workarounds(struct intel_engine_cs *engine, + const char *from) +{ + return engine_wa_list_verify(engine, &engine->wa_list, from); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_workarounds.c" +#endif -- cgit From 2ccdf6a1c3f7ff51d721ee7a5bed96e03da77205 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 24 Apr 2019 21:07:16 +0100 Subject: drm/i915: Pass intel_context to i915_request_create() Start acquiring the logical intel_context and using that as our primary means for request allocation. This is the initial step to allow us to avoid requiring struct_mutex for request allocation along the perma-pinned kernel context, but it also provides a foundation for breaking up the complex request allocation to handle different scenarios inside execbuf. For the purpose of emitting a request from inside retirement (see the next patch for engine power management), we also need to lift control over the timeline mutex to the caller. v2: Note that the request carries the active reference upon construction. 
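As a rough illustration (hypothetical caller, not part of this patch),
emitting work against the perma-pinned kernel context now reads:

	struct i915_request *rq;

	/* Allocate directly from the pinned intel_context. */
	rq = i915_request_create(engine->kernel_context);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... emit commands; rq carries the active reference ... */
	i915_request_add(rq);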
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190424200717.1686-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index f46ed0e2f07c..364696221fd7 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1356,7 +1356,7 @@ static int engine_wa_list_verify(struct intel_engine_cs *engine, if (IS_ERR(vma)) return PTR_ERR(vma); - rq = i915_request_alloc(engine, engine->kernel_context->gem_context); + rq = i915_request_create(engine->kernel_context); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_vma; -- cgit From 11334c6aad9500a9d3b9b48dd0a4bb6406eb88fb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 26 Apr 2019 17:33:33 +0100 Subject: drm/i915: Split engine setup/init into two phases In the next patch, we require the engine vfuncs setup prior to initialising the pinned kernel contexts, so split the vfunc setup from the engine initialisation and call it earlier. v2: s/setup_xcs/setup_common/ for intel_ring_submission_setup() Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190426163336.15906-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 364696221fd7..5751446a4b0b 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1077,7 +1077,8 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) struct drm_i915_private *i915 = engine->i915; struct i915_wa_list *w = &engine->whitelist; - GEM_BUG_ON(engine->id != RCS0); + if (engine->class != RENDER_CLASS) + return; wa_init_start(w, "whitelist"); -- cgit From 0fc2273b9ab7f07cdef448e99525e481535e1ab0 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 18 Apr 2019 11:06:34 +0100 Subject: drm/i915/icl: Whitelist GEN9_SLICE_COMMON_ECO_CHICKEN1 WaEnableStateCacheRedirectToCS context workaround configures the L3 cache to benefit 3d workloads but media has different requirements. Remove the workaround and whitelist the register to allow any userspace configure the behaviour to their liking. v2: * Remove the workaround apart from adding the whitelist. Signed-off-by: Tvrtko Ursulin Cc: Lionel Landwerlin Cc: kevin.ma@intel.com Cc: xiaogang.li@intel.com Acked-by: Lionel Landwerlin Acked-by: Anuj Phogat Link: https://patchwork.freedesktop.org/patch/msgid/20190418100634.984-1-tvrtko.ursulin@linux.intel.com Fixes: f63c7b4880aa ("drm/i915/icl: WaEnableStateCacheRedirectToCS") Reviewed-by: Joonas Lahtinen [tursulin: Anuj reported no GPU hangs or performance regressions with old Mesa on patched kernel.] 
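For illustration only (a hypothetical userspace-visible batch, not code
from this patch): with the register whitelisted, an unprivileged batch
may program the behaviour itself, e.g.

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN9_SLICE_COMMON_ECO_CHICKEN1);
	*cs++ = _MASKED_BIT_ENABLE(GEN11_STATE_CACHE_REDIRECT_TO_CS);
	*cs++ = MI_NOOP;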
--- drivers/gpu/drm/i915/gt/intel_workarounds.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 5751446a4b0b..43e290306551 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -556,10 +556,6 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, GEN11_TDL_CLOCK_GATING_FIX_DISABLE); - /* WaEnableStateCacheRedirectToCS:icl */ - WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1, - GEN11_STATE_CACHE_REDIRECT_TO_CS); - /* Wa_2006665173:icl (pre-prod) */ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, @@ -1070,6 +1066,9 @@ static void icl_whitelist_build(struct i915_wa_list *w) /* WaAllowUMDToModifySamplerMode:icl */ whitelist_reg(w, GEN10_SAMPLER_MODE); + + /* WaEnableStateCacheRedirectToCS:icl */ + whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); } void intel_engine_init_whitelist(struct intel_engine_cs *engine) -- cgit From fde938867b92607d1108fa42cf7fbfcbab8457c8 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 20 May 2019 15:25:46 +0100 Subject: drm/i915/selftests: Verify context workarounds Test context workarounds have been correctly applied in newly created contexts. To accomplish this the existing engine_wa_list_verify helper is extended to take in a context from which reading of the workaround list will be done. Context workaround verification is done from the existing subtests, which have been renamed to reflect they are no longer only about GT and engine workarounds. v2: * Test after resets and refactor to use intel_context more. (Chris) v3: * Use ce->engine->i915 instead of ce->gem_context->i915. (Chris) * gem_engine_iter.idx is engine->id + 1. (Chris) v4: * Make local function static. 
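A minimal sketch of using the extended helper (illustrative only; the
actual selftest plumbing differs):

	err = engine_wa_list_verify(engine->kernel_context,
				    &engine->wa_list,
				    "after reset");
	if (err)
		pr_err("%s: workarounds lost\n", engine->name);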
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190520142546.12493-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 112 +++++++++++++++------------- 1 file changed, 60 insertions(+), 52 deletions(-) (limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 43e290306551..d692b83583fa 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -196,10 +196,9 @@ ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val) #define WA_SET_FIELD_MASKED(addr, mask, value) \ wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value))) -static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine) +static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { - struct i915_wa_list *wal = &engine->ctx_wa_list; - WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); /* WaDisableAsyncFlipPerfMode:bdw,chv */ @@ -245,12 +244,12 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine) GEN6_WIZ_HASHING_16x4); } -static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine) +static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - struct i915_wa_list *wal = &engine->ctx_wa_list; - gen8_ctx_workarounds_init(engine); + gen8_ctx_workarounds_init(engine, wal); /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); @@ -273,11 +272,10 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine) (IS_BDW_GT3(i915) ? 
HDC_FENCE_DEST_SLM_DISABLE : 0)); } -static void chv_ctx_workarounds_init(struct intel_engine_cs *engine) +static void chv_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { - struct i915_wa_list *wal = &engine->ctx_wa_list; - - gen8_ctx_workarounds_init(engine); + gen8_ctx_workarounds_init(engine, wal); /* WaDisableThreadStallDopClockGating:chv */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); @@ -286,10 +284,10 @@ static void chv_ctx_workarounds_init(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); } -static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine) +static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - struct i915_wa_list *wal = &engine->ctx_wa_list; if (HAS_LLC(i915)) { /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl @@ -384,10 +382,10 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ); } -static void skl_tune_iz_hashing(struct intel_engine_cs *engine) +static void skl_tune_iz_hashing(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - struct i915_wa_list *wal = &engine->ctx_wa_list; u8 vals[3] = { 0, 0, 0 }; unsigned int i; @@ -424,17 +422,17 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine) GEN9_IZ_HASHING(0, vals[0])); } -static void skl_ctx_workarounds_init(struct intel_engine_cs *engine) +static void skl_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { - gen9_ctx_workarounds_init(engine); - skl_tune_iz_hashing(engine); + gen9_ctx_workarounds_init(engine, wal); + skl_tune_iz_hashing(engine, wal); } -static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine) +static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { - struct i915_wa_list *wal = &engine->ctx_wa_list; - - gen9_ctx_workarounds_init(engine); + gen9_ctx_workarounds_init(engine, wal); /* WaDisableThreadStallDopClockGating:bxt */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, @@ -445,12 +443,12 @@ static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine) GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); } -static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine) +static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - struct i915_wa_list *wal = &engine->ctx_wa_list; - gen9_ctx_workarounds_init(engine); + gen9_ctx_workarounds_init(engine, wal); /* WaToEnableHwFixForPushConstHWBug:kbl */ if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER)) @@ -462,22 +460,20 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine) GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); } -static void glk_ctx_workarounds_init(struct intel_engine_cs *engine) +static void glk_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { - struct i915_wa_list *wal = &engine->ctx_wa_list; - - gen9_ctx_workarounds_init(engine); + gen9_ctx_workarounds_init(engine, wal); /* WaToEnableHwFixForPushConstHWBug:glk */ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); } -static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine) +static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { - struct 
i915_wa_list *wal = &engine->ctx_wa_list; - - gen9_ctx_workarounds_init(engine); + gen9_ctx_workarounds_init(engine, wal); /* WaToEnableHwFixForPushConstHWBug:cfl */ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, @@ -488,10 +484,10 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine) GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); } -static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine) +static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - struct i915_wa_list *wal = &engine->ctx_wa_list; /* WaForceContextSaveRestoreNonCoherent:cnl */ WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0, @@ -528,10 +524,10 @@ static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT); } -static void icl_ctx_workarounds_init(struct intel_engine_cs *engine) +static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - struct i915_wa_list *wal = &engine->ctx_wa_list; /* Wa_1604370585:icl (pre-prod) * Formerly known as WaPushConstantDereferenceHoldDisable @@ -573,31 +569,36 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine) GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL); } -void intel_engine_init_ctx_wa(struct intel_engine_cs *engine) +static void +__intel_engine_init_ctx_wa(struct intel_engine_cs *engine, + struct i915_wa_list *wal, + const char *name) { struct drm_i915_private *i915 = engine->i915; - struct i915_wa_list *wal = &engine->ctx_wa_list; - wa_init_start(wal, "context"); + if (engine->class != RENDER_CLASS) + return; + + wa_init_start(wal, name); if (IS_GEN(i915, 11)) - icl_ctx_workarounds_init(engine); + icl_ctx_workarounds_init(engine, wal); else if (IS_CANNONLAKE(i915)) - cnl_ctx_workarounds_init(engine); + cnl_ctx_workarounds_init(engine, wal); else if (IS_COFFEELAKE(i915)) - cfl_ctx_workarounds_init(engine); + cfl_ctx_workarounds_init(engine, wal); else if (IS_GEMINILAKE(i915)) - glk_ctx_workarounds_init(engine); + glk_ctx_workarounds_init(engine, wal); else if (IS_KABYLAKE(i915)) - kbl_ctx_workarounds_init(engine); + kbl_ctx_workarounds_init(engine, wal); else if (IS_BROXTON(i915)) - bxt_ctx_workarounds_init(engine); + bxt_ctx_workarounds_init(engine, wal); else if (IS_SKYLAKE(i915)) - skl_ctx_workarounds_init(engine); + skl_ctx_workarounds_init(engine, wal); else if (IS_CHERRYVIEW(i915)) - chv_ctx_workarounds_init(engine); + chv_ctx_workarounds_init(engine, wal); else if (IS_BROADWELL(i915)) - bdw_ctx_workarounds_init(engine); + bdw_ctx_workarounds_init(engine, wal); else if (INTEL_GEN(i915) < 8) return; else @@ -606,6 +607,11 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine) wa_init_finish(wal); } +void intel_engine_init_ctx_wa(struct intel_engine_cs *engine) +{ + __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context"); +} + int intel_engine_emit_ctx_wa(struct i915_request *rq) { struct i915_wa_list *wal = &rq->engine->ctx_wa_list; @@ -1338,7 +1344,7 @@ wa_list_srm(struct i915_request *rq, return 0; } -static int engine_wa_list_verify(struct intel_engine_cs *engine, +static int engine_wa_list_verify(struct intel_context *ce, const struct i915_wa_list * const wal, const char *from) { @@ -1352,11 +1358,11 @@ static int engine_wa_list_verify(struct intel_engine_cs *engine, if (!wal->count) return 0; - vma = create_scratch(&engine->i915->ggtt.vm, wal->count); + vma = 
create_scratch(&ce->engine->i915->ggtt.vm, wal->count); if (IS_ERR(vma)) return PTR_ERR(vma); - rq = i915_request_create(engine->kernel_context); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_vma; @@ -1394,7 +1400,9 @@ err_vma: int intel_engine_verify_workarounds(struct intel_engine_cs *engine, const char *from) { - return engine_wa_list_verify(engine, &engine->wa_list, from); + return engine_wa_list_verify(engine->kernel_context, + &engine->wa_list, + from); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -- cgit From cbe3e1d103793705204b29c6952faed537c41fe1 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 20 May 2019 12:04:42 +0100 Subject: drm/i915/icl: Add WaDisableBankHangMode Disable GPU hang by default on unrecoverable ECC cache errors. v2: * Rebase. v3: * Use intel_uncore_read. (Chris) Fixes: cc38cae7c4e9 ("drm/i915/icl: Introduce initial Icelake Workarounds") Signed-off-by: Tvrtko Ursulin Acked-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190520110442.403-2-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index d692b83583fa..990b278a36d1 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -529,6 +529,12 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, { struct drm_i915_private *i915 = engine->i915; + /* WaDisableBankHangMode:icl */ + wa_write(wal, + GEN8_L3CNTLREG, + intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) | + GEN8_ERRDETBCTRL); + /* Wa_1604370585:icl (pre-prod) * Formerly known as WaPushConstantDereferenceHoldDisable */ -- cgit From 397049a03022702defa65694c23643f96d5fa113 Mon Sep 17 00:00:00 2001 From: Dongwon Kim Date: Thu, 25 Apr 2019 06:50:05 +0100 Subject: drm/i915/gen11: enable support for headerless msgs Setting bit5 (headerless msg for preemptible GPGPU context) of SAMPLER_MODE register to enable support for the headless msgs on gen11. None of existing use cases will be affected by this as this change makes both types of message - headerless and w/ header supported at the same time. It also complies with the new recommendation for the default bit value for the next gen. 
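For reference, the bit in question would be defined along these lines
(illustrative; the actual definition lives in i915_reg.h):

	#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG	(1 << 5)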
From 397049a03022702defa65694c23643f96d5fa113 Mon Sep 17 00:00:00 2001
From: Dongwon Kim
Date: Thu, 25 Apr 2019 06:50:05 +0100
Subject: drm/i915/gen11: enable support for headerless msgs

Set bit 5 (headerless message for preemptible GPGPU context) of the
SAMPLER_MODE register to enable support for headerless messages on
gen11. None of the existing use cases is affected, as this change makes
both message types (headerless and with header) supported at the same
time. It also complies with the new recommendation for the default bit
value on the next gen.

v2: rewrote commit message to include more information
v3: setting the bit in icl_ctx_workarounds_init()

Signed-off-by: Dongwon Kim
Reviewed-by: Mika Kuoppala
Link: https://patchwork.freedesktop.org/patch/msgid/20190425055005.21790-1-chris@chris-wilson.co.uk
Signed-off-by: Chris Wilson
---
 drivers/gpu/drm/i915/gt/intel_workarounds.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c')

diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 990b278a36d1..ce4bcca3f83c 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -573,6 +573,10 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
 	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
 			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
+
+	/* allow headerless messages for preemptible GPGPU context */
+	WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
+			  GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
 }
 
 static void
-- cgit

From 10be98a77c558f8cfb823cd2777171fbb35040f6 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Tue, 28 May 2019 10:29:49 +0100
Subject: drm/i915: Move more GEM objects under gem/

Continuing the theme of separating out the GEM clutter.

Signed-off-by: Chris Wilson
Reviewed-by: Mika Kuoppala
Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-8-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/gt/intel_workarounds.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c')

diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index ce4bcca3f83c..133d069244f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -5,6 +5,7 @@
  */
 
 #include "i915_drv.h"
+#include "intel_context.h"
 #include "intel_workarounds.h"
 
 /**
-- cgit
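[Editor's note: for reference, the reason the sampler-mode workaround above needs no read-modify-write is that masked registers interpret the top 16 bits of a write as a per-bit write enable. _MASKED_BIT_ENABLE() is the real macro; the open-coded form below is a sketch of the packing, for illustration only.]

	/* sketch of the masked-write encoding, not the kernel's macro */
	#define MASKED_BIT_ENABLE_SKETCH(bit)	(((bit) << 16) | (bit))

	/*
	 * WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
	 *		     GEN11_SAMPLER_ENABLE_HEADLESS_MSG)
	 * therefore writes (bit << 16) | bit: the high half unlocks the
	 * bit, the low half sets it, leaving all other bits untouched.
	 */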
From 1ac159e23c2c033f1fcbf7d60286b90335a4e9b2 Mon Sep 17 00:00:00 2001
From: Stuart Summers
Date: Fri, 24 May 2019 08:40:22 -0700
Subject: drm/i915: Expand subslice mask

Currently, the subslice_mask runtime parameter is stored as an array of
subslices per slice. Expand the subslice mask array to better match
what is presented to userspace through the I915_QUERY_TOPOLOGY_INFO
ioctl. The index into this array is then calculated:

  slice * subslice stride + subslice index / 8

v2: fix spacing in set_sseu_info args
    use set_sseu_info to initialize sseu data when building
    device status in debugfs
    rename variables in intel_engine_types.h to avoid checkpatch
    warnings
v3: update headers in intel_sseu.h
v4: add const to some sseu_dev_info variables
    use sseu->eu_stride for EU stride calculations
v5: address review comments from Tvrtko and Daniele
v6: remove extra space in intel_sseu_get_subslices
    return the correct subslice enable in for_each_instdone
    add GEM_BUG_ON to ensure user doesn't pass invalid ss_mask size
    use printk formatted string for subslice mask
v7: remove string.h header and rebase

Cc: Daniele Ceraolo Spurio
Cc: Lionel Landwerlin
Acked-by: Lionel Landwerlin
Reviewed-by: Daniele Ceraolo Spurio
Signed-off-by: Stuart Summers
Signed-off-by: Manasi Navare
Link: https://patchwork.freedesktop.org/patch/msgid/20190524154022.13575-6-stuart.summers@intel.com
---
 drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c')

diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 133d069244f4..fbc853085809 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -784,7 +784,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
 	u32 slice = fls(sseu->slice_mask);
 	u32 fuse3 = intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
-	u8 ss_mask = sseu->subslice_mask[slice];
+	u32 ss_mask = intel_sseu_get_subslices(sseu, slice);
 
 	u8 enabled_mask = (ss_mask | ss_mask >> GEN10_L3BANK_PAIR_COUNT) &
 			  GEN10_L3BANK_MASK;
-- cgit
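[Editor's note: the index calculation from the commit message, written out as code. This is a sketch: the field names (ss_stride, subslice_mask) follow the commit's description of the flattened array and are assumptions rather than quotes from the patch.]

	/* sketch: fetch the mask byte covering a given slice/subslice pair */
	static u8 subslice_mask_byte(const struct sseu_dev_info *sseu,
				     u8 slice, u8 subslice)
	{
		/* eight subslices per byte; each slice spans ss_stride bytes */
		int offset = slice * sseu->ss_stride + subslice / 8;

		return sseu->subslice_mask[offset];
	}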
From a10f361d176ce53c72d5b1b2e2913a1a222ee393 Mon Sep 17 00:00:00 2001
From: Jani Nikula
Date: Wed, 29 May 2019 11:21:50 +0300
Subject: Revert "drm/i915: Expand subslice mask"

This reverts commit 1ac159e23c2c ("drm/i915: Expand subslice mask"),
which kills ICL due to GEM_BUG_ON() sanity checks before CI even gets a
chance to do anything.

The commit exposes an issue in commit 1e40d4aea57b ("drm/i915/cnl:
Implement WaProgramMgsrForCorrectSliceSpecificMmioReads"), which will
also need to be addressed.

There's a proposed fix [1], but considering the seeming uncertainty
with the fix as well as the size of the regressing commit (in this
context, the one that actually brings down ICL), this warrants a revert
to get ICL working, and gives us time to get all of this right without
rushing. Even if this means shooting the messenger.

<3>[ 9.426327] intel_sseu_get_subslices:46 GEM_BUG_ON(slice >= sseu->max_slices)
<4>[ 9.426355] ------------[ cut here ]------------
<2>[ 9.426357] kernel BUG at drivers/gpu/drm/i915/gt/intel_sseu.c:46!
<4>[ 9.426371] invalid opcode: 0000 [#1] PREEMPT SMP NOPTI
<4>[ 9.426377] CPU: 1 PID: 364 Comm: systemd-udevd Not tainted 5.2.0-rc2-CI-CI_DRM_6159+ #1
<4>[ 9.426385] Hardware name: Intel Corporation Ice Lake Client Platform/IceLake U DDR4 SODIMM PD RVP TLC, BIOS ICLSFWR1.R00.3183.A00.1905020411 05/02/2019
<4>[ 9.426444] RIP: 0010:intel_sseu_get_subslices+0x8a/0xe0 [i915]
<4>[ 9.426452] Code: d5 76 b7 e0 48 8b 35 9d 24 21 00 49 c7 c0 07 f0 72 a0 b9 2e 00 00 00 48 c7 c2 00 8e 6d a0 48 c7 c7 a5 14 5b a0 e8 36 3c be e0 <0f> 0b 48 c7 c1 80 d5 6f a0 ba 30 00 00 00 48 c7 c6 00 8e 6d a0 48
<4>[ 9.426468] RSP: 0018:ffffc9000037b9c8 EFLAGS: 00010282
<4>[ 9.426475] RAX: 000000000000000f RBX: 0000000000000000 RCX: 0000000000000000
<4>[ 9.426482] RDX: 0000000000000001 RSI: 0000000000000008 RDI: ffff88849e346f98
<4>[ 9.426490] RBP: ffff88848a200000 R08: 0000000000000004 R09: ffff88849d50b000
<4>[ 9.426497] R10: 0000000000000000 R11: ffff88849e346f98 R12: ffff88848a209e78
<4>[ 9.426505] R13: 0000000003000000 R14: ffff88848a20b1a8 R15: 0000000000000000
<4>[ 9.426513] FS: 00007f73d5ae8680(0000) GS:ffff88849fc80000(0000) knlGS:0000000000000000
<4>[ 9.426521] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
<4>[ 9.426527] CR2: 0000561417b01260 CR3: 0000000494764003 CR4: 0000000000760ee0
<4>[ 9.426535] PKRU: 55555554
<4>[ 9.426538] Call Trace:
<4>[ 9.426585]  wa_init_mcr+0xd5/0x110 [i915]
<4>[ 9.426597]  ? lock_acquire+0xa6/0x1c0
<4>[ 9.426645]  icl_gt_workarounds_init+0x21/0x1a0 [i915]
<4>[ 9.426694]  ? i915_driver_load+0xfcf/0x18a0 [i915]
<4>[ 9.426739]  gt_init_workarounds+0x14c/0x230 [i915]
<4>[ 9.426748]  ? _raw_spin_unlock_irq+0x24/0x50
<4>[ 9.426789]  intel_gt_init_workarounds+0x1b/0x30 [i915]
<4>[ 9.426835]  i915_driver_load+0xfd7/0x18a0 [i915]
<4>[ 9.426843]  ? lock_acquire+0xa6/0x1c0
<4>[ 9.426850]  ? __pm_runtime_resume+0x4f/0x80
<4>[ 9.426857]  ? _raw_spin_unlock_irqrestore+0x4c/0x60
<4>[ 9.426863]  ? _raw_spin_unlock_irqrestore+0x4c/0x60
<4>[ 9.426870]  ? lockdep_hardirqs_on+0xe3/0x1b0
<4>[ 9.426915]  i915_pci_probe+0x29/0xa0 [i915]
<4>[ 9.426923]  pci_device_probe+0x9e/0x120
<4>[ 9.426930]  really_probe+0xea/0x3c0
<4>[ 9.426936]  driver_probe_device+0x10b/0x120
<4>[ 9.426942]  device_driver_attach+0x4a/0x50
<4>[ 9.426948]  __driver_attach+0x97/0x130
<4>[ 9.426954]  ? device_driver_attach+0x50/0x50
<4>[ 9.426960]  bus_for_each_dev+0x74/0xc0
<4>[ 9.426966]  bus_add_driver+0x13f/0x210
<4>[ 9.426971]  ? 0xffffffffa083b000
<4>[ 9.426976]  driver_register+0x56/0xe0
<4>[ 9.426982]  ? 0xffffffffa083b000
<4>[ 9.426987]  do_one_initcall+0x58/0x300
<4>[ 9.426994]  ? do_init_module+0x1d/0x1f6
<4>[ 9.427001]  ? rcu_read_lock_sched_held+0x6f/0x80
<4>[ 9.427007]  ? kmem_cache_alloc_trace+0x261/0x290
<4>[ 9.427014]  do_init_module+0x56/0x1f6
<4>[ 9.427020]  load_module+0x24d1/0x2990
<4>[ 9.427032]  ? __se_sys_finit_module+0xd3/0xf0
<4>[ 9.427037]  __se_sys_finit_module+0xd3/0xf0
<4>[ 9.427047]  do_syscall_64+0x55/0x1c0
<4>[ 9.427053]  entry_SYSCALL_64_after_hwframe+0x49/0xbe
<4>[ 9.427059] RIP: 0033:0x7f73d5609839
<4>[ 9.427064] Code: 00 f3 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 1f f6 2c 00 f7 d8 64 89 01 48
<4>[ 9.427082] RSP: 002b:00007ffdf34477b8 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
<4>[ 9.427091] RAX: ffffffffffffffda RBX: 00005559fd5d7b40 RCX: 00007f73d5609839
<4>[ 9.427099] RDX: 0000000000000000 RSI: 00007f73d52e8145 RDI: 000000000000000f
<4>[ 9.427106] RBP: 00007f73d52e8145 R08: 0000000000000000 R09: 00007ffdf34478d0
<4>[ 9.427114] R10: 000000000000000f R11: 0000000000000246 R12: 0000000000000000
<4>[ 9.427121] R13: 00005559fd5c90f0 R14: 0000000000020000 R15: 00005559fd5d7b40
<4>[ 9.427131] Modules linked in: i915(+) mei_hdcp x86_pkg_temp_thermal coretemp snd_hda_intel crct10dif_pclmul crc32_pclmul snd_hda_codec snd_hwdep e1000e snd_hda_core ghash_clmulni_intel ptp snd_pcm cdc_ether usbnet mii pps_core mei_me mei prime_numbers btusb btrtl btbcm btintel bluetooth ecdh_generic ecc
<4>[ 9.427254] ---[ end trace af3eeb543bd66e66 ]---

[1] http://patchwork.freedesktop.org/patch/msgid/20190528200655.11605-1-chris@chris-wilson.co.uk

References: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/fi-icl-u2/pstore0-1517155098_Oops_1.log
References: 1e40d4aea57b ("drm/i915/cnl: Implement WaProgramMgsrForCorrectSliceSpecificMmioReads")
Fixes: 1ac159e23c2c ("drm/i915: Expand subslice mask")
Cc: Chris Wilson
Cc: Daniele Ceraolo Spurio
Cc: Joonas Lahtinen
Cc: Lionel Landwerlin
Cc: Manasi Navare
Cc: Michel Thierry
Cc: Mika Kuoppala
Cc: Oscar Mateo
Cc: Stuart Summers
Cc: Tvrtko Ursulin
Cc: Yunwei Zhang
Acked-by: Daniel Vetter
Signed-off-by: Jani Nikula
Link: https://patchwork.freedesktop.org/patch/msgid/20190529082150.31526-1-jani.nikula@intel.com
---
 drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c')

diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index fbc853085809..133d069244f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -784,7 +784,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
 	u32 slice = fls(sseu->slice_mask);
 	u32 fuse3 = intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
-	u32 ss_mask = intel_sseu_get_subslices(sseu, slice);
+	u8 ss_mask = sseu->subslice_mask[slice];
 
 	u8 enabled_mask = (ss_mask | ss_mask >> GEN10_L3BANK_PAIR_COUNT) &
 			  GEN10_L3BANK_MASK;
-- cgit

From 5380d0b781c491d94b4f4690ecf9762c1946c4ec Mon Sep 17 00:00:00 2001
From: John Harrison
Date: Mon, 17 Jun 2019 18:01:05 -0700
Subject: drm/i915: Support flags in whitelist WAs

Newer hardware adds flags to the whitelist work-around register. These
allow per-access-direction privileges and ranges.

Signed-off-by: John Harrison
Signed-off-by: Robert M. Fosha
Cc: Tvrtko Ursulin
Cc: Chris Wilson
Reviewed-by: Tvrtko Ursulin
Signed-off-by: Tvrtko Ursulin
Link: https://patchwork.freedesktop.org/patch/msgid/20190618010108.27499-2-John.C.Harrison@Intel.com
---
 drivers/gpu/drm/i915/gt/intel_workarounds.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c')

diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 133d069244f4..92ca700789c5 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1012,7 +1012,7 @@ bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
 }
 
 static void
-whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
 {
 	struct i915_wa wa = {
 		.reg = reg
@@ -1021,9 +1021,16 @@ whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
 	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
 		return;
 
+	wa.reg.reg |= flags;
 	_wa_add(wal, &wa);
 }
 
+static void
+whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+{
+	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW);
+}
+
 static void gen9_whitelist_build(struct i915_wa_list *w)
 {
 	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
-- cgit
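[Editor's note: the flags can share a slot with the register because a RING_FORCE_TO_NONPRIV entry holds an mmio offset, leaving spare bits for an access mode; the wa.reg.reg |= flags line above packs both into one dword. A usage sketch built only from the two helpers in the hunk (the read-only flag, RING_FORCE_TO_NONPRIV_RD, is taken from the ICL patch further below; the register chosen here is illustrative).]

	/* default behaviour, unchanged: full read/write access */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* extended form: expose a register to userspace as read-only */
	whitelist_reg_ext(w, GEN8_CS_CHICKEN1, RING_FORCE_TO_NONPRIV_RD);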
From ebd2de47a19f1c17ae47f8331aae3cd436766663 Mon Sep 17 00:00:00 2001
From: John Harrison
Date: Mon, 17 Jun 2019 18:01:06 -0700
Subject: drm/i915: Support whitelist workarounds on all engines

Newer hardware requires setting up whitelists on engines other than
render. So, extend the whitelist code to support all engines.

Signed-off-by: John Harrison
Signed-off-by: Robert M. Fosha
Cc: Tvrtko Ursulin
Cc: Chris Wilson
Reviewed-by: Tvrtko Ursulin
Signed-off-by: Tvrtko Ursulin
Link: https://patchwork.freedesktop.org/patch/msgid/20190618010108.27499-3-John.C.Harrison@Intel.com
---
 drivers/gpu/drm/i915/gt/intel_workarounds.c | 66 ++++++++++++++++++++---------
 1 file changed, 47 insertions(+), 19 deletions(-)

(limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c')

diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 92ca700789c5..7fb35d88c2fc 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1043,48 +1043,79 @@ static void gen9_whitelist_build(struct i915_wa_list *w)
 	whitelist_reg(w, GEN8_HDC_CHICKEN1);
 }
 
-static void skl_whitelist_build(struct i915_wa_list *w)
+static void skl_whitelist_build(struct intel_engine_cs *engine)
 {
+	struct i915_wa_list *w = &engine->whitelist;
+
+	if (engine->class != RENDER_CLASS)
+		return;
+
 	gen9_whitelist_build(w);
 
 	/* WaDisableLSQCROPERFforOCL:skl */
 	whitelist_reg(w, GEN8_L3SQCREG4);
 }
 
-static void bxt_whitelist_build(struct i915_wa_list *w)
+static void bxt_whitelist_build(struct intel_engine_cs *engine)
 {
-	gen9_whitelist_build(w);
+	if (engine->class != RENDER_CLASS)
+		return;
+
+	gen9_whitelist_build(&engine->whitelist);
 }
 
-static void kbl_whitelist_build(struct i915_wa_list *w)
+static void kbl_whitelist_build(struct intel_engine_cs *engine)
 {
+	struct i915_wa_list *w = &engine->whitelist;
+
+	if (engine->class != RENDER_CLASS)
+		return;
+
 	gen9_whitelist_build(w);
 
 	/* WaDisableLSQCROPERFforOCL:kbl */
 	whitelist_reg(w, GEN8_L3SQCREG4);
 }
 
-static void glk_whitelist_build(struct i915_wa_list *w)
+static void glk_whitelist_build(struct intel_engine_cs *engine)
 {
+	struct i915_wa_list *w = &engine->whitelist;
+
+	if (engine->class != RENDER_CLASS)
+		return;
+
 	gen9_whitelist_build(w);
 
 	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
 	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
 }
 
-static void cfl_whitelist_build(struct i915_wa_list *w)
+static void cfl_whitelist_build(struct intel_engine_cs *engine)
 {
-	gen9_whitelist_build(w);
+	if (engine->class != RENDER_CLASS)
+		return;
+
+	gen9_whitelist_build(&engine->whitelist);
 }
 
-static void cnl_whitelist_build(struct i915_wa_list *w)
+static void cnl_whitelist_build(struct intel_engine_cs *engine)
 {
+	struct i915_wa_list *w = &engine->whitelist;
+
+	if (engine->class != RENDER_CLASS)
+		return;
+
 	/* WaEnablePreemptionGranularityControlByUMD:cnl */
 	whitelist_reg(w, GEN8_CS_CHICKEN1);
 }
 
-static void icl_whitelist_build(struct i915_wa_list *w)
+static void icl_whitelist_build(struct intel_engine_cs *engine)
 {
+	struct i915_wa_list *w = &engine->whitelist;
+
+	if (engine->class != RENDER_CLASS)
+		return;
+
 	/* WaAllowUMDToModifyHalfSliceChicken7:icl */
 	whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
 
@@ -1100,25 +1131,22 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
 	struct drm_i915_private *i915 = engine->i915;
 	struct i915_wa_list *w = &engine->whitelist;
 
-	if (engine->class != RENDER_CLASS)
-		return;
-
 	wa_init_start(w, "whitelist");
 
 	if (IS_GEN(i915, 11))
-		icl_whitelist_build(w);
+		icl_whitelist_build(engine);
 	else if (IS_CANNONLAKE(i915))
-		cnl_whitelist_build(w);
+		cnl_whitelist_build(engine);
 	else if (IS_COFFEELAKE(i915))
-		cfl_whitelist_build(w);
+		cfl_whitelist_build(engine);
 	else if (IS_GEMINILAKE(i915))
-		glk_whitelist_build(w);
+		glk_whitelist_build(engine);
 	else if (IS_KABYLAKE(i915))
-		kbl_whitelist_build(w);
+		kbl_whitelist_build(engine);
 	else if (IS_BROXTON(i915))
-		bxt_whitelist_build(w);
+		bxt_whitelist_build(engine);
 	else if (IS_SKYLAKE(i915))
-		skl_whitelist_build(w);
+		skl_whitelist_build(engine);
 	else if (INTEL_GEN(i915) <= 8)
 		return;
 	else
-- cgit
From 7b3d406310983a89ed7a1ecdd115efbe12b0ded5 Mon Sep 17 00:00:00 2001
From: John Harrison
Date: Mon, 17 Jun 2019 18:01:07 -0700
Subject: drm/i915: Add whitelist workarounds for ICL

Updated whitelist table for ICL.

v2: Reduce changes to just those required for media driver until the
selftest can be updated to support the new features of the other
entries.

Signed-off-by: John Harrison
Signed-off-by: Robert M. Fosha
Cc: Tvrtko Ursulin
Cc: Chris Wilson
Reviewed-by: Tvrtko Ursulin
Signed-off-by: Tvrtko Ursulin
Link: https://patchwork.freedesktop.org/patch/msgid/20190618010108.27499-4-John.C.Harrison@Intel.com
---
 drivers/gpu/drm/i915/gt/intel_workarounds.c | 38 ++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 11 deletions(-)

(limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c')

diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 7fb35d88c2fc..a4e221c87ded 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1113,17 +1113,33 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
 {
 	struct i915_wa_list *w = &engine->whitelist;
 
-	if (engine->class != RENDER_CLASS)
-		return;
-
-	/* WaAllowUMDToModifyHalfSliceChicken7:icl */
-	whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
-
-	/* WaAllowUMDToModifySamplerMode:icl */
-	whitelist_reg(w, GEN10_SAMPLER_MODE);
-
-	/* WaEnableStateCacheRedirectToCS:icl */
-	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+	switch (engine->class) {
+	case RENDER_CLASS:
+		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
+		whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+
+		/* WaAllowUMDToModifySamplerMode:icl */
+		whitelist_reg(w, GEN10_SAMPLER_MODE);
+
+		/* WaEnableStateCacheRedirectToCS:icl */
+		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+		break;
+
+	case VIDEO_DECODE_CLASS:
+		/* hucStatusRegOffset */
+		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
+				  RING_FORCE_TO_NONPRIV_RD);
+		/* hucUKernelHdrInfoRegOffset */
+		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
+				  RING_FORCE_TO_NONPRIV_RD);
+		/* hucStatus2RegOffset */
+		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
+				  RING_FORCE_TO_NONPRIV_RD);
+		break;
+
+	default:
+		break;
+	}
 }
 
 void intel_engine_init_whitelist(struct intel_engine_cs *engine)
-- cgit
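[Editor's note: the three HuC registers above are specified relative to engine->mmio_base, so the same build function serves any video-decode engine instance. A sketch of how the absolute offset resolves; the helper name is invented for illustration, and only _MMIO() and the 0x2000 offset come from the patch.]

	/* sketch: engine-relative register, resolved per engine instance */
	#define HUC_STATUS_SKETCH(base)	_MMIO((base) + 0x2000)

	/* e.g. i915_reg_t reg = HUC_STATUS_SKETCH(engine->mmio_base); */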
From 2f5309452dc044a133c36c6e75170eb5f7450088 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Tue, 18 Jun 2019 08:41:30 +0100
Subject: drm/i915: Stop passing I915_WAIT_LOCKED to i915_request_wait()

Since commit eb8d0f5af4ec ("drm/i915: Remove GPU reset dependence on
struct_mutex"), the I915_WAIT_LOCKED flag passed to i915_request_wait()
has been defunct. Now go ahead and remove it from all callers.

References: eb8d0f5af4ec ("drm/i915: Remove GPU reset dependence on struct_mutex")
Signed-off-by: Chris Wilson
Reviewed-by: Mika Kuoppala
Link: https://patchwork.freedesktop.org/patch/msgid/20190618074153.16055-3-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/i915/gt/intel_workarounds.c')

diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index a4e221c87ded..686df92b4866 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1435,7 +1435,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
 		goto err_vma;
 
 	i915_request_add(rq);
-	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 		err = -ETIME;
 		goto err_vma;
 	}
-- cgit
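[Editor's note: after this change, the wait in the verify path is a plain bounded, uninterruptible wait; behavioural flags other than the removed I915_WAIT_LOCKED remain available. A sketch of the contemporary call patterns; the interruptible variant is shown as context from the same-era API, not something this patch touches.]

	long timeout;

	/* bounded, uninterruptible: the engine_wa_list_verify() pattern */
	timeout = i915_request_wait(rq, 0, HZ / 5);

	/* interruptible, unbounded: still expressed through the flags word */
	timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);

	if (timeout < 0)
		return timeout; /* negative errno, e.g. -ETIME on expiry */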