391 files changed, 18444 insertions, 15450 deletions
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c index a79fa3b0c8ed..a1694d977ec9 100644 --- a/arch/arm/mach-u300/core.c +++ b/arch/arm/mach-u300/core.c @@ -201,7 +201,7 @@ static unsigned long pin_highz_conf[] = { }; /* Pin control settings */ -static struct pinctrl_map __initdata u300_pinmux_map[] = { +static const struct pinctrl_map u300_pinmux_map[] = { /* anonymous maps for chip power and EMIFs */ PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "power"), PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "emif0"), diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 895b73f23079..6d4a29e99ae2 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -115,6 +115,33 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc) } static struct drm_display_mode * +drm_connector_get_tiled_mode(struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + list_for_each_entry(mode, &connector->modes, head) { + if (mode->hdisplay == connector->tile_h_size && + mode->vdisplay == connector->tile_v_size) + return mode; + } + return NULL; +} + +static struct drm_display_mode * +drm_connector_fallback_non_tiled_mode(struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + list_for_each_entry(mode, &connector->modes, head) { + if (mode->hdisplay == connector->tile_h_size && + mode->vdisplay == connector->tile_v_size) + continue; + return mode; + } + return NULL; +} + +static struct drm_display_mode * drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height) { struct drm_display_mode *mode; @@ -348,8 +375,15 @@ static bool drm_client_target_preferred(struct drm_connector **connectors, struct drm_connector *connector; u64 conn_configured = 0; int tile_pass = 0; + int num_tiled_conns = 0; int i; + for (i = 0; i < connector_count; i++) { + if (connectors[i]->has_tile && + connectors[i]->status == connector_status_connected) + num_tiled_conns++; + } + retry: for (i = 0; i < connector_count; i++) { connector = connectors[i]; @@ -399,6 +433,28 @@ retry: list_for_each_entry(modes[i], &connector->modes, head) break; } + /* + * In case of tiled mode if all tiles not present fallback to + * first available non tiled mode. + * After all tiles are present, try to find the tiled mode + * for all and if tiled mode not present due to fbcon size + * limitations, use first non tiled mode only for + * tile 0,0 and set to no mode for all other tiles. + */ + if (connector->has_tile) { + if (num_tiled_conns < + connector->num_h_tile * connector->num_v_tile || + (connector->tile_h_loc == 0 && + connector->tile_v_loc == 0 && + !drm_connector_get_tiled_mode(connector))) { + DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n", + connector->base.id); + modes[i] = drm_connector_fallback_non_tiled_mode(connector); + } else { + modes[i] = drm_connector_get_tiled_mode(connector); + } + } + DRM_DEBUG_KMS("found mode %s\n", modes[i] ? 
modes[i]->name : "none"); conn_configured |= BIT_ULL(i); @@ -515,6 +571,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client, bool fallback = true, ret = true; int num_connectors_enabled = 0; int num_connectors_detected = 0; + int num_tiled_conns = 0; struct drm_modeset_acquire_ctx ctx; if (!drm_drv_uses_atomic_modeset(dev)) @@ -532,6 +589,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client, memcpy(save_enabled, enabled, count); mask = GENMASK(count - 1, 0); conn_configured = 0; + for (i = 0; i < count; i++) { + if (connectors[i]->has_tile && + connectors[i]->status == connector_status_connected) + num_tiled_conns++; + } retry: conn_seq = conn_configured; for (i = 0; i < count; i++) { @@ -631,6 +693,16 @@ retry: connector->name); modes[i] = &connector->state->crtc->mode; } + /* + * In case of tiled modes, if all tiles are not present + * then fallback to a non tiled mode. + */ + if (connector->has_tile && + num_tiled_conns < connector->num_h_tile * connector->num_v_tile) { + DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n", + connector->base.id); + modes[i] = drm_connector_fallback_non_tiled_mode(connector); + } crtcs[i] = new_crtc; DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n", diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index f8e905192608..57f510687b85 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1561,7 +1561,9 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, for (j = 0; j < mode_set->num_connectors; j++) { struct drm_connector *connector = mode_set->connectors[j]; - if (connector->has_tile) { + if (connector->has_tile && + desired_mode->hdisplay == connector->tile_h_size && + desired_mode->vdisplay == connector->tile_v_size) { lasth = (connector->tile_h_loc == (connector->num_h_tile - 1)); lastv = (connector->tile_v_loc == (connector->num_v_tile - 1)); /* cloning to multiple tiles is just crazy-talk, so: */ diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore new file mode 100644 index 000000000000..d9a77f3b59b2 --- /dev/null +++ b/drivers/gpu/drm/i915/.gitignore @@ -0,0 +1 @@ +*.hdrtest diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index b0c53661f62b..b8c5f8934dbd 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -31,9 +31,6 @@ CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init) subdir-ccflags-y += \ $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA) -# Extra header tests -header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h - subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! 
@@ -73,11 +70,12 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o # "Graphics Technology" (aka we talk to the gpu) -obj-y += gt/ gt-y += \ gt/debugfs_engines.o \ gt/debugfs_gt.o \ gt/debugfs_gt_pm.o \ + gt/gen6_ppgtt.o \ + gt/gen8_ppgtt.o \ gt/intel_breadcrumbs.o \ gt/intel_context.o \ gt/intel_engine_cs.o \ @@ -85,14 +83,17 @@ gt-y += \ gt/intel_engine_pm.o \ gt/intel_engine_pool.o \ gt/intel_engine_user.o \ + gt/intel_ggtt.o \ gt/intel_gt.o \ gt/intel_gt_irq.o \ gt/intel_gt_pm.o \ gt/intel_gt_pm_irq.o \ gt/intel_gt_requests.o \ + gt/intel_gtt.o \ gt/intel_llc.o \ gt/intel_lrc.o \ gt/intel_mocs.o \ + gt/intel_ppgtt.o \ gt/intel_rc6.o \ gt/intel_renderstate.o \ gt/intel_reset.o \ @@ -111,7 +112,6 @@ gt-y += \ i915-y += $(gt-y) # GEM (Graphics Execution Management) code -obj-y += gem/ gem-y += \ gem/i915_gem_busy.o \ gem/i915_gem_clflush.o \ @@ -157,7 +157,6 @@ i915-y += \ intel_wopcm.o # general-purpose microcontroller (GuC) support -obj-y += gt/uc/ i915-y += gt/uc/intel_uc.o \ gt/uc/intel_uc_fw.o \ gt/uc/intel_guc.o \ @@ -170,7 +169,6 @@ i915-y += gt/uc/intel_uc.o \ gt/uc/intel_huc_fw.o # modesetting core code -obj-y += display/ i915-y += \ display/intel_atomic.o \ display/intel_atomic_plane.o \ @@ -235,7 +233,6 @@ i915-y += \ display/vlv_dsi_pll.o # perf code -obj-y += oa/ i915-y += \ oa/i915_oa_hsw.o \ oa/i915_oa_bdw.o \ @@ -260,6 +257,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ gem/selftests/igt_gem_utils.o \ selftests/i915_random.o \ selftests/i915_selftest.o \ + selftests/igt_atomic.o \ selftests/igt_flush_test.o \ selftests/igt_live_test.o \ selftests/igt_mmap.o \ @@ -276,3 +274,27 @@ endif obj-$(CONFIG_DRM_I915) += i915.o obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o + +# header test + +# exclude some broken headers from the test coverage +no-header-test := \ + display/intel_vbt_defs.h \ + gvt/execlist.h \ + gvt/fb_decoder.h \ + gvt/gtt.h \ + gvt/gvt.h \ + gvt/interrupt.h \ + gvt/mmio_context.h \ + gvt/mpt.h \ + gvt/scheduler.h + +extra-$(CONFIG_DRM_I915_WERROR) += \ + $(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \ + $(shell cd $(srctree)/$(src) && find * -name '*.h'))) + +quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@) + cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@ + +$(obj)/%.hdrtest: $(src)/%.h FORCE + $(call if_changed_dep,hdrtest) diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile deleted file mode 100644 index 173c305d7866..000000000000 --- a/drivers/gpu/drm/i915/display/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# For building individual subdir files on the command line -subdir-ccflags-y += -I$(srctree)/$(src)/.. 
- -# Extra header tests -header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h -header-test- := intel_vbt_defs.h diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 006b1a297e6f..f8e882101396 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -77,7 +77,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port) static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; enum port port; enum transcoder dsi_trans; @@ -202,7 +202,7 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host, static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; u32 tmp; int lane; @@ -267,7 +267,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 dss_ctl1; dss_ctl1 = I915_READ(DSS_CTL1); @@ -306,7 +306,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, static int afe_clk(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int bpp; if (crtc_state->dsc.compression_enable) @@ -321,7 +321,7 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; int afe_clk_khz; u32 esc_clk_div_m; @@ -360,7 +360,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 tmp; @@ -376,7 +376,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; for_each_dsi_phy(phy, intel_dsi->phys) @@ -387,7 +387,7 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; u32 tmp; int lane; @@ -436,7 +436,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) static void 
gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; @@ -488,7 +488,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum port port; @@ -509,7 +509,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum port port; enum phy phy; @@ -575,7 +575,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; @@ -591,7 +591,7 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; @@ -608,7 +608,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy; u32 val; @@ -640,7 +640,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); enum pipe pipe = intel_crtc->pipe; u32 tmp; @@ -789,7 +789,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; enum port port; @@ -923,7 +923,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; u32 tmp; @@ -945,7 +945,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = 
to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; @@ -1026,7 +1026,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; enum port port; enum transcoder dsi_trans; @@ -1077,7 +1077,7 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); /* step3b */ gen11_dsi_map_pll(encoder, pipe_config); @@ -1104,7 +1104,7 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder, static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; u32 tmp; @@ -1126,7 +1126,7 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET); @@ -1139,7 +1139,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; u32 tmp; @@ -1180,7 +1180,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) static void gen11_dsi_disable_port(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum port port; @@ -1202,7 +1202,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 tmp; @@ -1229,7 +1229,7 @@ static void gen11_dsi_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); /* step1: turn off backlight */ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); @@ -1259,7 +1259,7 @@ static void gen11_dsi_post_disable(struct intel_encoder *encoder, intel_dsc_disable(old_crtc_state); - 
skylake_scaler_disable(old_crtc_state); + skl_scaler_disable(old_crtc_state); } static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector, @@ -1272,7 +1272,7 @@ static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector static void gen11_dsi_get_timings(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; @@ -1313,7 +1313,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_dsc_get_config(encoder, pipe_config); @@ -1417,7 +1417,8 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - get_dsi_io_power_domains(i915, enc_to_intel_dsi(&encoder->base)); + get_dsi_io_power_domains(i915, + enc_to_intel_dsi(encoder)); if (crtc_state->dsc.compression_enable) intel_display_power_get(i915, @@ -1428,7 +1429,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum transcoder dsi_trans; intel_wakeref_t wakeref; enum port port; diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index fd0026fc3618..c362eecdd414 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -37,6 +37,7 @@ #include "intel_atomic.h" #include "intel_display_types.h" #include "intel_hdcp.h" +#include "intel_psr.h" #include "intel_sprite.h" /** @@ -129,6 +130,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, struct drm_crtc_state *crtc_state; intel_hdcp_atomic_check(conn, old_state, new_state); + intel_psr_atomic_check(conn, old_state, new_state); if (!new_state->crtc) return 0; @@ -175,6 +177,38 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector) } /** + * intel_connector_needs_modeset - check if connector needs a modeset + */ +bool +intel_connector_needs_modeset(struct intel_atomic_state *state, + struct drm_connector *connector) +{ + const struct drm_connector_state *old_conn_state, *new_conn_state; + + old_conn_state = drm_atomic_get_old_connector_state(&state->base, connector); + new_conn_state = drm_atomic_get_new_connector_state(&state->base, connector); + + return old_conn_state->crtc != new_conn_state->crtc || + (new_conn_state->crtc && + drm_atomic_crtc_needs_modeset(drm_atomic_get_new_crtc_state(&state->base, + new_conn_state->crtc))); +} + +struct intel_digital_connector_state * +intel_atomic_get_digital_connector_state(struct intel_atomic_state *state, + struct intel_connector *connector) +{ + struct drm_connector_state *conn_state; + + conn_state = drm_atomic_get_connector_state(&state->base, + &connector->base); + if (IS_ERR(conn_state)) + return ERR_CAST(conn_state); + + return to_intel_digital_connector_state(conn_state); +} + +/** * intel_crtc_duplicate_state - duplicate crtc state * @crtc: drm crtc * diff --git 
a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h index 7b49623419ba..74c749dbfb4f 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.h +++ b/drivers/gpu/drm/i915/display/intel_atomic.h @@ -17,6 +17,7 @@ struct drm_device; struct drm_i915_private; struct drm_property; struct intel_atomic_state; +struct intel_connector; struct intel_crtc; struct intel_crtc_state; @@ -32,6 +33,11 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, struct drm_atomic_state *state); struct drm_connector_state * intel_digital_connector_duplicate_state(struct drm_connector *connector); +bool intel_connector_needs_modeset(struct intel_atomic_state *state, + struct drm_connector *connector); +struct intel_digital_connector_state * +intel_atomic_get_digital_connector_state(struct intel_atomic_state *state, + struct intel_connector *connector); struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc); void intel_crtc_destroy_state(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 27710098d056..b18040793d9e 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -707,8 +707,8 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, - connector->encoder->base.id, - connector->encoder->name); + encoder->base.base.id, + encoder->base.name); connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; @@ -856,7 +856,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) } /* Force CDCLK to 2*BCLK as long as we need audio powered. */ - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, true); if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) @@ -875,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev, /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. 
*/ if (--dev_priv->audio_power_refcount == 0) - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, false); intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie); diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index dcb66a33be9b..b228671d5a5d 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -486,3 +486,8 @@ int intel_bw_init(struct drm_i915_private *dev_priv) return 0; } + +void intel_bw_cleanup(struct drm_i915_private *dev_priv) +{ + drm_atomic_private_obj_fini(&dev_priv->bw_obj); +} diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index 9db10af012f4..20b9ad241802 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -25,6 +25,7 @@ struct intel_bw_state { void intel_bw_init_hw(struct drm_i915_private *dev_priv); int intel_bw_init(struct drm_i915_private *dev_priv); +void intel_bw_cleanup(struct drm_i915_private *dev_priv); int intel_bw_atomic_check(struct intel_atomic_state *state); void intel_bw_crtc_update(struct intel_bw_state *bw_state, const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 7d1ab1e5b7c3..0ce5926006ca 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2004,6 +2004,18 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) /* Account for additional needs from the planes */ min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk); + /* + * HACK. Currently for TGL platforms we calculate + * min_cdclk initially based on pixel_rate divided + * by 2, accounting for also plane requirements, + * however in some cases the lowest possible CDCLK + * doesn't work and causing the underruns. + * Explicitly stating here that this seems to be currently + * rather a Hack, than final solution. + */ + if (IS_TIGERLAKE(dev_priv)) + min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate); + if (min_cdclk > dev_priv->max_cdclk_freq) { DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n", min_cdclk, dev_priv->max_cdclk_freq); diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index b2b1336ecdb6..f976b800b245 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -65,7 +65,7 @@ static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) return container_of(encoder, struct intel_crt, base); } -static struct intel_crt *intel_attached_crt(struct drm_connector *connector) +static struct intel_crt *intel_attached_crt(struct intel_connector *connector) { return intel_encoder_to_crt(intel_attached_encoder(connector)); } @@ -247,7 +247,7 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder, intel_ddi_disable_transcoder_func(old_crtc_state); - ironlake_pfit_disable(old_crtc_state); + ilk_pfit_disable(old_crtc_state); intel_ddi_disable_pipe_clock(old_crtc_state); @@ -351,7 +351,7 @@ intel_crt_mode_valid(struct drm_connector *connector, /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. 
*/ if (HAS_PCH_LPT(dev_priv) && - (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2)) + ilk_get_lanes_required(mode->clock, 270000, 24) > 2) return MODE_CLOCK_HIGH; /* HSW/BDW FDI limited to 4k */ @@ -427,10 +427,10 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder, return 0; } -static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) +static bool ilk_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct intel_crt *crt = intel_attached_crt(connector); + struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(dev); u32 adpa; bool ret; @@ -440,7 +440,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) bool turn_off_dac = HAS_PCH_SPLIT(dev_priv); u32 save_adpa; - crt->force_hotplug_required = 0; + crt->force_hotplug_required = false; save_adpa = adpa = I915_READ(crt->adpa_reg); DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); @@ -477,7 +477,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct intel_crt *crt = intel_attached_crt(connector); + struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(dev); bool reenable_hpd; u32 adpa; @@ -535,7 +535,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) int i, tries = 0; if (HAS_PCH_SPLIT(dev_priv)) - return intel_ironlake_crt_detect_hotplug(connector); + return ilk_crt_detect_hotplug(connector); if (IS_VALLEYVIEW(dev_priv)) return valleyview_crt_detect_hotplug(connector); @@ -609,7 +609,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector, static bool intel_crt_detect_ddc(struct drm_connector *connector) { - struct intel_crt *crt = intel_attached_crt(connector); + struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); struct edid *edid; struct i2c_adapter *i2c; @@ -795,7 +795,7 @@ intel_crt_detect(struct drm_connector *connector, bool force) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_crt *crt = intel_attached_crt(connector); + struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct intel_encoder *intel_encoder = &crt->base; intel_wakeref_t wakeref; int status, ret; @@ -886,7 +886,7 @@ static int intel_crt_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crt *crt = intel_attached_crt(connector); + struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct intel_encoder *intel_encoder = &crt->base; intel_wakeref_t wakeref; struct i2c_adapter *i2c; @@ -925,7 +925,7 @@ void intel_crt_reset(struct drm_encoder *encoder) POSTING_READ(crt->adpa_reg); DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa); - crt->force_hotplug_required = 1; + crt->force_hotplug_required = true; } } @@ -1063,7 +1063,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv) /* * Configure the automatic hotplug detection stuff */ - crt->force_hotplug_required = 0; + crt->force_hotplug_required = false; /* * TODO: find a proper way to discover whether we need to set the the diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c 
b/drivers/gpu/drm/i915/display/intel_ddi.c index c9ba7d7f3787..33f1dc3d7c1a 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -34,6 +34,7 @@ #include "intel_ddi.h" #include "intel_display_types.h" #include "intel_dp.h" +#include "intel_dp_mst.h" #include "intel_dp_link_training.h" #include "intel_dpio_phy.h" #include "intel_dsi.h" @@ -1237,9 +1238,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *intel_dig_port = - enc_to_dig_port(&encoder->base); + enc_to_dig_port(encoder); intel_dp->DP = intel_dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0); @@ -1899,8 +1900,13 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) temp |= TRANS_DDI_MODE_SELECT_DP_MST; temp |= DDI_PORT_WIDTH(crtc_state->lane_count); - if (INTEL_GEN(dev_priv) >= 12) - temp |= TRANS_DDI_MST_TRANSPORT_SELECT(crtc_state->cpu_transcoder); + if (INTEL_GEN(dev_priv) >= 12) { + enum transcoder master; + + master = crtc_state->mst_master_transcoder; + WARN_ON(master == INVALID_TRANSCODER); + temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master); + } } else { temp |= TRANS_DDI_MODE_SELECT_DP_SST; temp |= DDI_PORT_WIDTH(crtc_state->lane_count); @@ -1944,17 +1950,18 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); - u32 val = I915_READ(reg); + u32 val; + + val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + val &= ~TRANS_DDI_FUNC_ENABLE; if (INTEL_GEN(dev_priv) >= 12) { - val &= ~(TRANS_DDI_FUNC_ENABLE | TGL_TRANS_DDI_PORT_MASK | - TRANS_DDI_DP_VC_PAYLOAD_ALLOC); + if (!intel_dp_mst_is_master_trans(crtc_state)) + val &= ~TGL_TRANS_DDI_PORT_MASK; } else { - val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | - TRANS_DDI_DP_VC_PAYLOAD_ALLOC); + val &= ~TRANS_DDI_PORT_MASK; } - I915_WRITE(reg, val); + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val); if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { @@ -2217,7 +2224,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) return; - dig_port = enc_to_dig_port(&encoder->base); + dig_port = enc_to_dig_port(encoder); intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); /* @@ -2287,7 +2294,7 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, static void skl_ddi_set_iboost(struct intel_encoder *encoder, int level, enum intel_output_type type) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; u8 iboost; @@ -2358,7 +2365,7 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = 
enc_to_intel_dp(encoder); enum port port = encoder->port; enum phy phy = intel_port_to_phy(dev_priv, port); int n_entries; @@ -2497,7 +2504,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, width = 4; rate = 0; /* Rate is always < than 6GHz for HDMI */ } else { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); width = intel_dp->lane_count; rate = intel_dp->link_rate; @@ -2623,7 +2630,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, width = 4; /* Rate is always < than 6GHz for HDMI */ } else { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); width = intel_dp->lane_count; rate = intel_dp->link_rate; @@ -3161,57 +3168,6 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) } static void -icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable) -{ - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - u32 val, bits; - int ln; - - if (tc_port == PORT_TC_NONE) - return; - - bits = MG_DP_MODE_CFG_TR2PWR_GATING | MG_DP_MODE_CFG_TRPWR_GATING | - MG_DP_MODE_CFG_CLNPWR_GATING | MG_DP_MODE_CFG_DIGPWR_GATING | - MG_DP_MODE_CFG_GAONPWR_GATING; - - for (ln = 0; ln < 2; ln++) { - if (INTEL_GEN(dev_priv) >= 12) { - I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); - val = I915_READ(DKL_DP_MODE(tc_port)); - } else { - val = I915_READ(MG_DP_MODE(ln, tc_port)); - } - - if (enable) - val |= bits; - else - val &= ~bits; - - if (INTEL_GEN(dev_priv) >= 12) - I915_WRITE(DKL_DP_MODE(tc_port), val); - else - I915_WRITE(MG_DP_MODE(ln, tc_port), val); - } - - if (INTEL_GEN(dev_priv) == 11) { - bits = MG_MISC_SUS0_CFG_TR2PWR_GATING | - MG_MISC_SUS0_CFG_CL2PWR_GATING | - MG_MISC_SUS0_CFG_GAONPWR_GATING | - MG_MISC_SUS0_CFG_TRPWR_GATING | - MG_MISC_SUS0_CFG_CL1PWR_GATING | - MG_MISC_SUS0_CFG_DGPWR_GATING; - - val = I915_READ(MG_MISC_SUS0(tc_port)); - if (enable) - val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3)); - else - val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK); - I915_WRITE(MG_MISC_SUS0(tc_port), val); - } -} - -static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port, const struct intel_crtc_state *crtc_state) { @@ -3317,7 +3273,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, if (!crtc_state->fec_enable) return; - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); val = I915_READ(intel_dp->regs.dp_tp_ctl); val |= DP_TP_CTL_FEC_ENABLE; I915_WRITE(intel_dp->regs.dp_tp_ctl, val); @@ -3337,7 +3293,7 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, if (!crtc_state->fec_enable) return; - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); val = I915_READ(intel_dp->regs.dp_tp_ctl); val &= ~DP_TP_CTL_FEC_ENABLE; I915_WRITE(intel_dp->regs.dp_tp_ctl, val); @@ -3428,10 +3384,10 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct 
intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); int level = intel_ddi_dp_level(intel_dp); enum transcoder transcoder = crtc_state->cpu_transcoder; @@ -3458,14 +3414,14 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, * (DFLEXDPSP.DPX4TXLATC) * * This was done before tgl_ddi_pre_enable_dp by - * haswell_crtc_enable()->intel_encoders_pre_pll_enable(). + * hsw_crtc_enable()->intel_encoders_pre_pll_enable(). */ /* * 4. Enable the port PLL. * * The PLL enabling itself was already done before this function by - * haswell_crtc_enable()->intel_enable_shared_dpll(). We need only + * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only * configure the PLL to port mapping here. */ intel_ddi_clk_select(encoder, crtc_state); @@ -3509,12 +3465,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, * down this function. */ - /* - * 7.d Type C with DP alternate or fixed/legacy/static connection - - * Disable PHY clock gating per Type-C DDI Buffer page - */ - icl_phy_set_clock_gating(dig_port, false); - /* 7.e Configure voltage swing and related IO settings */ tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, encoder->type); @@ -3566,15 +3516,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, if (!is_trans_port_sync_mode(crtc_state)) intel_dp_stop_link_train(intel_dp); - /* - * TODO: enable clock gating - * - * It is not written in DP enabling sequence but "PHY Clockgating - * programming" states that clock gating should be enabled after the - * link training but doing so causes all the following trainings to fail - * so not enabling it for now. - */ - /* 7.l Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); intel_dsc_enable(encoder, crtc_state); @@ -3584,15 +3525,18 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; enum phy phy = intel_port_to_phy(dev_priv, port); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); int level = intel_ddi_dp_level(intel_dp); - WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); + if (INTEL_GEN(dev_priv) < 11) + WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); + else + WARN_ON(is_mst && port == PORT_A); intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count, is_mst); @@ -3610,7 +3554,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, dig_port->ddi_io_power_domain); icl_program_mg_dp_mode(dig_port, crtc_state); - icl_phy_set_clock_gating(dig_port, false); if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, @@ -3644,8 +3587,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_ddi_enable_fec(encoder, crtc_state); - icl_phy_set_clock_gating(dig_port, true); - if (!is_mst) intel_ddi_enable_pipe_clock(crtc_state); @@ -3674,12 +3615,12 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct 
intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; int level = intel_ddi_hdmi_level(dev_priv, port); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); intel_ddi_clk_select(encoder, crtc_state); @@ -3687,7 +3628,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); icl_program_mg_dp_mode(dig_port, crtc_state); - icl_phy_set_clock_gating(dig_port, false); if (INTEL_GEN(dev_priv) >= 12) tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, @@ -3702,8 +3642,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, else intel_prepare_hdmi_ddi_buffers(encoder, level); - icl_phy_set_clock_gating(dig_port, true); - if (IS_GEN9_BC(dev_priv)) skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI); @@ -3746,12 +3684,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder, intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state); } else { struct intel_lspcon *lspcon = - enc_to_intel_lspcon(&encoder->base); + enc_to_intel_lspcon(encoder); intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state); if (lspcon->active) { struct intel_digital_port *dig_port = - enc_to_dig_port(&encoder->base); + enc_to_dig_port(encoder); dig_port->set_infoframes(encoder, crtc_state->has_infoframe, @@ -3776,7 +3714,7 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, } if (intel_crtc_has_dp_encoder(crtc_state)) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); val = I915_READ(intel_dp->regs.dp_tp_ctl); val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); @@ -3796,7 +3734,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = &dig_port->dp; bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST); @@ -3808,8 +3746,19 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, */ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); - if (INTEL_GEN(dev_priv) < 12 && !is_mst) - intel_ddi_disable_pipe_clock(old_crtc_state); + if (INTEL_GEN(dev_priv) >= 12) { + if (is_mst) { + enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; + u32 val; + + val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + val &= ~TGL_TRANS_DDI_PORT_MASK; + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val); + } + } else { + if (!is_mst) + intel_ddi_disable_pipe_clock(old_crtc_state); + } intel_disable_ddi_buf(encoder, old_crtc_state); @@ -3838,7 +3787,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; 
dig_port->set_infoframes(encoder, false, @@ -3860,8 +3809,6 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_ { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - i915_reg_t reg; - u32 trans_ddi_func_ctl2_val; if (old_crtc_state->master_transcoder == INVALID_TRANSCODER) return; @@ -3869,10 +3816,7 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_ DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n", transcoder_name(old_crtc_state->cpu_transcoder)); - reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder); - trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE | - PORT_SYNC_MODE_MASTER_SELECT_MASK); - I915_WRITE(reg, trans_ddi_func_ctl2_val); + I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0); } static void intel_ddi_post_disable(struct intel_encoder *encoder, @@ -3880,25 +3824,27 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); bool is_tc_port = intel_phy_is_tc(dev_priv, phy); - intel_crtc_vblank_off(old_crtc_state); + if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) { + intel_crtc_vblank_off(old_crtc_state); - intel_disable_pipe(old_crtc_state); + intel_disable_pipe(old_crtc_state); - if (INTEL_GEN(dev_priv) >= 11) - icl_disable_transcoder_port_sync(old_crtc_state); + if (INTEL_GEN(dev_priv) >= 11) + icl_disable_transcoder_port_sync(old_crtc_state); - intel_ddi_disable_transcoder_func(old_crtc_state); + intel_ddi_disable_transcoder_func(old_crtc_state); - intel_dsc_disable(old_crtc_state); + intel_dsc_disable(old_crtc_state); - if (INTEL_GEN(dev_priv) >= 9) - skylake_scaler_disable(old_crtc_state); - else - ironlake_pfit_disable(old_crtc_state); + if (INTEL_GEN(dev_priv) >= 9) + skl_scaler_disable(old_crtc_state); + else + ilk_pfit_disable(old_crtc_state); + } /* * When called from DP MST code: @@ -3970,7 +3916,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; if (port == PORT_A && INTEL_GEN(dev_priv) < 9) @@ -4011,7 +3957,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_connector *connector = conn_state->connector; enum port port = encoder->port; @@ -4088,7 +4034,7 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_dp->link_trained = false; @@ -4136,7 +4082,7 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const 
struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_ddi_set_dp_msa(crtc_state, conn_state); @@ -4200,7 +4146,8 @@ intel_ddi_update_prepare(struct intel_atomic_state *state, WARN_ON(crtc && crtc->active); - intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes); + intel_tc_port_get_link(enc_to_dig_port(encoder), + required_lanes); if (crtc_state && crtc_state->hw.active) intel_update_active_dpll(state, crtc, encoder); } @@ -4210,7 +4157,7 @@ intel_ddi_update_complete(struct intel_atomic_state *state, struct intel_encoder *encoder, struct intel_crtc *crtc) { - intel_tc_port_put_link(enc_to_dig_port(&encoder->base)); + intel_tc_port_put_link(enc_to_dig_port(encoder)); } static void @@ -4219,7 +4166,7 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); bool is_tc_port = intel_phy_is_tc(dev_priv, phy); @@ -4405,6 +4352,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST); pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; + + if (INTEL_GEN(dev_priv) >= 12) + pipe_config->mst_master_transcoder = + REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp); + intel_dp_get_m_n(intel_crtc, pipe_config); break; default: @@ -4518,7 +4470,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) { - struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); intel_dp_encoder_flush_work(encoder); @@ -4585,7 +4537,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_hdmi *hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder); struct intel_connector *connector = hdmi->attached_connector; struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); @@ -4657,7 +4609,7 @@ intel_ddi_hotplug(struct intel_encoder *encoder, struct intel_connector *connector, bool irq_received) { - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_modeset_acquire_ctx ctx; enum intel_hotplug_state state; int ret; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 1860da0a493e..19ea842cfd84 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -46,6 +46,7 @@ #include "display/intel_crt.h" #include "display/intel_ddi.h" #include "display/intel_dp.h" +#include "display/intel_dp_mst.h" #include "display/intel_dsi.h" #include "display/intel_dvo.h" #include "display/intel_gmbus.h" @@ -145,8 +146,8 @@ static const u64 cursor_format_modifiers[] = { static void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); -static void ironlake_pch_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state 
*pipe_config); +static void ilk_pch_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config); static int intel_framebuffer_init(struct intel_framebuffer *ifb, struct drm_i915_gem_object *obj, @@ -157,15 +158,15 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta const struct intel_link_m_n *m_n, const struct intel_link_m_n *m2_n2); static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); -static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state); -static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state); +static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state); +static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state); static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state); static void vlv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); static void chv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); -static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state); -static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state); +static void skl_pfit_enable(const struct intel_crtc_state *crtc_state); +static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); static void intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc); @@ -369,7 +370,7 @@ static const struct intel_limit intel_limits_g4x_dual_channel_lvds = { }, }; -static const struct intel_limit intel_limits_pineview_sdvo = { +static const struct intel_limit pnv_limits_sdvo = { .dot = { .min = 20000, .max = 400000}, .vco = { .min = 1700000, .max = 3500000 }, /* Pineview's Ncounter is a ring counter */ @@ -384,7 +385,7 @@ static const struct intel_limit intel_limits_pineview_sdvo = { .p2_slow = 10, .p2_fast = 5 }, }; -static const struct intel_limit intel_limits_pineview_lvds = { +static const struct intel_limit pnv_limits_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1700000, .max = 3500000 }, .n = { .min = 3, .max = 6 }, @@ -402,7 +403,7 @@ static const struct intel_limit intel_limits_pineview_lvds = { * We calculate clock using (register_value + 2) for N/M1/M2, so here * the range value for them is (actual_value - 2). */ -static const struct intel_limit intel_limits_ironlake_dac = { +static const struct intel_limit ilk_limits_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 5 }, @@ -415,7 +416,7 @@ static const struct intel_limit intel_limits_ironlake_dac = { .p2_slow = 10, .p2_fast = 5 }, }; -static const struct intel_limit intel_limits_ironlake_single_lvds = { +static const struct intel_limit ilk_limits_single_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -428,7 +429,7 @@ static const struct intel_limit intel_limits_ironlake_single_lvds = { .p2_slow = 14, .p2_fast = 14 }, }; -static const struct intel_limit intel_limits_ironlake_dual_lvds = { +static const struct intel_limit ilk_limits_dual_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -442,7 +443,7 @@ static const struct intel_limit intel_limits_ironlake_dual_lvds = { }; /* LVDS 100mhz refclk limits. 
*/ -static const struct intel_limit intel_limits_ironlake_single_lvds_100m = { +static const struct intel_limit ilk_limits_single_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 2 }, @@ -455,7 +456,7 @@ static const struct intel_limit intel_limits_ironlake_single_lvds_100m = { .p2_slow = 14, .p2_fast = 14 }, }; -static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = { +static const struct intel_limit ilk_limits_dual_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -553,13 +554,6 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) } static bool -is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) -{ - return (crtc_state->master_transcoder == INVALID_TRANSCODER && - crtc_state->sync_mode_slaves_mask); -} - -static bool is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) { return crtc_state->master_transcoder != INVALID_TRANSCODER; @@ -1637,7 +1631,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, I915_READ(dpll_reg) & port_mask, expected_mask); } -static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) +static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1735,8 +1729,8 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, DRM_ERROR("Failed to enable PCH transcoder\n"); } -static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) +static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe) { i915_reg_t reg; u32 val; @@ -1944,7 +1938,9 @@ static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) static bool is_gen12_ccs_modifier(u64 modifier) { - return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS; + return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; + } static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) @@ -1977,8 +1973,7 @@ static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) } /* Return either the main plane's CCS or - if not a CCS FB - UV plane */ -static int -intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) +int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) { if (is_ccs_modifier(fb->modifier)) return main_to_ccs_plane(fb, main_plane); @@ -1994,6 +1989,13 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, info->num_planes == (is_ccs_modifier(modifier) ? 
4 : 2); } +static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, + int color_plane) +{ + return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && + color_plane == 1; +} + static unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) { @@ -2013,6 +2015,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) return 128; /* fall through */ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: if (is_ccs_plane(fb, color_plane)) return 64; /* fall through */ @@ -2068,6 +2071,16 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, *tile_height = intel_tile_height(fb, color_plane); } +static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, + int color_plane) +{ + unsigned int tile_width, tile_height; + + intel_tile_dims(fb, color_plane, &tile_width, &tile_height); + + return fb->pitches[color_plane] * tile_height; +} + unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, int color_plane, unsigned int height) @@ -2142,7 +2155,8 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, struct drm_i915_private *dev_priv = to_i915(fb->dev); /* AUX_DIST needs only 4K alignment */ - if (is_aux_plane(fb, color_plane)) + if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) || + is_ccs_plane(fb, color_plane)) return 4096; switch (fb->modifier) { @@ -2152,11 +2166,19 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, if (INTEL_GEN(dev_priv) >= 9) return 256 * 1024; return 0; + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + if (is_semiplanar_uv_plane(fb, color_plane)) + return intel_tile_row_size(fb, color_plane); + /* Fall-through */ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: return 16 * 1024; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: case I915_FORMAT_MOD_Y_TILED: + if (INTEL_GEN(dev_priv) >= 12 && + is_semiplanar_uv_plane(fb, color_plane)) + return intel_tile_row_size(fb, color_plane); + /* Fall-through */ case I915_FORMAT_MOD_Yf_TILED: return 1 * 1024 * 1024; default: @@ -2193,6 +2215,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, return ERR_PTR(-EINVAL); alignment = intel_surf_alignment(fb, 0); + if (WARN_ON(alignment && !is_power_of_2(alignment))) + return ERR_PTR(-EINVAL); /* Note that the w/a also requires 64 PTE of padding following the * bo. 
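For the new is_semiplanar_uv_plane()/intel_tile_row_size() helpers above, a stand-alone sketch (not driver code, example numbers only) of the quantity involved: one row of tiles is the plane's pitch times the tile height, and that product is generally not a power of two, unlike the fixed 4K/16K/1M alignments.

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned int n)
{
	return n && !(n & (n - 1));
}

/* One full row of tiles: framebuffer pitch times tile height. */
static unsigned int tile_row_size(unsigned int pitch_bytes,
				  unsigned int tile_height)
{
	return pitch_bytes * tile_height;
}

int main(void)
{
	/* Example numbers only: a 3840-byte pitch with 32-row tiles. */
	unsigned int align = tile_row_size(3840, 32);

	printf("alignment %u bytes, power of two: %s\n",
	       align, is_power_of_2(align) ? "yes" : "no");
	return 0;
}
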
We currently fill all unused PTE with the shadow page and so @@ -2431,9 +2455,6 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, unsigned int cpp = fb->format->cpp[color_plane]; u32 offset, offset_aligned; - if (alignment) - alignment--; - if (!is_surface_linear(fb, color_plane)) { unsigned int tile_size, tile_width, tile_height; unsigned int tile_rows, tiles, pitch_tiles; @@ -2455,17 +2476,24 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, *x %= tile_width; offset = (tile_rows * pitch_tiles + tiles) * tile_size; - offset_aligned = offset & ~alignment; + + offset_aligned = offset; + if (alignment) + offset_aligned = rounddown(offset_aligned, alignment); intel_adjust_tile_offset(x, y, tile_width, tile_height, tile_size, pitch_tiles, offset, offset_aligned); } else { offset = *y * pitch + *x * cpp; - offset_aligned = offset & ~alignment; - - *y = (offset & alignment) / pitch; - *x = ((offset & alignment) - *y * pitch) / cpp; + offset_aligned = offset; + if (alignment) { + offset_aligned = rounddown(offset_aligned, alignment); + *y = (offset % alignment) / pitch; + *x = ((offset % alignment) - *y * pitch) / cpp; + } else { + *y = *x = 0; + } } return offset_aligned; @@ -2498,9 +2526,17 @@ static int intel_fb_offset_to_xy(int *x, int *y, { struct drm_i915_private *dev_priv = to_i915(fb->dev); unsigned int height; + u32 alignment; - if (fb->modifier != DRM_FORMAT_MOD_LINEAR && - fb->offsets[color_plane] % intel_tile_size(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 12 && + is_semiplanar_uv_plane(fb, color_plane)) + alignment = intel_tile_row_size(fb, color_plane); + else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) + alignment = intel_tile_size(dev_priv); + else + alignment = 0; + + if (alignment != 0 && fb->offsets[color_plane] % alignment) { DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n", fb->offsets[color_plane], color_plane); return -EINVAL; @@ -2537,6 +2573,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: return I915_TILING_Y; default: return I915_TILING_NONE; @@ -2588,6 +2625,30 @@ static const struct drm_format_info gen12_ccs_formats[] = { { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_YUYV, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVYU, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_UYVY, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VYUY, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV12, .num_planes = 4, + .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P010, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P012, .num_planes = 4, + 
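The offset-alignment rework above drops the power-of-two mask in favour of rounddown()/modulo. A stand-alone sketch of why (not driver code): masking with alignment - 1 only rounds down correctly for power-of-two alignments, while a tile-row alignment such as the 122880 bytes from the previous example is not one.

#include <stdio.h>

/* Same result as the kernel's rounddown() for these unsigned values. */
#define rounddown(x, y)	((x) - ((x) % (y)))

int main(void)
{
	unsigned int offset = 200000;
	unsigned int pow2 = 4096;	/* classic AUX alignment */
	unsigned int row = 122880;	/* tile-row alignment, not a power of two */

	/* Both agree when the alignment is a power of two... */
	printf("pow2: mask %u, rounddown %u\n",
	       offset & ~(pow2 - 1), rounddown(offset, pow2));
	/* ...but the mask returns 131072, which is not a multiple of 122880. */
	printf("row:  mask %u, rounddown %u\n",
	       offset & ~(row - 1), rounddown(offset, row));
	return 0;
}
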
.char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P016, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, }; static const struct drm_format_info * @@ -2614,6 +2675,7 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) ARRAY_SIZE(skl_ccs_formats), cmd->pixel_format); case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: return lookup_format_info(gen12_ccs_formats, ARRAY_SIZE(gen12_ccs_formats), cmd->pixel_format); @@ -2625,6 +2687,7 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) bool is_ccs_modifier(u64 modifier) { return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || modifier == I915_FORMAT_MOD_Y_TILED_CCS || modifier == I915_FORMAT_MOD_Yf_TILED_CCS; } @@ -2698,7 +2761,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) } tile_width = intel_tile_width_bytes(fb, color_plane); - if (is_ccs_modifier(fb->modifier) && color_plane == 0) { + if (is_ccs_modifier(fb->modifier)) { /* * Display WA #0531: skl,bxt,kbl,glk * @@ -2708,7 +2771,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) * require the entire fb to accommodate that to avoid * potential runtime errors at plane configuration time. */ - if (IS_GEN(dev_priv, 9) && fb->width > 3840) + if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840) tile_width *= 4; /* * The main surface pitch must be padded to a multiple of four @@ -2876,11 +2939,15 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y) static void intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane) { + int main_plane = is_ccs_plane(fb, color_plane) ? + ccs_to_main_plane(fb, color_plane) : 0; + int main_hsub, main_vsub; int hsub, vsub; + intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane); intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane); - *w = fb->width / hsub; - *h = fb->height / vsub; + *w = fb->width / main_hsub / hsub; + *h = fb->height / main_vsub / vsub; } /* @@ -3598,6 +3665,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, return 5120; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: /* FIXME AUX plane? 
*/ case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: @@ -3656,11 +3724,12 @@ static int icl_max_plane_height(void) return 4320; } -static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, - int main_x, int main_y, u32 main_offset) +static bool +skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, + int main_x, int main_y, u32 main_offset, + int ccs_plane) { const struct drm_framebuffer *fb = plane_state->hw.fb; - int ccs_plane = main_to_ccs_plane(fb, 0); int aux_x = plane_state->color_plane[ccs_plane].x; int aux_y = plane_state->color_plane[ccs_plane].y; u32 aux_offset = plane_state->color_plane[ccs_plane].offset; @@ -3737,6 +3806,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) intel_add_fb_offsets(&x, &y, plane_state, 0); offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0); alignment = intel_surf_alignment(fb, 0); + if (WARN_ON(alignment && !is_power_of_2(alignment))) + return -EINVAL; /* * AUX surface offset is specified as the distance from the @@ -3772,7 +3843,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) * they match with the main surface x/y offsets. */ if (is_ccs_modifier(fb->modifier)) { - while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) { + while (!skl_check_main_ccs_coordinates(plane_state, x, y, + offset, aux_plane)) { if (offset == 0) break; @@ -3805,7 +3877,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; - int max_width = skl_max_plane_width(fb, 1, rotation); + int uv_plane = 1; + int max_width = skl_max_plane_width(fb, uv_plane, rotation); int max_height = 4096; int x = plane_state->uapi.src.x1 >> 17; int y = plane_state->uapi.src.y1 >> 17; @@ -3813,8 +3886,9 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) int h = drm_rect_height(&plane_state->uapi.src) >> 17; u32 offset; - intel_add_fb_offsets(&x, &y, plane_state, 1); - offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1); + intel_add_fb_offsets(&x, &y, plane_state, uv_plane); + offset = intel_plane_compute_aligned_offset(&x, &y, + plane_state, uv_plane); /* FIXME not quite sure how/if these apply to the chroma plane */ if (w > max_width || h > max_height) { @@ -3823,9 +3897,39 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) return -EINVAL; } - plane_state->color_plane[1].offset = offset; - plane_state->color_plane[1].x = x; - plane_state->color_plane[1].y = y; + if (is_ccs_modifier(fb->modifier)) { + int ccs_plane = main_to_ccs_plane(fb, uv_plane); + int aux_offset = plane_state->color_plane[ccs_plane].offset; + int alignment = intel_surf_alignment(fb, uv_plane); + + if (offset > aux_offset) + offset = intel_plane_adjust_aligned_offset(&x, &y, + plane_state, + uv_plane, + offset, + aux_offset & ~(alignment - 1)); + + while (!skl_check_main_ccs_coordinates(plane_state, x, y, + offset, ccs_plane)) { + if (offset == 0) + break; + + offset = intel_plane_adjust_aligned_offset(&x, &y, + plane_state, + uv_plane, + offset, offset - alignment); + } + + if (x != plane_state->color_plane[ccs_plane].x || + y != plane_state->color_plane[ccs_plane].y) { + DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n"); + return -EINVAL; + } + } + + plane_state->color_plane[uv_plane].offset = offset; + plane_state->color_plane[uv_plane].x = x; + 
plane_state->color_plane[uv_plane].y = y; return 0; } @@ -3835,21 +3939,40 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) const struct drm_framebuffer *fb = plane_state->hw.fb; int src_x = plane_state->uapi.src.x1 >> 16; int src_y = plane_state->uapi.src.y1 >> 16; - int hsub; - int vsub; - int x; - int y; u32 offset; + int ccs_plane; + + for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) { + int main_hsub, main_vsub; + int hsub, vsub; + int x, y; - intel_fb_plane_get_subsampling(&hsub, &vsub, fb, 1); - x = src_x / hsub; - y = src_y / vsub; - intel_add_fb_offsets(&x, &y, plane_state, 1); - offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1); + if (!is_ccs_plane(fb, ccs_plane)) + continue; + + intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, + ccs_to_main_plane(fb, ccs_plane)); + intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); - plane_state->color_plane[1].offset = offset; - plane_state->color_plane[1].x = x * hsub + src_x % hsub; - plane_state->color_plane[1].y = y * vsub + src_y % vsub; + hsub *= main_hsub; + vsub *= main_vsub; + x = src_x / hsub; + y = src_y / vsub; + + intel_add_fb_offsets(&x, &y, plane_state, ccs_plane); + + offset = intel_plane_compute_aligned_offset(&x, &y, + plane_state, + ccs_plane); + + plane_state->color_plane[ccs_plane].offset = offset; + plane_state->color_plane[ccs_plane].x = (x * hsub + + src_x % hsub) / + main_hsub; + plane_state->color_plane[ccs_plane].y = (y * vsub + + src_y % vsub) / + main_vsub; + } return 0; } @@ -3858,6 +3981,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; int ret; + bool needs_aux = false; ret = intel_plane_compute_gtt(plane_state); if (ret) @@ -3867,22 +3991,32 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) return 0; /* - * Handle the AUX surface first since - * the main surface setup depends on it. + * Handle the AUX surface first since the main surface setup depends on + * it. */ + if (is_ccs_modifier(fb->modifier)) { + needs_aux = true; + ret = skl_check_ccs_aux_surface(plane_state); + if (ret) + return ret; + } + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) { + needs_aux = true; ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; - } else if (is_ccs_modifier(fb->modifier)) { - ret = skl_check_ccs_aux_surface(plane_state); - if (ret) - return ret; - } else { - plane_state->color_plane[1].offset = ~0xfff; - plane_state->color_plane[1].x = 0; - plane_state->color_plane[1].y = 0; + } + + if (!needs_aux) { + int i; + + for (i = 1; i < fb->format->num_planes; i++) { + plane_state->color_plane[i].offset = ~0xfff; + plane_state->color_plane[i].x = 0; + plane_state->color_plane[i].y = 0; + } } ret = skl_check_main_surface(plane_state); @@ -4472,6 +4606,8 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier) return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE | PLANE_CTL_CLEAR_COLOR_DISABLE; + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_Yf_TILED: return PLANE_CTL_TILED_YF; case I915_FORMAT_MOD_Yf_TILED_CCS: @@ -4869,8 +5005,8 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc) } /* The FDI link training functions for ILK/Ibexpeak. 
*/ -static void ironlake_fdi_link_train(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state) +static void ilk_fdi_link_train(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -5222,7 +5358,7 @@ train_done: DRM_DEBUG_KMS("FDI train done.\n"); } -static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) +static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); @@ -5259,7 +5395,7 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) } } -static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) +static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -5289,7 +5425,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) udelay(100); } -static void ironlake_fdi_disable(struct intel_crtc *crtc) +static void ilk_fdi_disable(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -5496,8 +5632,8 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv) desired_divisor << auxdiv); } -static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, - enum pipe pch_transcoder) +static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, + enum pipe pch_transcoder) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -5540,7 +5676,7 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool e POSTING_READ(SOUTH_CHICKEN1); } -static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) +static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -5601,8 +5737,8 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state, * - DP transcoding bits * - transcoder */ -static void ironlake_pch_enable(const struct intel_atomic_state *state, - const struct intel_crtc_state *crtc_state) +static void ilk_pch_enable(const struct intel_atomic_state *state, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_device *dev = crtc->base.dev; @@ -5613,7 +5749,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, assert_pch_transcoder_disabled(dev_priv, pipe); if (IS_IVYBRIDGE(dev_priv)) - ivybridge_update_fdi_bc_bifurcation(crtc_state); + ivb_update_fdi_bc_bifurcation(crtc_state); /* Write the TU size bits before fdi link training, so that error * detection works. 
*/ @@ -5650,7 +5786,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, /* set transcoder timing, panel must allow it */ assert_panel_unlocked(dev_priv, pipe); - ironlake_pch_transcoder_set_timings(crtc_state, pipe); + ilk_pch_transcoder_set_timings(crtc_state, pipe); intel_fdi_normal_train(crtc); @@ -5682,7 +5818,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, I915_WRITE(reg, temp); } - ironlake_enable_pch_transcoder(crtc_state); + ilk_enable_pch_transcoder(crtc_state); } static void lpt_pch_enable(const struct intel_atomic_state *state, @@ -5697,7 +5833,7 @@ static void lpt_pch_enable(const struct intel_atomic_state *state, lpt_program_iclkip(crtc_state); /* Set transcoder timing. */ - ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A); + ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); } @@ -6001,7 +6137,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, return 0; } -void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state) +void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); int i; @@ -6010,7 +6146,7 @@ void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state) skl_detach_scaler(crtc, i); } -static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) +static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -6047,7 +6183,7 @@ static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) } } -static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state) +static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -6411,45 +6547,29 @@ intel_connector_primary_encoder(struct intel_connector *connector) if (connector->mst_port) return &dp_to_dig_port(connector->mst_port)->base; - encoder = intel_attached_encoder(&connector->base); + encoder = intel_attached_encoder(connector); WARN_ON(!encoder); return encoder; } -static bool -intel_connector_needs_modeset(struct intel_atomic_state *state, - const struct drm_connector_state *old_conn_state, - const struct drm_connector_state *new_conn_state) -{ - struct intel_crtc *old_crtc = old_conn_state->crtc ? - to_intel_crtc(old_conn_state->crtc) : NULL; - struct intel_crtc *new_crtc = new_conn_state->crtc ? 
- to_intel_crtc(new_conn_state->crtc) : NULL; - - return new_crtc != old_crtc || - (new_crtc && - needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc))); -} - static void intel_encoders_update_prepare(struct intel_atomic_state *state) { - struct drm_connector_state *old_conn_state; struct drm_connector_state *new_conn_state; - struct drm_connector *conn; + struct drm_connector *connector; int i; - for_each_oldnew_connector_in_state(&state->base, conn, - old_conn_state, new_conn_state, i) { + for_each_new_connector_in_state(&state->base, connector, new_conn_state, + i) { + struct intel_connector *intel_connector; struct intel_encoder *encoder; struct intel_crtc *crtc; - if (!intel_connector_needs_modeset(state, - old_conn_state, - new_conn_state)) + if (!intel_connector_needs_modeset(state, connector)) continue; - encoder = intel_connector_primary_encoder(to_intel_connector(conn)); + intel_connector = to_intel_connector(connector); + encoder = intel_connector_primary_encoder(intel_connector); if (!encoder->update_prepare) continue; @@ -6461,22 +6581,21 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state) static void intel_encoders_update_complete(struct intel_atomic_state *state) { - struct drm_connector_state *old_conn_state; struct drm_connector_state *new_conn_state; - struct drm_connector *conn; + struct drm_connector *connector; int i; - for_each_oldnew_connector_in_state(&state->base, conn, - old_conn_state, new_conn_state, i) { + for_each_new_connector_in_state(&state->base, connector, new_conn_state, + i) { + struct intel_connector *intel_connector; struct intel_encoder *encoder; struct intel_crtc *crtc; - if (!intel_connector_needs_modeset(state, - old_conn_state, - new_conn_state)) + if (!intel_connector_needs_modeset(state, connector)) continue; - encoder = intel_connector_primary_encoder(to_intel_connector(conn)); + intel_connector = to_intel_connector(connector); + encoder = intel_connector_primary_encoder(intel_connector); if (!encoder->update_complete) continue; @@ -6643,8 +6762,8 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat plane->disable_plane(plane, crtc_state); } -static void ironlake_crtc_enable(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void ilk_crtc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -6680,7 +6799,7 @@ static void ironlake_crtc_enable(struct intel_atomic_state *state, intel_cpu_transcoder_set_m_n(new_crtc_state, &new_crtc_state->fdi_m_n, NULL); - ironlake_set_pipeconf(new_crtc_state); + ilk_set_pipeconf(new_crtc_state); crtc->active = true; @@ -6690,13 +6809,13 @@ static void ironlake_crtc_enable(struct intel_atomic_state *state, /* Note: FDI PLL enabling _must_ be done before we enable the * cpu pipes, hence this is separate from all the other fdi/pch * enabling. 
*/ - ironlake_fdi_pll_enable(new_crtc_state); + ilk_fdi_pll_enable(new_crtc_state); } else { assert_fdi_tx_disabled(dev_priv, pipe); assert_fdi_rx_disabled(dev_priv, pipe); } - ironlake_pfit_enable(new_crtc_state); + ilk_pfit_enable(new_crtc_state); /* * On ILK+ LUT must be loaded before the pipe is running but with @@ -6712,7 +6831,7 @@ static void ironlake_crtc_enable(struct intel_atomic_state *state, intel_enable_pipe(new_crtc_state); if (new_crtc_state->has_pch_encoder) - ironlake_pch_enable(state, new_crtc_state); + ilk_pch_enable(state, new_crtc_state); intel_crtc_vblank_on(new_crtc_state); @@ -6787,8 +6906,8 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) I915_WRITE(reg, val); } -static void haswell_crtc_enable(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void hsw_crtc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -6829,7 +6948,7 @@ static void haswell_crtc_enable(struct intel_atomic_state *state, if (!transcoder_is_dsi(cpu_transcoder)) { hsw_set_frame_start_delay(new_crtc_state); - haswell_set_pipeconf(new_crtc_state); + hsw_set_pipeconf(new_crtc_state); } if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) @@ -6844,9 +6963,9 @@ static void haswell_crtc_enable(struct intel_atomic_state *state, glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); if (INTEL_GEN(dev_priv) >= 9) - skylake_pfit_enable(new_crtc_state); + skl_pfit_enable(new_crtc_state); else - ironlake_pfit_enable(new_crtc_state); + ilk_pfit_enable(new_crtc_state); /* * On ILK+ LUT must be loaded before the pipe is running but with @@ -6895,7 +7014,7 @@ static void haswell_crtc_enable(struct intel_atomic_state *state, } } -void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) +void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -6910,8 +7029,8 @@ void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) } } -static void ironlake_crtc_disable(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void ilk_crtc_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); @@ -6932,15 +7051,15 @@ static void ironlake_crtc_disable(struct intel_atomic_state *state, intel_disable_pipe(old_crtc_state); - ironlake_pfit_disable(old_crtc_state); + ilk_pfit_disable(old_crtc_state); if (old_crtc_state->has_pch_encoder) - ironlake_fdi_disable(crtc); + ilk_fdi_disable(crtc); intel_encoders_post_disable(state, crtc); if (old_crtc_state->has_pch_encoder) { - ironlake_disable_pch_transcoder(dev_priv, pipe); + ilk_disable_pch_transcoder(dev_priv, pipe); if (HAS_PCH_CPT(dev_priv)) { i915_reg_t reg; @@ -6960,15 +7079,15 @@ static void ironlake_crtc_disable(struct intel_atomic_state *state, I915_WRITE(PCH_DPLL_SEL, temp); } - ironlake_fdi_pll_disable(crtc); + ilk_fdi_pll_disable(crtc); } intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } -static void haswell_crtc_disable(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void hsw_crtc_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { /* * FIXME collapse everything to one hook. 
@@ -7505,8 +7624,8 @@ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) return 0; } -static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, - struct intel_crtc_state *pipe_config) +static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, + struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *state = pipe_config->uapi.state; @@ -7578,8 +7697,8 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, } #define RETRY 1 -static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, - struct intel_crtc_state *pipe_config) +static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc, + struct intel_crtc_state *pipe_config) { struct drm_device *dev = intel_crtc->base.dev; const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; @@ -7598,15 +7717,15 @@ retry: fdi_dotclock = adjusted_mode->crtc_clock; - lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, - pipe_config->pipe_bpp); + lane = ilk_get_lanes_required(fdi_dotclock, link_bw, + pipe_config->pipe_bpp); pipe_config->fdi_lanes = lane; intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, link_bw, &pipe_config->fdi_m_n, false, false); - ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); + ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); if (ret == -EDEADLK) return ret; @@ -7812,7 +7931,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, intel_crtc_compute_pixel_rate(pipe_config); if (pipe_config->has_pch_encoder) - return ironlake_fdi_compute_config(crtc, pipe_config); + return ilk_fdi_compute_config(crtc, pipe_config); return 0; } @@ -8795,9 +8914,9 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc, DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); } - limit = &intel_limits_pineview_lvds; + limit = &pnv_limits_lvds; } else { - limit = &intel_limits_pineview_sdvo; + limit = &pnv_limits_sdvo; } if (!crtc_state->clock_set && @@ -9224,7 +9343,7 @@ out: return ret; } -static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) +static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; int i; @@ -9722,12 +9841,12 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) void intel_init_pch_refclk(struct drm_i915_private *dev_priv) { if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) - ironlake_init_pch_refclk(dev_priv); + ilk_init_pch_refclk(dev_priv); else if (HAS_PCH_LPT(dev_priv)) lpt_init_pch_refclk(dev_priv); } -static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) +static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -9783,7 +9902,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) POSTING_READ(PIPECONF(pipe)); } -static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) +static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -9871,7 +9990,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) } } -int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) +int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) { /* * Account for 
spread spectrum to avoid @@ -9882,14 +10001,14 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) return DIV_ROUND_UP(bps, link_bw * 8); } -static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) +static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor) { return i9xx_dpll_compute_m(dpll) < factor * dpll->n; } -static void ironlake_compute_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct dpll *reduced_clock) +static void ilk_compute_dpll(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct dpll *reduced_clock) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll, fp, fp2; @@ -9909,7 +10028,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc, fp = i9xx_dpll_compute_fp(&crtc_state->dpll); - if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor)) + if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor)) fp |= FP_CB_TUNE; if (reduced_clock) { @@ -9989,8 +10108,8 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc, crtc_state->dpll_hw_state.fp1 = fp2; } -static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static int ilk_crtc_compute_clock(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_atomic_state *state = @@ -10014,17 +10133,17 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, if (intel_is_dual_link_lvds(dev_priv)) { if (refclk == 100000) - limit = &intel_limits_ironlake_dual_lvds_100m; + limit = &ilk_limits_dual_lvds_100m; else - limit = &intel_limits_ironlake_dual_lvds; + limit = &ilk_limits_dual_lvds; } else { if (refclk == 100000) - limit = &intel_limits_ironlake_single_lvds_100m; + limit = &ilk_limits_single_lvds_100m; else - limit = &intel_limits_ironlake_single_lvds; + limit = &ilk_limits_single_lvds; } } else { - limit = &intel_limits_ironlake_dac; + limit = &ilk_limits_dac; } if (!crtc_state->clock_set && @@ -10034,7 +10153,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, return -EINVAL; } - ironlake_compute_dpll(crtc, crtc_state, NULL); + ilk_compute_dpll(crtc, crtc_state, NULL); if (!intel_reserve_shared_dplls(state, crtc, NULL)) { DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", @@ -10109,15 +10228,15 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, &pipe_config->dp_m2_n2); } -static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, &pipe_config->fdi_m_n, NULL); } -static void skylake_get_pfit_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void skl_get_pfit_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -10148,8 +10267,8 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc, } static void -skylake_get_initial_plane_config(struct intel_crtc *crtc, - struct intel_initial_plane_config *plane_config) +skl_get_initial_plane_config(struct intel_crtc *crtc, + struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -10210,6 +10329,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb->modifier = 
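A quick worked example for ilk_get_lanes_required() above. Most of the body lies outside the hunk; the sketch below only relies on the DIV_ROUND_UP(bps, link_bw * 8) line that is visible, and models the spread-spectrum headroom the comment mentions as an assumed 5 percent, so treat the exact margin as illustrative.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int fdi_lanes_needed(int target_clock_khz, int link_bw_khz, int bpp)
{
	int bps = target_clock_khz * bpp * 21 / 20;	/* assumed ~5% margin */

	return DIV_ROUND_UP(bps, link_bw_khz * 8);	/* per-lane capacity */
}

int main(void)
{
	/* e.g. a 148500 kHz mode at 24 bpp with link_bw = 270000 -> 2 lanes */
	printf("%d lanes\n", fdi_lanes_needed(148500, 270000, 24));
	return 0;
}
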
INTEL_GEN(dev_priv) >= 12 ? I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS : I915_FORMAT_MOD_Y_TILED_CCS; + else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE) + fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; else fb->modifier = I915_FORMAT_MOD_Y_TILED; break; @@ -10276,8 +10397,8 @@ error: kfree(intel_fb); } -static void ironlake_get_pfit_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void ilk_get_pfit_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -10300,8 +10421,8 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc, } } -static bool ironlake_get_pipe_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static bool ilk_get_pipe_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -10372,7 +10493,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; - ironlake_get_fdi_m_n_config(crtc, pipe_config); + ilk_get_fdi_m_n_config(crtc, pipe_config); if (HAS_PCH_IBX(dev_priv)) { /* @@ -10400,7 +10521,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; - ironlake_pch_clock_get(crtc, pipe_config); + ilk_pch_clock_get(crtc, pipe_config); } else { pipe_config->pixel_multiplier = 1; } @@ -10408,7 +10529,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, intel_get_pipe_timings(crtc, pipe_config); intel_get_pipe_src_size(crtc, pipe_config); - ironlake_get_pfit_config(crtc, pipe_config); + ilk_get_pfit_config(crtc, pipe_config); ret = true; @@ -10417,8 +10538,9 @@ out: return ret; } -static int haswell_crtc_compute_clock(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) + +static int hsw_crtc_compute_clock(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_atomic_state *state = @@ -10439,9 +10561,8 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc, return 0; } -static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv, - enum port port, - struct intel_crtc_state *pipe_config) +static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, + struct intel_crtc_state *pipe_config) { enum intel_dpll_id id; u32 temp; @@ -10455,9 +10576,8 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv, pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); } -static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, - enum port port, - struct intel_crtc_state *pipe_config) +static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, + struct intel_crtc_state *pipe_config) { enum phy phy = intel_port_to_phy(dev_priv, port); enum icl_port_dpll_id port_dpll_id; @@ -10516,9 +10636,8 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); } -static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, - enum port port, - struct intel_crtc_state *pipe_config) +static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, + struct intel_crtc_state *pipe_config) { enum intel_dpll_id id; u32 temp; @@ -10532,9 
+10651,8 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); } -static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, - enum port port, - struct intel_crtc_state *pipe_config) +static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, + struct intel_crtc_state *pipe_config) { enum intel_dpll_id id; u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); @@ -10722,8 +10840,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, return transcoder_is_dsi(pipe_config->cpu_transcoder); } -static void haswell_get_ddi_port_state(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void hsw_get_ddi_port_state(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; @@ -10743,15 +10861,15 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, } if (INTEL_GEN(dev_priv) >= 11) - icelake_get_ddi_pll(dev_priv, port, pipe_config); + icl_get_ddi_pll(dev_priv, port, pipe_config); else if (IS_CANNONLAKE(dev_priv)) - cannonlake_get_ddi_pll(dev_priv, port, pipe_config); + cnl_get_ddi_pll(dev_priv, port, pipe_config); else if (IS_GEN9_BC(dev_priv)) - skylake_get_ddi_pll(dev_priv, port, pipe_config); + skl_get_ddi_pll(dev_priv, port, pipe_config); else if (IS_GEN9_LP(dev_priv)) bxt_get_ddi_pll(dev_priv, port, pipe_config); else - haswell_get_ddi_pll(dev_priv, port, pipe_config); + hsw_get_ddi_pll(dev_priv, port, pipe_config); pll = pipe_config->shared_dpll; if (pll) { @@ -10772,7 +10890,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; - ironlake_get_fdi_m_n_config(crtc, pipe_config); + ilk_get_fdi_m_n_config(crtc, pipe_config); } } @@ -10794,7 +10912,7 @@ static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_pr return master_select - 1; } -static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) +static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 transcoders; @@ -10829,8 +10947,8 @@ static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_sta crtc_state->sync_mode_slaves_mask); } -static bool haswell_get_pipe_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static bool hsw_get_pipe_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf; @@ -10865,7 +10983,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || INTEL_GEN(dev_priv) >= 11) { - haswell_get_ddi_port_state(crtc, pipe_config); + hsw_get_ddi_port_state(crtc, pipe_config); intel_get_pipe_timings(crtc, pipe_config); } @@ -10922,9 +11040,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, power_domain_mask |= BIT_ULL(power_domain); if (INTEL_GEN(dev_priv) >= 9) - skylake_get_pfit_config(crtc, pipe_config); + skl_get_pfit_config(crtc, pipe_config); else - ironlake_get_pfit_config(crtc, pipe_config); + ilk_get_pfit_config(crtc, pipe_config); } if (hsw_crtc_supports_ips(crtc)) { @@ -10950,7 +11068,7 @@ static bool haswell_get_pipe_config(struct intel_crtc 
*crtc, if (INTEL_GEN(dev_priv) >= 11 && !transcoder_is_dsi(pipe_config->cpu_transcoder)) - icelake_get_trans_port_sync_config(pipe_config); + icl_get_trans_port_sync_config(pipe_config); out: for_each_power_domain(power_domain, power_domain_mask) @@ -11570,7 +11688,7 @@ int intel_get_load_detect_pipe(struct drm_connector *connector, { struct intel_crtc *intel_crtc; struct intel_encoder *intel_encoder = - intel_attached_encoder(connector); + intel_attached_encoder(to_intel_connector(connector)); struct drm_crtc *possible_crtc; struct drm_encoder *encoder = &intel_encoder->base; struct drm_crtc *crtc = NULL; @@ -11724,7 +11842,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx) { struct intel_encoder *intel_encoder = - intel_attached_encoder(connector); + intel_attached_encoder(to_intel_connector(connector)); struct drm_encoder *encoder = &intel_encoder->base; struct drm_atomic_state *state = old->restore_state; int ret; @@ -11867,8 +11985,8 @@ int intel_dotclock_calculate(int link_freq, return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); } -static void ironlake_pch_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void ilk_pch_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -11897,6 +12015,7 @@ static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, crtc_state->hsw_workaround_pipe = INVALID_PIPE; crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID; crtc_state->scaler_state.scaler_id = -1; + crtc_state->mst_master_transcoder = INVALID_TRANSCODER; } static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc) @@ -12278,88 +12397,121 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; } -static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state) +static bool +intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state) +{ + struct drm_crtc *crtc = crtc_state->uapi.crtc; + struct drm_atomic_state *state = crtc_state->uapi.state; + struct drm_connector *connector; + struct drm_connector_state *connector_state; + int i; + + for_each_new_connector_in_state(state, connector, connector_state, i) { + if (connector_state->crtc != crtc) + continue; + if (connector->has_tile && + connector->tile_h_loc == connector->num_h_tile - 1 && + connector->tile_v_loc == connector->num_v_tile - 1) + return true; + } + + return false; +} + +static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state) +{ + crtc_state->master_transcoder = INVALID_TRANSCODER; + crtc_state->sync_mode_slaves_mask = 0; +} + +static int icl_compute_port_sync_crtc_state(struct drm_connector *connector, + struct intel_crtc_state *crtc_state, + int num_tiled_conns) { struct drm_crtc *crtc = crtc_state->uapi.crtc; struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - struct drm_connector *master_connector, *connector; - struct drm_connector_state *connector_state; + struct drm_connector *master_connector; struct drm_connector_list_iter conn_iter; struct drm_crtc *master_crtc = NULL; struct drm_crtc_state *master_crtc_state; struct intel_crtc_state *master_pipe_config; - int i, tile_group_id; if (INTEL_GEN(dev_priv) < 11) return 0; + if (!intel_crtc_has_type(crtc_state, 
INTEL_OUTPUT_DP)) + return 0; + /* * In case of tiled displays there could be one or more slaves but there is * only one master. Lets make the CRTC used by the connector corresponding * to the last horizonal and last vertical tile a master/genlock CRTC. * All the other CRTCs corresponding to other tiles of the same Tile group * are the slave CRTCs and hold a pointer to their genlock CRTC. + * If all tiles not present do not make master slave assignments. */ - for_each_new_connector_in_state(&state->base, connector, connector_state, i) { - if (connector_state->crtc != crtc) - continue; - if (!connector->has_tile) + if (!connector->has_tile || + crtc_state->hw.mode.hdisplay != connector->tile_h_size || + crtc_state->hw.mode.vdisplay != connector->tile_v_size || + num_tiled_conns < connector->num_h_tile * connector->num_v_tile) { + reset_port_sync_mode_state(crtc_state); + return 0; + } + /* Last Horizontal and last vertical tile connector is a master + * Master's crtc state is already populated in slave for port sync + */ + if (connector->tile_h_loc == connector->num_h_tile - 1 && + connector->tile_v_loc == connector->num_v_tile - 1) + return 0; + + /* Loop through all connectors and configure the Slave crtc_state + * to point to the correct master. + */ + drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_for_each_connector_iter(master_connector, &conn_iter) { + struct drm_connector_state *master_conn_state = NULL; + + if (!(master_connector->has_tile && + master_connector->tile_group->id == connector->tile_group->id)) continue; - if (crtc_state->hw.mode.hdisplay != connector->tile_h_size || - crtc_state->hw.mode.vdisplay != connector->tile_v_size) - return 0; - if (connector->tile_h_loc == connector->num_h_tile - 1 && - connector->tile_v_loc == connector->num_v_tile - 1) + if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 || + master_connector->tile_v_loc != master_connector->num_v_tile - 1) continue; - crtc_state->sync_mode_slaves_mask = 0; - tile_group_id = connector->tile_group->id; - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); - drm_for_each_connector_iter(master_connector, &conn_iter) { - struct drm_connector_state *master_conn_state = NULL; - - if (!master_connector->has_tile) - continue; - if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 || - master_connector->tile_v_loc != master_connector->num_v_tile - 1) - continue; - if (master_connector->tile_group->id != tile_group_id) - continue; - master_conn_state = drm_atomic_get_connector_state(&state->base, - master_connector); - if (IS_ERR(master_conn_state)) { - drm_connector_list_iter_end(&conn_iter); - return PTR_ERR(master_conn_state); - } - if (master_conn_state->crtc) { - master_crtc = master_conn_state->crtc; - break; - } + master_conn_state = drm_atomic_get_connector_state(&state->base, + master_connector); + if (IS_ERR(master_conn_state)) { + drm_connector_list_iter_end(&conn_iter); + return PTR_ERR(master_conn_state); } - drm_connector_list_iter_end(&conn_iter); - - if (!master_crtc) { - DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n", - connector_state->crtc->base.id); - return -EINVAL; + if (master_conn_state->crtc) { + master_crtc = master_conn_state->crtc; + break; } + } + drm_connector_list_iter_end(&conn_iter); - master_crtc_state = drm_atomic_get_crtc_state(&state->base, - master_crtc); - if (IS_ERR(master_crtc_state)) - return PTR_ERR(master_crtc_state); - - master_pipe_config = to_intel_crtc_state(master_crtc_state); - 
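To restate the decision the rewritten icl_compute_port_sync_crtc_state() above makes, a small stand-alone sketch with invented types (it leaves out the DP-type and tile-size checks): the connector at the last horizontal and last vertical tile location becomes the port-sync master, every other connector of a fully populated tile group becomes a slave, and no assignment is made while tiles are missing.

#include <stdio.h>

enum sync_role { SYNC_NONE, SYNC_MASTER, SYNC_SLAVE };

/* Invented, simplified view of one tiled connector. */
struct tile_info {
	int h_loc, v_loc;	/* this tile's position in the group */
	int num_h, num_v;	/* tiles per row / per column */
};

static enum sync_role port_sync_role(const struct tile_info *t,
				     int tiles_present)
{
	/* No master/slave assignments until every tile is connected. */
	if (tiles_present < t->num_h * t->num_v)
		return SYNC_NONE;

	/* The last horizontal and last vertical tile drives the master. */
	if (t->h_loc == t->num_h - 1 && t->v_loc == t->num_v - 1)
		return SYNC_MASTER;

	return SYNC_SLAVE;
}

int main(void)
{
	struct tile_info left  = { .h_loc = 0, .v_loc = 0, .num_h = 2, .num_v = 1 };
	struct tile_info right = { .h_loc = 1, .v_loc = 0, .num_h = 2, .num_v = 1 };

	printf("left %d, right %d (1 = master, 2 = slave)\n",
	       port_sync_role(&left, 2), port_sync_role(&right, 2));
	return 0;
}
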
crtc_state->master_transcoder = master_pipe_config->cpu_transcoder; - master_pipe_config->sync_mode_slaves_mask |= - BIT(crtc_state->cpu_transcoder); - DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n", - transcoder_name(crtc_state->master_transcoder), - crtc_state->uapi.crtc->base.id, - master_pipe_config->sync_mode_slaves_mask); + if (!master_crtc) { + DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n", + crtc->base.id); + return -EINVAL; } + master_crtc_state = drm_atomic_get_crtc_state(&state->base, + master_crtc); + if (IS_ERR(master_crtc_state)) + return PTR_ERR(master_crtc_state); + + master_pipe_config = to_intel_crtc_state(master_crtc_state); + crtc_state->master_transcoder = master_pipe_config->cpu_transcoder; + master_pipe_config->sync_mode_slaves_mask |= + BIT(crtc_state->cpu_transcoder); + DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n", + transcoder_name(crtc_state->master_transcoder), + crtc->base.id, + master_pipe_config->sync_mode_slaves_mask); + return 0; } @@ -12755,6 +12907,9 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, pipe_config->csc_mode, pipe_config->gamma_mode, pipe_config->gamma_enable, pipe_config->csc_enable); + DRM_DEBUG_KMS("MST master transcoder: %s\n", + transcoder_name(pipe_config->mst_master_transcoder)); + dump_planes: if (!state) return; @@ -12901,9 +13056,11 @@ intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state) saved_state->wm = crtc_state->wm; /* * Save the slave bitmask which gets filled for master crtc state during - * slave atomic check call. + * slave atomic check call. For all other CRTCs reset the port sync variables + * crtc_state->master_transcoder needs to be set to INVALID */ - if (is_trans_port_sync_master(crtc_state)) + reset_port_sync_mode_state(saved_state); + if (intel_atomic_is_master_connector(crtc_state)) saved_state->sync_mode_slaves_mask = crtc_state->sync_mode_slaves_mask; @@ -12924,7 +13081,7 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) struct drm_connector *connector; struct drm_connector_state *connector_state; int base_bpp, ret; - int i; + int i, tile_group_id = -1, num_tiled_conns = 0; bool retry = true; pipe_config->cpu_transcoder = @@ -12994,13 +13151,22 @@ encoder_retry: drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode, CRTC_STEREO_DOUBLE); - /* Set the crtc_state defaults for trans_port_sync */ - pipe_config->master_transcoder = INVALID_TRANSCODER; - ret = icl_add_sync_mode_crtcs(pipe_config); - if (ret) { - DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n", - ret); - return ret; + /* Get tile_group_id of tiled connector */ + for_each_new_connector_in_state(state, connector, connector_state, i) { + if (connector_state->crtc == crtc && + connector->has_tile) { + tile_group_id = connector->tile_group->id; + break; + } + } + + /* Get total number of tiled connectors in state that belong to + * this tile group. 
+ */ + for_each_new_connector_in_state(state, connector, connector_state, i) { + if (connector->has_tile && + connector->tile_group->id == tile_group_id) + num_tiled_conns++; } /* Pass our mode to the connectors and the CRTC to give them a chance to @@ -13011,6 +13177,14 @@ encoder_retry: if (connector_state->crtc != crtc) continue; + ret = icl_compute_port_sync_crtc_state(connector, pipe_config, + num_tiled_conns); + if (ret) { + DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n", + ret); + return ret; + } + encoder = to_intel_encoder(connector_state->best_encoder); ret = encoder->compute_config(encoder, pipe_config, connector_state); @@ -13535,6 +13709,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(dsc.dsc_split); PIPE_CONF_CHECK_I(dsc.compressed_bpp); + PIPE_CONF_CHECK_I(mst_master_transcoder); + #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_BOOL @@ -14048,7 +14224,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state) * multiple pipes, and planes are enabled after the pipe, we need to wait at * least 2 vblanks on the first pipe before enabling planes on the second pipe. */ -static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) +static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) { struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; @@ -14143,7 +14319,7 @@ static int intel_modeset_checks(struct intel_atomic_state *state) intel_modeset_clear_plls(state); if (IS_HASWELL(dev_priv)) - return haswell_mode_set_planes_workaround(state); + return hsw_mode_set_planes_workaround(state); return 0; } @@ -14173,7 +14349,11 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta new_crtc_state->uapi.mode_changed = false; new_crtc_state->update_pipe = true; +} +static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) +{ /* * If we're not doing the full modeset we want to * keep the current M/N values as they may be @@ -14296,6 +14476,107 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state) return 0; } +static bool intel_cpu_transcoder_needs_modeset(struct intel_atomic_state *state, + enum transcoder transcoder) +{ + struct intel_crtc_state *new_crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) + if (new_crtc_state->cpu_transcoder == transcoder) + return needs_modeset(new_crtc_state); + + return false; +} + +static void +intel_modeset_synced_crtcs(struct intel_atomic_state *state, + u8 transcoders) +{ + struct intel_crtc_state *new_crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, + new_crtc_state, i) { + if (transcoders & BIT(new_crtc_state->cpu_transcoder)) { + new_crtc_state->uapi.mode_changed = true; + new_crtc_state->update_pipe = false; + } + } +} + +static int +intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + int ret = 0; + + drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; + + if (!connector->has_tile || + connector->tile_group->id != tile_grp_id) + continue; + conn_state = 
drm_atomic_get_connector_state(&state->base, + connector); + if (IS_ERR(conn_state)) { + ret = PTR_ERR(conn_state); + break; + } + + if (!conn_state->crtc) + continue; + + crtc_state = drm_atomic_get_crtc_state(&state->base, + conn_state->crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + break; + } + crtc_state->mode_changed = true; + ret = drm_atomic_add_affected_connectors(&state->base, + conn_state->crtc); + if (ret) + break; + } + drm_connector_list_iter_end(&conn_iter); + + return ret; +} + +static int +intel_atomic_check_tiled_conns(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_connector *connector; + struct drm_connector_state *old_conn_state, *new_conn_state; + int i, ret; + + if (INTEL_GEN(dev_priv) < 11) + return 0; + + /* Is tiled, mark all other tiled CRTCs as needing a modeset */ + for_each_oldnew_connector_in_state(&state->base, connector, + old_conn_state, new_conn_state, i) { + if (!connector->has_tile) + continue; + if (!intel_connector_needs_modeset(state, connector)) + continue; + + ret = intel_modeset_all_tiles(state, connector->tile_group->id); + if (ret) + return ret; + } + + return 0; +} + /** * intel_atomic_check - validate state object * @dev: drm device @@ -14323,6 +14604,21 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; + /** + * This check adds all the connectors in current state that belong to + * the same tile group to a full modeset. + * This function directly sets the mode_changed to true and we also call + * drm_atomic_add_affected_connectors(). Hence we are not explicitly + * calling drm_atomic_helper_check_modeset() after this. + * + * Fixme: Handle some corner cases where one of the + * tiled connectors gets disconnected and tile info is lost but since it + * was previously synced to other conn, we need to add that to the modeset. + */ + ret = intel_atomic_check_tiled_conns(state); + if (ret) + goto fail; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!needs_modeset(new_crtc_state)) { @@ -14334,8 +14630,6 @@ static int intel_atomic_check(struct drm_device *dev, if (!new_crtc_state->uapi.enable) { intel_crtc_copy_uapi_to_hw_state(new_crtc_state); - - any_ms = true; continue; } @@ -14348,9 +14642,49 @@ static int intel_atomic_check(struct drm_device *dev, goto fail; intel_crtc_check_fastset(old_crtc_state, new_crtc_state); + } + + /** + * Check if fastset is allowed by external dependencies like other + * pipes and transcoders. + * + * Right now it only forces a fullmodeset when the MST master + * transcoder did not changed but the pipe of the master transcoder + * needs a fullmodeset so all slaves also needs to do a fullmodeset or + * in case of port synced crtcs, if one of the synced crtcs + * needs a full modeset, all other synced crtcs should be + * forced a full modeset. 
+ */ + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state)) + continue; + + if (intel_dp_mst_is_slave_trans(new_crtc_state)) { + enum transcoder master = new_crtc_state->mst_master_transcoder; + + if (intel_cpu_transcoder_needs_modeset(state, master)) { + new_crtc_state->uapi.mode_changed = true; + new_crtc_state->update_pipe = false; + } + } else if (is_trans_port_sync_mode(new_crtc_state)) { + u8 trans = new_crtc_state->sync_mode_slaves_mask | + BIT(new_crtc_state->master_transcoder); - if (needs_modeset(new_crtc_state)) + intel_modeset_synced_crtcs(state, trans); + } + } + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (needs_modeset(new_crtc_state)) { any_ms = true; + continue; + } + + if (!new_crtc_state->update_pipe) + continue; + + intel_crtc_copy_fastset(old_crtc_state, new_crtc_state); } if (any_ms && !check_digital_port_conflicts(state)) { @@ -14472,12 +14806,12 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, skl_detach_scalers(new_crtc_state); if (new_crtc_state->pch_pfit.enabled) - skylake_pfit_enable(new_crtc_state); + skl_pfit_enable(new_crtc_state); } else if (HAS_PCH_SPLIT(dev_priv)) { if (new_crtc_state->pch_pfit.enabled) - ironlake_pfit_enable(new_crtc_state); + ilk_pfit_enable(new_crtc_state); else if (old_crtc_state->pch_pfit.enabled) - ironlake_pfit_disable(old_crtc_state); + ilk_pfit_disable(old_crtc_state); } if (INTEL_GEN(dev_priv) >= 11) @@ -14619,7 +14953,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) u32 handled = 0; int i; - /* Only disable port sync slaves */ + /* Only disable port sync and MST slaves */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!needs_modeset(new_crtc_state)) @@ -14633,7 +14967,8 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) * slave CRTCs are disabled first and then master CRTC since * Slave vblanks are masked till Master Vblanks. */ - if (!is_trans_port_sync_slave(old_crtc_state)) + if (!is_trans_port_sync_slave(old_crtc_state) && + !intel_dp_mst_is_slave_trans(old_crtc_state)) continue; intel_pre_plane_update(state, crtc); @@ -14694,10 +15029,14 @@ static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc, if (conn_state->crtc == &crtc->base) break; } - intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base); + intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn))); intel_dp_stop_link_train(intel_dp); } +/* + * TODO: This is only called from port sync and it is identical to what will be + * executed again in intel_update_crtc() over port sync pipes + */ static void intel_post_crtc_enable_updates(struct intel_crtc *crtc, struct intel_atomic_state *state) { @@ -14786,15 +15125,21 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; u8 required_slices = state->wm_results.ddb.enabled_slices; struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; - u8 dirty_pipes = 0; + const u8 num_pipes = INTEL_NUM_PIPES(dev_priv); + u8 update_pipes = 0, modeset_pipes = 0; int i; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (!new_crtc_state->hw.active) + continue; + /* ignore allocations for crtc's that have been turned off. 
*/ - if (!needs_modeset(new_crtc_state) && new_crtc_state->hw.active) + if (!needs_modeset(new_crtc_state)) { entries[i] = old_crtc_state->wm.skl.ddb; - if (new_crtc_state->hw.active) - dirty_pipes |= BIT(crtc->pipe); + update_pipes |= BIT(crtc->pipe); + } else { + modeset_pipes |= BIT(crtc->pipe); + } } /* If 2nd DBuf slice required, enable it here */ @@ -14804,38 +15149,29 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) /* * Whenever the number of active pipes changes, we need to make sure we * update the pipes in the right order so that their ddb allocations - * never overlap with eachother inbetween CRTC updates. Otherwise we'll + * never overlap with each other between CRTC updates. Otherwise we'll cause pipe underruns and other bad stuff. + * + * So first let's enable all pipes that do not need a full modeset as + * those don't have any external dependency. */ - while (dirty_pipes) { + while (update_pipes) { for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { enum pipe pipe = crtc->pipe; - bool modeset = needs_modeset(new_crtc_state); - if ((dirty_pipes & BIT(pipe)) == 0) + if ((update_pipes & BIT(pipe)) == 0) continue; if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, - entries, - INTEL_NUM_PIPES(dev_priv), i)) + entries, num_pipes, i)) continue; entries[i] = new_crtc_state->wm.skl.ddb; - dirty_pipes &= ~BIT(pipe); - - if (modeset && is_trans_port_sync_mode(new_crtc_state)) { - if (is_trans_port_sync_master(new_crtc_state)) - intel_update_trans_port_sync_crtcs(crtc, - state, - old_crtc_state, - new_crtc_state); - else - continue; - } else { - intel_update_crtc(crtc, state, old_crtc_state, - new_crtc_state); - } + update_pipes &= ~BIT(pipe); + + intel_update_crtc(crtc, state, old_crtc_state, + new_crtc_state); /* * If this is an already active pipe, its DDB changed, @@ -14845,11 +15181,72 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) */ if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, &old_crtc_state->wm.skl.ddb) && - !modeset && dirty_pipes) + (update_pipes | modeset_pipes)) intel_wait_for_vblank(dev_priv, pipe); } } + /* + * Enable all pipes that need a modeset and do not depend on other + * pipes + */ + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + enum pipe pipe = crtc->pipe; + + if ((modeset_pipes & BIT(pipe)) == 0) + continue; + + if (intel_dp_mst_is_slave_trans(new_crtc_state) || + is_trans_port_sync_slave(new_crtc_state)) + continue; + + WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, + entries, num_pipes, i)); + + entries[i] = new_crtc_state->wm.skl.ddb; + modeset_pipes &= ~BIT(pipe); + + if (is_trans_port_sync_mode(new_crtc_state)) { + struct intel_crtc *slave_crtc; + + intel_update_trans_port_sync_crtcs(crtc, state, + old_crtc_state, + new_crtc_state); + + slave_crtc = intel_get_slave_crtc(new_crtc_state); + /* TODO: update entries[] of slave */ + modeset_pipes &= ~BIT(slave_crtc->pipe); + + } else { + intel_update_crtc(crtc, state, old_crtc_state, + new_crtc_state); + } + } + + /* + * Finally enable all pipes that need a modeset and depend on + * other pipes; right now that is only MST slaves, as both port sync slave + * and master are enabled together + */ + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + enum pipe pipe = crtc->pipe; + + if ((modeset_pipes & BIT(pipe)) == 0) + continue; + + WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, +
entries, num_pipes, i)); + + entries[i] = new_crtc_state->wm.skl.ddb; + modeset_pipes &= ~BIT(pipe); + + intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state); + } + + WARN_ON(modeset_pipes); + /* If 2nd DBuf slice is no more required disable it */ if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices) icl_dbuf_slices_update(dev_priv, required_slices); @@ -16586,8 +16983,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, } /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ - if (mode_cmd->offsets[0] != 0) + if (mode_cmd->offsets[0] != 0) { + DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n", + mode_cmd->offsets[0]); goto err; + } drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); @@ -16814,29 +17214,28 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) intel_init_cdclk_hooks(dev_priv); if (INTEL_GEN(dev_priv) >= 9) { - dev_priv->display.get_pipe_config = haswell_get_pipe_config; + dev_priv->display.get_pipe_config = hsw_get_pipe_config; dev_priv->display.get_initial_plane_config = - skylake_get_initial_plane_config; - dev_priv->display.crtc_compute_clock = - haswell_crtc_compute_clock; - dev_priv->display.crtc_enable = haswell_crtc_enable; - dev_priv->display.crtc_disable = haswell_crtc_disable; + skl_get_initial_plane_config; + dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock; + dev_priv->display.crtc_enable = hsw_crtc_enable; + dev_priv->display.crtc_disable = hsw_crtc_disable; } else if (HAS_DDI(dev_priv)) { - dev_priv->display.get_pipe_config = haswell_get_pipe_config; + dev_priv->display.get_pipe_config = hsw_get_pipe_config; dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config; dev_priv->display.crtc_compute_clock = - haswell_crtc_compute_clock; - dev_priv->display.crtc_enable = haswell_crtc_enable; - dev_priv->display.crtc_disable = haswell_crtc_disable; + hsw_crtc_compute_clock; + dev_priv->display.crtc_enable = hsw_crtc_enable; + dev_priv->display.crtc_disable = hsw_crtc_disable; } else if (HAS_PCH_SPLIT(dev_priv)) { - dev_priv->display.get_pipe_config = ironlake_get_pipe_config; + dev_priv->display.get_pipe_config = ilk_get_pipe_config; dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config; dev_priv->display.crtc_compute_clock = - ironlake_crtc_compute_clock; - dev_priv->display.crtc_enable = ironlake_crtc_enable; - dev_priv->display.crtc_disable = ironlake_crtc_disable; + ilk_crtc_compute_clock; + dev_priv->display.crtc_enable = ilk_crtc_enable; + dev_priv->display.crtc_disable = ilk_crtc_disable; } else if (IS_CHERRYVIEW(dev_priv)) { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; dev_priv->display.get_initial_plane_config = @@ -16882,7 +17281,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) } if (IS_GEN(dev_priv, 5)) { - dev_priv->display.fdi_link_train = ironlake_fdi_link_train; + dev_priv->display.fdi_link_train = ilk_fdi_link_train; } else if (IS_GEN(dev_priv, 6)) { dev_priv->display.fdi_link_train = gen6_fdi_link_train; } else if (IS_IVYBRIDGE(dev_priv)) { @@ -17827,8 +18226,11 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) static void intel_early_display_was(struct drm_i915_private *dev_priv) { - /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */ - if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + /* + * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl + * Also known as Wa_14010480278. 
+ */ + if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv)) I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS); @@ -17928,7 +18330,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, /* We need to sanitize only the MST primary port. */ if (encoder->type != INTEL_OUTPUT_DP_MST && intel_phy_is_tc(dev_priv, phy)) - intel_tc_port_sanitize(enc_to_dig_port(&encoder->base)); + intel_tc_port_sanitize(enc_to_dig_port(encoder)); } get_encoder_power_domains(dev_priv); @@ -18101,6 +18503,8 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915) intel_gmbus_teardown(i915); + intel_bw_cleanup(i915); + destroy_workqueue(i915->flip_wq); destroy_workqueue(i915->modeset_wq); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 0fef9263cddc..028aab728514 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -474,6 +474,7 @@ void intel_link_compute_m_n(u16 bpp, int nlanes, struct intel_link_m_n *m_n, bool constant_n, bool fec_enable); bool is_ccs_modifier(u64 modifier); +int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane); void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); @@ -521,7 +522,7 @@ int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc); void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state); -int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); +int ilk_get_lanes_required(int target_clock, int link_bw, int bpp); void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dport, unsigned int expected_mask); @@ -578,8 +579,8 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center); int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); -void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state); -void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state); +void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state); +void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state); u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 679457156797..21561acfa3ac 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -514,7 +514,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, if (encoder->type == INTEL_OUTPUT_DP_MST) continue; - dig_port = enc_to_dig_port(&encoder->base); + dig_port = enc_to_dig_port(encoder); if (WARN_ON(!dig_port)) continue; @@ -1664,8 +1664,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct i915_power_domains *power_domains = &dev_priv->power_domains; - enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base)); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); + enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder)); + enum 
dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder)); mutex_lock(&power_domains->lock); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 83ea04149b77..888ea8a170d1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -90,8 +90,8 @@ struct intel_framebuffer { /* for each plane in the normal GTT view */ struct { unsigned int x, y; - } normal[2]; - /* for each plane in the rotated GTT view */ + } normal[4]; + /* for each plane in the rotated GTT view for no-CCS formats */ struct { unsigned int x, y; unsigned int pitch; /* pixels */ @@ -555,7 +555,7 @@ struct intel_plane_state { */ u32 stride; int x, y; - } color_plane[2]; + } color_plane[4]; /* plane control register */ u32 ctl; @@ -1054,6 +1054,9 @@ struct intel_crtc_state { /* Bitmask to indicate slaves attached */ u8 sync_mode_slaves_mask; + + /* Only valid on TGL+ */ + enum transcoder mst_master_transcoder; }; struct intel_crtc { @@ -1435,9 +1438,9 @@ struct intel_load_detect_pipe { }; static inline struct intel_encoder * -intel_attached_encoder(struct drm_connector *connector) +intel_attached_encoder(struct intel_connector *connector) { - return to_intel_connector(connector)->encoder; + return connector->encoder; } static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder) @@ -1454,12 +1457,12 @@ static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder) } static inline struct intel_digital_port * -enc_to_dig_port(struct drm_encoder *encoder) +enc_to_dig_port(struct intel_encoder *encoder) { - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + struct intel_encoder *intel_encoder = encoder; if (intel_encoder_is_dig_port(intel_encoder)) - return container_of(encoder, struct intel_digital_port, + return container_of(&encoder->base, struct intel_digital_port, base.base); else return NULL; @@ -1468,16 +1471,17 @@ enc_to_dig_port(struct drm_encoder *encoder) static inline struct intel_digital_port * conn_to_dig_port(struct intel_connector *connector) { - return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base); + return enc_to_dig_port(intel_attached_encoder(connector)); } static inline struct intel_dp_mst_encoder * -enc_to_mst(struct drm_encoder *encoder) +enc_to_mst(struct intel_encoder *encoder) { - return container_of(encoder, struct intel_dp_mst_encoder, base.base); + return container_of(&encoder->base, struct intel_dp_mst_encoder, + base.base); } -static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) +static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder) { return &enc_to_dig_port(encoder)->dp; } @@ -1490,14 +1494,14 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder) return true; case INTEL_OUTPUT_DDI: /* Skip pure HDMI/DVI DDI encoders */ - return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg); + return i915_mmio_reg_valid(enc_to_intel_dp(encoder)->output_reg); default: return false; } } static inline struct intel_lspcon * -enc_to_intel_lspcon(struct drm_encoder *encoder) +enc_to_intel_lspcon(struct intel_encoder *encoder) { return &enc_to_dig_port(encoder)->lspcon; } diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 2f31d226c6eb..c7424e2a04a3 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -146,9 +146,9 @@ bool 
intel_dp_is_edp(struct intel_dp *intel_dp) return intel_dig_port->base.type == INTEL_OUTPUT_EDP; } -static struct intel_dp *intel_attached_dp(struct drm_connector *connector) +static struct intel_dp *intel_attached_dp(struct intel_connector *connector) { - return enc_to_intel_dp(&intel_attached_encoder(connector)->base); + return enc_to_intel_dp(intel_attached_encoder(connector)); } static void intel_dp_link_down(struct intel_encoder *encoder, @@ -614,7 +614,7 @@ static enum drm_mode_status intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; struct drm_i915_private *dev_priv = to_i915(connector->dev); @@ -834,7 +834,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) * Pick one that's not used by other ports. */ for_each_intel_dp(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (encoder->type == INTEL_OUTPUT_EDP) { WARN_ON(intel_dp->active_pipe != INVALID_PIPE && @@ -1031,7 +1031,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) */ for_each_intel_dp(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); WARN_ON(intel_dp->active_pipe != INVALID_PIPE); @@ -2034,7 +2034,7 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; u8 line_buf_depth; int ret; @@ -2205,7 +2205,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct link_config_limits limits; int common_len; int ret; @@ -2366,8 +2366,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); enum port port = encoder->port; struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_connector *intel_connector = intel_dp->attached_connector; @@ -2482,7 +2482,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; @@ -2509,7 
+2509,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder, * * CPT PCH is quite different, having many bits moved * to the TRANS_DP_CTL register instead. That - * configuration happens (oddly) in ironlake_pch_enable + * configuration happens (oddly) in ilk_pch_enable */ /* Preserve the BIOS-computed detected bit. This is @@ -2653,7 +2653,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp) * is locked */ -static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) +static u32 ilk_get_pp_control(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 control; @@ -2703,7 +2703,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) if (!edp_have_panel_power(intel_dp)) wait_panel_power_cycle(intel_dp); - pp = ironlake_get_pp_control(intel_dp); + pp = ilk_get_pp_control(intel_dp); pp |= EDP_FORCE_VDD; pp_stat_reg = _pp_stat_reg(intel_dp); @@ -2768,7 +2768,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) intel_dig_port->base.base.base.id, intel_dig_port->base.base.name); - pp = ironlake_get_pp_control(intel_dp); + pp = ilk_get_pp_control(intel_dp); pp &= ~EDP_FORCE_VDD; pp_ctrl_reg = _pp_ctrl_reg(intel_dp); @@ -2864,7 +2864,7 @@ static void edp_panel_on(struct intel_dp *intel_dp) wait_panel_power_cycle(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); - pp = ironlake_get_pp_control(intel_dp); + pp = ilk_get_pp_control(intel_dp); if (IS_GEN(dev_priv, 5)) { /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; @@ -2919,7 +2919,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n", dig_port->base.base.base.id, dig_port->base.base.name); - pp = ironlake_get_pp_control(intel_dp); + pp = ilk_get_pp_control(intel_dp); /* We need to switch off panel power _and_ force vdd, for otherwise some * panels get very unhappy and cease to work. */ pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | @@ -2968,7 +2968,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); u32 pp; - pp = ironlake_get_pp_control(intel_dp); + pp = ilk_get_pp_control(intel_dp); pp |= EDP_BLC_ENABLE; I915_WRITE(pp_ctrl_reg, pp); @@ -2980,7 +2980,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); if (!intel_dp_is_edp(intel_dp)) return; @@ -3004,7 +3004,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp) i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); u32 pp; - pp = ironlake_get_pp_control(intel_dp); + pp = ilk_get_pp_control(intel_dp); pp &= ~EDP_BLC_ENABLE; I915_WRITE(pp_ctrl_reg, pp); @@ -3018,7 +3018,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp) /* Disable backlight PP control and backlight PWM. 
*/ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); if (!intel_dp_is_edp(intel_dp)) return; @@ -3036,13 +3036,13 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) static void intel_edp_backlight_power(struct intel_connector *connector, bool enable) { - struct intel_dp *intel_dp = intel_attached_dp(&connector->base); + struct intel_dp *intel_dp = intel_attached_dp(connector); intel_wakeref_t wakeref; bool is_enabled; is_enabled = false; with_pps_lock(intel_dp, wakeref) - is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE; + is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; if (is_enabled == enable) return; @@ -3079,13 +3079,13 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) -static void ironlake_edp_pll_on(struct intel_dp *intel_dp, - const struct intel_crtc_state *pipe_config) +static void ilk_edp_pll_on(struct intel_dp *intel_dp, + const struct intel_crtc_state *pipe_config) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - assert_pipe_disabled(dev_priv, crtc->pipe); + assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); assert_dp_port_disabled(intel_dp); assert_edp_pll_disabled(dev_priv); @@ -3119,13 +3119,13 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp, udelay(200); } -static void ironlake_edp_pll_off(struct intel_dp *intel_dp, - const struct intel_crtc_state *old_crtc_state) +static void ilk_edp_pll_off(struct intel_dp *intel_dp, + const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - assert_pipe_disabled(dev_priv, crtc->pipe); + assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); assert_dp_port_disabled(intel_dp); assert_edp_pll_enabled(dev_priv); @@ -3258,7 +3258,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_wakeref_t wakeref; bool ret; @@ -3279,7 +3279,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 tmp, flags = 0; enum port port = encoder->port; struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); @@ -3363,7 +3363,7 @@ static void intel_disable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_dp->link_trained = false; @@ -3397,7 +3397,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dp *intel_dp = 
enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; /* @@ -3410,7 +3410,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder, /* Only ilk+ has port A */ if (port == PORT_A) - ironlake_edp_pll_off(intel_dp, old_crtc_state); + ilk_edp_pll_off(intel_dp, old_crtc_state); } static void vlv_post_disable_dp(struct intel_encoder *encoder, @@ -3548,7 +3548,7 @@ static void intel_enable_dp(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); u32 dp_reg = I915_READ(intel_dp->output_reg); enum pipe pipe = crtc->pipe; @@ -3608,14 +3608,14 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; intel_dp_prepare(encoder, pipe_config); /* Only ilk+ has port A */ if (port == PORT_A) - ironlake_edp_pll_on(intel_dp, pipe_config); + ilk_edp_pll_on(intel_dp, pipe_config); } static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) @@ -3658,7 +3658,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->pps_mutex); for_each_intel_dp(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); WARN(intel_dp->active_pipe == pipe, "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", @@ -3681,7 +3681,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); lockdep_assert_held(&dev_priv->pps_mutex); @@ -4203,7 +4203,7 @@ intel_dp_link_down(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum port port = encoder->port; u32 DP = intel_dp->DP; @@ -4903,7 +4903,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); /* Set test active flag here so userspace doesn't interrupt things */ - intel_dp->compliance.test_active = 1; + intel_dp->compliance.test_active = true; return DP_TEST_ACK; } @@ -4947,7 +4947,7 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) } /* Set test active flag here so userspace doesn't interrupt things */ - intel_dp->compliance.test_active = 1; + intel_dp->compliance.test_active = true; return test_result; } @@ -5096,7 +5096,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp 
*intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_connector *connector = intel_dp->attached_connector; struct drm_connector_state *conn_state; struct intel_crtc_state *crtc_state; @@ -5536,7 +5536,7 @@ static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv, static bool icp_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); if (intel_phy_is_combo(dev_priv, phy)) @@ -5651,7 +5651,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; enum drm_connector_status status; @@ -5755,7 +5755,7 @@ out: static void intel_dp_force(struct drm_connector *connector) { - struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *intel_encoder = &dig_port->base; struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); @@ -5790,7 +5790,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) } /* if eDP has no EDID, fall back to fixed mode */ - if (intel_dp_is_edp(intel_attached_dp(connector)) && + if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) && intel_connector->panel.fixed_mode) { struct drm_display_mode *mode; @@ -5808,7 +5808,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) static int intel_dp_connector_register(struct drm_connector *connector) { - struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); int ret; ret = intel_connector_register(connector); @@ -5830,7 +5830,7 @@ intel_dp_connector_register(struct drm_connector *connector) static void intel_dp_connector_unregister(struct drm_connector *connector) { - struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); drm_dp_cec_unregister_connector(&intel_dp->aux); drm_dp_aux_unregister(&intel_dp->aux); @@ -5839,7 +5839,7 @@ intel_dp_connector_unregister(struct drm_connector *connector) void intel_dp_encoder_flush_work(struct drm_encoder *encoder) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder)); struct intel_dp *intel_dp = &intel_dig_port->dp; intel_dp_mst_encoder_cleanup(intel_dig_port); @@ -5868,12 +5868,12 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder) intel_dp_encoder_flush_work(encoder); drm_encoder_cleanup(encoder); - kfree(enc_to_dig_port(encoder)); + kfree(enc_to_dig_port(to_intel_encoder(encoder))); } void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) @@ 
-5904,7 +5904,7 @@ static int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, u8 *an) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base); + struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base)); static const struct drm_dp_aux_msg msg = { .request = DP_AUX_NATIVE_WRITE, .address = DP_AUX_HDCP_AKSV, @@ -6514,7 +6514,7 @@ static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) void intel_dp_encoder_reset(struct drm_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); intel_wakeref_t wakeref; @@ -6693,7 +6693,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) intel_pps_get_registers(intel_dp, ®s); - pp_ctl = ironlake_get_pp_control(intel_dp); + pp_ctl = ilk_get_pp_control(intel_dp); /* Ensure PPS is unlocked */ if (!HAS_DDI(dev_priv)) @@ -6863,7 +6863,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, * soon as the new power sequencer gets initialized. */ if (force_disable_vdd) { - u32 pp = ironlake_get_pp_control(intel_dp); + u32 pp = ilk_get_pp_control(intel_dp); WARN(pp & PANEL_POWER_ON, "Panel power already on\n"); @@ -7660,7 +7660,7 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) if (encoder->type != INTEL_OUTPUT_DDI) continue; - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); if (!intel_dp->can_mst) continue; @@ -7681,7 +7681,7 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv) if (encoder->type != INTEL_OUTPUT_DDI) continue; - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); if (!intel_dp->can_mst) continue; diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 020422da2ae2..7c653f8c307f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -57,7 +57,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) */ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) { - struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); u8 read_val[2] = { 0x0 }; u16 level = 0; @@ -82,7 +82,7 @@ static void intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); u8 vals[2] = { 0x0 }; vals[0] = level; @@ -110,7 +110,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1; u8 pn, pn_min, pn_max; @@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st const struct drm_connector_state *conn_state) { struct intel_connector *connector = 
to_intel_connector(conn_state->connector); - struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode; if (drm_dp_dpcd_readb(&intel_dp->aux, @@ -222,13 +222,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state) { - set_aux_backlight_enable(enc_to_intel_dp(old_conn_state->best_encoder), false); + set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)), + false); } static int intel_dp_aux_setup_backlight(struct intel_connector *connector, enum pipe pipe) { - struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); struct intel_panel *panel = &connector->panel; if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) @@ -247,7 +248,7 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector, static bool intel_dp_aux_display_control_capable(struct intel_connector *connector) { - struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); /* Check the eDP Display control capabilities registers to determine if * the panel can support backlight control over the aux channel diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 53bc14d0e953..cba68c5a80fa 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -43,7 +43,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, struct link_config_limits *limits) { struct drm_atomic_state *state = crtc_state->uapi.state; - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; struct intel_connector *connector = to_intel_connector(conn_state->connector); @@ -88,12 +88,58 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, return 0; } +/* + * Iterate over all connectors and return the smallest transcoder in the MST + * stream + */ +static enum transcoder +intel_dp_mst_master_trans_compute(struct intel_atomic_state *state, + struct intel_dp *mst_port) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_digital_connector_state *conn_state; + struct intel_connector *connector; + enum pipe ret = I915_MAX_PIPES; + int i; + + if (INTEL_GEN(dev_priv) < 12) + return INVALID_TRANSCODER; + + for_each_new_intel_connector_in_state(state, connector, conn_state, i) { + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + + if (connector->mst_port != mst_port || !conn_state->base.crtc) + continue; + + crtc = to_intel_crtc(conn_state->base.crtc); + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + if (!crtc_state->uapi.active) + continue; + + /* + * Using crtc->pipe because crtc_state->cpu_transcoder is + * computed, so other CRTCs could have a non-computed + * cpu_transcoder + */ + if (crtc->pipe < ret) + ret = crtc->pipe; + } + + if (ret == I915_MAX_PIPES) + return INVALID_TRANSCODER; + + /* Simple cast works because TGL doesn't have an eDP transcoder */ + return (enum transcoder)ret; +} + static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct
intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { + struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; struct intel_connector *connector = to_intel_connector(conn_state->connector); @@ -155,24 +201,91 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); + pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp); + + return 0; +} + +/* + * If one of the connectors in an MST stream needs a modeset, mark all CRTCs + * that share the same MST stream as mode changed; + * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care of doing + * a fastset when possible. + */ +static int +intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, + struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_connector_list_iter connector_list_iter; + struct intel_connector *connector_iter; + + if (INTEL_GEN(dev_priv) < 12) + return 0; + + if (!intel_connector_needs_modeset(state, &connector->base)) + return 0; + + drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter); + for_each_intel_connector_iter(connector_iter, &connector_list_iter) { + struct intel_digital_connector_state *conn_iter_state; + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int ret; + + if (connector_iter->mst_port != connector->mst_port || + connector_iter == connector) + continue; + + conn_iter_state = intel_atomic_get_digital_connector_state(state, + connector_iter); + if (IS_ERR(conn_iter_state)) { + drm_connector_list_iter_end(&connector_list_iter); + return PTR_ERR(conn_iter_state); + } + + if (!conn_iter_state->base.crtc) + continue; + + crtc = to_intel_crtc(conn_iter_state->base.crtc); + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) { + drm_connector_list_iter_end(&connector_list_iter); + return PTR_ERR(crtc_state); + } + + ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); + if (ret) { + drm_connector_list_iter_end(&connector_list_iter); + return ret; + } + crtc_state->uapi.mode_changed = true; + } + drm_connector_list_iter_end(&connector_list_iter); + return 0; } static int intel_dp_mst_atomic_check(struct drm_connector *connector, - struct drm_atomic_state *state) + struct drm_atomic_state *_state) { + struct intel_atomic_state *state = to_intel_atomic_state(_state); struct drm_connector_state *new_conn_state = - drm_atomic_get_new_connector_state(state, connector); + drm_atomic_get_new_connector_state(&state->base, connector); struct drm_connector_state *old_conn_state = - drm_atomic_get_old_connector_state(state, connector); + drm_atomic_get_old_connector_state(&state->base, connector); struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_crtc *new_crtc = new_conn_state->crtc; struct drm_dp_mst_topology_mgr *mgr; int ret; - ret = intel_digital_connector_atomic_check(connector, state); + ret = intel_digital_connector_atomic_check(connector, &state->base); + if (ret) + return ret; + + ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state); if (ret) return ret; @@ -183,12 +296,9 @@
intel_dp_mst_atomic_check(struct drm_connector *connector, * connector */ if (new_crtc) { - struct intel_atomic_state *intel_state = - to_intel_atomic_state(state); struct intel_crtc *intel_crtc = to_intel_crtc(new_crtc); struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(intel_state, - intel_crtc); + intel_atomic_get_new_crtc_state(state, intel_crtc); if (!crtc_state || !drm_atomic_crtc_needs_modeset(&crtc_state->uapi) || @@ -196,8 +306,8 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, return 0; } - mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr; - ret = drm_dp_atomic_release_vcpi_slots(state, mgr, + mgr = &enc_to_mst(to_intel_encoder(old_conn_state->best_encoder))->primary->dp.mst_mgr; + ret = drm_dp_atomic_release_vcpi_slots(&state->base, mgr, intel_connector->port); return ret; @@ -207,7 +317,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_connector *connector = @@ -231,29 +341,51 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); bool last_mst_stream; + u32 val; intel_dp->active_mst_links--; last_mst_stream = intel_dp->active_mst_links == 0; + WARN_ON(INTEL_GEN(dev_priv) >= 12 && last_mst_stream && + !intel_dp_mst_is_master_trans(old_crtc_state)); intel_crtc_vblank_off(old_crtc_state); intel_disable_pipe(old_crtc_state); + drm_dp_update_payload_part2(&intel_dp->mst_mgr); + + val = I915_READ(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder)); + val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC; + I915_WRITE(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val); + + if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, + DP_TP_STATUS_ACT_SENT, 1)) + DRM_ERROR("Timed out waiting for ACT sent when disabling\n"); + drm_dp_check_act_status(&intel_dp->mst_mgr); + + drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port); + intel_ddi_disable_transcoder_func(old_crtc_state); if (INTEL_GEN(dev_priv) >= 9) - skylake_scaler_disable(old_crtc_state); + skl_scaler_disable(old_crtc_state); else - ironlake_pfit_disable(old_crtc_state); + ilk_pfit_disable(old_crtc_state); /* + * Power down mst path before disabling the port, otherwise we end + * up getting interrupts from the sink upon detecting link loss. 
+ */ + drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, + false); + /* * From TGL spec: "If multi-stream slave transcoder: Configure * Transcoder Clock Select to direct no clock to the transcoder" * @@ -263,19 +395,6 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, if (INTEL_GEN(dev_priv) < 12 || !last_mst_stream) intel_ddi_disable_pipe_clock(old_crtc_state); - /* this can fail */ - drm_dp_check_act_status(&intel_dp->mst_mgr); - /* and this can also fail */ - drm_dp_update_payload_part2(&intel_dp->mst_mgr); - - drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port); - - /* - * Power down mst path before disabling the port, otherwise we end - * up getting interrupts from the sink upon detecting link loss. - */ - drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, - false); intel_mst->connector = NULL; if (last_mst_stream) @@ -289,7 +408,7 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; @@ -302,7 +421,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -318,6 +437,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, connector->encoder = encoder; intel_mst->connector = connector; first_mst_stream = intel_dp->active_mst_links == 0; + WARN_ON(INTEL_GEN(dev_priv) >= 12 && first_mst_stream && + !intel_dp_mst_is_master_trans(pipe_config)); DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); @@ -360,7 +481,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -381,7 +502,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); *pipe = intel_mst->pipe; if (intel_mst->connector) return true; @@ -391,7 +512,7 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; intel_ddi_get_config(&intel_dig_port->base, pipe_config); @@ -499,7 +620,7 @@ static const struct 
drm_connector_helper_funcs intel_dp_mst_connector_helper_fun static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder) { - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder)); drm_encoder_cleanup(encoder); kfree(intel_mst); @@ -723,3 +844,14 @@ intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port) drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr); /* encoders will get killed by normal cleanup */ } + +bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder; +} + +bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->mst_master_transcoder != INVALID_TRANSCODER && + crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder; +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h index f660ad80db04..854724f68f09 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.h +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h @@ -6,10 +6,15 @@ #ifndef __INTEL_DP_MST_H__ #define __INTEL_DP_MST_H__ +#include <linux/types.h> + struct intel_digital_port; +struct intel_crtc_state; int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port); +bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state); +bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state); #endif /* __INTEL_DP_MST_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 704f38681c4b..6fb1f7a7364e 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -642,7 +642,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, bool uniq_trans_scale) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum dpio_channel ch = vlv_dport_to_channel(dport); enum pipe pipe = intel_crtc->pipe; @@ -738,7 +738,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, bool reset) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); + enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder)); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; u32 val; @@ -781,7 +781,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, void chv_phy_pre_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dport_to_channel(dport); @@ -861,7 +861,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder, void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct 
intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dport = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -940,7 +940,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, void chv_phy_release_cl2_override(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (dport->release_cl2_override) { @@ -989,7 +989,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); enum dpio_channel port = vlv_dport_to_channel(dport); enum pipe pipe = intel_crtc->pipe; @@ -1014,7 +1014,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder, void vlv_phy_pre_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel port = vlv_dport_to_channel(dport); @@ -1043,7 +1043,7 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder, void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dport = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1073,7 +1073,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, void vlv_phy_reset_lanes(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum dpio_channel port = vlv_dport_to_channel(dport); diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 728a4b045de7..c75e34d87111 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2972,8 +2972,8 @@ static void icl_update_active_dpll(struct intel_atomic_state *state, enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT; primary_port = encoder->type == INTEL_OUTPUT_DP_MST ? 
- enc_to_mst(&encoder->base)->primary : - enc_to_dig_port(&encoder->base); + enc_to_mst(encoder)->primary : + enc_to_dig_port(encoder); if (primary_port && (primary_port->tc_mode == TC_PORT_DP_ALT || diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h index b15be5814599..19f78a4022d3 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.h +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -45,8 +45,9 @@ struct intel_dsi { struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS]; intel_wakeref_t io_wakeref[I915_MAX_PORTS]; - /* GPIO Desc for CRC based Panel control */ + /* GPIO Desc for panel and backlight control */ struct gpio_desc *gpio_panel; + struct gpio_desc *gpio_backlight; struct intel_connector *attached_connector; @@ -68,6 +69,9 @@ struct intel_dsi { /* number of DSI lanes */ unsigned int lane_count; + /* i2c bus associated with the slave device */ + int i2c_bus_num; + /* * video mode pixel format * @@ -141,9 +145,9 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h) #define for_each_dsi_phy(__phy, __phys_mask) \ for_each_phy_masked(__phy, __phys_mask) -static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) +static inline struct intel_dsi *enc_to_intel_dsi(struct intel_encoder *encoder) { - return container_of(encoder, struct intel_dsi, base.base); + return container_of(&encoder->base, struct intel_dsi, base.base); } static inline bool is_vid_mode(struct intel_dsi *intel_dsi) @@ -158,7 +162,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder) { - return enc_to_intel_dsi(&encoder->base)->ports; + return enc_to_intel_dsi(encoder)->ports; } /* icl_dsi.c */ @@ -203,6 +207,8 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); /* intel_dsi_vbt.c */ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); +void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on); +void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi); void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, enum mipi_seq seq_id); void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index bb3fd8b786a2..c87838843d0b 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector) { struct intel_encoder *encoder = connector->encoder; - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi_device; u8 data = 0; enum port port; @@ -64,7 +64,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector) static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); struct mipi_dsi_device *dsi_device; u8 data = level; enum port port; @@ -79,7 +79,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 static void dcs_disable_backlight(const struct drm_connector_state *conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder); + struct intel_dsi *intel_dsi = 
enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); struct mipi_dsi_device *dsi_device; enum port port; @@ -113,7 +113,7 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state) static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; struct mipi_dsi_device *dsi_device; enum port port; diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index f90946c912ee..89fb0d90b694 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -25,7 +25,10 @@ */ #include <linux/gpio/consumer.h> +#include <linux/gpio/machine.h> #include <linux/mfd/intel_soc_pmic.h> +#include <linux/pinctrl/consumer.h> +#include <linux/pinctrl/machine.h> #include <linux/slab.h> #include <asm/intel-mid.h> @@ -83,6 +86,12 @@ static struct gpio_map vlv_gpio_table[] = { { VLV_GPIO_NC_11_PANEL1_BKLTCTL }, }; +struct i2c_adapter_lookup { + u16 slave_addr; + struct intel_dsi *intel_dsi; + acpi_handle dev_handle; +}; + #define CHV_GPIO_IDX_START_N 0 #define CHV_GPIO_IDX_START_E 73 #define CHV_GPIO_IDX_START_SW 100 @@ -375,11 +384,98 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) return data; } +static int i2c_adapter_lookup(struct acpi_resource *ares, void *data) +{ + struct i2c_adapter_lookup *lookup = data; + struct intel_dsi *intel_dsi = lookup->intel_dsi; + struct acpi_resource_i2c_serialbus *sb; + struct i2c_adapter *adapter; + acpi_handle adapter_handle; + acpi_status status; + + if (intel_dsi->i2c_bus_num >= 0 || + !i2c_acpi_get_i2c_resource(ares, &sb)) + return 1; + + if (lookup->slave_addr != sb->slave_address) + return 1; + + status = acpi_get_handle(lookup->dev_handle, + sb->resource_source.string_ptr, + &adapter_handle); + if (ACPI_FAILURE(status)) + return 1; + + adapter = i2c_acpi_find_adapter_by_handle(adapter_handle); + if (adapter) + intel_dsi->i2c_bus_num = adapter->nr; + + return 1; +} + static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) { - DRM_DEBUG_KMS("Skipping I2C element execution\n"); + struct drm_device *drm_dev = intel_dsi->base.base.dev; + struct device *dev = &drm_dev->pdev->dev; + struct i2c_adapter *adapter; + struct acpi_device *acpi_dev; + struct list_head resource_list; + struct i2c_adapter_lookup lookup; + struct i2c_msg msg; + int ret; + u8 vbt_i2c_bus_num = *(data + 2); + u16 slave_addr = *(u16 *)(data + 3); + u8 reg_offset = *(data + 5); + u8 payload_size = *(data + 6); + u8 *payload_data; + + if (intel_dsi->i2c_bus_num < 0) { + intel_dsi->i2c_bus_num = vbt_i2c_bus_num; + + acpi_dev = ACPI_COMPANION(dev); + if (acpi_dev) { + memset(&lookup, 0, sizeof(lookup)); + lookup.slave_addr = slave_addr; + lookup.intel_dsi = intel_dsi; + lookup.dev_handle = acpi_device_handle(acpi_dev); + + INIT_LIST_HEAD(&resource_list); + acpi_dev_get_resources(acpi_dev, &resource_list, + i2c_adapter_lookup, + &lookup); + acpi_dev_free_resource_list(&resource_list); + } + } - return data + *(data + 6) + 7; + adapter = i2c_get_adapter(intel_dsi->i2c_bus_num); + if (!adapter) { + DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n"); + goto err_bus; + } + + payload_data = kzalloc(payload_size + 1, 
GFP_KERNEL); + if (!payload_data) + goto err_alloc; + + payload_data[0] = reg_offset; + memcpy(&payload_data[1], (data + 7), payload_size); + + msg.addr = slave_addr; + msg.flags = 0; + msg.len = payload_size + 1; + msg.buf = payload_data; + + ret = i2c_transfer(adapter, &msg, 1); + if (ret < 0) + DRM_DEV_ERROR(dev, + "Failed to xfer payload of size (%u) to reg (%u)\n", + payload_size, reg_offset); + + kfree(payload_data); +err_alloc: + i2c_put_adapter(adapter); +err_bus: + return data + payload_size + 7; } static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data) @@ -453,8 +549,8 @@ static const char *sequence_name(enum mipi_seq seq_id) return "(unknown)"; } -void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, - enum mipi_seq seq_id) +static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, + enum mipi_seq seq_id) { struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); const u8 *data; @@ -519,6 +615,22 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, } } +void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, + enum mipi_seq seq_id) +{ + if (seq_id == MIPI_SEQ_POWER_ON && intel_dsi->gpio_panel) + gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1); + if (seq_id == MIPI_SEQ_BACKLIGHT_ON && intel_dsi->gpio_backlight) + gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 1); + + intel_dsi_vbt_exec(intel_dsi, seq_id); + + if (seq_id == MIPI_SEQ_POWER_OFF && intel_dsi->gpio_panel) + gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0); + if (seq_id == MIPI_SEQ_BACKLIGHT_OFF && intel_dsi->gpio_backlight) + gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0); +} + void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) { struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); @@ -664,6 +776,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) intel_dsi->panel_off_delay = pps->panel_off_delay / 10; intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10; + intel_dsi->i2c_bus_num = -1; + /* a regular driver would get the device in probe */ for_each_dsi_port(port, intel_dsi->ports) { mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device); @@ -671,3 +785,110 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) return true; } + +/* + * On some BYT/CHT devs some sequences are incomplete and we need to manually + * control some GPIOs. We need to add a GPIO lookup table before we get these. + * If the GOP did not initialize the panel (HDMI inserted) we may need to also + * change the pinmux for the SoC's PWM0 pin from GPIO to PWM. + */ +static struct gpiod_lookup_table pmic_panel_gpio_table = { + /* Intel GFX is consumer */ + .dev_id = "0000:00:02.0", + .table = { + /* Panel EN/DISABLE */ + GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH), + { } + }, +}; + +static struct gpiod_lookup_table soc_panel_gpio_table = { + .dev_id = "0000:00:02.0", + .table = { + GPIO_LOOKUP("INT33FC:01", 10, "backlight", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:01", 11, "panel", GPIO_ACTIVE_HIGH), + { } + }, +}; + +static const struct pinctrl_map soc_pwm_pinctrl_map[] = { + PIN_MAP_MUX_GROUP("0000:00:02.0", "soc_pwm0", "INT33FC:00", + "pwm0_grp", "pwm"), +}; + +void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) +{ + struct drm_device *dev = intel_dsi->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; + enum gpiod_flags flags = panel_is_on ? 
GPIOD_OUT_HIGH : GPIOD_OUT_LOW; + bool want_backlight_gpio = false; + bool want_panel_gpio = false; + struct pinctrl *pinctrl; + int ret; + + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + mipi_config->pwm_blc == PPS_BLC_PMIC) { + gpiod_add_lookup_table(&pmic_panel_gpio_table); + want_panel_gpio = true; + } + + if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) { + gpiod_add_lookup_table(&soc_panel_gpio_table); + want_panel_gpio = true; + want_backlight_gpio = true; + + /* Ensure PWM0 pin is muxed as PWM instead of GPIO */ + ret = pinctrl_register_mappings(soc_pwm_pinctrl_map, + ARRAY_SIZE(soc_pwm_pinctrl_map)); + if (ret) + DRM_ERROR("Failed to register pwm0 pinmux mapping\n"); + + pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0"); + if (IS_ERR(pinctrl)) + DRM_ERROR("Failed to set pinmux to PWM\n"); + } + + if (want_panel_gpio) { + intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags); + if (IS_ERR(intel_dsi->gpio_panel)) { + DRM_ERROR("Failed to own gpio for panel control\n"); + intel_dsi->gpio_panel = NULL; + } + } + + if (want_backlight_gpio) { + intel_dsi->gpio_backlight = + gpiod_get(dev->dev, "backlight", flags); + if (IS_ERR(intel_dsi->gpio_backlight)) { + DRM_ERROR("Failed to own gpio for backlight control\n"); + intel_dsi->gpio_backlight = NULL; + } + } +} + +void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi) +{ + struct drm_device *dev = intel_dsi->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; + + if (intel_dsi->gpio_panel) { + gpiod_put(intel_dsi->gpio_panel); + intel_dsi->gpio_panel = NULL; + } + + if (intel_dsi->gpio_backlight) { + gpiod_put(intel_dsi->gpio_backlight); + intel_dsi->gpio_backlight = NULL; + } + + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + mipi_config->pwm_blc == PPS_BLC_PMIC) + gpiod_remove_lookup_table(&pmic_panel_gpio_table); + + if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) { + pinctrl_unregister_mappings(soc_pwm_pinctrl_map); + gpiod_remove_lookup_table(&soc_panel_gpio_table); + } +} diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index a74dc5b915d1..86a337c9d85d 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -125,7 +125,7 @@ static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder) return container_of(encoder, struct intel_dvo, base); } -static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) +static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector) { return enc_to_dvo(intel_attached_encoder(connector)); } @@ -134,7 +134,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); u32 tmp; tmp = I915_READ(intel_dvo->dev.dvo_reg); @@ -220,7 +220,7 @@ static enum drm_mode_status intel_dvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector)); const struct drm_display_mode *fixed_mode = to_intel_connector(connector)->panel.fixed_mode; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; @@ -311,7 +311,7 @@ 
static void intel_dvo_pre_enable(struct intel_encoder *encoder, static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector, bool force) { - struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector)); DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index ab61f88d1d33..6c83b350525d 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -126,8 +126,8 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, } } -static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) +static void ilk_set_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); u32 bit = (pipe == PIPE_A) ? @@ -139,7 +139,7 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, ilk_disable_display_irq(dev_priv, bit); } -static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc) +static void ivb_check_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -157,9 +157,9 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc) DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe)); } -static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, - bool enable, bool old) +static void ivb_set_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable, + bool old) { struct drm_i915_private *dev_priv = to_i915(dev); if (enable) { @@ -180,8 +180,8 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, } } -static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) +static void bdw_set_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -264,11 +264,11 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, if (HAS_GMCH(dev_priv)) i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); else if (IS_GEN_RANGE(dev_priv, 5, 6)) - ironlake_set_fifo_underrun_reporting(dev, pipe, enable); + ilk_set_fifo_underrun_reporting(dev, pipe, enable); else if (IS_GEN(dev_priv, 7)) - ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); + ivb_set_fifo_underrun_reporting(dev, pipe, enable, old); else if (INTEL_GEN(dev_priv) >= 8) - broadwell_set_fifo_underrun_reporting(dev, pipe, enable); + bdw_set_fifo_underrun_reporting(dev, pipe, enable); return old; } @@ -427,7 +427,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv) if (HAS_GMCH(dev_priv)) i9xx_check_fifo_underruns(crtc); else if (IS_GEN(dev_priv, 7)) - ivybridge_check_fifo_underruns(crtc); + ivb_check_fifo_underruns(crtc); } spin_unlock_irq(&dev_priv->irq_lock); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 685589064d10..93ac0f296852 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -85,16 +85,17 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv, "HDMI transcoder function enabled, 
expecting disabled\n"); } -struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) +struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder) { struct intel_digital_port *intel_dig_port = - container_of(encoder, struct intel_digital_port, base.base); + container_of(&encoder->base, struct intel_digital_port, + base.base); return &intel_dig_port->hdmi; } -static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) +static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector) { - return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); + return enc_to_intel_hdmi(intel_attached_encoder(connector)); } static u32 g4x_infoframe_index(unsigned int type) @@ -602,7 +603,7 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u32 val, ret = 0; int i; @@ -646,7 +647,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder, enum hdmi_infoframe_type type, const union hdmi_infoframe *frame) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); u8 buffer[VIDEO_DIP_DATA_SIZE]; ssize_t len; @@ -675,7 +676,7 @@ void intel_read_infoframe(struct intel_encoder *encoder, enum hdmi_infoframe_type type, union hdmi_infoframe *frame) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); u8 buffer[VIDEO_DIP_DATA_SIZE]; int ret; @@ -855,7 +856,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; i915_reg_t reg = VIDEO_DIP_CTL; u32 val = I915_READ(reg); @@ -1038,7 +1039,7 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -1097,7 +1098,7 @@ static void cpt_set_infoframes(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -1146,7 +1147,7 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = 
VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); u32 port = VIDEO_DIP_PORT(encoder->port); @@ -1737,7 +1738,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; u32 hdmi_val; @@ -1774,7 +1775,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); intel_wakeref_t wakeref; bool ret; @@ -1793,7 +1794,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, static void intel_hdmi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp, flags = 0; @@ -1874,7 +1875,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 temp; temp = I915_READ(intel_hdmi->hdmi_reg); @@ -1896,7 +1897,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 temp; temp = I915_READ(intel_hdmi->hdmi_reg); @@ -1947,7 +1948,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); enum pipe pipe = crtc->pipe; u32 temp; @@ -2007,7 +2008,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); @@ -2160,7 +2161,7 @@ static enum drm_mode_status intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_hdmi *hdmi = intel_attached_hdmi(connector); + struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); struct drm_device *dev = intel_hdmi_to_dev(hdmi); struct drm_i915_private *dev_priv = to_i915(dev); enum drm_mode_status status; @@ -2316,7 +2317,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int clock, bool force_dvi) { - struct intel_hdmi *intel_hdmi = 
enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); int bpc; for (bpc = 12; bpc >= 10; bpc -= 2) { @@ -2334,7 +2335,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, bool force_dvi) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int bpc, clock = adjusted_mode->crtc_clock; @@ -2404,7 +2405,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_connector *connector = conn_state->connector; @@ -2496,7 +2497,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, static void intel_hdmi_unset_edid(struct drm_connector *connector) { - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); intel_hdmi->has_hdmi_sink = false; intel_hdmi->has_audio = false; @@ -2512,7 +2513,7 @@ static void intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_hdmi *hdmi = intel_attached_hdmi(connector); + struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); enum port port = hdmi_to_dig_port(hdmi)->base.port; struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); @@ -2559,7 +2560,7 @@ static bool intel_hdmi_set_edid(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); intel_wakeref_t wakeref; struct edid *edid; bool connected = false; @@ -2600,7 +2601,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) { enum drm_connector_status status = connector_status_disconnected; struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base; intel_wakeref_t wakeref; @@ -2663,7 +2664,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct intel_digital_port *intel_dig_port = - enc_to_dig_port(&encoder->base); + enc_to_dig_port(encoder); intel_hdmi_prepare(encoder, pipe_config); @@ -2676,7 +2677,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); vlv_phy_pre_encoder_enable(encoder, pipe_config); @@ -2746,7 +2747,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state 
*conn_state) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dport = enc_to_dig_port(encoder); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -2772,7 +2773,7 @@ static struct i2c_adapter * intel_hdmi_get_i2c_adapter(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); } @@ -2816,7 +2817,7 @@ intel_hdmi_connector_register(struct drm_connector *connector) static void intel_hdmi_destroy(struct drm_connector *connector) { - struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier; + struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier; cec_notifier_conn_unregister(n); @@ -2906,7 +2907,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, bool scrambling) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct drm_scrambling *sink_scrambling = &connector->display_info.hdmi.scdc.scrambling; struct i2c_adapter *adapter = diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index cf1ea5427639..d3659d0b408b 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -29,7 +29,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port); void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); -struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); +struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder); int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index fc29046d48ea..99d3a3c7989e 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -302,7 +302,7 @@ intel_encoder_hotplug(struct intel_encoder *encoder, static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) { return intel_encoder_is_dig_port(encoder) && - enc_to_dig_port(&encoder->base)->hpd_pulse != NULL; + enc_to_dig_port(encoder)->hpd_pulse != NULL; } static void i915_digport_work_func(struct work_struct *work) @@ -335,7 +335,7 @@ static void i915_digport_work_func(struct work_struct *work) if (!long_hpd && !short_hpd) continue; - dig_port = enc_to_dig_port(&encoder->base); + dig_port = enc_to_dig_port(encoder); ret = dig_port->hpd_pulse(dig_port, long_hpd); if (ret == IRQ_NONE) { diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index 5145ff8b962b..d807c5648c87 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -434,8 +434,8 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, const void *frame, ssize_t len) { bool ret; - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base); + struct intel_dp *intel_dp = 
enc_to_intel_dp(encoder); + struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); /* LSPCON only needs AVI IF */ if (type != HDMI_INFOFRAME_TYPE_AVI) @@ -472,7 +472,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, ssize_t ret; union hdmi_infoframe frame; u8 buf[VIDEO_DIP_DATA_SIZE]; - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_lspcon *lspcon = &dig_port->lspcon; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -522,7 +522,7 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { /* FIXME actually read this from the hw */ - return enc_to_intel_lspcon(&encoder->base)->active; + return enc_to_intel_lspcon(encoder)->active; } void lspcon_resume(struct intel_lspcon *lspcon) diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c index 2746512f4466..520408e83681 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c @@ -98,7 +98,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, break; case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: - dig_port = enc_to_dig_port(&encoder->base); + dig_port = enc_to_dig_port(encoder); switch (dig_port->base.port) { case PORT_B: *source = INTEL_PIPE_CRC_SOURCE_DP_B; diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 16e9ff47d519..89c9cf5f38d2 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -1523,3 +1523,27 @@ bool intel_psr_enabled(struct intel_dp *intel_dp) return ret; } + +void intel_psr_atomic_check(struct drm_connector *connector, + struct drm_connector_state *old_state, + struct drm_connector_state *new_state) +{ + struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_connector *intel_connector; + struct intel_digital_port *dig_port; + struct drm_crtc_state *crtc_state; + + if (!CAN_PSR(dev_priv) || !new_state->crtc || + dev_priv->psr.initially_probed) + return; + + intel_connector = to_intel_connector(connector); + dig_port = enc_to_dig_port(intel_connector->encoder); + if (dev_priv->psr.dp != &dig_port->dp) + return; + + crtc_state = drm_atomic_get_new_crtc_state(new_state->state, + new_state->crtc); + crtc_state->mode_changed = true; + dev_priv->psr.initially_probed = true; +} diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 46e4de8b8cd5..c58a1d438808 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -8,6 +8,8 @@ #include "intel_frontbuffer.h" +struct drm_connector; +struct drm_connector_state; struct drm_i915_private; struct intel_crtc_state; struct intel_dp; @@ -35,5 +37,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp); int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, u32 *out_value); bool intel_psr_enabled(struct intel_dp *intel_dp); +void intel_psr_atomic_check(struct drm_connector *connector, + struct drm_connector_state *old_state, + struct drm_connector_state *new_state); #endif /* __INTEL_PSR_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 8758ee2a4442..e8819fd21e03 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -180,7 
+180,7 @@ static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder) return container_of(encoder, struct intel_sdvo, base); } -static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) +static struct intel_sdvo *intel_attached_sdvo(struct intel_connector *connector) { return to_sdvo(intel_attached_encoder(connector)); } @@ -1551,7 +1551,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(&connector->base); - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); u16 active_outputs = 0; intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); @@ -1823,7 +1823,7 @@ static enum drm_mode_status intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; @@ -1941,7 +1941,7 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) static struct edid * intel_sdvo_get_edid(struct drm_connector *connector) { - struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector)); return drm_get_edid(connector, &sdvo->ddc); } @@ -1959,7 +1959,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector) static enum drm_connector_status intel_sdvo_tmds_sink_detect(struct drm_connector *connector) { - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status status; @@ -2028,7 +2028,7 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector, bool force) { u16 response; - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; @@ -2175,7 +2175,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = { static void intel_sdvo_get_tv_modes(struct drm_connector *connector) { - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); const struct drm_connector_state *conn_state = connector->state; struct intel_sdvo_sdtv_resolution_request tv_res; u32 reply = 0, format_map = 0; @@ -2215,7 +2215,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(connector->dev); struct drm_display_mode *newmode; @@ -2379,7 +2379,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector, static int intel_sdvo_connector_register(struct drm_connector *connector) { - struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *sdvo = 
intel_attached_sdvo(to_intel_connector(connector)); int ret; ret = intel_connector_register(connector); @@ -2394,7 +2394,7 @@ intel_sdvo_connector_register(struct drm_connector *connector) static void intel_sdvo_connector_unregister(struct drm_connector *connector) { - struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector)); sysfs_remove_link(&connector->kdev->kobj, sdvo->ddc.dev.kobj.name); @@ -2932,7 +2932,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo) list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { - if (intel_attached_encoder(connector) == &intel_sdvo->base) { + if (intel_attached_encoder(to_intel_connector(connector)) == &intel_sdvo->base) { drm_connector_unregister(connector); intel_connector_destroy(connector); } diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 3f7b8f2ff671..fca77ec1e0dd 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -583,15 +583,16 @@ skl_program_plane(struct intel_plane *plane, const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 surf_addr = plane_state->color_plane[color_plane].offset; u32 stride = skl_plane_stride(plane_state, color_plane); - u32 aux_dist = plane_state->color_plane[1].offset - surf_addr; - u32 aux_stride = skl_plane_stride(plane_state, 1); + const struct drm_framebuffer *fb = plane_state->hw.fb; + int aux_plane = intel_main_to_aux_plane(fb, color_plane); + u32 aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr; + u32 aux_stride = skl_plane_stride(plane_state, aux_plane); int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 x = plane_state->color_plane[color_plane].x; u32 y = plane_state->color_plane[color_plane].y; u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - const struct drm_framebuffer *fb = plane_state->hw.fb; u8 alpha = plane_state->hw.alpha >> 8; u32 plane_color_ctl = 0; unsigned long irqflags; @@ -2106,7 +2107,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, fb->modifier == I915_FORMAT_MOD_Yf_TILED || fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS)) { + fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) { DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n"); return -EINVAL; } @@ -2578,7 +2580,16 @@ static const u64 skl_plane_format_modifiers_ccs[] = { DRM_FORMAT_MOD_INVALID }; -static const u64 gen12_plane_format_modifiers_ccs[] = { +static const u64 gen12_plane_format_modifiers_mc_ccs[] = { + I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, + I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, + I915_FORMAT_MOD_Y_TILED, + I915_FORMAT_MOD_X_TILED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const u64 gen12_plane_format_modifiers_rc_ccs[] = { I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, I915_FORMAT_MOD_Y_TILED, I915_FORMAT_MOD_X_TILED, @@ -2743,10 +2754,21 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, } } +static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id) +{ + return plane_id < PLANE_SPRITE4; +} + static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { + struct intel_plane *plane = 
to_intel_plane(_plane); + switch (modifier) { + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + if (!gen12_plane_supports_mc_ccs(plane->id)) + return false; + /* fall through */ case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_X_TILED: case I915_FORMAT_MOD_Y_TILED: @@ -2764,11 +2786,6 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, if (is_ccs_modifier(modifier)) return true; /* fall through */ - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -2777,6 +2794,14 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: + if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS) + return true; + /* fall through */ + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_XVYU2101010: case DRM_FORMAT_C8: case DRM_FORMAT_XBGR16161616F: @@ -2910,6 +2935,14 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, } } +static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id) +{ + if (gen12_plane_supports_mc_ccs(plane_id)) + return gen12_plane_format_modifiers_mc_ccs; + else + return gen12_plane_format_modifiers_rc_ccs; +} + static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { @@ -2975,7 +3008,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); if (INTEL_GEN(dev_priv) >= 12) { - modifiers = gen12_plane_format_modifiers_ccs; + modifiers = gen12_get_plane_modifiers(plane_id); plane_funcs = &gen12_plane_funcs; } else { if (plane->has_ccs) diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 50703536436c..c75e0ceecee6 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -898,7 +898,7 @@ static struct intel_tv *enc_to_tv(struct intel_encoder *encoder) return container_of(encoder, struct intel_tv, base); } -static struct intel_tv *intel_attached_tv(struct drm_connector *connector) +static struct intel_tv *intel_attached_tv(struct intel_connector *connector) { return enc_to_tv(intel_attached_encoder(connector)); } @@ -1527,7 +1527,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); - assert_pipe_disabled(dev_priv, intel_crtc->pipe); + assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); /* Filter ctl must be set before TV_WIN_SIZE */ tv_filter_ctl = TV_AUTO_SCALE; @@ -1662,7 +1662,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, */ static void intel_tv_find_better_format(struct drm_connector *connector) { - struct intel_tv *intel_tv = intel_attached_tv(connector); + struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector)); const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); int i; @@ -1689,7 +1689,7 @@ intel_tv_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { - struct intel_tv *intel_tv = intel_attached_tv(connector); + struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector)); enum drm_connector_status status; int type; diff --git 
a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 6bab08db5d75..9e6aaa302e40 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -943,7 +943,7 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; struct drm_dsc_picture_parameter_set pps; enum port port; @@ -961,7 +961,7 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, static void intel_dsc_dp_pps_write(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 21e820299107..daf4fc3dab6f 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -23,7 +23,6 @@ * Author: Jani Nikula <jani.nikula@intel.com> */ -#include <linux/gpio/consumer.h> #include <linux/slab.h> #include <drm/drm_atomic_helper.h> @@ -319,7 +318,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, static bool glk_dsi_enable_io(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 tmp; bool cold_boot = false; @@ -367,7 +366,7 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder) static void glk_dsi_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; @@ -438,7 +437,7 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) static void bxt_dsi_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; @@ -465,7 +464,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder) static void vlv_dsi_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; @@ -516,7 +515,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; @@ -546,7 +545,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder) { struct drm_i915_private 
*dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 tmp; @@ -579,7 +578,7 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder) static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; DRM_DEBUG_KMS("\n"); @@ -625,7 +624,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { @@ -681,7 +680,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) { @@ -745,7 +744,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct drm_crtc *crtc = pipe_config->uapi.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -793,9 +792,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, if (!IS_GEMINILAKE(dev_priv)) intel_dsi_prepare(encoder, pipe_config); - /* Power on, try both CRC pmic gpio and VBT */ - if (intel_dsi->gpio_panel) - gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); @@ -850,7 +846,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; DRM_DEBUG_KMS("\n"); @@ -886,7 +882,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; @@ -895,7 +891,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, if (IS_GEN9_LP(dev_priv)) { intel_crtc_vblank_off(old_crtc_state); - skylake_scaler_disable(old_crtc_state); + skl_scaler_disable(old_crtc_state); } if (is_vid_mode(intel_dsi)) { @@ -945,11 +941,8 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, /* Assert reset */ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET); - /* Power off, try both CRC pmic gpio and VBT */ intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF); - if (intel_dsi->gpio_panel) - 
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0); /* * FIXME As we do with eDP, just make a note of the time here @@ -962,7 +955,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_wakeref_t wakeref; enum port port; bool active = false; @@ -1041,7 +1034,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, &pipe_config->hw.adjusted_mode; struct drm_display_mode *adjusted_mode_sw; struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); unsigned int lane_count = intel_dsi->lane_count; unsigned int bpp, fmt; enum port port; @@ -1234,7 +1227,7 @@ static void set_dsi_timings(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); unsigned int lane_count = intel_dsi->lane_count; @@ -1322,7 +1315,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); @@ -1512,7 +1505,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, static void intel_dsi_unprepare(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; @@ -1539,12 +1532,9 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) static void intel_dsi_encoder_destroy(struct drm_encoder *encoder) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - - /* dispose of the gpios */ - if (intel_dsi->gpio_panel) - gpiod_put(intel_dsi->gpio_panel); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); + intel_dsi_vbt_gpio_cleanup(intel_dsi); intel_encoder_destroy(encoder); } @@ -1825,6 +1815,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) struct drm_connector *connector; struct drm_display_mode *current_mode, *fixed_mode; enum port port; + enum pipe pipe; DRM_DEBUG_KMS("\n"); @@ -1923,20 +1914,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) vlv_dphy_param_init(intel_dsi); - /* - * In case of BYT with CRC PMIC, we need to use GPIO for - * Panel control. 
- */ - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC)) { - intel_dsi->gpio_panel = - gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH); - - if (IS_ERR(intel_dsi->gpio_panel)) { - DRM_ERROR("Failed to own gpio for panel control\n"); - intel_dsi->gpio_panel = NULL; - } - } + intel_dsi_vbt_gpio_init(intel_dsi, + intel_dsi_get_hw_state(intel_encoder, &pipe)); drm_connector_init(dev, connector, &intel_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 95f39cd0ce02..6b89e67b120f 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -117,7 +117,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int ret; u32 dsi_clk; @@ -255,7 +255,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); u32 dsi_clock, pclk; u32 pll_ctl, pll_div; @@ -321,7 +321,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, u32 pclk; u32 dsi_clk; u32 dsi_ratio; - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); @@ -341,7 +341,7 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { u32 temp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); temp = I915_READ(MIPI_CTRL(port)); temp &= ~ESCAPE_CLOCK_DIVIDER_MASK; @@ -455,7 +455,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max; u32 dsi_clk; @@ -503,7 +503,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile deleted file mode 100644 index 7e73aa587967..000000000000 --- a/drivers/gpu/drm/i915/gem/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# For building individual subdir files on the command line -subdir-ccflags-y += -I$(srctree)/$(src)/.. 
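A recurring change across the encoder hunks above is that the enc_to_dig_port(), enc_to_intel_dsi(), enc_to_intel_hdmi() and enc_to_intel_dp() helpers now take a struct intel_encoder * instead of a struct drm_encoder *, so callers pass the encoder directly rather than &encoder->base. The underlying mechanism is still the container_of idiom. A standalone sketch of that idiom, using made-up stand-in types rather than the real i915 structures:

#include <stddef.h>
#include <stdio.h>

/* Minimal userspace rendition of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-ins for intel_encoder / intel_digital_port. */
struct wrapped_encoder {
	int id;
};

struct digital_port {
	struct wrapped_encoder enc;	/* embedded encoder, like intel_digital_port.base */
	int port;
};

/* New-style helper: takes the wrapped encoder type and walks back to its container. */
static struct digital_port *enc_to_port(struct wrapped_encoder *encoder)
{
	return container_of(encoder, struct digital_port, enc);
}

int main(void)
{
	struct digital_port dp = { .enc = { .id = 7 }, .port = 2 };

	/* Callers now hand over the encoder itself, not &encoder->base. */
	printf("port=%d (encoder id %d)\n", enc_to_port(&dp.enc)->port, dp.enc.id);
	return 0;
}

The helpers in the patch do one extra hop (container_of(&encoder->base, ..., base.base)) because drm_encoder is itself embedded inside intel_encoder, but the pointer arithmetic is the same.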
- -# Extra header tests -header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index dc90b044a217..a2e57e62af30 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -69,6 +69,7 @@ #include <drm/i915_drm.h> +#include "gt/gen6_ppgtt.h" #include "gt/intel_context.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_engine_pm.h" @@ -705,7 +706,7 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) if (HAS_FULL_PPGTT(i915)) { struct i915_ppgtt *ppgtt; - ppgtt = i915_ppgtt_create(i915); + ppgtt = i915_ppgtt_create(&i915->gt); if (IS_ERR(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); @@ -760,12 +761,6 @@ void i915_gem_driver_release__contexts(struct drm_i915_private *i915) flush_work(&i915->gem.contexts.free_work); } -static int context_idr_cleanup(int id, void *p, void *data) -{ - context_close(p); - return 0; -} - static int vm_idr_cleanup(int id, void *p, void *data) { i915_vm_put(p); @@ -773,7 +768,8 @@ static int vm_idr_cleanup(int id, void *p, void *data) } static int gem_context_register(struct i915_gem_context *ctx, - struct drm_i915_file_private *fpriv) + struct drm_i915_file_private *fpriv, + u32 *id) { struct i915_address_space *vm; int ret; @@ -791,14 +787,10 @@ static int gem_context_register(struct i915_gem_context *ctx, current->comm, pid_nr(ctx->pid)); /* And finally expose ourselves to userspace via the idr */ - mutex_lock(&fpriv->context_idr_lock); - ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL); - mutex_unlock(&fpriv->context_idr_lock); - if (ret >= 0) - goto out; + ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL); + if (ret) + put_pid(fetch_and_zero(&ctx->pid)); - put_pid(fetch_and_zero(&ctx->pid)); -out: return ret; } @@ -808,11 +800,11 @@ int i915_gem_context_open(struct drm_i915_private *i915, struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_gem_context *ctx; int err; + u32 id; - mutex_init(&file_priv->context_idr_lock); - mutex_init(&file_priv->vm_idr_lock); + xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC); - idr_init(&file_priv->context_idr); + mutex_init(&file_priv->vm_idr_lock); idr_init_base(&file_priv->vm_idr, 1); ctx = i915_gem_create_context(i915, 0); @@ -821,21 +813,19 @@ int i915_gem_context_open(struct drm_i915_private *i915, goto err; } - err = gem_context_register(ctx, file_priv); + err = gem_context_register(ctx, file_priv, &id); if (err < 0) goto err_ctx; - GEM_BUG_ON(err > 0); - + GEM_BUG_ON(id); return 0; err_ctx: context_close(ctx); err: idr_destroy(&file_priv->vm_idr); - idr_destroy(&file_priv->context_idr); + xa_destroy(&file_priv->context_xa); mutex_destroy(&file_priv->vm_idr_lock); - mutex_destroy(&file_priv->context_idr_lock); return err; } @@ -843,10 +833,12 @@ void i915_gem_context_close(struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_private *i915 = file_priv->dev_priv; + struct i915_gem_context *ctx; + unsigned long idx; - idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); - idr_destroy(&file_priv->context_idr); - mutex_destroy(&file_priv->context_idr_lock); + xa_for_each(&file_priv->context_xa, idx, ctx) + context_close(ctx); + xa_destroy(&file_priv->context_xa); idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL); idr_destroy(&file_priv->vm_idr); @@ -870,7 +862,7 @@ int 
i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, if (args->flags) return -EINVAL; - ppgtt = i915_ppgtt_create(i915); + ppgtt = i915_ppgtt_create(&i915->gt); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -1244,12 +1236,14 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) * image, or into the registers directory, does not stick). Pristine * and idle contexts will be configured on pinning. */ - if (!intel_context_is_pinned(ce)) + if (!intel_context_pin_if_active(ce)) return 0; rq = intel_engine_create_kernel_request(ce->engine); - if (IS_ERR(rq)) - return PTR_ERR(rq); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + goto out_unpin; + } /* Serialise with the remote context */ ret = intel_context_prepare_remote_request(ce, rq); @@ -1257,6 +1251,8 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) ret = gen8_emit_rpcs_config(rq, ce, sseu); i915_request_add(rq); +out_unpin: + intel_context_unpin(ce); return ret; } @@ -2187,6 +2183,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_context_create_ext *args = data; struct create_ext ext_data; int ret; + u32 id; if (!DRIVER_CAPS(i915)->has_logical_contexts) return -ENODEV; @@ -2218,11 +2215,11 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, goto err_ctx; } - ret = gem_context_register(ext_data.ctx, ext_data.fpriv); + ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id); if (ret < 0) goto err_ctx; - args->ctx_id = ret; + args->ctx_id = id; DRM_DEBUG("HW context %d created\n", args->ctx_id); return 0; @@ -2245,11 +2242,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, if (!args->ctx_id) return -ENOENT; - if (mutex_lock_interruptible(&file_priv->context_idr_lock)) - return -EINTR; - - ctx = idr_remove(&file_priv->context_idr, args->ctx_id); - mutex_unlock(&file_priv->context_idr_lock); + ctx = xa_erase(&file_priv->context_xa, args->ctx_id); if (!ctx) return -ENOENT; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index 14f3cc1b7583..3ae61a355d87 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -13,7 +13,6 @@ #include "i915_drv.h" #include "i915_gem.h" -#include "i915_gem_gtt.h" #include "i915_scheduler.h" #include "intel_device_info.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index cbd2bcade3c8..d5a0f5ae4a8b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2173,7 +2173,7 @@ static int eb_submit(struct i915_execbuffer *eb) } if (intel_context_nopreempt(eb->context)) - eb->request->flags |= I915_REQUEST_NOPREEMPT; + __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags); return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index 520cc9cac471..70543c83df06 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -16,46 +16,6 @@ const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = { .release = i915_gem_object_release_memory_region, }; -/* XXX: Time to vfunc your life up? 
*/ -void __iomem * -i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj, - unsigned long n) -{ - resource_size_t offset; - - offset = i915_gem_object_get_dma_address(obj, n); - offset -= obj->mm.region->region.start; - - return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE); -} - -void __iomem * -i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj, - unsigned long n) -{ - resource_size_t offset; - - offset = i915_gem_object_get_dma_address(obj, n); - offset -= obj->mm.region->region.start; - - return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset); -} - -void __iomem * -i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, - unsigned long n, - unsigned long size) -{ - resource_size_t offset; - - GEM_BUG_ON(!i915_gem_object_is_contiguous(obj)); - - offset = i915_gem_object_get_dma_address(obj, n); - offset -= obj->mm.region->region.start; - - return io_mapping_map_wc(&obj->mm.region->iomap, offset, size); -} - bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) { return obj->ops == &i915_gem_lmem_obj_ops; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h index 7c176b8b7d2f..fc3f15580fe3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h @@ -14,14 +14,6 @@ struct intel_memory_region; extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops; -void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, - unsigned long n, unsigned long size); -void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj, - unsigned long n); -void __iomem * -i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj, - unsigned long n); - bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj); struct drm_i915_gem_object * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 879fff8adc48..b9fdac2f9003 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -4,6 +4,7 @@ * Copyright © 2014-2016 Intel Corporation */ +#include <linux/anon_inodes.h> #include <linux/mman.h> #include <linux/pfn_t.h> #include <linux/sizes.h> @@ -212,6 +213,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err) case -EIO: /* shmemfs failure from swap device */ case -EFAULT: /* purged object */ case -ENODEV: /* bad object, how did you get here! 
*/ + case -ENXIO: /* unable to access backing store (on device) */ return VM_FAULT_SIGBUS; case -ENOSPC: /* shmemfs allocation failure */ @@ -236,42 +238,38 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) struct vm_area_struct *area = vmf->vma; struct i915_mmap_offset *mmo = area->vm_private_data; struct drm_i915_gem_object *obj = mmo->obj; - unsigned long i, size = area->vm_end - area->vm_start; - bool write = area->vm_flags & VM_WRITE; - vm_fault_t ret = VM_FAULT_SIGBUS; + resource_size_t iomap; int err; - if (!i915_gem_object_has_struct_page(obj)) - return ret; - /* Sanity check that we allow writing into this object */ - if (i915_gem_object_is_readonly(obj) && write) - return ret; + if (unlikely(i915_gem_object_is_readonly(obj) && + area->vm_flags & VM_WRITE)) + return VM_FAULT_SIGBUS; err = i915_gem_object_pin_pages(obj); if (err) - return i915_error_to_vmf_fault(err); + goto out; - /* PTEs are revoked in obj->ops->put_pages() */ - for (i = 0; i < size >> PAGE_SHIFT; i++) { - struct page *page = i915_gem_object_get_page(obj, i); - - ret = vmf_insert_pfn(area, - (unsigned long)area->vm_start + i * PAGE_SIZE, - page_to_pfn(page)); - if (ret != VM_FAULT_NOPAGE) - break; + iomap = -1; + if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) { + iomap = obj->mm.region->iomap.base; + iomap -= obj->mm.region->region.start; } - if (write) { + /* PTEs are revoked in obj->ops->put_pages() */ + err = remap_io_sg(area, + area->vm_start, area->vm_end - area->vm_start, + obj->mm.pages->sgl, iomap); + + if (area->vm_flags & VM_WRITE) { GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - obj->cache_dirty = true; /* XXX flush after PAT update? */ obj->mm.dirty = true; } i915_gem_object_unpin_pages(obj); - return ret; +out: + return i915_error_to_vmf_fault(err); } static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) @@ -560,7 +558,9 @@ __assign_mmap_offset(struct drm_file *file, } if (mmap_type != I915_MMAP_TYPE_GTT && - !i915_gem_object_has_struct_page(obj)) { + !i915_gem_object_type_has(obj, + I915_GEM_OBJECT_HAS_STRUCT_PAGE | + I915_GEM_OBJECT_HAS_IOMEM)) { err = -ENODEV; goto out; } @@ -694,6 +694,46 @@ static const struct vm_operations_struct vm_ops_cpu = { .close = vm_close, }; +static int singleton_release(struct inode *inode, struct file *file) +{ + struct drm_i915_private *i915 = file->private_data; + + cmpxchg(&i915->gem.mmap_singleton, file, NULL); + drm_dev_put(&i915->drm); + + return 0; +} + +static const struct file_operations singleton_fops = { + .owner = THIS_MODULE, + .release = singleton_release, +}; + +static struct file *mmap_singleton(struct drm_i915_private *i915) +{ + struct file *file; + + rcu_read_lock(); + file = i915->gem.mmap_singleton; + if (file && !get_file_rcu(file)) + file = NULL; + rcu_read_unlock(); + if (file) + return file; + + file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR); + if (IS_ERR(file)) + return file; + + /* Everyone shares a single global address space */ + file->f_mapping = i915->drm.anon_inode->i_mapping; + + smp_store_mb(i915->gem.mmap_singleton, file); + drm_dev_get(&i915->drm); + + return file; +} + /* * This overcomes the limitation in drm_gem_mmap's assignment of a * drm_gem_object as the vma->vm_private_data. 
Since we need to @@ -707,6 +747,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) struct drm_device *dev = priv->minor->dev; struct i915_mmap_offset *mmo = NULL; struct drm_gem_object *obj = NULL; + struct file *anon; if (drm_dev_is_unplugged(dev)) return -ENODEV; @@ -755,9 +796,26 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_flags &= ~VM_MAYWRITE; } + anon = mmap_singleton(to_i915(obj->dev)); + if (IS_ERR(anon)) { + drm_gem_object_put_unlocked(obj); + return PTR_ERR(anon); + } + vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = mmo; + /* + * We keep the ref on mmo->obj, not vm_file, but we require + * vma->vm_file->f_mapping, see vma_link(), for later revocation. + * Our userspace is accustomed to having per-file resource cleanup + * (i.e. contexts, objects and requests) on their close(fd), which + * requires avoiding extraneous references to their filp, hence why + * we prefer to use an anonymous file for their mmaps. + */ + fput(vma->vm_file); + vma->vm_file = anon; + switch (mmo->mmap_type) { case I915_MMAP_TYPE_WC: vma->vm_page_prot = diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 858f8bf49a04..db70a3306e59 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -16,6 +16,7 @@ #include "display/intel_frontbuffer.h" #include "i915_gem_object_types.h" #include "i915_gem_gtt.h" +#include "i915_vma_types.h" void i915_gem_init__objects(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 75197ca696a8..54aca5c9101e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -158,9 +158,7 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) static void unmap_object(struct drm_i915_gem_object *obj, void *ptr) { - if (i915_gem_object_is_lmem(obj)) - io_mapping_unmap((void __force __iomem *)ptr); - else if (is_vmalloc_addr(ptr)) + if (is_vmalloc_addr(ptr)) vunmap(ptr); else kunmap(kmap_to_page(ptr)); @@ -236,46 +234,44 @@ unlock: return err; } +static inline pte_t iomap_pte(resource_size_t base, + dma_addr_t offset, + pgprot_t prot) +{ + return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot)); +} + /* The 'mapping' part of i915_gem_object_pin_map() below */ static void *i915_gem_object_map(struct drm_i915_gem_object *obj, enum i915_map_type type) { - unsigned long n_pages = obj->base.size >> PAGE_SHIFT; + unsigned long n_pte = obj->base.size >> PAGE_SHIFT; struct sg_table *sgt = obj->mm.pages; - struct sgt_iter sgt_iter; - struct page *page; - struct page *stack_pages[32]; - struct page **pages = stack_pages; - unsigned long i = 0; + pte_t *stack[32], **mem; + struct vm_struct *area; pgprot_t pgprot; - void *addr; - if (i915_gem_object_is_lmem(obj)) { - void __iomem *io; - - if (type != I915_MAP_WC) - return NULL; - - io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size); - return (void __force *)io; - } + if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC) + return NULL; /* A single page can always be kmapped */ - if (n_pages == 1 && type == I915_MAP_WB) + if (n_pte == 1 && type == I915_MAP_WB) return kmap(sg_page(sgt->sgl)); - if (n_pages > ARRAY_SIZE(stack_pages)) { + mem = stack; + if (n_pte > ARRAY_SIZE(stack)) { /* Too big for stack -- allocate temporary array instead */ - pages = kvmalloc_array(n_pages, 
sizeof(*pages), GFP_KERNEL); - if (!pages) + mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL); + if (!mem) return NULL; } - for_each_sgt_page(page, sgt_iter, sgt) - pages[i++] = page; - - /* Check that we have the expected number of pages */ - GEM_BUG_ON(i != n_pages); + area = alloc_vm_area(obj->base.size, mem); + if (!area) { + if (mem != stack) + kvfree(mem); + return NULL; + } switch (type) { default: @@ -288,12 +284,31 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj, pgprot = pgprot_writecombine(PAGE_KERNEL_IO); break; } - addr = vmap(pages, n_pages, 0, pgprot); - if (pages != stack_pages) - kvfree(pages); + if (i915_gem_object_has_struct_page(obj)) { + struct sgt_iter iter; + struct page *page; + pte_t **ptes = mem; + + for_each_sgt_page(page, iter, sgt) + **ptes++ = mk_pte(page, pgprot); + } else { + resource_size_t iomap; + struct sgt_iter iter; + pte_t **ptes = mem; + dma_addr_t addr; + + iomap = obj->mm.region->iomap.base; + iomap -= obj->mm.region->region.start; + + for_each_sgt_daddr(addr, iter, sgt) + **ptes++ = iomap_pte(iomap, addr, pgprot); + } + + if (mem != stack) + kvfree(mem); - return addr; + return area->addr; } /* get, pin, and map the pages of the object into kernel space */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index d50adac12249..1515384d7e0e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -107,7 +107,10 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, { INIT_LIST_HEAD(&obj->mm.blocks); obj->mm.region = intel_memory_region_get(mem); + obj->flags |= flags; + if (obj->base.size <= mem->min_page_size) + obj->flags |= I915_BO_ALLOC_CONTIGUOUS; mutex_lock(&mem->objects.lock); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 4d69c3fc3439..a2a980d9d241 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -594,6 +594,8 @@ static int init_shmem(struct intel_memory_region *mem) err); } + intel_memory_region_set_name(mem, "system"); + return 0; /* Don't error, we can simply fallback to the kernel mnt */ } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index afb08a1704a2..451f3078d60d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -645,6 +645,8 @@ i915_gem_object_create_stolen(struct drm_i915_private *i915, static int init_stolen(struct intel_memory_region *mem) { + intel_memory_region_set_name(mem, "stolen"); + /* * Initialise stolen early so that we may reserve preallocated * objects for the BIOS to KMS transition. 
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h index 549c1394bcdc..b8cf31b7bf14 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h @@ -7,6 +7,12 @@ #ifndef __HUGE_GEM_OBJECT_H #define __HUGE_GEM_OBJECT_H +#include <linux/types.h> + +#include "gem/i915_gem_object_types.h" + +struct drm_i915_private; + struct drm_i915_gem_object * huge_gem_object(struct drm_i915_private *i915, phys_addr_t phys_size, diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 2479395c1873..9311250d7d6f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1017,38 +1017,33 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) return err; } -static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) +static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val) { - unsigned long n; + unsigned long n = obj->base.size >> PAGE_SHIFT; + u32 *ptr; int err; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_wc_domain(obj, false); - i915_gem_object_unlock(obj); - if (err) - return err; - - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT); if (err) return err; - for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { - u32 __iomem *base; - u32 read_val; - - base = i915_gem_object_lmem_io_map_page_atomic(obj, n); + ptr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); - read_val = ioread32(base + dword); - io_mapping_unmap_atomic(base); - if (read_val != val) { - pr_err("n=%lu base[%u]=%u, val=%u\n", - n, dword, read_val, val); + ptr += dword; + while (n--) { + if (*ptr != val) { + pr_err("base[%u]=%08x, val=%08x\n", + dword, *ptr, val); err = -EINVAL; break; } + + ptr += PAGE_SIZE / sizeof(*ptr); } - i915_gem_object_unpin_pages(obj); + i915_gem_object_unpin_map(obj); return err; } @@ -1056,10 +1051,8 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) { if (i915_gem_object_has_struct_page(obj)) return __cpu_check_shmem(obj, dword, val); - else if (i915_gem_object_is_lmem(obj)) - return __cpu_check_lmem(obj, dword, val); - - return -ENODEV; + else + return __cpu_check_vmap(obj, dword, val); } static int __igt_write_huge(struct intel_context *ce, @@ -1872,7 +1865,7 @@ int i915_gem_huge_page_mock_selftests(void) mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; mkwrite_device_info(dev_priv)->ppgtt_size = 48; - ppgtt = i915_ppgtt_create(dev_priv); + ppgtt = i915_ppgtt_create(&dev_priv->gt); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 49edc51111d5..3f6079e1dfb6 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -325,7 +325,10 @@ static int igt_gem_coherency(void *arg) values = offsets + ncachelines; ctx.engine = random_engine(i915, &prng); - GEM_BUG_ON(!ctx.engine); + if (!ctx.engine) { + err = -ENODEV; + goto out_free; + } pr_info("%s: using %s\n", __func__, ctx.engine->name); intel_engine_pm_get(ctx.engine); @@ -354,7 +357,7 @@ static int igt_gem_coherency(void *arg) ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if 
(IS_ERR(ctx.obj)) { err = PTR_ERR(ctx.obj); - goto free; + goto out_pm; } i915_random_reorder(offsets, ncachelines, &prng); @@ -405,14 +408,15 @@ static int igt_gem_coherency(void *arg) } } } -free: +out_pm: intel_engine_pm_put(ctx.engine); +out_free: kfree(offsets); return err; put_object: i915_gem_object_put(ctx.obj); - goto free; + goto out_pm; } int i915_gem_coherency_live_selftests(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index cbf796da64e3..ef7c74cff28a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -9,6 +9,7 @@ #include "gt/intel_engine_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gem/i915_gem_region.h" #include "huge_gem_object.h" #include "i915_selftest.h" #include "selftests/i915_random.h" @@ -725,114 +726,359 @@ err_obj: goto out; } -#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24)) -static int igt_mmap(void *arg, enum i915_mmap_type type) +static int gtt_set(struct drm_i915_gem_object *obj) { - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; - struct i915_mmap_offset *mmo; - struct vm_area_struct *area; - unsigned long addr; - void *vaddr; - int err = 0, i; + struct i915_vma *vma; + void __iomem *map; + int err = 0; - if (!i915_ggtt_has_aperture(&i915->ggtt)) - return 0; + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) + return PTR_ERR(vma); - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); + intel_gt_pm_get(vma->vm->gt); + map = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto out; + } + + memset_io(map, POISON_INUSE, obj->base.size); + i915_vma_unpin_iomap(vma); + +out: + intel_gt_pm_put(vma->vm->gt); + return err; +} + +static int gtt_check(struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + void __iomem *map; + int err = 0; - vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + intel_gt_pm_get(vma->vm->gt); + map = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(map)) { + err = PTR_ERR(map); goto out; } - memset(vaddr, POISON_INUSE, PAGE_SIZE); + + if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) { + pr_err("%s: Write via mmap did not land in backing store (GTT)\n", + obj->mm.region->name); + err = -EINVAL; + } + i915_vma_unpin_iomap(vma); + +out: + intel_gt_pm_put(vma->vm->gt); + return err; +} + +static int wc_set(struct drm_i915_gem_object *obj) +{ + void *vaddr; + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + memset(vaddr, POISON_INUSE, obj->base.size); i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); - mmo = mmap_offset_attach(obj, type, NULL); - if (IS_ERR(mmo)) { - err = PTR_ERR(mmo); - goto out; + return 0; +} + +static int wc_check(struct drm_i915_gem_object *obj) +{ + void *vaddr; + int err = 0; + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) { + pr_err("%s: Write via mmap did not land in backing store (WC)\n", + obj->mm.region->name); + err = -EINVAL; } + i915_gem_object_unpin_map(obj); + + return err; +} + 
+static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) +{ + if (type == I915_MMAP_TYPE_GTT && + !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt)) + return false; + + if (type != I915_MMAP_TYPE_GTT && + !i915_gem_object_type_has(obj, + I915_GEM_OBJECT_HAS_STRUCT_PAGE | + I915_GEM_OBJECT_HAS_IOMEM)) + return false; + + return true; +} + +#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24)) +static int __igt_mmap(struct drm_i915_private *i915, + struct drm_i915_gem_object *obj, + enum i915_mmap_type type) +{ + struct i915_mmap_offset *mmo; + struct vm_area_struct *area; + unsigned long addr; + int err, i; + + if (!can_mmap(obj, type)) + return 0; + + err = wc_set(obj); + if (err == -ENXIO) + err = gtt_set(obj); + if (err) + return err; + + mmo = mmap_offset_attach(obj, type, NULL); + if (IS_ERR(mmo)) + return PTR_ERR(mmo); addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); - if (IS_ERR_VALUE(addr)) { - err = addr; - goto out; - } + if (IS_ERR_VALUE(addr)) + return addr; - pr_debug("igt_mmap() @ %lx\n", addr); + pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr); area = find_vma(current->mm, addr); if (!area) { - pr_err("Did not create a vm_area_struct for the mmap\n"); + pr_err("%s: Did not create a vm_area_struct for the mmap\n", + obj->mm.region->name); err = -EINVAL; goto out_unmap; } if (area->vm_private_data != mmo) { - pr_err("vm_area_struct did not point back to our mmap_offset object!\n"); + pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n", + obj->mm.region->name); err = -EINVAL; goto out_unmap; } - for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) { + for (i = 0; i < obj->base.size / sizeof(u32); i++) { u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux))); u32 x; if (get_user(x, ux)) { - pr_err("Unable to read from mmap, offset:%zd\n", - i * sizeof(x)); + pr_err("%s: Unable to read from mmap, offset:%zd\n", + obj->mm.region->name, i * sizeof(x)); err = -EFAULT; - break; + goto out_unmap; } if (x != expand32(POISON_INUSE)) { - pr_err("Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n", + pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n", + obj->mm.region->name, i * sizeof(x), x, expand32(POISON_INUSE)); err = -EINVAL; - break; + goto out_unmap; } x = expand32(POISON_FREE); if (put_user(x, ux)) { - pr_err("Unable to write to mmap, offset:%zd\n", - i * sizeof(x)); + pr_err("%s: Unable to write to mmap, offset:%zd\n", + obj->mm.region->name, i * sizeof(x)); err = -EFAULT; - break; + goto out_unmap; } } + if (type == I915_MMAP_TYPE_GTT) + intel_gt_flush_ggtt_writes(&i915->gt); + + err = wc_check(obj); + if (err == -ENXIO) + err = gtt_check(obj); out_unmap: - vm_munmap(addr, PAGE_SIZE); + vm_munmap(addr, obj->base.size); + return err; +} - vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto out; - } - if (err == 0 && memchr_inv(vaddr, POISON_FREE, PAGE_SIZE)) { - pr_err("Write via mmap did not land in backing store\n"); - err = -EINVAL; +static int igt_mmap(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_memory_region *mr; + enum intel_region_id id; + + for_each_memory_region(mr, i915, id) { + unsigned long sizes[] = { + PAGE_SIZE, + mr->min_page_size, + SZ_4M, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(sizes); i++) { + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_create_region(mr, sizes[i], 0); + if (obj == 
ERR_PTR(-ENODEV)) + continue; + + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT); + if (err == 0) + err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC); + + i915_gem_object_put(obj); + if (err) + return err; + } } - i915_gem_object_unpin_map(obj); -out: - i915_gem_object_put(obj); - return err; + return 0; } -static int igt_mmap_gtt(void *arg) +static int __igt_mmap_gpu(struct drm_i915_private *i915, + struct drm_i915_gem_object *obj, + enum i915_mmap_type type) { - return igt_mmap(arg, I915_MMAP_TYPE_GTT); + struct intel_engine_cs *engine; + struct i915_mmap_offset *mmo; + unsigned long addr; + u32 __user *ux; + u32 bbe; + int err; + + /* + * Verify that the mmap access into the backing store aligns with + * that of the GPU, i.e. that mmap is indeed writing into the same + * page as being read by the GPU. + */ + + if (!can_mmap(obj, type)) + return 0; + + err = wc_set(obj); + if (err == -ENXIO) + err = gtt_set(obj); + if (err) + return err; + + mmo = mmap_offset_attach(obj, type, NULL); + if (IS_ERR(mmo)) + return PTR_ERR(mmo); + + addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); + if (IS_ERR_VALUE(addr)) + return addr; + + ux = u64_to_user_ptr((u64)addr); + bbe = MI_BATCH_BUFFER_END; + if (put_user(bbe, ux)) { + pr_err("%s: Unable to write to mmap\n", obj->mm.region->name); + err = -EFAULT; + goto out_unmap; + } + + if (type == I915_MMAP_TYPE_GTT) + intel_gt_flush_ggtt_writes(&i915->gt); + + for_each_uabi_engine(engine, i915) { + struct i915_request *rq; + struct i915_vma *vma; + + vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_unmap; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto out_unmap; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_unpin; + } + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, false); + if (err == 0) + err = i915_vma_move_to_active(vma, rq, 0); + i915_vma_unlock(vma); + + err = engine->emit_bb_start(rq, vma->node.start, 0, 0); + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(engine->i915->drm.dev); + + pr_err("%s(%s, %s): Failed to execute batch\n", + __func__, engine->name, obj->mm.region->name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + intel_gt_set_wedged(engine->gt); + err = -EIO; + } + i915_request_put(rq); + +out_unpin: + i915_vma_unpin(vma); + if (err) + goto out_unmap; + } + +out_unmap: + vm_munmap(addr, obj->base.size); + return err; } -static int igt_mmap_cpu(void *arg) +static int igt_mmap_gpu(void *arg) { - return igt_mmap(arg, I915_MMAP_TYPE_WC); + struct drm_i915_private *i915 = arg; + struct intel_memory_region *mr; + enum intel_region_id id; + + for_each_memory_region(mr, i915, id) { + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0); + if (obj == ERR_PTR(-ENODEV)) + continue; + + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT); + if (err == 0) + err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC); + + i915_gem_object_put(obj); + if (err) + return err; + } + + return 0; } static int check_present_pte(pte_t *pte, unsigned long addr, void *data) @@ -887,32 +1133,24 @@ static int prefault_range(u64 start, u64 len) return __get_user(c, end - 1); } -static int igt_mmap_revoke(void *arg, enum i915_mmap_type type) +static 
int __igt_mmap_revoke(struct drm_i915_private *i915, + struct drm_i915_gem_object *obj, + enum i915_mmap_type type) { - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; struct i915_mmap_offset *mmo; unsigned long addr; int err; - if (!i915_ggtt_has_aperture(&i915->ggtt)) + if (!can_mmap(obj, type)) return 0; - obj = i915_gem_object_create_internal(i915, SZ_4M); - if (IS_ERR(obj)) - return PTR_ERR(obj); - mmo = mmap_offset_attach(obj, type, NULL); - if (IS_ERR(mmo)) { - err = PTR_ERR(mmo); - goto out; - } + if (IS_ERR(mmo)) + return PTR_ERR(mmo); addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); - if (IS_ERR_VALUE(addr)) { - err = addr; - goto out; - } + if (IS_ERR_VALUE(addr)) + return addr; err = prefault_range(addr, obj->base.size); if (err) @@ -922,8 +1160,10 @@ static int igt_mmap_revoke(void *arg, enum i915_mmap_type type) !atomic_read(&obj->bind_count)); err = check_present(addr, obj->base.size); - if (err) + if (err) { + pr_err("%s: was not present\n", obj->mm.region->name); goto out_unmap; + } /* * After unbinding the object from the GGTT, its address may be reused @@ -947,24 +1187,43 @@ static int igt_mmap_revoke(void *arg, enum i915_mmap_type type) } err = check_absent(addr, obj->base.size); - if (err) + if (err) { + pr_err("%s: was not absent\n", obj->mm.region->name); goto out_unmap; + } out_unmap: vm_munmap(addr, obj->base.size); -out: - i915_gem_object_put(obj); return err; } -static int igt_mmap_gtt_revoke(void *arg) +static int igt_mmap_revoke(void *arg) { - return igt_mmap_revoke(arg, I915_MMAP_TYPE_GTT); -} + struct drm_i915_private *i915 = arg; + struct intel_memory_region *mr; + enum intel_region_id id; -static int igt_mmap_cpu_revoke(void *arg) -{ - return igt_mmap_revoke(arg, I915_MMAP_TYPE_WC); + for_each_memory_region(mr, i915, id) { + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0); + if (obj == ERR_PTR(-ENODEV)) + continue; + + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT); + if (err == 0) + err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC); + + i915_gem_object_put(obj); + if (err) + return err; + } + + return 0; } int i915_gem_mman_live_selftests(struct drm_i915_private *i915) @@ -973,10 +1232,9 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_partial_tiling), SUBTEST(igt_smoke_tiling), SUBTEST(igt_mmap_offset_exhaustion), - SUBTEST(igt_mmap_gtt), - SUBTEST(igt_mmap_cpu), - SUBTEST(igt_mmap_gtt_revoke), - SUBTEST(igt_mmap_cpu_revoke), + SUBTEST(igt_mmap), + SUBTEST(igt_mmap_revoke), + SUBTEST(igt_mmap_gpu), }; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index 7d7e13dc2fdf..384143aa7776 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -77,12 +77,13 @@ live_context(struct drm_i915_private *i915, struct file *file) { struct i915_gem_context *ctx; int err; + u32 id; ctx = i915_gem_create_context(i915, 0); if (IS_ERR(ctx)) return ctx; - err = gem_context_register(ctx, to_drm_file(file)->driver_priv); + err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id); if (err < 0) goto err_ctx; diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h index 370360b4a148..688511afa883 100644 --- 
a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h +++ b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h @@ -7,6 +7,8 @@ #ifndef __MOCK_GEM_OBJECT_H__ #define __MOCK_GEM_OBJECT_H__ +#include "gem/i915_gem_object_types.h" + struct mock_object { struct drm_i915_gem_object base; }; diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile deleted file mode 100644 index 7e73aa587967..000000000000 --- a/drivers/gpu/drm/i915/gt/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# For building individual subdir files on the command line -subdir-ccflags-y += -I$(srctree)/$(src)/.. - -# Extra header tests -header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c new file mode 100644 index 000000000000..f10b2c41571c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -0,0 +1,482 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/log2.h> + +#include "gen6_ppgtt.h" +#include "i915_scatterlist.h" +#include "i915_trace.h" +#include "i915_vgpu.h" +#include "intel_gt.h" + +/* Write pde (index) from the page directory @pd to the page table @pt */ +static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt, + const unsigned int pde, + const struct i915_page_table *pt) +{ + /* Caller needs to make sure the write completes if necessary */ + iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID, + ppgtt->pd_addr + pde); +} + +void gen7_ppgtt_enable(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 ecochk; + + intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B); + + ecochk = intel_uncore_read(uncore, GAM_ECOCHK); + if (IS_HASWELL(i915)) { + ecochk |= ECOCHK_PPGTT_WB_HSW; + } else { + ecochk |= ECOCHK_PPGTT_LLC_IVB; + ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; + } + intel_uncore_write(uncore, GAM_ECOCHK, ecochk); + + for_each_engine(engine, gt, id) { + /* GFX_MODE is per-ring on gen7+ */ + ENGINE_WRITE(engine, + RING_MODE_GEN7, + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + } +} + +void gen6_ppgtt_enable(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + + intel_uncore_rmw(uncore, + GAC_ECO_BITS, + 0, + ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B); + + intel_uncore_rmw(uncore, + GAB_CTL, + 0, + GAB_CTL_CONT_AFTER_PAGEFAULT); + + intel_uncore_rmw(uncore, + GAM_ECOCHK, + 0, + ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); + + if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */ + intel_uncore_write(uncore, + GFX_MODE, + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); +} + +/* PPGTT support for Sandybridge/Gen6 and later */ +static void gen6_ppgtt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); + const unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + const gen6_pte_t scratch_pte = vm->scratch[0].encode; + unsigned int pde = first_entry / GEN6_PTES; + unsigned int pte = first_entry % GEN6_PTES; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + + while (num_entries) { + struct i915_page_table * const pt = + i915_pt_entry(ppgtt->base.pd, pde++); + const unsigned int count = min(num_entries, GEN6_PTES - pte); + gen6_pte_t *vaddr; + + GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1])); + + num_entries -= count; + + GEM_BUG_ON(count > atomic_read(&pt->used)); + if (!atomic_sub_return(count, 
&pt->used)) + ppgtt->scan_for_unused_pt = true; + + /* + * Note that the hw doesn't support removing PDE on the fly + * (they are cached inside the context with no means to + * invalidate the cache), so we can only reset the PTE + * entries back to scratch. + */ + + vaddr = kmap_atomic_px(pt); + memset32(vaddr + pte, scratch_pte, count); + kunmap_atomic(vaddr); + + pte = 0; + } +} + +static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_page_directory * const pd = ppgtt->pd; + unsigned int first_entry = vma->node.start / I915_GTT_PAGE_SIZE; + unsigned int act_pt = first_entry / GEN6_PTES; + unsigned int act_pte = first_entry % GEN6_PTES; + const u32 pte_encode = vm->pte_encode(0, cache_level, flags); + struct sgt_dma iter = sgt_dma(vma); + gen6_pte_t *vaddr; + + GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]); + + vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); + do { + vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); + + iter.dma += I915_GTT_PAGE_SIZE; + if (iter.dma == iter.max) { + iter.sg = __sg_next(iter.sg); + if (!iter.sg) + break; + + iter.dma = sg_dma_address(iter.sg); + iter.max = iter.dma + iter.sg->length; + } + + if (++act_pte == GEN6_PTES) { + kunmap_atomic(vaddr); + vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt)); + act_pte = 0; + } + } while (1); + kunmap_atomic(vaddr); + + vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; +} + +static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end) +{ + struct i915_page_directory * const pd = ppgtt->base.pd; + struct i915_page_table *pt; + unsigned int pde; + + start = round_down(start, SZ_64K); + end = round_up(end, SZ_64K) - start; + + mutex_lock(&ppgtt->flush); + + gen6_for_each_pde(pt, pd, start, end, pde) + gen6_write_pde(ppgtt, pde, pt); + + mb(); + ioread32(ppgtt->pd_addr + pde - 1); + gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt); + mb(); + + mutex_unlock(&ppgtt->flush); +} + +static int gen6_alloc_va_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); + struct i915_page_directory * const pd = ppgtt->base.pd; + struct i915_page_table *pt, *alloc = NULL; + intel_wakeref_t wakeref; + u64 from = start; + unsigned int pde; + int ret = 0; + + wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); + + spin_lock(&pd->lock); + gen6_for_each_pde(pt, pd, start, length, pde) { + const unsigned int count = gen6_pte_count(start, length); + + if (px_base(pt) == px_base(&vm->scratch[1])) { + spin_unlock(&pd->lock); + + pt = fetch_and_zero(&alloc); + if (!pt) + pt = alloc_pt(vm); + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto unwind_out; + } + + fill32_px(pt, vm->scratch[0].encode); + + spin_lock(&pd->lock); + if (pd->entry[pde] == &vm->scratch[1]) { + pd->entry[pde] = pt; + } else { + alloc = pt; + pt = pd->entry[pde]; + } + } + + atomic_add(count, &pt->used); + } + spin_unlock(&pd->lock); + + if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) + gen6_flush_pd(ppgtt, from, start); + + goto out; + +unwind_out: + gen6_ppgtt_clear_range(vm, from, start - from); +out: + if (alloc) + free_px(vm, alloc); + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); + return ret; +} + +static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) +{ + struct i915_address_space * const vm = &ppgtt->base.vm; + struct i915_page_directory * const pd = ppgtt->base.pd; + int ret; + + ret = 
setup_scratch_page(vm, __GFP_HIGHMEM); + if (ret) + return ret; + + vm->scratch[0].encode = + vm->pte_encode(px_dma(&vm->scratch[0]), + I915_CACHE_NONE, PTE_READ_ONLY); + + if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) { + cleanup_scratch_page(vm); + return -ENOMEM; + } + + fill32_px(&vm->scratch[1], vm->scratch[0].encode); + memset_p(pd->entry, &vm->scratch[1], I915_PDES); + + return 0; +} + +static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) +{ + struct i915_page_directory * const pd = ppgtt->base.pd; + struct i915_page_dma * const scratch = + px_base(&ppgtt->base.vm.scratch[1]); + struct i915_page_table *pt; + u32 pde; + + gen6_for_all_pdes(pt, pd, pde) + if (px_base(pt) != scratch) + free_px(&ppgtt->base.vm, pt); +} + +static void gen6_ppgtt_cleanup(struct i915_address_space *vm) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); + + __i915_vma_put(ppgtt->vma); + + gen6_ppgtt_free_pd(ppgtt); + free_scratch(vm); + + mutex_destroy(&ppgtt->flush); + mutex_destroy(&ppgtt->pin_mutex); + kfree(ppgtt->base.pd); +} + +static int pd_vma_set_pages(struct i915_vma *vma) +{ + vma->pages = ERR_PTR(-ENODEV); + return 0; +} + +static void pd_vma_clear_pages(struct i915_vma *vma) +{ + GEM_BUG_ON(!vma->pages); + + vma->pages = NULL; +} + +static int pd_vma_bind(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 unused) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); + struct gen6_ppgtt *ppgtt = vma->private; + u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; + + px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); + ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; + + gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total); + return 0; +} + +static void pd_vma_unbind(struct i915_vma *vma) +{ + struct gen6_ppgtt *ppgtt = vma->private; + struct i915_page_directory * const pd = ppgtt->base.pd; + struct i915_page_dma * const scratch = + px_base(&ppgtt->base.vm.scratch[1]); + struct i915_page_table *pt; + unsigned int pde; + + if (!ppgtt->scan_for_unused_pt) + return; + + /* Free all no longer used page tables */ + gen6_for_all_pdes(pt, ppgtt->base.pd, pde) { + if (px_base(pt) == scratch || atomic_read(&pt->used)) + continue; + + free_px(&ppgtt->base.vm, pt); + pd->entry[pde] = scratch; + } + + ppgtt->scan_for_unused_pt = false; +} + +static const struct i915_vma_ops pd_vma_ops = { + .set_pages = pd_vma_set_pages, + .clear_pages = pd_vma_clear_pages, + .bind_vma = pd_vma_bind, + .unbind_vma = pd_vma_unbind, +}; + +static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) +{ + struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt; + struct i915_vma *vma; + + GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); + GEM_BUG_ON(size > ggtt->vm.total); + + vma = i915_vma_alloc(); + if (!vma) + return ERR_PTR(-ENOMEM); + + i915_active_init(&vma->active, NULL, NULL); + + kref_init(&vma->ref); + mutex_init(&vma->pages_mutex); + vma->vm = i915_vm_get(&ggtt->vm); + vma->ops = &pd_vma_ops; + vma->private = ppgtt; + + vma->size = size; + vma->fence_size = size; + atomic_set(&vma->flags, I915_VMA_GGTT); + vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ + + INIT_LIST_HEAD(&vma->obj_link); + INIT_LIST_HEAD(&vma->closed_link); + + return vma; +} + +int gen6_ppgtt_pin(struct i915_ppgtt *base) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + int err; + + GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open)); + + /* + * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt + * which will 
be pinned into every active context. + * (When vma->pin_count becomes atomic, I expect we will naturally + * need a larger, unpacked, type and kill this redundancy.) + */ + if (atomic_add_unless(&ppgtt->pin_count, 1, 0)) + return 0; + + if (mutex_lock_interruptible(&ppgtt->pin_mutex)) + return -EINTR; + + /* + * PPGTT PDEs reside in the GGTT and consist of 512 entries. The + * allocator works in address space sizes, so it's multiplied by page + * size. We allocate at the top of the GTT to avoid fragmentation. + */ + err = 0; + if (!atomic_read(&ppgtt->pin_count)) + err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH); + if (!err) + atomic_inc(&ppgtt->pin_count); + mutex_unlock(&ppgtt->pin_mutex); + + return err; +} + +void gen6_ppgtt_unpin(struct i915_ppgtt *base) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + + GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); + if (atomic_dec_and_test(&ppgtt->pin_count)) + i915_vma_unpin(ppgtt->vma); +} + +void gen6_ppgtt_unpin_all(struct i915_ppgtt *base) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + + if (!atomic_read(&ppgtt->pin_count)) + return; + + i915_vma_unpin(ppgtt->vma); + atomic_set(&ppgtt->pin_count, 0); +} + +struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) +{ + struct i915_ggtt * const ggtt = gt->ggtt; + struct gen6_ppgtt *ppgtt; + int err; + + ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); + if (!ppgtt) + return ERR_PTR(-ENOMEM); + + mutex_init(&ppgtt->flush); + mutex_init(&ppgtt->pin_mutex); + + ppgtt_init(&ppgtt->base, gt); + ppgtt->base.vm.top = 1; + + ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND; + ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; + ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; + ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; + ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; + + ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; + + ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd)); + if (!ppgtt->base.pd) { + err = -ENOMEM; + goto err_free; + } + + err = gen6_ppgtt_init_scratch(ppgtt); + if (err) + goto err_pd; + + ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE); + if (IS_ERR(ppgtt->vma)) { + err = PTR_ERR(ppgtt->vma); + goto err_scratch; + } + + return &ppgtt->base; + +err_scratch: + free_scratch(&ppgtt->base.vm); +err_pd: + kfree(ppgtt->base.pd); +err_free: + mutex_destroy(&ppgtt->pin_mutex); + kfree(ppgtt); + return ERR_PTR(err); +} diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h new file mode 100644 index 000000000000..72e481806c96 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef __GEN6_PPGTT_H__ +#define __GEN6_PPGTT_H__ + +#include "intel_gtt.h" + +struct gen6_ppgtt { + struct i915_ppgtt base; + + struct mutex flush; + struct i915_vma *vma; + gen6_pte_t __iomem *pd_addr; + + atomic_t pin_count; + struct mutex pin_mutex; + + bool scan_for_unused_pt; +}; + +static inline u32 gen6_pte_index(u32 addr) +{ + return i915_pte_index(addr, GEN6_PDE_SHIFT); +} + +static inline u32 gen6_pte_count(u32 addr, u32 length) +{ + return i915_pte_count(addr, length, GEN6_PDE_SHIFT); +} + +static inline u32 gen6_pde_index(u32 addr) +{ + return i915_pde_index(addr, GEN6_PDE_SHIFT); +} + +#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base) + +static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base) +{ + BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base)); + return __to_gen6_ppgtt(base); +} 
+ +/* + * gen6_for_each_pde() iterates over every pde from start until start+length. + * If start and start+length are not perfectly divisible, the macro will round + * down and up as needed. Start=0 and length=2G effectively iterates over + * every PDE in the system. The macro modifies ALL its parameters except 'pd', + * so each of the other parameters should preferably be a simple variable, or + * at most an lvalue with no side-effects! + */ +#define gen6_for_each_pde(pt, pd, start, length, iter) \ + for (iter = gen6_pde_index(start); \ + length > 0 && iter < I915_PDES && \ + (pt = i915_pt_entry(pd, iter), true); \ + ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \ + temp = min(temp - start, length); \ + start += temp, length -= temp; }), ++iter) + +#define gen6_for_all_pdes(pt, pd, iter) \ + for (iter = 0; \ + iter < I915_PDES && \ + (pt = i915_pt_entry(pd, iter), true); \ + ++iter) + +int gen6_ppgtt_pin(struct i915_ppgtt *base); +void gen6_ppgtt_unpin(struct i915_ppgtt *base); +void gen6_ppgtt_unpin_all(struct i915_ppgtt *base); +void gen6_ppgtt_enable(struct intel_gt *gt); +void gen7_ppgtt_enable(struct intel_gt *gt); +struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt); + +#endif diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c new file mode 100644 index 000000000000..077b8f7cf6cb --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -0,0 +1,723 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/log2.h> + +#include "gen8_ppgtt.h" +#include "i915_scatterlist.h" +#include "i915_trace.h" +#include "i915_vgpu.h" +#include "intel_gt.h" +#include "intel_gtt.h" + +static u64 gen8_pde_encode(const dma_addr_t addr, + const enum i915_cache_level level) +{ + u64 pde = addr | _PAGE_PRESENT | _PAGE_RW; + + if (level != I915_CACHE_NONE) + pde |= PPAT_CACHED_PDE; + else + pde |= PPAT_UNCACHED; + + return pde; +} + +static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) +{ + struct drm_i915_private *i915 = ppgtt->vm.i915; + struct intel_uncore *uncore = ppgtt->vm.gt->uncore; + enum vgt_g2v_type msg; + int i; + + if (create) + atomic_inc(px_used(ppgtt->pd)); /* never remove */ + else + atomic_dec(px_used(ppgtt->pd)); + + mutex_lock(&i915->vgpu.lock); + + if (i915_vm_is_4lvl(&ppgtt->vm)) { + const u64 daddr = px_dma(ppgtt->pd); + + intel_uncore_write(uncore, + vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); + intel_uncore_write(uncore, + vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); + + msg = create ? + VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : + VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY; + } else { + for (i = 0; i < GEN8_3LVL_PDPES; i++) { + const u64 daddr = i915_page_dir_dma_addr(ppgtt, i); + + intel_uncore_write(uncore, + vgtif_reg(pdp[i].lo), + lower_32_bits(daddr)); + intel_uncore_write(uncore, + vgtif_reg(pdp[i].hi), + upper_32_bits(daddr)); + } + + msg = create ? + VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : + VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY; + } + + /* g2v_notify atomically (via hv trap) consumes the message packet. 
*/ + intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg); + + mutex_unlock(&i915->vgpu.lock); +} + +/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */ +#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */ +#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE)) +#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64)) +#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES)) +#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl)) +#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl)) +#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl)) + +#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt) + +static inline unsigned int +gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx) +{ + const int shift = gen8_pd_shift(lvl); + const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); + + GEM_BUG_ON(start >= end); + end += ~mask >> gen8_pd_shift(1); + + *idx = i915_pde_index(start, shift); + if ((start ^ end) & mask) + return GEN8_PDES - *idx; + else + return i915_pde_index(end, shift) - *idx; +} + +static inline bool gen8_pd_contains(u64 start, u64 end, int lvl) +{ + const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); + + GEM_BUG_ON(start >= end); + return (start ^ end) & mask && (start & ~mask) == 0; +} + +static inline unsigned int gen8_pt_count(u64 start, u64 end) +{ + GEM_BUG_ON(start >= end); + if ((start ^ end) >> gen8_pd_shift(1)) + return GEN8_PDES - (start & (GEN8_PDES - 1)); + else + return end - start; +} + +static inline unsigned int +gen8_pd_top_count(const struct i915_address_space *vm) +{ + unsigned int shift = __gen8_pte_shift(vm->top); + return (vm->total + (1ull << shift) - 1) >> shift; +} + +static inline struct i915_page_directory * +gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx) +{ + struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm); + + if (vm->top == 2) + return ppgtt->pd; + else + return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top)); +} + +static inline struct i915_page_directory * +gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr) +{ + return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT); +} + +static void __gen8_ppgtt_cleanup(struct i915_address_space *vm, + struct i915_page_directory *pd, + int count, int lvl) +{ + if (lvl) { + void **pde = pd->entry; + + do { + if (!*pde) + continue; + + __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1); + } while (pde++, --count); + } + + free_px(vm, pd); +} + +static void gen8_ppgtt_cleanup(struct i915_address_space *vm) +{ + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + + if (intel_vgpu_active(vm->i915)) + gen8_ppgtt_notify_vgt(ppgtt, false); + + __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top); + free_scratch(vm); +} + +static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm, + struct i915_page_directory * const pd, + u64 start, const u64 end, int lvl) +{ + const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; + unsigned int idx, len; + + GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT); + + len = gen8_pd_range(start, end, lvl--, &idx); + DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n", + __func__, vm, lvl + 1, start, end, + idx, len, atomic_read(px_used(pd))); + GEM_BUG_ON(!len || len >= atomic_read(px_used(pd))); + + do { + struct i915_page_table *pt = pd->entry[idx]; + + if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) && + gen8_pd_contains(start, end, lvl)) { + DBG("%s(%p):{ 
lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n", + __func__, vm, lvl + 1, idx, start, end); + clear_pd_entry(pd, idx, scratch); + __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl); + start += (u64)I915_PDES << gen8_pd_shift(lvl); + continue; + } + + if (lvl) { + start = __gen8_ppgtt_clear(vm, as_pd(pt), + start, end, lvl); + } else { + unsigned int count; + u64 *vaddr; + + count = gen8_pt_count(start, end); + DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n", + __func__, vm, lvl, start, end, + gen8_pd_index(start, 0), count, + atomic_read(&pt->used)); + GEM_BUG_ON(!count || count >= atomic_read(&pt->used)); + + vaddr = kmap_atomic_px(pt); + memset64(vaddr + gen8_pd_index(start, 0), + vm->scratch[0].encode, + count); + kunmap_atomic(vaddr); + + atomic_sub(count, &pt->used); + start += count; + } + + if (release_pd_entry(pd, idx, pt, scratch)) + free_px(vm, pt); + } while (idx++, --len); + + return start; +} + +static void gen8_ppgtt_clear(struct i915_address_space *vm, + u64 start, u64 length) +{ + GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(range_overflows(start, length, vm->total)); + + start >>= GEN8_PTE_SHIFT; + length >>= GEN8_PTE_SHIFT; + GEM_BUG_ON(length == 0); + + __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, + start, start + length, vm->top); +} + +static int __gen8_ppgtt_alloc(struct i915_address_space * const vm, + struct i915_page_directory * const pd, + u64 * const start, const u64 end, int lvl) +{ + const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; + struct i915_page_table *alloc = NULL; + unsigned int idx, len; + int ret = 0; + + GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT); + + len = gen8_pd_range(*start, end, lvl--, &idx); + DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n", + __func__, vm, lvl + 1, *start, end, + idx, len, atomic_read(px_used(pd))); + GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1)); + + spin_lock(&pd->lock); + GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! 
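The level arithmetic used throughout this walker comes from the defines above: 4K pages (shift 12) and 512 entries per level (9 bits per level). A minimal standalone sketch of that index math, with an arbitrary example offset:

/* Standalone sketch of gen8_pd_index()/__gen8_pte_index(); not part of
 * the patch. */
#include <stdint.h>
#include <stdio.h>

#define SK_PTE_SHIFT 12              /* ilog2(4K) */
#define SK_PDES      512             /* entries per directory/table level */

static unsigned int sk_pte_index(uint64_t addr, int lvl)
{
	return (addr >> (SK_PTE_SHIFT + 9 * lvl)) & (SK_PDES - 1);
}

int main(void)
{
	uint64_t addr = 0x0000123456789000ull;   /* example 48-bit GTT offset */
	int lvl;

	for (lvl = 3; lvl >= 0; lvl--)
		printf("lvl%d index = %u\n", lvl, sk_pte_index(addr, lvl));
	return 0;
}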
*/ + do { + struct i915_page_table *pt = pd->entry[idx]; + + if (!pt) { + spin_unlock(&pd->lock); + + DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n", + __func__, vm, lvl + 1, idx); + + pt = fetch_and_zero(&alloc); + if (lvl) { + if (!pt) { + pt = &alloc_pd(vm)->pt; + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto out; + } + } + + fill_px(pt, vm->scratch[lvl].encode); + } else { + if (!pt) { + pt = alloc_pt(vm); + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto out; + } + } + + if (intel_vgpu_active(vm->i915) || + gen8_pt_count(*start, end) < I915_PDES) + fill_px(pt, vm->scratch[lvl].encode); + } + + spin_lock(&pd->lock); + if (likely(!pd->entry[idx])) + set_pd_entry(pd, idx, pt); + else + alloc = pt, pt = pd->entry[idx]; + } + + if (lvl) { + atomic_inc(&pt->used); + spin_unlock(&pd->lock); + + ret = __gen8_ppgtt_alloc(vm, as_pd(pt), + start, end, lvl); + if (unlikely(ret)) { + if (release_pd_entry(pd, idx, pt, scratch)) + free_px(vm, pt); + goto out; + } + + spin_lock(&pd->lock); + atomic_dec(&pt->used); + GEM_BUG_ON(!atomic_read(&pt->used)); + } else { + unsigned int count = gen8_pt_count(*start, end); + + DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n", + __func__, vm, lvl, *start, end, + gen8_pd_index(*start, 0), count, + atomic_read(&pt->used)); + + atomic_add(count, &pt->used); + /* All other pdes may be simultaneously removed */ + GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES); + *start += count; + } + } while (idx++, --len); + spin_unlock(&pd->lock); +out: + if (alloc) + free_px(vm, alloc); + return ret; +} + +static int gen8_ppgtt_alloc(struct i915_address_space *vm, + u64 start, u64 length) +{ + u64 from; + int err; + + GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(range_overflows(start, length, vm->total)); + + start >>= GEN8_PTE_SHIFT; + length >>= GEN8_PTE_SHIFT; + GEM_BUG_ON(length == 0); + from = start; + + err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd, + &start, start + length, vm->top); + if (unlikely(err && from != start)) + __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, + from, start, vm->top); + + return err; +} + +static __always_inline u64 +gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, + struct i915_page_directory *pdp, + struct sgt_dma *iter, + u64 idx, + enum i915_cache_level cache_level, + u32 flags) +{ + struct i915_page_directory *pd; + const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); + gen8_pte_t *vaddr; + + pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2)); + vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); + do { + vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; + + iter->dma += I915_GTT_PAGE_SIZE; + if (iter->dma >= iter->max) { + iter->sg = __sg_next(iter->sg); + if (!iter->sg) { + idx = 0; + break; + } + + iter->dma = sg_dma_address(iter->sg); + iter->max = iter->dma + iter->sg->length; + } + + if (gen8_pd_index(++idx, 0) == 0) { + if (gen8_pd_index(idx, 1) == 0) { + /* Limited by sg length for 3lvl */ + if (gen8_pd_index(idx, 2) == 0) + break; + + pd = pdp->entry[gen8_pd_index(idx, 2)]; + } + + kunmap_atomic(vaddr); + vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); + } + } while (1); + kunmap_atomic(vaddr); + + return idx; +} + +static void gen8_ppgtt_insert_huge(struct i915_vma *vma, + struct sgt_dma *iter, + enum i915_cache_level cache_level, + u32 flags) +{ + const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); + u64 start = 
vma->node.start; + dma_addr_t rem = iter->sg->length; + + GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm)); + + do { + struct i915_page_directory * const pdp = + gen8_pdp_for_page_address(vma->vm, start); + struct i915_page_directory * const pd = + i915_pd_entry(pdp, __gen8_pte_index(start, 2)); + gen8_pte_t encode = pte_encode; + unsigned int maybe_64K = -1; + unsigned int page_size; + gen8_pte_t *vaddr; + u16 index; + + if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M && + IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) && + rem >= I915_GTT_PAGE_SIZE_2M && + !__gen8_pte_index(start, 0)) { + index = __gen8_pte_index(start, 1); + encode |= GEN8_PDE_PS_2M; + page_size = I915_GTT_PAGE_SIZE_2M; + + vaddr = kmap_atomic_px(pd); + } else { + struct i915_page_table *pt = + i915_pt_entry(pd, __gen8_pte_index(start, 1)); + + index = __gen8_pte_index(start, 0); + page_size = I915_GTT_PAGE_SIZE; + + if (!index && + vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K && + IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && + (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || + rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)) + maybe_64K = __gen8_pte_index(start, 1); + + vaddr = kmap_atomic_px(pt); + } + + do { + GEM_BUG_ON(iter->sg->length < page_size); + vaddr[index++] = encode | iter->dma; + + start += page_size; + iter->dma += page_size; + rem -= page_size; + if (iter->dma >= iter->max) { + iter->sg = __sg_next(iter->sg); + if (!iter->sg) + break; + + rem = iter->sg->length; + iter->dma = sg_dma_address(iter->sg); + iter->max = iter->dma + rem; + + if (maybe_64K != -1 && index < I915_PDES && + !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && + (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || + rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))) + maybe_64K = -1; + + if (unlikely(!IS_ALIGNED(iter->dma, page_size))) + break; + } + } while (rem >= page_size && index < I915_PDES); + + kunmap_atomic(vaddr); + + /* + * Is it safe to mark the 2M block as 64K? -- Either we have + * filled whole page-table with 64K entries, or filled part of + * it and have reached the end of the sg table and we have + * enough padding. + */ + if (maybe_64K != -1 && + (index == I915_PDES || + (i915_vm_has_scratch_64K(vma->vm) && + !iter->sg && IS_ALIGNED(vma->node.start + + vma->node.size, + I915_GTT_PAGE_SIZE_2M)))) { + vaddr = kmap_atomic_px(pd); + vaddr[maybe_64K] |= GEN8_PDE_IPS_64K; + kunmap_atomic(vaddr); + page_size = I915_GTT_PAGE_SIZE_64K; + + /* + * We write all 4K page entries, even when using 64K + * pages. In order to verify that the HW isn't cheating + * by using the 4K PTE instead of the 64K PTE, we want + * to remove all the surplus entries. If the HW skipped + * the 64K PTE, it will read/write into the scratch page + * instead - which we detect as missing results during + * selftests. 
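The 64K decision described above boils down to one alignment-and-length test. A small standalone sketch of that predicate, using generic 4K/64K/512-entry constants rather than the driver's defines:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_4K   4096ull
#define SK_PAGE_64K  65536ull
#define SK_PDES      512

/* Eligible when the chunk starts 64K-aligned and either remains a whole
 * number of 64K pages or covers the remainder of the 2M page table. */
static bool sk_can_use_64k(uint64_t dma, uint64_t rem, unsigned int index)
{
	if (dma & (SK_PAGE_64K - 1))
		return false;
	return !(rem & (SK_PAGE_64K - 1)) ||
	       rem >= (SK_PDES - index) * SK_PAGE_4K;
}

int main(void)
{
	printf("%d\n", sk_can_use_64k(0x100000, 128 * 1024, 0));   /* 1 */
	printf("%d\n", sk_can_use_64k(0x101000, 128 * 1024, 0));   /* 0: misaligned */
	printf("%d\n", sk_can_use_64k(0x100000, 68 * 1024, 500));  /* 1: fills the rest */
	return 0;
}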
+ */ + if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { + u16 i; + + encode = vma->vm->scratch[0].encode; + vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K)); + + for (i = 1; i < index; i += 16) + memset64(vaddr + i, encode, 15); + + kunmap_atomic(vaddr); + } + } + + vma->page_sizes.gtt |= page_size; + } while (iter->sg); +} + +static void gen8_ppgtt_insert(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm); + struct sgt_dma iter = sgt_dma(vma); + + if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { + gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags); + } else { + u64 idx = vma->node.start >> GEN8_PTE_SHIFT; + + do { + struct i915_page_directory * const pdp = + gen8_pdp_for_page_index(vm, idx); + + idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx, + cache_level, flags); + } while (idx); + + vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; + } +} + +static int gen8_init_scratch(struct i915_address_space *vm) +{ + int ret; + int i; + + /* + * If everybody agrees to not to write into the scratch page, + * we can reuse it for all vm, keeping contexts and processes separate. + */ + if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) { + struct i915_address_space *clone = vm->gt->vm; + + GEM_BUG_ON(!clone->has_read_only); + + vm->scratch_order = clone->scratch_order; + memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch)); + px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */ + return 0; + } + + ret = setup_scratch_page(vm, __GFP_HIGHMEM); + if (ret) + return ret; + + vm->scratch[0].encode = + gen8_pte_encode(px_dma(&vm->scratch[0]), + I915_CACHE_LLC, vm->has_read_only); + + for (i = 1; i <= vm->top; i++) { + if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i])))) + goto free_scratch; + + fill_px(&vm->scratch[i], vm->scratch[i - 1].encode); + vm->scratch[i].encode = + gen8_pde_encode(px_dma(&vm->scratch[i]), + I915_CACHE_LLC); + } + + return 0; + +free_scratch: + free_scratch(vm); + return -ENOMEM; +} + +static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) +{ + struct i915_address_space *vm = &ppgtt->vm; + struct i915_page_directory *pd = ppgtt->pd; + unsigned int idx; + + GEM_BUG_ON(vm->top != 2); + GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES); + + for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) { + struct i915_page_directory *pde; + + pde = alloc_pd(vm); + if (IS_ERR(pde)) + return PTR_ERR(pde); + + fill_px(pde, vm->scratch[1].encode); + set_pd_entry(pd, idx, pde); + atomic_inc(px_used(pde)); /* keep pinned */ + } + wmb(); + + return 0; +} + +static struct i915_page_directory * +gen8_alloc_top_pd(struct i915_address_space *vm) +{ + const unsigned int count = gen8_pd_top_count(vm); + struct i915_page_directory *pd; + + GEM_BUG_ON(count > ARRAY_SIZE(pd->entry)); + + pd = __alloc_pd(offsetof(typeof(*pd), entry[count])); + if (unlikely(!pd)) + return ERR_PTR(-ENOMEM); + + if (unlikely(setup_page_dma(vm, px_base(pd)))) { + kfree(pd); + return ERR_PTR(-ENOMEM); + } + + fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count); + atomic_inc(px_used(pd)); /* mark as pinned */ + return pd; +} + +/* + * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers + * with a net effect resembling a 2-level page table in normal x86 terms. Each + * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address + * space. 
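A quick standalone arithmetic check of the sizes quoted in the comment above, covering both the 3-level (4 PDP) layout and the full 4-level walk:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t three_lvl = 4ull * 512 * 512 * 4096;               /* 4 PDPs of 1 GiB */
	uint64_t four_lvl  = 512ull * 512 * 512 * 512 * 4096;       /* 48-bit space */

	printf("3-level total = %llu GiB\n",
	       (unsigned long long)(three_lvl >> 30));              /* 4 */
	printf("4-level total = %llu TiB\n",
	       (unsigned long long)(four_lvl >> 40));               /* 256 */
	return 0;
}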
+ * + */ +struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt) +{ + struct i915_ppgtt *ppgtt; + int err; + + ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); + if (!ppgtt) + return ERR_PTR(-ENOMEM); + + ppgtt_init(ppgtt, gt); + ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2; + + /* + * From bdw, there is hw support for read-only pages in the PPGTT. + * + * Gen11 has HSDES#:1807136187 unresolved. Disable ro support + * for now. + * + * Gen12 has inherited the same read-only fault issue from gen11. + */ + ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12); + + /* + * There are only few exceptions for gen >=6. chv and bxt. + * And we are not sure about the latter so play safe for now. + */ + if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915)) + ppgtt->vm.pt_kmap_wc = true; + + err = gen8_init_scratch(&ppgtt->vm); + if (err) + goto err_free; + + ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm); + if (IS_ERR(ppgtt->pd)) { + err = PTR_ERR(ppgtt->pd); + goto err_free_scratch; + } + + if (!i915_vm_is_4lvl(&ppgtt->vm)) { + err = gen8_preallocate_top_level_pdp(ppgtt); + if (err) + goto err_free_pd; + } + + ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND; + ppgtt->vm.insert_entries = gen8_ppgtt_insert; + ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc; + ppgtt->vm.clear_range = gen8_ppgtt_clear; + + if (intel_vgpu_active(gt->i915)) + gen8_ppgtt_notify_vgt(ppgtt, true); + + ppgtt->vm.cleanup = gen8_ppgtt_cleanup; + + return ppgtt; + +err_free_pd: + __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd, + gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top); +err_free_scratch: + free_scratch(&ppgtt->vm); +err_free: + kfree(ppgtt); + return ERR_PTR(err); +} diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h new file mode 100644 index 000000000000..76a08b9c1f5c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef __GEN8_PPGTT_H__ +#define __GEN8_PPGTT_H__ + +struct intel_gt; + +struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt); + +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index fbaa9df6f436..23137b2a8689 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -43,30 +43,76 @@ intel_context_create(struct intel_engine_cs *engine) return ce; } -int __intel_context_do_pin(struct intel_context *ce) +int intel_context_alloc_state(struct intel_context *ce) { - int err; + int err = 0; if (mutex_lock_interruptible(&ce->pin_mutex)) return -EINTR; - if (likely(!atomic_read(&ce->pin_count))) { - intel_wakeref_t wakeref; + if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { + err = ce->ops->alloc(ce); + if (unlikely(err)) + goto unlock; - if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) { - err = ce->ops->alloc(ce); - if (unlikely(err)) - goto err; + set_bit(CONTEXT_ALLOC_BIT, &ce->flags); + } - __set_bit(CONTEXT_ALLOC_BIT, &ce->flags); +unlock: + mutex_unlock(&ce->pin_mutex); + return err; +} + +static int intel_context_active_acquire(struct intel_context *ce) +{ + int err; + + err = i915_active_acquire(&ce->active); + if (err) + return err; + + /* Preallocate tracking nodes */ + if (!intel_context_is_barrier(ce)) { + err = i915_active_acquire_preallocate_barrier(&ce->active, + ce->engine); + if (err) { + i915_active_release(&ce->active); + return err; } + } + + return 0; +} + +static void intel_context_active_release(struct intel_context *ce) +{ + /* Nodes 
preallocated in intel_context_active() */ + i915_active_acquire_barrier(&ce->active); + i915_active_release(&ce->active); +} - err = 0; - with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref) - err = ce->ops->pin(ce); +int __intel_context_do_pin(struct intel_context *ce) +{ + int err; + + if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) { + err = intel_context_alloc_state(ce); if (err) + return err; + } + + if (mutex_lock_interruptible(&ce->pin_mutex)) + return -EINTR; + + if (likely(!atomic_read(&ce->pin_count))) { + err = intel_context_active_acquire(ce); + if (unlikely(err)) goto err; + err = ce->ops->pin(ce); + if (unlikely(err)) + goto err_active; + CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n", ce->ring->head, ce->ring->tail); @@ -79,6 +125,8 @@ int __intel_context_do_pin(struct intel_context *ce) mutex_unlock(&ce->pin_mutex); return 0; +err_active: + intel_context_active_release(ce); err: mutex_unlock(&ce->pin_mutex); return err; @@ -86,22 +134,20 @@ err: void intel_context_unpin(struct intel_context *ce) { - if (likely(atomic_add_unless(&ce->pin_count, -1, 1))) + if (!atomic_dec_and_test(&ce->pin_count)) return; - /* We may be called from inside intel_context_pin() to evict another */ - intel_context_get(ce); - mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING); - - if (likely(atomic_dec_and_test(&ce->pin_count))) { - CE_TRACE(ce, "retire\n"); - - ce->ops->unpin(ce); + CE_TRACE(ce, "unpin\n"); + ce->ops->unpin(ce); - intel_context_active_release(ce); - } - - mutex_unlock(&ce->pin_mutex); + /* + * Once released, we may asynchronously drop the active reference. + * As that may be the only reference keeping the context alive, + * take an extra now so that it is not freed before we finish + * dereferencing it. + */ + intel_context_get(ce); + intel_context_active_release(ce); intel_context_put(ce); } @@ -114,6 +160,10 @@ static int __context_pin_state(struct i915_vma *vma) if (err) return err; + err = i915_active_acquire(&vma->active); + if (err) + goto err_unpin; + /* * And mark it as a globally pinned object to let the shrinker know * it cannot reclaim the object until we release it. 
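The pin path rewritten above follows a common pattern: a lock-free fast path that only bumps the count when the context is already pinned, and a mutex-protected slow path for the first pin. A rough userspace sketch of that pattern (stdatomic plus a pthread mutex, not the driver code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct sk_ctx {
	atomic_int pin_count;
	pthread_mutex_t lock;
};

/* Equivalent of the atomic_inc_not_zero() fast path. */
static bool sk_pin_if_active(struct sk_ctx *ce)
{
	int old = atomic_load(&ce->pin_count);

	while (old != 0)
		if (atomic_compare_exchange_weak(&ce->pin_count, &old, old + 1))
			return true;
	return false;
}

static int sk_pin(struct sk_ctx *ce)
{
	if (sk_pin_if_active(ce))
		return 0;

	pthread_mutex_lock(&ce->lock);
	if (atomic_load(&ce->pin_count) == 0) {
		/* ... acquire backing state for the first pin here ... */
	}
	atomic_fetch_add(&ce->pin_count, 1);
	pthread_mutex_unlock(&ce->lock);
	return 0;
}

static void sk_unpin(struct sk_ctx *ce)
{
	if (atomic_fetch_sub(&ce->pin_count, 1) == 1) {
		/* last pin dropped: release backing state */
	}
}

int main(void)
{
	struct sk_ctx ce = { .pin_count = 0, .lock = PTHREAD_MUTEX_INITIALIZER };

	sk_pin(&ce);
	printf("pinned: %d\n", atomic_load(&ce.pin_count));
	sk_unpin(&ce);
	return 0;
}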
@@ -122,14 +172,44 @@ static int __context_pin_state(struct i915_vma *vma) vma->obj->mm.dirty = true; return 0; + +err_unpin: + i915_vma_unpin(vma); + return err; } static void __context_unpin_state(struct i915_vma *vma) { i915_vma_make_shrinkable(vma); + i915_active_release(&vma->active); __i915_vma_unpin(vma); } +static int __ring_active(struct intel_ring *ring) +{ + int err; + + err = i915_active_acquire(&ring->vma->active); + if (err) + return err; + + err = intel_ring_pin(ring); + if (err) + goto err_active; + + return 0; + +err_active: + i915_active_release(&ring->vma->active); + return err; +} + +static void __ring_retire(struct intel_ring *ring) +{ + intel_ring_unpin(ring); + i915_active_release(&ring->vma->active); +} + __i915_active_call static void __intel_context_retire(struct i915_active *active) { @@ -142,7 +222,7 @@ static void __intel_context_retire(struct i915_active *active) __context_unpin_state(ce->state); intel_timeline_unpin(ce->timeline); - intel_ring_unpin(ce->ring); + __ring_retire(ce->ring); intel_context_put(ce); } @@ -152,9 +232,11 @@ static int __intel_context_active(struct i915_active *active) struct intel_context *ce = container_of(active, typeof(*ce), active); int err; + CE_TRACE(ce, "active\n"); + intel_context_get(ce); - err = intel_ring_pin(ce->ring); + err = __ring_active(ce->ring); if (err) goto err_put; @@ -174,40 +256,12 @@ static int __intel_context_active(struct i915_active *active) err_timeline: intel_timeline_unpin(ce->timeline); err_ring: - intel_ring_unpin(ce->ring); + __ring_retire(ce->ring); err_put: intel_context_put(ce); return err; } -int intel_context_active_acquire(struct intel_context *ce) -{ - int err; - - err = i915_active_acquire(&ce->active); - if (err) - return err; - - /* Preallocate tracking nodes */ - if (!intel_context_is_barrier(ce)) { - err = i915_active_acquire_preallocate_barrier(&ce->active, - ce->engine); - if (err) { - i915_active_release(&ce->active); - return err; - } - } - - return 0; -} - -void intel_context_active_release(struct intel_context *ce) -{ - /* Nodes preallocated in intel_context_active() */ - i915_active_acquire_barrier(&ce->active); - i915_active_release(&ce->active); -} - void intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 1d4a1b1357cf..30bd248827d8 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -19,7 +19,7 @@ #define CE_TRACE(ce, fmt, ...) 
do { \ const struct intel_context *ce__ = (ce); \ - ENGINE_TRACE(ce__->engine, "context:%llx" fmt, \ + ENGINE_TRACE(ce__->engine, "context:%llx " fmt, \ ce__->timeline->fence_context, \ ##__VA_ARGS__); \ } while (0) @@ -31,6 +31,8 @@ void intel_context_fini(struct intel_context *ce); struct intel_context * intel_context_create(struct intel_engine_cs *engine); +int intel_context_alloc_state(struct intel_context *ce); + void intel_context_free(struct intel_context *ce); /** @@ -76,9 +78,14 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce) int __intel_context_do_pin(struct intel_context *ce); +static inline bool intel_context_pin_if_active(struct intel_context *ce) +{ + return atomic_inc_not_zero(&ce->pin_count); +} + static inline int intel_context_pin(struct intel_context *ce) { - if (likely(atomic_inc_not_zero(&ce->pin_count))) + if (likely(intel_context_pin_if_active(ce))) return 0; return __intel_context_do_pin(ce); @@ -116,9 +123,6 @@ static inline void intel_context_exit(struct intel_context *ce) ce->ops->exit(ce); } -int intel_context_active_acquire(struct intel_context *ce); -void intel_context_active_release(struct intel_context *ce); - static inline struct intel_context *intel_context_get(struct intel_context *ce) { kref_get(&ce->ref); diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 9527a659546c..ca1420fb8b53 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -17,6 +17,8 @@ #include "intel_engine_types.h" #include "intel_sseu.h" +#define CONTEXT_REDZONE POISON_INUSE + struct i915_gem_context; struct i915_vma; struct intel_context; diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 79ecac5ac0ab..5df003061e44 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -202,7 +202,7 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask); u64 intel_engine_get_active_head(const struct intel_engine_cs *engine); u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine); -void intel_engine_get_instdone(struct intel_engine_cs *engine, +void intel_engine_get_instdone(const struct intel_engine_cs *engine, struct intel_instdone *instdone); void intel_engine_init_execlists(struct intel_engine_cs *engine); @@ -282,7 +282,7 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine, bool intel_engines_are_idle(struct intel_gt *gt); bool intel_engine_is_idle(struct intel_engine_cs *engine); -bool intel_engine_flush_submission(struct intel_engine_cs *engine); +void intel_engine_flush_submission(struct intel_engine_cs *engine); void intel_engines_reset_default_submission(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index ddf9543b1261..f451ef376548 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -914,8 +914,8 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) } static u32 -read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice, - i915_reg_t reg) +read_subslice_reg(const struct intel_engine_cs *engine, + int slice, int subslice, i915_reg_t reg) { struct drm_i915_private *i915 = engine->i915; struct intel_uncore *uncore = engine->uncore; @@ -959,7 +959,7 @@ read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice, 
} /* NB: please notice the memset */ -void intel_engine_get_instdone(struct intel_engine_cs *engine, +void intel_engine_get_instdone(const struct intel_engine_cs *engine, struct intel_instdone *instdone) { struct drm_i915_private *i915 = engine->i915; @@ -1047,10 +1047,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine) return idle; } -bool intel_engine_flush_submission(struct intel_engine_cs *engine) +void intel_engine_flush_submission(struct intel_engine_cs *engine) { struct tasklet_struct *t = &engine->execlists.tasklet; - bool active = tasklet_is_locked(t); if (__tasklet_is_scheduled(t)) { local_bh_disable(); @@ -1061,13 +1060,10 @@ bool intel_engine_flush_submission(struct intel_engine_cs *engine) tasklet_unlock(t); } local_bh_enable(); - active = true; } /* Otherwise flush the tasklet if it was running on another cpu */ tasklet_unlock_wait(t); - - return active; } /** diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index 742628e40201..6c6fd185457c 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -199,7 +199,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine) goto out_unlock; } - rq->flags |= I915_REQUEST_SENTINEL; + __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags); idle_pulse(engine, rq); __i915_request_commit(rq); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 010620b78202..ea90ab3e396e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -20,6 +20,7 @@ static int __engine_unpark(struct intel_wakeref *wf) { struct intel_engine_cs *engine = container_of(wf, typeof(*engine), wakeref); + struct intel_context *ce; void *map; ENGINE_TRACE(engine, "\n"); @@ -34,6 +35,27 @@ static int __engine_unpark(struct intel_wakeref *wf) if (!IS_ERR_OR_NULL(map)) engine->pinned_default_state = map; + /* Discard stale context state from across idling */ + ce = engine->kernel_context; + if (ce) { + GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags)); + + /* First poison the image to verify we never fully trust it */ + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) { + struct drm_i915_gem_object *obj = ce->state->obj; + int type = i915_coherent_map_type(engine->i915); + + map = i915_gem_object_pin_map(obj, type); + if (!IS_ERR(map)) { + memset(map, CONTEXT_REDZONE, obj->base.size); + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + } + } + + ce->ops->reset(ce); + } + if (engine->unpark) engine->unpark(engine); @@ -123,16 +145,16 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) unsigned long flags; bool result = true; + /* GPU is pointing to the void, as good as in the kernel context. */ + if (intel_gt_is_wedged(engine->gt)) + return true; + GEM_BUG_ON(!intel_context_is_barrier(ce)); /* Already inside the kernel context, safe to power down. */ if (engine->wakeref_serial == engine->serial) return true; - /* GPU is pointing to the void, as good as in the kernel context. */ - if (intel_gt_is_wedged(engine->gt)) - return true; - /* * Note, we do this without taking the timeline->mutex. 
We cannot * as we may be called while retiring the kernel context and so diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c index 7f7150a733f4..9e7f12bef828 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_user.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c @@ -11,6 +11,7 @@ #include "i915_drv.h" #include "intel_engine.h" #include "intel_engine_user.h" +#include "intel_gt.h" struct intel_engine_cs * intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance) @@ -200,6 +201,9 @@ void intel_engines_driver_register(struct drm_i915_private *i915) uabi_node); char old[sizeof(engine->name)]; + if (intel_gt_has_init_error(engine->gt)) + continue; /* ignore incomplete engines */ + GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes)); engine->uabi_class = uabi_classes[engine->class]; diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c new file mode 100644 index 000000000000..79096722ce16 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -0,0 +1,1486 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/stop_machine.h> + +#include <asm/set_memory.h> +#include <asm/smp.h> + +#include "intel_gt.h" +#include "i915_drv.h" +#include "i915_scatterlist.h" +#include "i915_vgpu.h" + +#include "intel_gtt.h" + +static int +i915_get_ggtt_vma_pages(struct i915_vma *vma); + +static void i915_ggtt_color_adjust(const struct drm_mm_node *node, + unsigned long color, + u64 *start, + u64 *end) +{ + if (i915_node_color_differs(node, color)) + *start += I915_GTT_PAGE_SIZE; + + /* + * Also leave a space between the unallocated reserved node after the + * GTT and any objects within the GTT, i.e. we use the color adjustment + * to insert a guard page to prevent prefetches crossing over the + * GTT boundary. + */ + node = list_next_entry(node, node_list); + if (node->color != color) + *end -= I915_GTT_PAGE_SIZE; +} + +static int ggtt_init_hw(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + + i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); + + ggtt->vm.is_ggtt = true; + + /* Only VLV supports read-only GGTT mappings */ + ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); + + if (!HAS_LLC(i915) && !HAS_PPGTT(i915)) + ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; + + if (ggtt->mappable_end) { + if (!io_mapping_init_wc(&ggtt->iomap, + ggtt->gmadr.start, + ggtt->mappable_end)) { + ggtt->vm.cleanup(&ggtt->vm); + return -EIO; + } + + ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, + ggtt->mappable_end); + } + + i915_ggtt_init_fences(ggtt); + + return 0; +} + +/** + * i915_ggtt_init_hw - Initialize GGTT hardware + * @i915: i915 device + */ +int i915_ggtt_init_hw(struct drm_i915_private *i915) +{ + int ret; + + stash_init(&i915->mm.wc_stash); + + /* + * Note that we use page colouring to enforce a guard page at the + * end of the address space. This is required as the CS may prefetch + * beyond the end of the batch buffer, across the page boundary, + * and beyond the end of the GTT if we do not provide a guard. + */ + ret = ggtt_init_hw(&i915->ggtt); + if (ret) + return ret; + + return 0; +} + +/* + * Certain Gen5 chipsets require require idling the GPU before + * unmapping anything from the GTT when VT-d is enabled. + */ +static bool needs_idle_maps(struct drm_i915_private *i915) +{ + /* + * Query intel_iommu to see if we need the workaround. Presumably that + * was loaded first. 
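The guard-page trick used by i915_ggtt_color_adjust() further up can be illustrated in isolation: when either neighbour of a candidate hole carries a different "colour", the hole is shrunk by one page on that side so a guard page separates the two. A standalone sketch, with made-up values:

#include <stdint.h>
#include <stdio.h>

#define SK_PAGE 4096ull

static void sk_color_adjust(unsigned long prev_color, unsigned long next_color,
			    unsigned long color, uint64_t *start, uint64_t *end)
{
	if (prev_color != color)
		*start += SK_PAGE;       /* guard page against the node below */
	if (next_color != color)
		*end -= SK_PAGE;         /* guard page against the node above */
}

int main(void)
{
	uint64_t start = 0x10000, end = 0x40000;

	sk_color_adjust(0, 1, 1, &start, &end);   /* unlike colour below, like colour above */
	printf("usable hole: [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}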
+ */ + return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active(); +} + +static void ggtt_suspend_mappings(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + + /* + * Don't bother messing with faults pre GEN6 as we have little + * documentation supporting that it's a good idea. + */ + if (INTEL_GEN(i915) < 6) + return; + + intel_gt_check_and_clear_faults(ggtt->vm.gt); + + ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); + + ggtt->invalidate(ggtt); +} + +void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915) +{ + ggtt_suspend_mappings(&i915->ggtt); +} + +void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) +{ + struct intel_uncore *uncore = ggtt->vm.gt->uncore; + + spin_lock_irq(&uncore->lock); + intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6); + spin_unlock_irq(&uncore->lock); +} + +static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt) +{ + struct intel_uncore *uncore = ggtt->vm.gt->uncore; + + /* + * Note that as an uncached mmio write, this will flush the + * WCB of the writes into the GGTT before it triggers the invalidate. + */ + intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); +} + +static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) +{ + struct intel_uncore *uncore = ggtt->vm.gt->uncore; + struct drm_i915_private *i915 = ggtt->vm.i915; + + gen8_ggtt_invalidate(ggtt); + + if (INTEL_GEN(i915) >= 12) + intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR, + GEN12_GUC_TLB_INV_CR_INVALIDATE); + else + intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); +} + +static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) +{ + intel_gtt_chipset_flush(); +} + +static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) +{ + writeq(pte, addr); +} + +static void gen8_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level level, + u32 unused) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t __iomem *pte = + (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; + + gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); + + ggtt->invalidate(ggtt); +} + +static void gen8_ggtt_insert_entries(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level level, + u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + struct sgt_iter sgt_iter; + gen8_pte_t __iomem *gtt_entries; + const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0); + dma_addr_t addr; + + /* + * Note that we ignore PTE_READ_ONLY here. The caller must be careful + * not to allow the user to override access to a read only page. + */ + + gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; + gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE; + for_each_sgt_daddr(addr, sgt_iter, vma->pages) + gen8_set_pte(gtt_entries++, pte_encode | addr); + + /* + * We want to flush the TLBs only after we're certain all the PTE + * updates have finished. + */ + ggtt->invalidate(ggtt); +} + +static void gen6_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level level, + u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen6_pte_t __iomem *pte = + (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; + + iowrite32(vm->pte_encode(addr, level, flags), pte); + + ggtt->invalidate(ggtt); +} + +/* + * Binds an object into the global gtt with the specified cache level. 
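The insert_page/insert_entries paths above treat the GGTT as a flat array of 64-bit PTEs behind the BAR mapping, indexed simply by offset / 4K. A standalone model of that slot computation; the encode helper here is a stand-in, not the driver's gen8_pte_encode():

#include <stdint.h>
#include <stdio.h>

#define SK_PAGE    4096ull
#define SK_PRESENT (1ull << 0)

static uint64_t sk_pte_encode(uint64_t dma_addr)
{
	return dma_addr | SK_PRESENT;
}

int main(void)
{
	static uint64_t gsm[1024];               /* stand-in for the mapped PTE BAR */
	uint64_t gtt_offset = 8 * SK_PAGE;       /* where the page sits in the GGTT */
	uint64_t dma_addr = 0xabcd000;           /* where the page sits in memory */

	gsm[gtt_offset / SK_PAGE] = sk_pte_encode(dma_addr);
	printf("slot %llu = %#llx\n",
	       (unsigned long long)(gtt_offset / SK_PAGE),
	       (unsigned long long)gsm[gtt_offset / SK_PAGE]);
	return 0;
}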
+ * The object will be accessible to the GPU via commands whose operands + * reference offsets within the global GTT as well as accessible by the GPU + * through the GMADR mapped BAR (i915->mm.gtt->gtt). + */ +static void gen6_ggtt_insert_entries(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level level, + u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; + unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE; + struct sgt_iter iter; + dma_addr_t addr; + + for_each_sgt_daddr(addr, iter, vma->pages) + iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); + + /* + * We want to flush the TLBs only after we're certain all the PTE + * updates have finished. + */ + ggtt->invalidate(ggtt); +} + +static void nop_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ +} + +static void gen8_ggtt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + const gen8_pte_t scratch_pte = vm->scratch[0].encode; + gen8_pte_t __iomem *gtt_base = + (gen8_pte_t __iomem *)ggtt->gsm + first_entry; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; + int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + for (i = 0; i < num_entries; i++) + gen8_set_pte(>t_base[i], scratch_pte); +} + +static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) +{ + /* + * Make sure the internal GAM fifo has been cleared of all GTT + * writes before exiting stop_machine(). This guarantees that + * any aperture accesses waiting to start in another process + * cannot back up behind the GTT writes causing a hang. + * The register can be any arbitrary GAM register. 
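gen8_ggtt_clear_range() above converts a byte range into PTE slots and clamps the count so it never writes past the end of the table. A standalone sketch of that conversion and clamp:

#include <stdint.h>
#include <stdio.h>

#define SK_PAGE 4096ull

static unsigned int sk_clear_range(uint64_t *ptes, unsigned int total_entries,
				   uint64_t start, uint64_t length,
				   uint64_t scratch_pte)
{
	unsigned int first = start / SK_PAGE;
	unsigned int num = length / SK_PAGE;
	unsigned int max = total_entries - first;
	unsigned int i;

	if (num > max)          /* never write past the end of the table */
		num = max;

	for (i = 0; i < num; i++)
		ptes[first + i] = scratch_pte;
	return num;
}

int main(void)
{
	static uint64_t ptes[256];

	printf("cleared %u entries\n",
	       sk_clear_range(ptes, 256, 250 * SK_PAGE, 16 * SK_PAGE, 0x1));
	return 0;
}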
+ */ + intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); +} + +struct insert_page { + struct i915_address_space *vm; + dma_addr_t addr; + u64 offset; + enum i915_cache_level level; +}; + +static int bxt_vtd_ggtt_insert_page__cb(void *_arg) +{ + struct insert_page *arg = _arg; + + gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); + bxt_vtd_ggtt_wa(arg->vm); + + return 0; +} + +static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level level, + u32 unused) +{ + struct insert_page arg = { vm, addr, offset, level }; + + stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); +} + +struct insert_entries { + struct i915_address_space *vm; + struct i915_vma *vma; + enum i915_cache_level level; + u32 flags; +}; + +static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) +{ + struct insert_entries *arg = _arg; + + gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); + bxt_vtd_ggtt_wa(arg->vm); + + return 0; +} + +static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level level, + u32 flags) +{ + struct insert_entries arg = { vm, vma, level, flags }; + + stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); +} + +struct clear_range { + struct i915_address_space *vm; + u64 start; + u64 length; +}; + +static int bxt_vtd_ggtt_clear_range__cb(void *_arg) +{ + struct clear_range *arg = _arg; + + gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); + bxt_vtd_ggtt_wa(arg->vm); + + return 0; +} + +static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm, + u64 start, + u64 length) +{ + struct clear_range arg = { vm, start, length }; + + stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL); +} + +static void gen6_ggtt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + gen6_pte_t scratch_pte, __iomem *gtt_base = + (gen6_pte_t __iomem *)ggtt->gsm + first_entry; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; + int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + scratch_pte = vm->scratch[0].encode; + for (i = 0; i < num_entries; i++) + iowrite32(scratch_pte, >t_base[i]); +} + +static void i915_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level cache_level, + u32 unused) +{ + unsigned int flags = (cache_level == I915_CACHE_NONE) ? + AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; + + intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); +} + +static void i915_ggtt_insert_entries(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 unused) +{ + unsigned int flags = (cache_level == I915_CACHE_NONE) ? 
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; + + intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT, + flags); +} + +static void i915_ggtt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); +} + +static int ggtt_bind_vma(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + struct drm_i915_gem_object *obj = vma->obj; + u32 pte_flags; + + /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ + pte_flags = 0; + if (i915_gem_object_is_readonly(obj)) + pte_flags |= PTE_READ_ONLY; + + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + + vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; + + /* + * Without aliasing PPGTT there's no difference between + * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally + * upgrade to both bound if we bind either to avoid double-binding. + */ + atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); + + return 0; +} + +static void ggtt_unbind_vma(struct i915_vma *vma) +{ + vma->vm->clear_range(vma->vm, vma->node.start, vma->size); +} + +static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) +{ + u64 size; + int ret; + + if (!USES_GUC(ggtt->vm.i915)) + return 0; + + GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); + size = ggtt->vm.total - GUC_GGTT_TOP; + + ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, + GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE, + PIN_NOEVICT); + if (ret) + DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n"); + + return ret; +} + +static void ggtt_release_guc_top(struct i915_ggtt *ggtt) +{ + if (drm_mm_node_allocated(&ggtt->uc_fw)) + drm_mm_remove_node(&ggtt->uc_fw); +} + +static void cleanup_init_ggtt(struct i915_ggtt *ggtt) +{ + ggtt_release_guc_top(ggtt); + if (drm_mm_node_allocated(&ggtt->error_capture)) + drm_mm_remove_node(&ggtt->error_capture); + mutex_destroy(&ggtt->error_mutex); +} + +static int init_ggtt(struct i915_ggtt *ggtt) +{ + /* + * Let GEM Manage all of the aperture. + * + * However, leave one page at the end still bound to the scratch page. + * There are a number of places where the hardware apparently prefetches + * past the end of the object, and we've seen multiple hangs with the + * GPU head pointer stuck in a batchbuffer bound at the last page of the + * aperture. One page should be enough to keep any prefetching inside + * of the aperture. + */ + unsigned long hole_start, hole_end; + struct drm_mm_node *entry; + int ret; + + /* + * GuC requires all resources that we're sharing with it to be placed in + * non-WOPCM memory. If GuC is not present or not in use we still need a + * small bias as ring wraparound at offset 0 sometimes hangs. No idea + * why. + */ + ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, + intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); + + ret = intel_vgt_balloon(ggtt); + if (ret) + return ret; + + mutex_init(&ggtt->error_mutex); + if (ggtt->mappable_end) { + /* Reserve a mappable slot for our lockless error capture */ + ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, + &ggtt->error_capture, + PAGE_SIZE, 0, + I915_COLOR_UNEVICTABLE, + 0, ggtt->mappable_end, + DRM_MM_INSERT_LOW); + if (ret) + return ret; + } + + /* + * The upper portion of the GuC address space has a sizeable hole + * (several MB) that is inaccessible by GuC. Reserve this range within + * GGTT as it can comfortably hold GuC/HuC firmware images. 
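Putting numbers to the carve-outs that init_ggtt() sets up may help: a pin bias at the bottom (at least one page, or the WOPCM size) and the GuC-inaccessible window at the top. The boundary and sizes below are placeholders chosen for illustration, not the driver's constants:

#include <stdint.h>
#include <stdio.h>

#define SK_PAGE 4096ull

int main(void)
{
	uint64_t ggtt_total   = 1ull << 32;        /* 4 GiB GGTT (example) */
	uint64_t guc_ggtt_top = 0xFE000000ull;     /* placeholder boundary */
	uint64_t wopcm_size   = 2ull << 20;        /* placeholder WOPCM size */

	uint64_t pin_bias = wopcm_size > SK_PAGE ? wopcm_size : SK_PAGE;
	uint64_t guc_hole = ggtt_total - guc_ggtt_top;

	printf("pin bias       : %llu KiB\n", (unsigned long long)(pin_bias >> 10));
	printf("top reservation: [%#llx, %#llx) = %llu MiB\n",
	       (unsigned long long)guc_ggtt_top,
	       (unsigned long long)ggtt_total,
	       (unsigned long long)(guc_hole >> 20));
	return 0;
}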
+ */ + ret = ggtt_reserve_guc_top(ggtt); + if (ret) + goto err; + + /* Clear any non-preallocated blocks */ + drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { + DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", + hole_start, hole_end); + ggtt->vm.clear_range(&ggtt->vm, hole_start, + hole_end - hole_start); + } + + /* And finally clear the reserved guard page */ + ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); + + return 0; + +err: + cleanup_init_ggtt(ggtt); + return ret; +} + +static int aliasing_gtt_bind_vma(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + u32 pte_flags; + int ret; + + /* Currently applicable only to VLV */ + pte_flags = 0; + if (i915_gem_object_is_readonly(vma->obj)) + pte_flags |= PTE_READ_ONLY; + + if (flags & I915_VMA_LOCAL_BIND) { + struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias; + + if (flags & I915_VMA_ALLOC) { + ret = alias->vm.allocate_va_range(&alias->vm, + vma->node.start, + vma->size); + if (ret) + return ret; + + set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)); + } + + GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, + __i915_vma_flags(vma))); + alias->vm.insert_entries(&alias->vm, vma, + cache_level, pte_flags); + } + + if (flags & I915_VMA_GLOBAL_BIND) + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + + return 0; +} + +static void aliasing_gtt_unbind_vma(struct i915_vma *vma) +{ + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) { + struct i915_address_space *vm = vma->vm; + + vm->clear_range(vm, vma->node.start, vma->size); + } + + if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) { + struct i915_address_space *vm = + &i915_vm_to_ggtt(vma->vm)->alias->vm; + + vm->clear_range(vm, vma->node.start, vma->size); + } +} + +static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) +{ + struct i915_ppgtt *ppgtt; + int err; + + ppgtt = i915_ppgtt_create(ggtt->vm.gt); + if (IS_ERR(ppgtt)) + return PTR_ERR(ppgtt); + + if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { + err = -ENODEV; + goto err_ppgtt; + } + + /* + * Note we only pre-allocate as far as the end of the global + * GTT. On 48b / 4-level page-tables, the difference is very, + * very significant! We have to preallocate as GVT/vgpu does + * not like the page directory disappearing. 
+ */ + err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total); + if (err) + goto err_ppgtt; + + ggtt->alias = ppgtt; + ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; + + GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); + ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; + + GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); + ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; + + return 0; + +err_ppgtt: + i915_vm_put(&ppgtt->vm); + return err; +} + +static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt) +{ + struct i915_ppgtt *ppgtt; + + ppgtt = fetch_and_zero(&ggtt->alias); + if (!ppgtt) + return; + + i915_vm_put(&ppgtt->vm); + + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; +} + +int i915_init_ggtt(struct drm_i915_private *i915) +{ + int ret; + + ret = init_ggtt(&i915->ggtt); + if (ret) + return ret; + + if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) { + ret = init_aliasing_ppgtt(&i915->ggtt); + if (ret) + cleanup_init_ggtt(&i915->ggtt); + } + + return 0; +} + +static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) +{ + struct i915_vma *vma, *vn; + + atomic_set(&ggtt->vm.open, 0); + + rcu_barrier(); /* flush the RCU'ed__i915_vm_release */ + flush_workqueue(ggtt->vm.i915->wq); + + mutex_lock(&ggtt->vm.mutex); + + list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) + WARN_ON(__i915_vma_unbind(vma)); + + if (drm_mm_node_allocated(&ggtt->error_capture)) + drm_mm_remove_node(&ggtt->error_capture); + mutex_destroy(&ggtt->error_mutex); + + ggtt_release_guc_top(ggtt); + intel_vgt_deballoon(ggtt); + + ggtt->vm.cleanup(&ggtt->vm); + + mutex_unlock(&ggtt->vm.mutex); + i915_address_space_fini(&ggtt->vm); + + arch_phys_wc_del(ggtt->mtrr); + + if (ggtt->iomap.size) + io_mapping_fini(&ggtt->iomap); +} + +/** + * i915_ggtt_driver_release - Clean up GGTT hardware initialization + * @i915: i915 device + */ +void i915_ggtt_driver_release(struct drm_i915_private *i915) +{ + struct pagevec *pvec; + + fini_aliasing_ppgtt(&i915->ggtt); + + ggtt_cleanup_hw(&i915->ggtt); + + pvec = &i915->mm.wc_stash.pvec; + if (pvec->nr) { + set_pages_array_wb(pvec->pages, pvec->nr); + __pagevec_release(pvec); + } +} + +static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) +{ + snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; + snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; + return snb_gmch_ctl << 20; +} + +static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) +{ + bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; + bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; + if (bdw_gmch_ctl) + bdw_gmch_ctl = 1 << bdw_gmch_ctl; + +#ifdef CONFIG_X86_32 + /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */ + if (bdw_gmch_ctl > 4) + bdw_gmch_ctl = 4; +#endif + + return bdw_gmch_ctl << 20; +} + +static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) +{ + gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; + gmch_ctrl &= SNB_GMCH_GGMS_MASK; + + if (gmch_ctrl) + return 1 << (20 + gmch_ctrl); + + return 0; +} + +static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + struct pci_dev *pdev = i915->drm.pdev; + phys_addr_t phys_addr; + int ret; + + /* For Modern GENs the PTEs and register space are split in the BAR */ + phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2; + + /* + * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range + * will be dropped. 
For WC mappings in general we have 64 byte burst + * writes when the WC buffer is flushed, so we can't use it, but have to + * resort to an uncached mapping. The WC issue is easily caught by the + * readback check when writing GTT PTE entries. + */ + if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10) + ggtt->gsm = ioremap_nocache(phys_addr, size); + else + ggtt->gsm = ioremap_wc(phys_addr, size); + if (!ggtt->gsm) { + DRM_ERROR("Failed to map the ggtt page table\n"); + return -ENOMEM; + } + + ret = setup_scratch_page(&ggtt->vm, GFP_DMA32); + if (ret) { + DRM_ERROR("Scratch setup failed\n"); + /* iounmap will also get called at remove, but meh */ + iounmap(ggtt->gsm); + return ret; + } + + ggtt->vm.scratch[0].encode = + ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]), + I915_CACHE_NONE, 0); + + return 0; +} + +int ggtt_set_pages(struct i915_vma *vma) +{ + int ret; + + GEM_BUG_ON(vma->pages); + + ret = i915_get_ggtt_vma_pages(vma); + if (ret) + return ret; + + vma->page_sizes = vma->obj->mm.page_sizes; + + return 0; +} + +static void gen6_gmch_remove(struct i915_address_space *vm) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + + iounmap(ggtt->gsm); + cleanup_scratch_page(vm); +} + +static struct resource pci_resource(struct pci_dev *pdev, int bar) +{ + return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar), + pci_resource_len(pdev, bar)); +} + +static int gen8_gmch_probe(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + struct pci_dev *pdev = i915->drm.pdev; + unsigned int size; + u16 snb_gmch_ctl; + int err; + + /* TODO: We're not aware of mappable constraints on gen8 yet */ + if (!IS_DGFX(i915)) { + ggtt->gmadr = pci_resource(pdev, 2); + ggtt->mappable_end = resource_size(&ggtt->gmadr); + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); + if (err) + DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); + + pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); + if (IS_CHERRYVIEW(i915)) + size = chv_get_total_gtt_size(snb_gmch_ctl); + else + size = gen8_get_total_gtt_size(snb_gmch_ctl); + + ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; + ggtt->vm.cleanup = gen6_gmch_remove; + ggtt->vm.insert_page = gen8_ggtt_insert_page; + ggtt->vm.clear_range = nop_clear_range; + if (intel_scanout_needs_vtd_wa(i915)) + ggtt->vm.clear_range = gen8_ggtt_clear_range; + + ggtt->vm.insert_entries = gen8_ggtt_insert_entries; + + /* Serialize GTT updates with aperture access on BXT if VT-d is on. 
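gen8_gmch_probe() above derives the addressable GGTT size from the size of the PTE region: each 8-byte entry maps one 4K page. A small standalone check with an example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pte_bytes = 8ull << 20;                      /* e.g. an 8 MiB GSM */
	uint64_t total = (pte_bytes / sizeof(uint64_t)) * 4096;

	printf("GGTT size = %llu GiB\n", (unsigned long long)(total >> 30));  /* 4 */
	return 0;
}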
*/ + if (intel_ggtt_update_needs_vtd_wa(i915) || + IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) { + ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; + ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; + if (ggtt->vm.clear_range != nop_clear_range) + ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; + } + + ggtt->invalidate = gen8_ggtt_invalidate; + + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + ggtt->vm.vma_ops.set_pages = ggtt_set_pages; + ggtt->vm.vma_ops.clear_pages = clear_pages; + + ggtt->vm.pte_encode = gen8_pte_encode; + + setup_private_pat(ggtt->vm.gt->uncore); + + return ggtt_probe_common(ggtt, size); +} + +static u64 snb_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + switch (level) { + case I915_CACHE_L3_LLC: + case I915_CACHE_LLC: + pte |= GEN6_PTE_CACHE_LLC; + break; + case I915_CACHE_NONE: + pte |= GEN6_PTE_UNCACHED; + break; + default: + MISSING_CASE(level); + } + + return pte; +} + +static u64 ivb_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + switch (level) { + case I915_CACHE_L3_LLC: + pte |= GEN7_PTE_CACHE_L3_LLC; + break; + case I915_CACHE_LLC: + pte |= GEN6_PTE_CACHE_LLC; + break; + case I915_CACHE_NONE: + pte |= GEN6_PTE_UNCACHED; + break; + default: + MISSING_CASE(level); + } + + return pte; +} + +static u64 byt_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + if (!(flags & PTE_READ_ONLY)) + pte |= BYT_PTE_WRITEABLE; + + if (level != I915_CACHE_NONE) + pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; + + return pte; +} + +static u64 hsw_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + if (level != I915_CACHE_NONE) + pte |= HSW_WB_LLC_AGE3; + + return pte; +} + +static u64 iris_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; + + switch (level) { + case I915_CACHE_NONE: + break; + case I915_CACHE_WT: + pte |= HSW_WT_ELLC_LLC_AGE3; + break; + default: + pte |= HSW_WB_ELLC_LLC_AGE3; + break; + } + + return pte; +} + +static int gen6_gmch_probe(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + struct pci_dev *pdev = i915->drm.pdev; + unsigned int size; + u16 snb_gmch_ctl; + int err; + + ggtt->gmadr = pci_resource(pdev, 2); + ggtt->mappable_end = resource_size(&ggtt->gmadr); + + /* + * 64/512MB is the current min/max we actually know of, but this is + * just a coarse sanity check. 
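For comparison, the three *_get_total_gtt_size() helpers earlier in this file decode the same GGMS field three different ways. The sketch below applies each decode to an already-extracted field value (the shift/mask constants themselves are not reproduced here):

#include <stdint.h>
#include <stdio.h>

static uint64_t sk_gen6_size(unsigned int ggms)
{
	return (uint64_t)ggms << 20;                 /* field is the size in MiB */
}

static uint64_t sk_gen8_size(unsigned int ggms)
{
	return ggms ? (1ull << ggms) << 20 : 0;      /* field is a power-of-two exponent */
}

static uint64_t sk_chv_size(unsigned int ggms)
{
	return ggms ? 1ull << (20 + ggms) : 0;       /* same idea, folded into one shift */
}

int main(void)
{
	unsigned int ggms = 2;    /* example field value */

	printf("gen6: %llu MiB, gen8: %llu MiB, chv: %llu MiB\n",
	       (unsigned long long)(sk_gen6_size(ggms) >> 20),
	       (unsigned long long)(sk_gen8_size(ggms) >> 20),
	       (unsigned long long)(sk_chv_size(ggms) >> 20));
	return 0;
}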
+ */ + if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { + DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end); + return -ENXIO; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); + if (err) + DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); + pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); + + size = gen6_get_total_gtt_size(snb_gmch_ctl); + ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; + + ggtt->vm.clear_range = nop_clear_range; + if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915)) + ggtt->vm.clear_range = gen6_ggtt_clear_range; + ggtt->vm.insert_page = gen6_ggtt_insert_page; + ggtt->vm.insert_entries = gen6_ggtt_insert_entries; + ggtt->vm.cleanup = gen6_gmch_remove; + + ggtt->invalidate = gen6_ggtt_invalidate; + + if (HAS_EDRAM(i915)) + ggtt->vm.pte_encode = iris_pte_encode; + else if (IS_HASWELL(i915)) + ggtt->vm.pte_encode = hsw_pte_encode; + else if (IS_VALLEYVIEW(i915)) + ggtt->vm.pte_encode = byt_pte_encode; + else if (INTEL_GEN(i915) >= 7) + ggtt->vm.pte_encode = ivb_pte_encode; + else + ggtt->vm.pte_encode = snb_pte_encode; + + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + ggtt->vm.vma_ops.set_pages = ggtt_set_pages; + ggtt->vm.vma_ops.clear_pages = clear_pages; + + return ggtt_probe_common(ggtt, size); +} + +static void i915_gmch_remove(struct i915_address_space *vm) +{ + intel_gmch_remove(); +} + +static int i915_gmch_probe(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + phys_addr_t gmadr_base; + int ret; + + ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL); + if (!ret) { + DRM_ERROR("failed to set up gmch\n"); + return -EIO; + } + + intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); + + ggtt->gmadr = + (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end); + + ggtt->do_idle_maps = needs_idle_maps(i915); + ggtt->vm.insert_page = i915_ggtt_insert_page; + ggtt->vm.insert_entries = i915_ggtt_insert_entries; + ggtt->vm.clear_range = i915_ggtt_clear_range; + ggtt->vm.cleanup = i915_gmch_remove; + + ggtt->invalidate = gmch_ggtt_invalidate; + + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + ggtt->vm.vma_ops.set_pages = ggtt_set_pages; + ggtt->vm.vma_ops.clear_pages = clear_pages; + + if (unlikely(ggtt->do_idle_maps)) + dev_notice(i915->drm.dev, + "Applying Ironlake quirks for intel_iommu\n"); + + return 0; +} + +static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + int ret; + + ggtt->vm.gt = gt; + ggtt->vm.i915 = i915; + ggtt->vm.dma = &i915->drm.pdev->dev; + + if (INTEL_GEN(i915) <= 5) + ret = i915_gmch_probe(ggtt); + else if (INTEL_GEN(i915) < 8) + ret = gen6_gmch_probe(ggtt); + else + ret = gen8_gmch_probe(ggtt); + if (ret) + return ret; + + if ((ggtt->vm.total - 1) >> 32) { + DRM_ERROR("We never expected a Global GTT with more than 32bits" + " of address space! Found %lldM!\n", + ggtt->vm.total >> 20); + ggtt->vm.total = 1ULL << 32; + ggtt->mappable_end = + min_t(u64, ggtt->mappable_end, ggtt->vm.total); + } + + if (ggtt->mappable_end > ggtt->vm.total) { + DRM_ERROR("mappable aperture extends past end of GGTT," + " aperture=%pa, total=%llx\n", + &ggtt->mappable_end, ggtt->vm.total); + ggtt->mappable_end = ggtt->vm.total; + } + + /* GMADR is the PCI mmio aperture into the global GTT. 
*/ + DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20); + DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); + DRM_DEBUG_DRIVER("DSM size = %lluM\n", + (u64)resource_size(&intel_graphics_stolen_res) >> 20); + + return 0; +} + +/** + * i915_ggtt_probe_hw - Probe GGTT hardware location + * @i915: i915 device + */ +int i915_ggtt_probe_hw(struct drm_i915_private *i915) +{ + int ret; + + ret = ggtt_probe_hw(&i915->ggtt, &i915->gt); + if (ret) + return ret; + + if (intel_vtd_active()) + dev_info(i915->drm.dev, "VT-d active for gfx access\n"); + + return 0; +} + +int i915_ggtt_enable_hw(struct drm_i915_private *i915) +{ + if (INTEL_GEN(i915) < 6 && !intel_enable_gtt()) + return -EIO; + + return 0; +} + +void i915_ggtt_enable_guc(struct i915_ggtt *ggtt) +{ + GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate); + + ggtt->invalidate = guc_ggtt_invalidate; + + ggtt->invalidate(ggtt); +} + +void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) +{ + /* XXX Temporary pardon for error unload */ + if (ggtt->invalidate == gen8_ggtt_invalidate) + return; + + /* We should only be called after i915_ggtt_enable_guc() */ + GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate); + + ggtt->invalidate = gen8_ggtt_invalidate; + + ggtt->invalidate(ggtt); +} + +static void ggtt_restore_mappings(struct i915_ggtt *ggtt) +{ + struct i915_vma *vma; + bool flush = false; + int open; + + intel_gt_check_and_clear_faults(ggtt->vm.gt); + + mutex_lock(&ggtt->vm.mutex); + + /* First fill our portion of the GTT with scratch pages */ + ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); + + /* Skip rewriting PTE on VMA unbind. */ + open = atomic_xchg(&ggtt->vm.open, 0); + + /* clflush objects bound into the GGTT and rebind them. */ + list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { + struct drm_i915_gem_object *obj = vma->obj; + + if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) + continue; + + clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma)); + WARN_ON(i915_vma_bind(vma, + obj ? obj->cache_level : 0, + PIN_GLOBAL, NULL)); + if (obj) { /* only used during resume => exclusive access */ + flush |= fetch_and_zero(&obj->write_domain); + obj->read_domains |= I915_GEM_DOMAIN_GTT; + } + } + + atomic_set(&ggtt->vm.open, open); + ggtt->invalidate(ggtt); + + mutex_unlock(&ggtt->vm.mutex); + + if (flush) + wbinvd_on_all_cpus(); +} + +void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915) +{ + struct i915_ggtt *ggtt = &i915->ggtt; + + ggtt_restore_mappings(ggtt); + + if (INTEL_GEN(i915) >= 8) + setup_private_pat(ggtt->vm.gt->uncore); +} + +static struct scatterlist * +rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, + unsigned int width, unsigned int height, + unsigned int stride, + struct sg_table *st, struct scatterlist *sg) +{ + unsigned int column, row; + unsigned int src_idx; + + for (column = 0; column < width; column++) { + src_idx = stride * (height - 1) + column + offset; + for (row = 0; row < height; row++) { + st->nents++; + /* + * We don't need the pages, but need to initialize + * the entries so the sg list can be happily traversed. + * The only thing we need are DMA addresses. 
+ */ + sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); + sg_dma_address(sg) = + i915_gem_object_get_dma_address(obj, src_idx); + sg_dma_len(sg) = I915_GTT_PAGE_SIZE; + sg = sg_next(sg); + src_idx -= stride; + } + } + + return sg; +} + +static noinline struct sg_table * +intel_rotate_pages(struct intel_rotation_info *rot_info, + struct drm_i915_gem_object *obj) +{ + unsigned int size = intel_rotation_info_size(rot_info); + struct sg_table *st; + struct scatterlist *sg; + int ret = -ENOMEM; + int i; + + /* Allocate target SG list. */ + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto err_st_alloc; + + ret = sg_alloc_table(st, size, GFP_KERNEL); + if (ret) + goto err_sg_alloc; + + st->nents = 0; + sg = st->sgl; + + for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { + sg = rotate_pages(obj, rot_info->plane[i].offset, + rot_info->plane[i].width, rot_info->plane[i].height, + rot_info->plane[i].stride, st, sg); + } + + return st; + +err_sg_alloc: + kfree(st); +err_st_alloc: + + DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", + obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); + + return ERR_PTR(ret); +} + +static struct scatterlist * +remap_pages(struct drm_i915_gem_object *obj, unsigned int offset, + unsigned int width, unsigned int height, + unsigned int stride, + struct sg_table *st, struct scatterlist *sg) +{ + unsigned int row; + + for (row = 0; row < height; row++) { + unsigned int left = width * I915_GTT_PAGE_SIZE; + + while (left) { + dma_addr_t addr; + unsigned int length; + + /* + * We don't need the pages, but need to initialize + * the entries so the sg list can be happily traversed. + * The only thing we need are DMA addresses. + */ + + addr = i915_gem_object_get_dma_address_len(obj, offset, &length); + + length = min(left, length); + + st->nents++; + + sg_set_page(sg, NULL, length, 0); + sg_dma_address(sg) = addr; + sg_dma_len(sg) = length; + sg = sg_next(sg); + + offset += length / I915_GTT_PAGE_SIZE; + left -= length; + } + + offset += stride - width; + } + + return sg; +} + +static noinline struct sg_table * +intel_remap_pages(struct intel_remapped_info *rem_info, + struct drm_i915_gem_object *obj) +{ + unsigned int size = intel_remapped_info_size(rem_info); + struct sg_table *st; + struct scatterlist *sg; + int ret = -ENOMEM; + int i; + + /* Allocate target SG list. */ + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto err_st_alloc; + + ret = sg_alloc_table(st, size, GFP_KERNEL); + if (ret) + goto err_sg_alloc; + + st->nents = 0; + sg = st->sgl; + + for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { + sg = remap_pages(obj, rem_info->plane[i].offset, + rem_info->plane[i].width, rem_info->plane[i].height, + rem_info->plane[i].stride, st, sg); + } + + i915_sg_trim(st); + + return st; + +err_sg_alloc: + kfree(st); +err_st_alloc: + + DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! 
(%ux%u tiles, %u pages)\n", + obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size); + + return ERR_PTR(ret); +} + +static noinline struct sg_table * +intel_partial_pages(const struct i915_ggtt_view *view, + struct drm_i915_gem_object *obj) +{ + struct sg_table *st; + struct scatterlist *sg, *iter; + unsigned int count = view->partial.size; + unsigned int offset; + int ret = -ENOMEM; + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto err_st_alloc; + + ret = sg_alloc_table(st, count, GFP_KERNEL); + if (ret) + goto err_sg_alloc; + + iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset); + GEM_BUG_ON(!iter); + + sg = st->sgl; + st->nents = 0; + do { + unsigned int len; + + len = min(iter->length - (offset << PAGE_SHIFT), + count << PAGE_SHIFT); + sg_set_page(sg, NULL, len, 0); + sg_dma_address(sg) = + sg_dma_address(iter) + (offset << PAGE_SHIFT); + sg_dma_len(sg) = len; + + st->nents++; + count -= len >> PAGE_SHIFT; + if (count == 0) { + sg_mark_end(sg); + i915_sg_trim(st); /* Drop any unused tail entries. */ + + return st; + } + + sg = __sg_next(sg); + iter = __sg_next(iter); + offset = 0; + } while (1); + +err_sg_alloc: + kfree(st); +err_st_alloc: + return ERR_PTR(ret); +} + +static int +i915_get_ggtt_vma_pages(struct i915_vma *vma) +{ + int ret; + + /* + * The vma->pages are only valid within the lifespan of the borrowed + * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so + * must be the vma->pages. A simple rule is that vma->pages must only + * be accessed when the obj->mm.pages are pinned. + */ + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); + + switch (vma->ggtt_view.type) { + default: + GEM_BUG_ON(vma->ggtt_view.type); + /* fall through */ + case I915_GGTT_VIEW_NORMAL: + vma->pages = vma->obj->mm.pages; + return 0; + + case I915_GGTT_VIEW_ROTATED: + vma->pages = + intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); + break; + + case I915_GGTT_VIEW_REMAPPED: + vma->pages = + intel_remap_pages(&vma->ggtt_view.remapped, vma->obj); + break; + + case I915_GGTT_VIEW_PARTIAL: + vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); + break; + } + + ret = 0; + if (IS_ERR(vma->pages)) { + ret = PTR_ERR(vma->pages); + vma->pages = NULL; + DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", + vma->ggtt_view.type, ret); + } + return ret; +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index ec84b5e62fef..da2b6e2ae692 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -38,8 +38,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt) { gt->ggtt = ggtt; - - intel_gt_sanitize(gt, false); } static void init_unused_ring(struct intel_gt *gt, u32 base) @@ -77,10 +75,6 @@ int intel_gt_init_hw(struct intel_gt *gt) struct intel_uncore *uncore = gt->uncore; int ret; - ret = intel_gt_terminally_wedged(gt); - if (ret) - return ret; - gt->last_init_time = ktime_get(); /* Double layer security blanket, see i915_gem_init() */ @@ -372,7 +366,7 @@ static void intel_gt_fini_scratch(struct intel_gt *gt) static struct i915_address_space *kernel_vm(struct intel_gt *gt) { if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING) - return &i915_ppgtt_create(gt->i915)->vm; + return &i915_ppgtt_create(gt)->vm; else return i915_vm_get(>->ggtt->vm); } @@ -410,14 +404,13 @@ static int __engines_record_defaults(struct intel_gt *gt) struct intel_context *ce; struct 
i915_request *rq; + /* We must be able to switch to something! */ + GEM_BUG_ON(!engine->kernel_context); + err = intel_renderstate_init(&so, engine); if (err) goto out; - /* We must be able to switch to something! */ - GEM_BUG_ON(!engine->kernel_context); - engine->serial++; /* force the kernel context switch */ - ce = intel_context_create(engine); if (IS_ERR(ce)) { err = PTR_ERR(ce); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 2355cf129e9c..1dac441cb8f4 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -58,9 +58,14 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt, return i915_ggtt_offset(gt->scratch) + field; } -static inline bool intel_gt_is_wedged(struct intel_gt *gt) +static inline bool intel_gt_is_wedged(const struct intel_gt *gt) { return __intel_reset_failed(>->reset); } +static inline bool intel_gt_has_init_error(const struct intel_gt *gt) +{ + return test_bit(I915_WEDGED_ON_INIT, >->reset.flags); +} + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 45b68a17da4d..d1c2f034296a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -126,17 +126,7 @@ static bool reset_engines(struct intel_gt *gt) return __intel_gt_reset(gt, ALL_ENGINES) == 0; } -/** - * intel_gt_sanitize: called after the GPU has lost power - * @gt: the i915 GT container - * @force: ignore a failed reset and sanitize engine state anyway - * - * Anytime we reset the GPU, either with an explicit GPU reset or through a - * PCI power cycle, the GPU loses state and we must reset our state tracking - * to match. Note that calling intel_gt_sanitize() if the GPU has not - * been reset results in much confusion! 
- */ -void intel_gt_sanitize(struct intel_gt *gt, bool force) +static void gt_sanitize(struct intel_gt *gt, bool force) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -189,6 +179,10 @@ int intel_gt_resume(struct intel_gt *gt) enum intel_engine_id id; int err; + err = intel_gt_has_init_error(gt); + if (err) + return err; + GT_TRACE(gt, "\n"); /* @@ -201,30 +195,26 @@ int intel_gt_resume(struct intel_gt *gt) intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); intel_rc6_sanitize(>->rc6); + gt_sanitize(gt, true); + if (intel_gt_is_wedged(gt)) { + err = -EIO; + goto out_fw; + } /* Only when the HW is re-initialised, can we replay the requests */ err = intel_gt_init_hw(gt); if (err) { dev_err(gt->i915->drm.dev, "Failed to initialize GPU, declaring it wedged!\n"); - intel_gt_set_wedged(gt); - goto err_fw; + goto err_wedged; } intel_rps_enable(>->rps); intel_llc_enable(>->llc); for_each_engine(engine, gt, id) { - struct intel_context *ce; - intel_engine_pm_get(engine); - ce = engine->kernel_context; - if (ce) { - GEM_BUG_ON(!intel_context_is_pinned(ce)); - ce->ops->reset(ce); - } - engine->serial++; /* kernel context lost */ err = engine->resume(engine); @@ -233,7 +223,7 @@ int intel_gt_resume(struct intel_gt *gt) dev_err(gt->i915->drm.dev, "Failed to restart %s (%d)\n", engine->name, err); - break; + goto err_wedged; } } @@ -243,11 +233,14 @@ int intel_gt_resume(struct intel_gt *gt) user_forcewake(gt, false); -err_fw: +out_fw: intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); intel_gt_pm_put(gt); - return err; + +err_wedged: + intel_gt_set_wedged(gt); + goto out_fw; } static void wait_for_suspend(struct intel_gt *gt) @@ -315,7 +308,7 @@ void intel_gt_suspend_late(struct intel_gt *gt) intel_llc_disable(>->llc); } - intel_gt_sanitize(gt, false); + gt_sanitize(gt, false); GT_TRACE(gt, "\n"); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 4a9e48c12bd4..60f0e2fbe55c 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -51,8 +51,6 @@ void intel_gt_pm_init_early(struct intel_gt *gt); void intel_gt_pm_init(struct intel_gt *gt); void intel_gt_pm_fini(struct intel_gt *gt); -void intel_gt_sanitize(struct intel_gt *gt, bool force); - void intel_gt_suspend_prepare(struct intel_gt *gt); void intel_gt_suspend_late(struct intel_gt *gt); int intel_gt_resume(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index b4f04614230e..7ef1d37970f6 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -14,13 +14,16 @@ #include "intel_gt_requests.h" #include "intel_timeline.h" -static void retire_requests(struct intel_timeline *tl) +static bool retire_requests(struct intel_timeline *tl) { struct i915_request *rq, *rn; list_for_each_entry_safe(rq, rn, &tl->requests, link) if (!i915_request_retire(rq)) - break; + return false; + + /* And check nothing new was submitted */ + return !i915_active_fence_isset(&tl->last_request); } static bool flush_submission(struct intel_gt *gt) @@ -29,9 +32,13 @@ static bool flush_submission(struct intel_gt *gt) enum intel_engine_id id; bool active = false; + if (!intel_gt_pm_is_awake(gt)) + return false; + for_each_engine(engine, gt, id) { - active |= intel_engine_flush_submission(engine); + intel_engine_flush_submission(engine); active |= flush_work(&engine->retire_work); + active |= flush_work(&engine->wakeref.work); } return active; @@ -120,7 
+127,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) timeout = -timeout, interruptible = false; flush_submission(gt); /* kick the ksoftirqd tasklets */ - spin_lock(&timelines->lock); list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { if (!mutex_trylock(&tl->mutex)) { @@ -145,7 +151,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) } } - retire_requests(tl); + if (!retire_requests(tl) || flush_submission(gt)) + active_count++; spin_lock(&timelines->lock); @@ -153,8 +160,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) list_safe_reset_next(tl, tn, link); if (atomic_dec_and_test(&tl->active_count)) list_del(&tl->link); - else - active_count += i915_active_fence_isset(&tl->last_request); mutex_unlock(&tl->mutex); @@ -169,9 +174,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) list_for_each_entry_safe(tl, tn, &free, link) __intel_timeline_free(&tl->kref); - if (flush_submission(gt)) - active_count++; - return active_count ? timeout : 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c new file mode 100644 index 000000000000..16acdc5d6734 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -0,0 +1,598 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/slab.h> /* fault-inject.h is not standalone! */ + +#include <linux/fault-inject.h> + +#include "i915_trace.h" +#include "intel_gt.h" +#include "intel_gtt.h" + +void stash_init(struct pagestash *stash) +{ + pagevec_init(&stash->pvec); + spin_lock_init(&stash->lock); +} + +static struct page *stash_pop_page(struct pagestash *stash) +{ + struct page *page = NULL; + + spin_lock(&stash->lock); + if (likely(stash->pvec.nr)) + page = stash->pvec.pages[--stash->pvec.nr]; + spin_unlock(&stash->lock); + + return page; +} + +static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec) +{ + unsigned int nr; + + spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING); + + nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec)); + memcpy(stash->pvec.pages + stash->pvec.nr, + pvec->pages + pvec->nr - nr, + sizeof(pvec->pages[0]) * nr); + stash->pvec.nr += nr; + + spin_unlock(&stash->lock); + + pvec->nr -= nr; +} + +static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) +{ + struct pagevec stack; + struct page *page; + + if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) + i915_gem_shrink_all(vm->i915); + + page = stash_pop_page(&vm->free_pages); + if (page) + return page; + + if (!vm->pt_kmap_wc) + return alloc_page(gfp); + + /* Look in our global stash of WC pages... */ + page = stash_pop_page(&vm->i915->mm.wc_stash); + if (page) + return page; + + /* + * Otherwise batch allocate pages to amortize cost of set_pages_wc. + * + * We have to be careful as page allocation may trigger the shrinker + * (via direct reclaim) which will fill up the WC stash underneath us. + * So we add our WB pages into a temporary pvec on the stack and merge + * them into the WC stash after all the allocations are complete. 
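stash_push_pagevec() above transfers only as many pages as the stash has room for, and it takes them from the tail of the incoming vector so the caller keeps a contiguous remainder. A small array-based sketch of that bounded transfer, with plain arrays standing in for struct pagevec and a 15-slot capacity chosen just for the example:

#include <stdio.h>

#define PAGEVEC_SIZE 15

struct vec { int nr; int pages[PAGEVEC_SIZE]; };

static void push(struct vec *stash, struct vec *pvec)
{
        int space = PAGEVEC_SIZE - stash->nr;
        int nr = pvec->nr < space ? pvec->nr : space;   /* min_t() in the driver */
        int i;

        for (i = 0; i < nr; i++)                        /* copy from the tail */
                stash->pages[stash->nr + i] = pvec->pages[pvec->nr - nr + i];
        stash->nr += nr;
        pvec->nr -= nr;
}

int main(void)
{
        struct vec stash = { .nr = 13 };                          /* two slots left */
        struct vec incoming = { .nr = 4, .pages = { 1, 2, 3, 4 } };

        push(&stash, &incoming);
        printf("stash.nr=%d incoming.nr=%d last=%d\n",
               stash.nr, incoming.nr, stash.pages[stash.nr - 1]);
        /* prints: stash.nr=15 incoming.nr=2 last=4 */
        return 0;
}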
+ */ + pagevec_init(&stack); + do { + struct page *page; + + page = alloc_page(gfp); + if (unlikely(!page)) + break; + + stack.pages[stack.nr++] = page; + } while (pagevec_space(&stack)); + + if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) { + page = stack.pages[--stack.nr]; + + /* Merge spare WC pages to the global stash */ + if (stack.nr) + stash_push_pagevec(&vm->i915->mm.wc_stash, &stack); + + /* Push any surplus WC pages onto the local VM stash */ + if (stack.nr) + stash_push_pagevec(&vm->free_pages, &stack); + } + + /* Return unwanted leftovers */ + if (unlikely(stack.nr)) { + WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr)); + __pagevec_release(&stack); + } + + return page; +} + +static void vm_free_pages_release(struct i915_address_space *vm, + bool immediate) +{ + struct pagevec *pvec = &vm->free_pages.pvec; + struct pagevec stack; + + lockdep_assert_held(&vm->free_pages.lock); + GEM_BUG_ON(!pagevec_count(pvec)); + + if (vm->pt_kmap_wc) { + /* + * When we use WC, first fill up the global stash and then + * only if full immediately free the overflow. + */ + stash_push_pagevec(&vm->i915->mm.wc_stash, pvec); + + /* + * As we have made some room in the VM's free_pages, + * we can wait for it to fill again. Unless we are + * inside i915_address_space_fini() and must + * immediately release the pages! + */ + if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1)) + return; + + /* + * We have to drop the lock to allow ourselves to sleep, + * so take a copy of the pvec and clear the stash for + * others to use it as we sleep. + */ + stack = *pvec; + pagevec_reinit(pvec); + spin_unlock(&vm->free_pages.lock); + + pvec = &stack; + set_pages_array_wb(pvec->pages, pvec->nr); + + spin_lock(&vm->free_pages.lock); + } + + __pagevec_release(pvec); +} + +static void vm_free_page(struct i915_address_space *vm, struct page *page) +{ + /* + * On !llc, we need to change the pages back to WB. We only do so + * in bulk, so we rarely need to change the page attributes here, + * but doing so requires a stop_machine() from deep inside arch/x86/mm. + * To make detection of the possible sleep more likely, use an + * unconditional might_sleep() for everybody. 
+ */ + might_sleep(); + spin_lock(&vm->free_pages.lock); + while (!pagevec_space(&vm->free_pages.pvec)) + vm_free_pages_release(vm, false); + GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE); + pagevec_add(&vm->free_pages.pvec, page); + spin_unlock(&vm->free_pages.lock); +} + +void __i915_vm_close(struct i915_address_space *vm) +{ + struct i915_vma *vma, *vn; + + mutex_lock(&vm->mutex); + list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { + struct drm_i915_gem_object *obj = vma->obj; + + /* Keep the obj (and hence the vma) alive as _we_ destroy it */ + if (!kref_get_unless_zero(&obj->base.refcount)) + continue; + + atomic_and(~I915_VMA_PIN_MASK, &vma->flags); + WARN_ON(__i915_vma_unbind(vma)); + __i915_vma_put(vma); + + i915_gem_object_put(obj); + } + GEM_BUG_ON(!list_empty(&vm->bound_list)); + mutex_unlock(&vm->mutex); +} + +void i915_address_space_fini(struct i915_address_space *vm) +{ + spin_lock(&vm->free_pages.lock); + if (pagevec_count(&vm->free_pages.pvec)) + vm_free_pages_release(vm, true); + GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); + spin_unlock(&vm->free_pages.lock); + + drm_mm_takedown(&vm->mm); + + mutex_destroy(&vm->mutex); +} + +static void __i915_vm_release(struct work_struct *work) +{ + struct i915_address_space *vm = + container_of(work, struct i915_address_space, rcu.work); + + vm->cleanup(vm); + i915_address_space_fini(vm); + + kfree(vm); +} + +void i915_vm_release(struct kref *kref) +{ + struct i915_address_space *vm = + container_of(kref, struct i915_address_space, ref); + + GEM_BUG_ON(i915_is_ggtt(vm)); + trace_i915_ppgtt_release(vm); + + queue_rcu_work(vm->i915->wq, &vm->rcu); +} + +void i915_address_space_init(struct i915_address_space *vm, int subclass) +{ + kref_init(&vm->ref); + INIT_RCU_WORK(&vm->rcu, __i915_vm_release); + atomic_set(&vm->open, 1); + + /* + * The vm->mutex must be reclaim safe (for use in the shrinker). + * Do a dummy acquire now under fs_reclaim so that any allocation + * attempt holding the lock is immediately reported by lockdep. 
+ */ + mutex_init(&vm->mutex); + lockdep_set_subclass(&vm->mutex, subclass); + i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); + + GEM_BUG_ON(!vm->total); + drm_mm_init(&vm->mm, 0, vm->total); + vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; + + stash_init(&vm->free_pages); + + INIT_LIST_HEAD(&vm->bound_list); +} + +void clear_pages(struct i915_vma *vma) +{ + GEM_BUG_ON(!vma->pages); + + if (vma->pages != vma->obj->mm.pages) { + sg_free_table(vma->pages); + kfree(vma->pages); + } + vma->pages = NULL; + + memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); +} + +static int __setup_page_dma(struct i915_address_space *vm, + struct i915_page_dma *p, + gfp_t gfp) +{ + p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL); + if (unlikely(!p->page)) + return -ENOMEM; + + p->daddr = dma_map_page_attrs(vm->dma, + p->page, 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC | + DMA_ATTR_NO_WARN); + if (unlikely(dma_mapping_error(vm->dma, p->daddr))) { + vm_free_page(vm, p->page); + return -ENOMEM; + } + + return 0; +} + +int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p) +{ + return __setup_page_dma(vm, p, __GFP_HIGHMEM); +} + +void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p) +{ + dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + vm_free_page(vm, p->page); +} + +void +fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count) +{ + kunmap_atomic(memset64(kmap_atomic(p->page), val, count)); +} + +int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) +{ + unsigned long size; + + /* + * In order to utilize 64K pages for an object with a size < 2M, we will + * need to support a 64K scratch page, given that every 16th entry for a + * page-table operating in 64K mode must point to a properly aligned 64K + * region, including any PTEs which happen to point to scratch. + * + * This is only relevant for the 48b PPGTT where we support + * huge-gtt-pages, see also i915_vma_insert(). However, as we share the + * scratch (read-only) between all vm, we create one 64k scratch page + * for all. 
+ */ + size = I915_GTT_PAGE_SIZE_4K; + if (i915_vm_is_4lvl(vm) && + HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) { + size = I915_GTT_PAGE_SIZE_64K; + gfp |= __GFP_NOWARN; + } + gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL; + + do { + unsigned int order = get_order(size); + struct page *page; + dma_addr_t addr; + + page = alloc_pages(gfp, order); + if (unlikely(!page)) + goto skip; + + addr = dma_map_page_attrs(vm->dma, + page, 0, size, + PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC | + DMA_ATTR_NO_WARN); + if (unlikely(dma_mapping_error(vm->dma, addr))) + goto free_page; + + if (unlikely(!IS_ALIGNED(addr, size))) + goto unmap_page; + + vm->scratch[0].base.page = page; + vm->scratch[0].base.daddr = addr; + vm->scratch_order = order; + return 0; + +unmap_page: + dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL); +free_page: + __free_pages(page, order); +skip: + if (size == I915_GTT_PAGE_SIZE_4K) + return -ENOMEM; + + size = I915_GTT_PAGE_SIZE_4K; + gfp &= ~__GFP_NOWARN; + } while (1); +} + +void cleanup_scratch_page(struct i915_address_space *vm) +{ + struct i915_page_dma *p = px_base(&vm->scratch[0]); + unsigned int order = vm->scratch_order; + + dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT, + PCI_DMA_BIDIRECTIONAL); + __free_pages(p->page, order); +} + +void free_scratch(struct i915_address_space *vm) +{ + int i; + + if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */ + return; + + for (i = 1; i <= vm->top; i++) { + if (!px_dma(&vm->scratch[i])) + break; + cleanup_page_dma(vm, px_base(&vm->scratch[i])); + } + + cleanup_scratch_page(vm); +} + +void gtt_write_workarounds(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + + /* + * This function is for gtt related workarounds. This function is + * called on driver load and after a GPU reset, so you can place + * workarounds here even if they get overwritten by GPU reset. + */ + /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */ + if (IS_BROADWELL(i915)) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); + else if (IS_CHERRYVIEW(i915)) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); + else if (IS_GEN9_LP(i915)) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); + else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); + + /* + * To support 64K PTEs we need to first enable the use of the + * Intermediate-Page-Size(IPS) bit of the PDE field via some magical + * mmio, otherwise the page-walker will simply ignore the IPS bit. This + * shouldn't be needed after GEN10. + * + * 64K pages were first introduced from BDW+, although technically they + * only *work* from gen9+. For pre-BDW we instead have the option for + * 32K pages, but we don't currently have any support for it in our + * driver. + */ + if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) && + INTEL_GEN(i915) <= 10) + intel_uncore_rmw(uncore, + GEN8_GAMW_ECO_DEV_RW_IA, + 0, + GAMW_ECO_ENABLE_64K_IPS_FIELD); + + if (IS_GEN_RANGE(i915, 8, 11)) { + bool can_use_gtt_cache = true; + + /* + * According to the BSpec if we use 2M/1G pages then we also + * need to disable the GTT cache. At least on BDW we can see + * visual corruption when using 2M pages, and not disabling the + * GTT cache. 
+ */ + if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M)) + can_use_gtt_cache = false; + + /* WaGttCachingOffByDefault */ + intel_uncore_write(uncore, + HSW_GTT_CACHE_EN, + can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0); + WARN_ON_ONCE(can_use_gtt_cache && + intel_uncore_read(uncore, + HSW_GTT_CACHE_EN) == 0); + } +} + +u64 gen8_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; + + if (unlikely(flags & PTE_READ_ONLY)) + pte &= ~_PAGE_RW; + + switch (level) { + case I915_CACHE_NONE: + pte |= PPAT_UNCACHED; + break; + case I915_CACHE_WT: + pte |= PPAT_DISPLAY_ELLC; + break; + default: + pte |= PPAT_CACHED; + break; + } + + return pte; +} + +static void tgl_setup_private_ppat(struct intel_uncore *uncore) +{ + /* TGL doesn't support LLC or AGE settings */ + intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC); + intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT); + intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC); + intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB); +} + +static void cnl_setup_private_ppat(struct intel_uncore *uncore) +{ + intel_uncore_write(uncore, + GEN10_PAT_INDEX(0), + GEN8_PPAT_WB | GEN8_PPAT_LLC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(1), + GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(2), + GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(3), + GEN8_PPAT_UC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(4), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(5), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(6), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(7), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); +} + +/* + * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability + * bits. When using advanced contexts each context stores its own PAT, but + * writing this data shouldn't be harmful even in those cases. + */ +static void bdw_setup_private_ppat(struct intel_uncore *uncore) +{ + u64 pat; + + pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ + GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ + GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ + GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ + GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | + GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | + GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | + GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); +} + +static void chv_setup_private_ppat(struct intel_uncore *uncore) +{ + u64 pat; + + /* + * Map WB on BDW to snooped on CHV. + * + * Only the snoop bit has meaning for CHV, the rest is + * ignored. 
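bdw_setup_private_ppat() builds the whole eight-entry PAT as a single u64, one byte per entry, then writes the low and high halves to the two PAT registers. A sketch of that packing using the GEN8_PPAT* values from this diff; the particular entries chosen below are only an example, not the full table the driver programs:

#include <stdint.h>
#include <stdio.h>

#define GEN8_PPAT_AGE(x)   ((x) << 4)
#define GEN8_PPAT_LLCELLC  (2 << 2)
#define GEN8_PPAT_LLC      (1 << 2)
#define GEN8_PPAT_WB       (3 << 0)
#define GEN8_PPAT_WC       (1 << 0)
#define GEN8_PPAT_UC       (0 << 0)
#define GEN8_PPAT(i, x)    ((uint64_t)(x) << ((i) * 8))

int main(void)
{
        uint64_t pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
                       GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |
                       GEN8_PPAT(3, GEN8_PPAT_UC) |
                       GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));

        printf("PAT_LO = 0x%08x\n", (uint32_t)pat);          /* lower_32_bits(pat) */
        printf("PAT_HI = 0x%08x\n", (uint32_t)(pat >> 32));  /* upper_32_bits(pat) */
        return 0;
}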
+ * + * The hardware will never snoop for certain types of accesses: + * - CPU GTT (GMADR->GGTT->no snoop->memory) + * - PPGTT page tables + * - some other special cycles + * + * As with BDW, we also need to consider the following for GT accesses: + * "For GGTT, there is NO pat_sel[2:0] from the entry, + * so RTL will always use the value corresponding to + * pat_sel = 000". + * Which means we must set the snoop bit in PAT entry 0 + * in order to keep the global status page working. + */ + + pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | + GEN8_PPAT(1, 0) | + GEN8_PPAT(2, 0) | + GEN8_PPAT(3, 0) | + GEN8_PPAT(4, CHV_PPAT_SNOOP) | + GEN8_PPAT(5, CHV_PPAT_SNOOP) | + GEN8_PPAT(6, CHV_PPAT_SNOOP) | + GEN8_PPAT(7, CHV_PPAT_SNOOP); + + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); +} + +void setup_private_pat(struct intel_uncore *uncore) +{ + struct drm_i915_private *i915 = uncore->i915; + + GEM_BUG_ON(INTEL_GEN(i915) < 8); + + if (INTEL_GEN(i915) >= 12) + tgl_setup_private_ppat(uncore); + else if (INTEL_GEN(i915) >= 10) + cnl_setup_private_ppat(uncore); + else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915)) + chv_setup_private_ppat(uncore); + else + bdw_setup_private_ppat(uncore); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/mock_gtt.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h new file mode 100644 index 000000000000..7da7681c20b1 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -0,0 +1,587 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + * + * Please try to maintain the following order within this file unless it makes + * sense to do otherwise. From top to bottom: + * 1. typedefs + * 2. #defines, and macros + * 3. structure definitions + * 4. function prototypes + * + * Within each section, please try to order by generation in ascending order, + * from top to bottom (ie. gen6 on the top, gen8 on the bottom). + */ + +#ifndef __INTEL_GTT_H__ +#define __INTEL_GTT_H__ + +#include <linux/io-mapping.h> +#include <linux/kref.h> +#include <linux/mm.h> +#include <linux/pagevec.h> +#include <linux/scatterlist.h> +#include <linux/workqueue.h> + +#include <drm/drm_mm.h> + +#include "gt/intel_reset.h" +#include "i915_gem_fence_reg.h" +#include "i915_selftest.h" +#include "i915_vma_types.h" + +#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) + +#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT) +#define DBG(...) trace_printk(__VA_ARGS__) +#else +#define DBG(...) 
+#endif + +#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */ + +#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12) +#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16) +#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21) + +#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K +#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M + +#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE + +#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE + +#define I915_FENCE_REG_NONE -1 +#define I915_MAX_NUM_FENCES 32 +/* 32 fences + sign bit for FENCE_REG_NONE */ +#define I915_MAX_NUM_FENCE_BITS 6 + +typedef u32 gen6_pte_t; +typedef u64 gen8_pte_t; + +#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT) + +#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len))) +#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1) +#define I915_PDES 512 +#define I915_PDE_MASK (I915_PDES - 1) + +/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ +#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) +#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) +#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) +#define GEN6_PTE_CACHE_LLC (2 << 1) +#define GEN6_PTE_UNCACHED (1 << 1) +#define GEN6_PTE_VALID REG_BIT(0) + +#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t)) +#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE) +#define GEN6_PD_ALIGN (PAGE_SIZE * 16) +#define GEN6_PDE_SHIFT 22 +#define GEN6_PDE_VALID REG_BIT(0) +#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT)) + +#define GEN7_PTE_CACHE_L3_LLC (3 << 1) + +#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2) +#define BYT_PTE_WRITEABLE REG_BIT(1) + +/* + * Cacheability Control is a 4-bit value. The low three bits are stored in bits + * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE. + */ +#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \ + (((bits) & 0x8) << (11 - 3))) +#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) +#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) +#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8) +#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) +#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7) +#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) +#define HSW_PTE_UNCACHED (0) +#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0)) +#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr) + +/* + * GEN8 32b style address is defined as a 3 level page table: + * 31:30 | 29:21 | 20:12 | 11:0 + * PDPE | PDE | PTE | offset + * The difference as compared to normal x86 3 level page table is the PDPEs are + * programmed via register. 
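The HSW_CACHEABILITY_CONTROL() macro above splits the 4-bit cacheability index so that bits 2:0 land in PTE bits 3:1 and bit 3 lands in PTE bit 11, which is why HSW_WB_ELLC_LLC_AGE3 comes out as a single high bit. A tiny sketch that just evaluates the macro for the indices used in this header:

#include <stdio.h>

#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
                                        (((bits) & 0x8) << (11 - 3)))

int main(void)
{
        printf("HSW_WB_LLC_AGE3      = 0x%03x\n", HSW_CACHEABILITY_CONTROL(0x2)); /* 0x004 */
        printf("HSW_WB_ELLC_LLC_AGE3 = 0x%03x\n", HSW_CACHEABILITY_CONTROL(0x8)); /* 0x800 */
        printf("HSW_WT_ELLC_LLC_AGE3 = 0x%03x\n", HSW_CACHEABILITY_CONTROL(0x7)); /* 0x00e */
        return 0;
}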
+ * + * GEN8 48b style address is defined as a 4 level page table: + * 47:39 | 38:30 | 29:21 | 20:12 | 11:0 + * PML4E | PDPE | PDE | PTE | offset + */ +#define GEN8_3LVL_PDPES 4 + +#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD) +#define PPAT_CACHED_PDE 0 /* WB LLC */ +#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */ +#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */ + +#define CHV_PPAT_SNOOP REG_BIT(6) +#define GEN8_PPAT_AGE(x) ((x)<<4) +#define GEN8_PPAT_LLCeLLC (3<<2) +#define GEN8_PPAT_LLCELLC (2<<2) +#define GEN8_PPAT_LLC (1<<2) +#define GEN8_PPAT_WB (3<<0) +#define GEN8_PPAT_WT (2<<0) +#define GEN8_PPAT_WC (1<<0) +#define GEN8_PPAT_UC (0<<0) +#define GEN8_PPAT_ELLC_OVERRIDE (0<<2) +#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8)) + +#define GEN8_PDE_IPS_64K BIT(11) +#define GEN8_PDE_PS_2M BIT(7) + +#define for_each_sgt_daddr(__dp, __iter, __sgt) \ + __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE) + +struct i915_page_dma { + struct page *page; + union { + dma_addr_t daddr; + + /* + * For gen6/gen7 only. This is the offset in the GGTT + * where the page directory entries for PPGTT begin + */ + u32 ggtt_offset; + }; +}; + +struct i915_page_scratch { + struct i915_page_dma base; + u64 encode; +}; + +struct i915_page_table { + struct i915_page_dma base; + atomic_t used; +}; + +struct i915_page_directory { + struct i915_page_table pt; + spinlock_t lock; + void *entry[512]; +}; + +#define __px_choose_expr(x, type, expr, other) \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), type) || \ + __builtin_types_compatible_p(typeof(x), const type), \ + ({ type __x = (type)(x); expr; }), \ + other) + +#define px_base(px) \ + __px_choose_expr(px, struct i915_page_dma *, __x, \ + __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \ + __px_choose_expr(px, struct i915_page_table *, &__x->base, \ + __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \ + (void)0)))) +#define px_dma(px) (px_base(px)->daddr) + +#define px_pt(px) \ + __px_choose_expr(px, struct i915_page_table *, __x, \ + __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \ + (void)0)) +#define px_used(px) (&px_pt(px)->used) + +enum i915_cache_level; + +struct drm_i915_file_private; +struct drm_i915_gem_object; +struct i915_vma; +struct intel_gt; + +struct i915_vma_ops { + /* Map an object into an address space with the given cache flags. */ + int (*bind_vma)(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags); + /* + * Unmap an object from an address space. This usually consists of + * setting the valid PTE entries to a reserved scratch page. + */ + void (*unbind_vma)(struct i915_vma *vma); + + int (*set_pages)(struct i915_vma *vma); + void (*clear_pages)(struct i915_vma *vma); +}; + +struct pagestash { + spinlock_t lock; + struct pagevec pvec; +}; + +void stash_init(struct pagestash *stash); + +struct i915_address_space { + struct kref ref; + struct rcu_work rcu; + + struct drm_mm mm; + struct intel_gt *gt; + struct drm_i915_private *i915; + struct device *dma; + /* + * Every address space belongs to a struct file - except for the global + * GTT that is owned by the driver (and so @file is set to NULL). In + * principle, no information should leak from one context to another + * (or between files/processes etc) unless explicitly shared by the + * owner. Tracking the owner is important in order to free up per-file + * objects along with the file, to aide resource tracking, and to + * assign blame. 
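Following the 4-level layout in the comment above, each level indexes 512 entries (9 bits), matching I915_PDES. A sketch that decomposes an arbitrary 48-bit GPU virtual address into those indices plus the page offset:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t addr = 0x0000123456789abcULL;   /* arbitrary example address */

        printf("pml4e = %llu\n", (unsigned long long)((addr >> 39) & 0x1ff));
        printf("pdpe  = %llu\n", (unsigned long long)((addr >> 30) & 0x1ff));
        printf("pde   = %llu\n", (unsigned long long)((addr >> 21) & 0x1ff));
        printf("pte   = %llu\n", (unsigned long long)((addr >> 12) & 0x1ff));
        printf("off   = 0x%llx\n", (unsigned long long)(addr & 0xfff));
        return 0;
}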
+ */ + struct drm_i915_file_private *file; + u64 total; /* size addr space maps (ex. 2GB for ggtt) */ + u64 reserved; /* size addr space reserved */ + + unsigned int bind_async_flags; + + /* + * Each active user context has its own address space (in full-ppgtt). + * Since the vm may be shared between multiple contexts, we count how + * many contexts keep us "open". Once open hits zero, we are closed + * and do not allow any new attachments, and proceed to shutdown our + * vma and page directories. + */ + atomic_t open; + + struct mutex mutex; /* protects vma and our lists */ +#define VM_CLASS_GGTT 0 +#define VM_CLASS_PPGTT 1 + + struct i915_page_scratch scratch[4]; + unsigned int scratch_order; + unsigned int top; + + /** + * List of vma currently bound. + */ + struct list_head bound_list; + + struct pagestash free_pages; + + /* Global GTT */ + bool is_ggtt:1; + + /* Some systems require uncached updates of the page directories */ + bool pt_kmap_wc:1; + + /* Some systems support read-only mappings for GGTT and/or PPGTT */ + bool has_read_only:1; + + u64 (*pte_encode)(dma_addr_t addr, + enum i915_cache_level level, + u32 flags); /* Create a valid PTE */ +#define PTE_READ_ONLY BIT(0) + + int (*allocate_va_range)(struct i915_address_space *vm, + u64 start, u64 length); + void (*clear_range)(struct i915_address_space *vm, + u64 start, u64 length); + void (*insert_page)(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level cache_level, + u32 flags); + void (*insert_entries)(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags); + void (*cleanup)(struct i915_address_space *vm); + + struct i915_vma_ops vma_ops; + + I915_SELFTEST_DECLARE(struct fault_attr fault_attr); + I915_SELFTEST_DECLARE(bool scrub_64K); +}; + +/* + * The Graphics Translation Table is the way in which GEN hardware translates a + * Graphics Virtual Address into a Physical Address. In addition to the normal + * collateral associated with any va->pa translations GEN hardware also has a + * portion of the GTT which can be mapped by the CPU and remain both coherent + * and correct (in cases like swizzling). That region is referred to as GMADR in + * the spec. + */ +struct i915_ggtt { + struct i915_address_space vm; + + struct io_mapping iomap; /* Mapping to our CPU mappable region */ + struct resource gmadr; /* GMADR resource */ + resource_size_t mappable_end; /* End offset that we can CPU map */ + + /** "Graphics Stolen Memory" holds the global PTEs */ + void __iomem *gsm; + void (*invalidate)(struct i915_ggtt *ggtt); + + /** PPGTT used for aliasing the PPGTT with the GTT */ + struct i915_ppgtt *alias; + + bool do_idle_maps; + + int mtrr; + + /** Bit 6 swizzling required for X tiling */ + u32 bit_6_swizzle_x; + /** Bit 6 swizzling required for Y tiling */ + u32 bit_6_swizzle_y; + + u32 pin_bias; + + unsigned int num_fences; + struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; + struct list_head fence_list; + + /** + * List of all objects in gtt_space, currently mmaped by userspace. + * All objects within this list must also be on bound_list. 
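The vm->open counter described above acts as a second reference count that gates new attachments: i915_vm_tryopen() only succeeds while the count is non-zero, and the last i915_vm_close() triggers the vma and page-directory teardown. A userspace sketch of that semantics, with C11 atomics standing in for the kernel's atomic_t helpers and the teardown reduced to a print:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int open_count = 1;   /* creator's reference */

static bool vm_tryopen(void)
{
        int old = atomic_load(&open_count);

        /* mirrors atomic_add_unless(&vm->open, 1, 0) */
        while (old != 0) {
                if (atomic_compare_exchange_weak(&open_count, &old, old + 1))
                        return true;
        }
        return false;
}

static void vm_close(void)
{
        if (atomic_fetch_sub(&open_count, 1) == 1)
                printf("last opener gone: unbind vmas, tear down page tables\n");
}

int main(void)
{
        printf("tryopen: %d\n", vm_tryopen());   /* 1: still open */
        vm_close();
        vm_close();                              /* count hits zero */
        printf("tryopen: %d\n", vm_tryopen());   /* 0: closed, no new users */
        return 0;
}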
+ */ + struct list_head userfault_list; + + /* Manual runtime pm autosuspend delay for user GGTT mmaps */ + struct intel_wakeref_auto userfault_wakeref; + + struct mutex error_mutex; + struct drm_mm_node error_capture; + struct drm_mm_node uc_fw; +}; + +struct i915_ppgtt { + struct i915_address_space vm; + + struct i915_page_directory *pd; +}; + +#define i915_is_ggtt(vm) ((vm)->is_ggtt) + +static inline bool +i915_vm_is_4lvl(const struct i915_address_space *vm) +{ + return (vm->total - 1) >> 32; +} + +static inline bool +i915_vm_has_scratch_64K(struct i915_address_space *vm) +{ + return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K); +} + +static inline bool +i915_vm_has_cache_coloring(struct i915_address_space *vm) +{ + return i915_is_ggtt(vm) && vm->mm.color_adjust; +} + +static inline struct i915_ggtt * +i915_vm_to_ggtt(struct i915_address_space *vm) +{ + BUILD_BUG_ON(offsetof(struct i915_ggtt, vm)); + GEM_BUG_ON(!i915_is_ggtt(vm)); + return container_of(vm, struct i915_ggtt, vm); +} + +static inline struct i915_ppgtt * +i915_vm_to_ppgtt(struct i915_address_space *vm) +{ + BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm)); + GEM_BUG_ON(i915_is_ggtt(vm)); + return container_of(vm, struct i915_ppgtt, vm); +} + +static inline struct i915_address_space * +i915_vm_get(struct i915_address_space *vm) +{ + kref_get(&vm->ref); + return vm; +} + +void i915_vm_release(struct kref *kref); + +static inline void i915_vm_put(struct i915_address_space *vm) +{ + kref_put(&vm->ref, i915_vm_release); +} + +static inline struct i915_address_space * +i915_vm_open(struct i915_address_space *vm) +{ + GEM_BUG_ON(!atomic_read(&vm->open)); + atomic_inc(&vm->open); + return i915_vm_get(vm); +} + +static inline bool +i915_vm_tryopen(struct i915_address_space *vm) +{ + if (atomic_add_unless(&vm->open, 1, 0)) + return i915_vm_get(vm); + + return false; +} + +void __i915_vm_close(struct i915_address_space *vm); + +static inline void +i915_vm_close(struct i915_address_space *vm) +{ + GEM_BUG_ON(!atomic_read(&vm->open)); + if (atomic_dec_and_test(&vm->open)) + __i915_vm_close(vm); + + i915_vm_put(vm); +} + +void i915_address_space_init(struct i915_address_space *vm, int subclass); +void i915_address_space_fini(struct i915_address_space *vm); + +static inline u32 i915_pte_index(u64 address, unsigned int pde_shift) +{ + const u32 mask = NUM_PTE(pde_shift) - 1; + + return (address >> PAGE_SHIFT) & mask; +} + +/* + * Helper to counts the number of PTEs within the given length. This count + * does not cross a page table boundary, so the max value would be + * GEN6_PTES for GEN6, and GEN8_PTES for GEN8. 
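The PTE-count helper whose kerneldoc closes this chunk (its body follows just below in the diff) clamps a range to the page-table boundary it starts in, so callers can walk an allocation table by table. A sketch of that clamping with the gen6 shift of 22 bits, i.e. 4 MiB and 1024 PTEs per table; the addresses are examples:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define GEN6_PDE_SHIFT 22
#define NUM_PTE(pde_shift) (1u << ((pde_shift) - PAGE_SHIFT))

static uint32_t pte_index(uint64_t addr, unsigned int pde_shift)
{
        return (addr >> PAGE_SHIFT) & (NUM_PTE(pde_shift) - 1);
}

static uint32_t pte_count(uint64_t addr, uint64_t length, unsigned int pde_shift)
{
        const uint64_t mask = ~((1ULL << pde_shift) - 1);
        uint64_t end = addr + length;

        if ((addr & mask) != (end & mask))      /* crosses a table boundary */
                return NUM_PTE(pde_shift) - pte_index(addr, pde_shift);

        return pte_index(end, pde_shift) - pte_index(addr, pde_shift);
}

int main(void)
{
        /* 3 pages starting one page below a 4 MiB boundary: only 1 PTE fits */
        printf("%u\n", pte_count(0x3ff000, 0x3000, GEN6_PDE_SHIFT));
        /* the same 3 pages fully inside one table: 3 PTEs */
        printf("%u\n", pte_count(0x100000, 0x3000, GEN6_PDE_SHIFT));
        return 0;
}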
+ */ +static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift) +{ + const u64 mask = ~((1ULL << pde_shift) - 1); + u64 end; + + GEM_BUG_ON(length == 0); + GEM_BUG_ON(offset_in_page(addr | length)); + + end = addr + length; + + if ((addr & mask) != (end & mask)) + return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift); + + return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift); +} + +static inline u32 i915_pde_index(u64 addr, u32 shift) +{ + return (addr >> shift) & I915_PDE_MASK; +} + +static inline struct i915_page_table * +i915_pt_entry(const struct i915_page_directory * const pd, + const unsigned short n) +{ + return pd->entry[n]; +} + +static inline struct i915_page_directory * +i915_pd_entry(const struct i915_page_directory * const pdp, + const unsigned short n) +{ + return pdp->entry[n]; +} + +static inline dma_addr_t +i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) +{ + struct i915_page_dma *pt = ppgtt->pd->entry[n]; + + return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top])); +} + +void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt); + +int i915_ggtt_probe_hw(struct drm_i915_private *i915); +int i915_ggtt_init_hw(struct drm_i915_private *i915); +int i915_ggtt_enable_hw(struct drm_i915_private *i915); +void i915_ggtt_enable_guc(struct i915_ggtt *ggtt); +void i915_ggtt_disable_guc(struct i915_ggtt *ggtt); +int i915_init_ggtt(struct drm_i915_private *i915); +void i915_ggtt_driver_release(struct drm_i915_private *i915); + +static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt) +{ + return ggtt->mappable_end > 0; +} + +int i915_ppgtt_init_hw(struct intel_gt *gt); + +struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt); + +void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915); +void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915); + +u64 gen8_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags); + +int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p); +void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p); + +#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page) + +void +fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count); + +#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64)) +#define fill32_px(px, v) do { \ + u64 v__ = lower_32_bits(v); \ + fill_px((px), v__ << 32 | v__); \ +} while (0) + +int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp); +void cleanup_scratch_page(struct i915_address_space *vm); +void free_scratch(struct i915_address_space *vm); + +struct i915_page_table *alloc_pt(struct i915_address_space *vm); +struct i915_page_directory *alloc_pd(struct i915_address_space *vm); +struct i915_page_directory *__alloc_pd(size_t sz); + +void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd); + +#define free_px(vm, px) free_pd(vm, px_base(px)) + +void +__set_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + struct i915_page_dma * const to, + u64 (*encode)(const dma_addr_t, const enum i915_cache_level)); + +#define set_pd_entry(pd, idx, to) \ + __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode) + +void +clear_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + const struct i915_page_scratch * const scratch); + +bool +release_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + struct i915_page_table * const pt, + 
const struct i915_page_scratch * const scratch); +void gen6_ggtt_invalidate(struct i915_ggtt *ggtt); + +int ggtt_set_pages(struct i915_vma *vma); +int ppgtt_set_pages(struct i915_vma *vma); +void clear_pages(struct i915_vma *vma); + +void gtt_write_workarounds(struct intel_gt *gt); + +void setup_private_pat(struct intel_uncore *uncore); + +static inline struct sgt_dma { + struct scatterlist *sg; + dma_addr_t dma, max; +} sgt_dma(struct i915_vma *vma) { + struct scatterlist *sg = vma->pages->sgl; + dma_addr_t addr = sg_dma_address(sg); + + return (struct sgt_dma){ sg, addr, addr + sg->length }; +} + +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 4fb70a7716e3..9e430590fb3a 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -488,17 +488,23 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) return desc; } -static u32 *set_offsets(u32 *regs, +static inline unsigned int dword_in_page(void *addr) +{ + return offset_in_page(addr) / sizeof(u32); +} + +static void set_offsets(u32 *regs, const u8 *data, - const struct intel_engine_cs *engine) + const struct intel_engine_cs *engine, + bool clear) #define NOP(x) (BIT(7) | (x)) -#define LRI(count, flags) ((flags) << 6 | (count)) +#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6))) #define POSTED BIT(0) #define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200)) #define REG16(x) \ (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \ (((x) >> 2) & 0x7f) -#define END() 0 +#define END(x) 0, (x) { const u32 base = engine->mmio_base; @@ -506,7 +512,10 @@ static u32 *set_offsets(u32 *regs, u8 count, flags; if (*data & BIT(7)) { /* skip */ - regs += *data++ & ~BIT(7); + count = *data++ & ~BIT(7); + if (clear) + memset32(regs, MI_NOOP, count); + regs += count; continue; } @@ -532,12 +541,25 @@ static u32 *set_offsets(u32 *regs, offset |= v & ~BIT(7); } while (v & BIT(7)); - *regs = base + (offset << 2); + regs[0] = base + (offset << 2); + if (clear) + regs[1] = 0; regs += 2; } while (--count); } - return regs; + if (clear) { + u8 count = *++data; + + /* Clear past the tail for HW access */ + GEM_BUG_ON(dword_in_page(regs) > count); + memset32(regs, MI_NOOP, count - dword_in_page(regs)); + + /* Close the batch; used mainly by live_lrc_layout() */ + *regs = MI_BATCH_BUFFER_END; + if (INTEL_GEN(engine->i915) >= 10) + *regs |= BIT(0); + } } static const u8 gen8_xcs_offsets[] = { @@ -572,7 +594,7 @@ static const u8 gen8_xcs_offsets[] = { REG16(0x200), REG(0x028), - END(), + END(80) }; static const u8 gen9_xcs_offsets[] = { @@ -656,7 +678,7 @@ static const u8 gen9_xcs_offsets[] = { REG16(0x67c), REG(0x068), - END(), + END(176) }; static const u8 gen12_xcs_offsets[] = { @@ -688,7 +710,7 @@ static const u8 gen12_xcs_offsets[] = { REG16(0x274), REG16(0x270), - END(), + END(80) }; static const u8 gen8_rcs_offsets[] = { @@ -725,7 +747,91 @@ static const u8 gen8_rcs_offsets[] = { LRI(1, 0), REG(0x0c8), - END(), + END(80) +}; + +static const u8 gen9_rcs_offsets[] = { + NOP(1), + LRI(14, POSTED), + REG16(0x244), + REG(0x34), + REG(0x30), + REG(0x38), + REG(0x3c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + + NOP(3), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + NOP(13), + LRI(1, 0), + REG(0xc8), + + NOP(13), + 
LRI(44, POSTED), + REG(0x28), + REG(0x9c), + REG(0xc0), + REG(0x178), + REG(0x17c), + REG16(0x358), + REG(0x170), + REG(0x150), + REG(0x154), + REG(0x158), + REG16(0x41c), + REG16(0x600), + REG16(0x604), + REG16(0x608), + REG16(0x60c), + REG16(0x610), + REG16(0x614), + REG16(0x618), + REG16(0x61c), + REG16(0x620), + REG16(0x624), + REG16(0x628), + REG16(0x62c), + REG16(0x630), + REG16(0x634), + REG16(0x638), + REG16(0x63c), + REG16(0x640), + REG16(0x644), + REG16(0x648), + REG16(0x64c), + REG16(0x650), + REG16(0x654), + REG16(0x658), + REG16(0x65c), + REG16(0x660), + REG16(0x664), + REG16(0x668), + REG16(0x66c), + REG16(0x670), + REG16(0x674), + REG16(0x678), + REG16(0x67c), + REG(0x68), + + END(176) }; static const u8 gen11_rcs_offsets[] = { @@ -766,7 +872,7 @@ static const u8 gen11_rcs_offsets[] = { LRI(1, 0), REG(0x0c8), - END(), + END(80) }; static const u8 gen12_rcs_offsets[] = { @@ -807,7 +913,7 @@ static const u8 gen12_rcs_offsets[] = { LRI(1, 0), REG(0x0c8), - END(), + END(80) }; #undef END @@ -832,6 +938,8 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine) return gen12_rcs_offsets; else if (INTEL_GEN(engine->i915) >= 11) return gen11_rcs_offsets; + else if (INTEL_GEN(engine->i915) >= 9) + return gen9_rcs_offsets; else return gen8_rcs_offsets; } else { @@ -1108,7 +1216,7 @@ __execlists_schedule_in(struct i915_request *rq) /* We don't need a strict matching tag, just different values */ ce->lrc_desc &= ~GENMASK_ULL(47, 37); ce->lrc_desc |= - (u64)(engine->context_tag++ % NUM_CONTEXT_TAG) << + (u64)(++engine->context_tag % NUM_CONTEXT_TAG) << GEN11_SW_CTX_ID_SHIFT; BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID); } @@ -1243,10 +1351,6 @@ static u64 execlists_update_context(struct i915_request *rq) */ wmb(); - /* Wa_1607138340:tgl */ - if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0)) - desc |= CTX_DESC_FORCE_RESTORE; - ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; return desc; } @@ -1430,8 +1534,8 @@ static bool can_merge_rq(const struct i915_request *prev, if (i915_request_completed(next)) return true; - if (unlikely((prev->flags ^ next->flags) & - (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL))) + if (unlikely((prev->fence.flags ^ next->fence.flags) & + (I915_FENCE_FLAG_NOPREEMPT | I915_FENCE_FLAG_SENTINEL))) return false; if (!can_merge_ctx(prev->context, next->context)) @@ -1443,7 +1547,7 @@ static bool can_merge_rq(const struct i915_request *prev, static void virtual_update_register_offsets(u32 *regs, struct intel_engine_cs *engine) { - set_offsets(regs, reg_offsets(engine), engine); + set_offsets(regs, reg_offsets(engine), engine, false); } static bool virtual_matches(const struct virtual_engine *ve, @@ -1590,7 +1694,7 @@ active_timeslice(const struct intel_engine_cs *engine) { const struct i915_request *rq = *engine->execlists.active; - if (i915_request_completed(rq)) + if (!rq || i915_request_completed(rq)) return 0; if (engine->execlists.switch_priority_hint < effective_prio(rq)) @@ -1636,6 +1740,11 @@ static void set_preempt_timeout(struct intel_engine_cs *engine) active_preempt_timeout(engine)); } +static inline void clear_ports(struct i915_request **ports, int count) +{ + memset_p((void **)ports, NULL, count); +} + static void execlists_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -1996,10 +2105,9 @@ done: goto skip_submit; } + clear_ports(port + 1, last_port - port); - memset(port + 1, 0, (last_port - port) * sizeof(*port)); execlists_submit_ports(engine); - 
set_preempt_timeout(engine); } else { skip_submit: @@ -2014,13 +2122,14 @@ cancel_port_requests(struct intel_engine_execlists * const execlists) for (port = execlists->pending; *port; port++) execlists_schedule_out(*port); - memset(execlists->pending, 0, sizeof(execlists->pending)); + clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending)); /* Mark the end of active before we overwrite *active */ for (port = xchg(&execlists->active, execlists->pending); *port; port++) execlists_schedule_out(*port); - WRITE_ONCE(execlists->active, - memset(execlists->inflight, 0, sizeof(execlists->inflight))); + clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight)); + + WRITE_ONCE(execlists->active, execlists->inflight); } static inline void @@ -2176,7 +2285,6 @@ static void process_csb(struct intel_engine_cs *engine) /* Point active to the new ELSP; prevent overwriting */ WRITE_ONCE(execlists->active, execlists->pending); - set_timeslice(engine); if (!inject_preempt_hang(execlists)) ring_set_paused(engine, 0); @@ -2217,6 +2325,7 @@ static void process_csb(struct intel_engine_cs *engine) } while (head != tail); execlists->csb_head = head; + set_timeslice(engine); /* * Gen11 has proven to fail wrt global observation point between @@ -2399,7 +2508,7 @@ set_redzone(void *vaddr, const struct intel_engine_cs *engine) vaddr += engine->context_size; - memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE); + memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE); } static void @@ -2410,7 +2519,7 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine) vaddr += engine->context_size; - if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) + if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE)) dev_err_once(engine->i915->drm.dev, "%s context redzone overwritten!\n", engine->name); @@ -2453,33 +2562,21 @@ __execlists_context_pin(struct intel_context *ce, struct intel_engine_cs *engine) { void *vaddr; - int ret; GEM_BUG_ON(!ce->state); - - ret = intel_context_active_acquire(ce); - if (ret) - goto err; GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); vaddr = i915_gem_object_pin_map(ce->state->obj, i915_coherent_map_type(engine->i915) | I915_MAP_OVERRIDE); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - goto unpin_active; - } + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); - ce->lrc_desc = lrc_descriptor(ce, engine); + ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE; ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; __execlists_update_reg_state(ce, engine); return 0; - -unpin_active: - intel_context_active_release(ce); -err: - return ret; } static int execlists_context_pin(struct intel_context *ce) @@ -2494,6 +2591,9 @@ static int execlists_context_alloc(struct intel_context *ce) static void execlists_context_reset(struct intel_context *ce) { + CE_TRACE(ce, "reset\n"); + GEM_BUG_ON(!intel_context_is_pinned(ce)); + /* * Because we emit WA_TAIL_DWORDS there may be a disparity * between our bookkeeping in ce->ring->head and ce->ring->tail and @@ -2510,8 +2610,14 @@ static void execlists_context_reset(struct intel_context *ce) * So to avoid that we reset the context images upon resume. For * simplicity, we just zero everything out. 
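The redzone helpers above poison the page following the context image and later scan it with memchr_inv(); any byte that changed means the hardware wrote past its context. A userspace sketch of the same check; CONTEXT_REDZONE's actual value is defined elsewhere in the driver, so a 0x5a poison byte and a 4 KiB context size are assumed here:

#include <stdio.h>
#include <string.h>

#define REDZONE_SIZE 4096
#define POISON 0x5a

static int redzone_intact(const unsigned char *rz, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)        /* what memchr_inv() does in the kernel */
                if (rz[i] != POISON)
                        return 0;
        return 1;
}

int main(void)
{
        unsigned char context[8192];
        unsigned char *redzone = context + 4096;   /* past engine->context_size */

        memset(redzone, POISON, REDZONE_SIZE);     /* set_redzone() */
        redzone[17] = 0;                           /* simulated hardware overrun */

        if (!redzone_intact(redzone, REDZONE_SIZE))
                fprintf(stderr, "context redzone overwritten!\n");
        return 0;
}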
*/ - intel_ring_reset(ce->ring, 0); + intel_ring_reset(ce->ring, ce->ring->emit); + + /* Scrub away the garbage */ + execlists_init_reg_state(ce->lrc_reg_state, + ce, ce->engine, ce->ring, true); __execlists_update_reg_state(ce, ce->engine); + + ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; } static const struct intel_context_ops execlists_context_ops = { @@ -2925,6 +3031,8 @@ static void enable_execlists(struct intel_engine_cs *engine) RING_HWS_PGA, i915_ggtt_offset(engine->status_page.vma)); ENGINE_POSTING_READ(engine, RING_HWS_PGA); + + engine->context_tag = 0; } static bool unexpected_starting_state(struct intel_engine_cs *engine) @@ -3030,10 +3138,8 @@ static void reset_csb_pointers(struct intel_engine_cs *engine) &execlists->csb_status[reset_value]); } -static void __execlists_reset_reg_state(const struct intel_context *ce, - const struct intel_engine_cs *engine) +static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine) { - u32 *regs = ce->lrc_reg_state; int x; x = lrc_ring_mi_mode(engine); @@ -3043,6 +3149,14 @@ static void __execlists_reset_reg_state(const struct intel_context *ce, } } +static void __execlists_reset_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine) +{ + u32 *regs = ce->lrc_reg_state; + + __reset_stop_ring(regs, engine); +} + static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -3795,7 +3909,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) { /* Default vfuncs which can be overriden by each engine. */ - engine->release = execlists_release; engine->resume = execlists_resume; engine->cops = &execlists_context_ops; @@ -3910,6 +4023,9 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) reset_csb_pointers(engine); + /* Finally, take ownership and responsibility for cleanup! */ + engine->release = execlists_release; + return 0; } @@ -3949,18 +4065,21 @@ static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine) static void init_common_reg_state(u32 * const regs, const struct intel_engine_cs *engine, - const struct intel_ring *ring) + const struct intel_ring *ring, + bool inhibit) { - regs[CTX_CONTEXT_CONTROL] = - _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | - _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); + u32 ctl; + + ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); + ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); + if (inhibit) + ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT; if (INTEL_GEN(engine->i915) < 11) - regs[CTX_CONTEXT_CONTROL] |= - _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | - CTX_CTRL_RS_CTX_ENABLE); + ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | + CTX_CTRL_RS_CTX_ENABLE); + regs[CTX_CONTEXT_CONTROL] = ctl; regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; - regs[CTX_BB_STATE] = RING_BB_PPGTT; } static void init_wa_bb_reg_state(u32 * const regs, @@ -4016,7 +4135,7 @@ static void execlists_init_reg_state(u32 *regs, const struct intel_context *ce, const struct intel_engine_cs *engine, const struct intel_ring *ring, - bool close) + bool inhibit) { /* * A context is actually a big batch buffer with several @@ -4028,21 +4147,17 @@ static void execlists_init_reg_state(u32 *regs, * * Must keep consistent with virtual_update_register_offsets(). 
*/ - u32 *bbe = set_offsets(regs, reg_offsets(engine), engine); - - if (close) { /* Close the batch; used mainly by live_lrc_layout() */ - *bbe = MI_BATCH_BUFFER_END; - if (INTEL_GEN(engine->i915) >= 10) - *bbe |= BIT(0); - } + set_offsets(regs, reg_offsets(engine), engine, inhibit); - init_common_reg_state(regs, engine, ring); + init_common_reg_state(regs, engine, ring, inhibit); init_ppgtt_reg_state(regs, vm_alias(ce->vm)); init_wa_bb_reg_state(regs, engine, INTEL_GEN(engine->i915) >= 12 ? GEN12_CTX_BB_PER_CTX_PTR : CTX_BB_PER_CTX_PTR); + + __reset_stop_ring(regs, engine); } static int @@ -4053,7 +4168,6 @@ populate_lr_context(struct intel_context *ce, { bool inhibit = true; void *vaddr; - u32 *regs; int ret; vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); @@ -4083,11 +4197,8 @@ populate_lr_context(struct intel_context *ce, /* The second page of the context object contains some fields which must * be set up prior to the first execution. */ - regs = vaddr + LRC_STATE_PN * PAGE_SIZE; - execlists_init_reg_state(regs, ce, engine, ring, inhibit); - if (inhibit) - regs[CTX_CONTEXT_CONTROL] |= - _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); + execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE, + ce, engine, ring, inhibit); ret = 0; err_unpin_ctx: @@ -4481,9 +4592,11 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings, ve->base.gt = siblings[0]->gt; ve->base.uncore = siblings[0]->uncore; ve->base.id = -1; + ve->base.class = OTHER_CLASS; ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; + ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; /* * The decision on whether to submit a request using semaphores diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 893249ea48d4..eeef90b55c64 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -127,7 +127,7 @@ struct drm_i915_mocs_table { LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \ L3_3_WB) -static const struct drm_i915_mocs_entry skylake_mocs_table[] = { +static const struct drm_i915_mocs_entry skl_mocs_table[] = { GEN9_MOCS_ENTRIES, MOCS_ENTRY(I915_MOCS_CACHED, LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3), @@ -233,7 +233,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \ L3_1_UC) -static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = { +static const struct drm_i915_mocs_entry tgl_mocs_table[] = { /* Base - Error (Reserved for Non-Use) */ MOCS_ENTRY(0, 0x0, 0x0), /* Base - Reserved */ @@ -267,7 +267,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = { L3_3_WB), }; -static const struct drm_i915_mocs_entry icelake_mocs_table[] = { +static const struct drm_i915_mocs_entry icl_mocs_table[] = { /* Base - Uncached (Deprecated) */ MOCS_ENTRY(I915_MOCS_UNCACHED, LE_1_UC | LE_TC_1_LLC, @@ -284,17 +284,17 @@ static bool get_mocs_settings(const struct drm_i915_private *i915, struct drm_i915_mocs_table *table) { if (INTEL_GEN(i915) >= 12) { - table->size = ARRAY_SIZE(tigerlake_mocs_table); - table->table = tigerlake_mocs_table; + table->size = ARRAY_SIZE(tgl_mocs_table); + table->table = tgl_mocs_table; table->n_entries = GEN11_NUM_MOCS_ENTRIES; } else if (IS_GEN(i915, 11)) { - table->size = ARRAY_SIZE(icelake_mocs_table); - table->table = icelake_mocs_table; + table->size = ARRAY_SIZE(icl_mocs_table); + table->table = icl_mocs_table; table->n_entries = GEN11_NUM_MOCS_ENTRIES; } else if 
(IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) { - table->size = ARRAY_SIZE(skylake_mocs_table); + table->size = ARRAY_SIZE(skl_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; - table->table = skylake_mocs_table; + table->table = skl_mocs_table; } else if (IS_GEN9_LP(i915)) { table->size = ARRAY_SIZE(broxton_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c new file mode 100644 index 000000000000..f86f7e68ce5e --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/slab.h> + +#include "i915_trace.h" +#include "intel_gtt.h" +#include "gen6_ppgtt.h" +#include "gen8_ppgtt.h" + +struct i915_page_table *alloc_pt(struct i915_address_space *vm) +{ + struct i915_page_table *pt; + + pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL); + if (unlikely(!pt)) + return ERR_PTR(-ENOMEM); + + if (unlikely(setup_page_dma(vm, &pt->base))) { + kfree(pt); + return ERR_PTR(-ENOMEM); + } + + atomic_set(&pt->used, 0); + return pt; +} + +struct i915_page_directory *__alloc_pd(size_t sz) +{ + struct i915_page_directory *pd; + + pd = kzalloc(sz, I915_GFP_ALLOW_FAIL); + if (unlikely(!pd)) + return NULL; + + spin_lock_init(&pd->lock); + return pd; +} + +struct i915_page_directory *alloc_pd(struct i915_address_space *vm) +{ + struct i915_page_directory *pd; + + pd = __alloc_pd(sizeof(*pd)); + if (unlikely(!pd)) + return ERR_PTR(-ENOMEM); + + if (unlikely(setup_page_dma(vm, px_base(pd)))) { + kfree(pd); + return ERR_PTR(-ENOMEM); + } + + return pd; +} + +void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd) +{ + cleanup_page_dma(vm, pd); + kfree(pd); +} + +static inline void +write_dma_entry(struct i915_page_dma * const pdma, + const unsigned short idx, + const u64 encoded_entry) +{ + u64 * const vaddr = kmap_atomic(pdma->page); + + vaddr[idx] = encoded_entry; + kunmap_atomic(vaddr); +} + +void +__set_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + struct i915_page_dma * const to, + u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) +{ + /* Each thread pre-pins the pd, and we may have a thread per pde. 
*/ + GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry)); + + atomic_inc(px_used(pd)); + pd->entry[idx] = to; + write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC)); +} + +void +clear_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + const struct i915_page_scratch * const scratch) +{ + GEM_BUG_ON(atomic_read(px_used(pd)) == 0); + + write_dma_entry(px_base(pd), idx, scratch->encode); + pd->entry[idx] = NULL; + atomic_dec(px_used(pd)); +} + +bool +release_pd_entry(struct i915_page_directory * const pd, + const unsigned short idx, + struct i915_page_table * const pt, + const struct i915_page_scratch * const scratch) +{ + bool free = false; + + if (atomic_add_unless(&pt->used, -1, 1)) + return false; + + spin_lock(&pd->lock); + if (atomic_dec_and_test(&pt->used)) { + clear_pd_entry(pd, idx, scratch); + free = true; + } + spin_unlock(&pd->lock); + + return free; +} + +int i915_ppgtt_init_hw(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + + gtt_write_workarounds(gt); + + if (IS_GEN(i915, 6)) + gen6_ppgtt_enable(gt); + else if (IS_GEN(i915, 7)) + gen7_ppgtt_enable(gt); + + return 0; +} + +static struct i915_ppgtt * +__ppgtt_create(struct intel_gt *gt) +{ + if (INTEL_GEN(gt->i915) < 8) + return gen6_ppgtt_create(gt); + else + return gen8_ppgtt_create(gt); +} + +struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt) +{ + struct i915_ppgtt *ppgtt; + + ppgtt = __ppgtt_create(gt); + if (IS_ERR(ppgtt)) + return ppgtt; + + trace_i915_ppgtt_create(&ppgtt->vm); + + return ppgtt; +} + +static int ppgtt_bind_vma(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + u32 pte_flags; + int err; + + if (flags & I915_VMA_ALLOC) { + err = vma->vm->allocate_va_range(vma->vm, + vma->node.start, vma->size); + if (err) + return err; + + set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)); + } + + /* Applicable to VLV, and gen8+ */ + pte_flags = 0; + if (i915_gem_object_is_readonly(vma->obj)) + pte_flags |= PTE_READ_ONLY; + + GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))); + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + wmb(); + + return 0; +} + +static void ppgtt_unbind_vma(struct i915_vma *vma) +{ + if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) + vma->vm->clear_range(vma->vm, vma->node.start, vma->size); +} + +int ppgtt_set_pages(struct i915_vma *vma) +{ + GEM_BUG_ON(vma->pages); + + vma->pages = vma->obj->mm.pages; + + vma->page_sizes = vma->obj->mm.page_sizes; + + return 0; +} + +void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + + ppgtt->vm.gt = gt; + ppgtt->vm.i915 = i915; + ppgtt->vm.dma = &i915->drm.pdev->dev; + ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); + + i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); + + ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; + ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; + ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; + ppgtt->vm.vma_ops.clear_pages = clear_pages; +} diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 1c51296646e0..beee0cf89bce 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -147,11 +147,7 @@ static void mark_innocent(struct i915_request *rq) void __i915_request_reset(struct i915_request *rq, bool guilty) { - GEM_TRACE("%s rq=%llx:%lld, guilty? 
%s\n", - rq->engine->name, - rq->fence.context, - rq->fence.seqno, - yesno(guilty)); + RQ_TRACE(rq, "guilty? %s\n", yesno(guilty)); GEM_BUG_ON(i915_request_completed(rq)); @@ -251,9 +247,8 @@ out: return ret; } -static int ironlake_do_reset(struct intel_gt *gt, - intel_engine_mask_t engine_mask, - unsigned int retry) +static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, + unsigned int retry) { struct intel_uncore *uncore = gt->uncore; int ret; @@ -597,7 +592,7 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt) else if (INTEL_GEN(i915) >= 6) return gen6_reset_engines; else if (INTEL_GEN(i915) >= 5) - return ironlake_do_reset; + return ilk_do_reset; else if (IS_G4X(i915)) return g4x_do_reset; else if (IS_G33(i915) || IS_PINEVIEW(i915)) @@ -625,7 +620,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) */ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) { - GEM_TRACE("engine_mask=%x\n", engine_mask); + GT_TRACE(gt, "engine_mask=%x\n", engine_mask); preempt_disable(); ret = reset(gt, engine_mask, retry); preempt_enable(); @@ -785,8 +780,7 @@ static void nop_submit_request(struct i915_request *request) struct intel_engine_cs *engine = request->engine; unsigned long flags; - GEM_TRACE("%s fence %llx:%lld -> -EIO\n", - engine->name, request->fence.context, request->fence.seqno); + RQ_TRACE(request, "-EIO\n"); dma_fence_set_error(&request->fence, -EIO); spin_lock_irqsave(&engine->active.lock, flags); @@ -813,7 +807,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) intel_engine_dump(engine, &p, "%s\n", engine->name); } - GEM_TRACE("start\n"); + GT_TRACE(gt, "start\n"); /* * First, stop submission to hw, but do not yet complete requests by @@ -844,7 +838,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) reset_finish(gt, awake); - GEM_TRACE("end\n"); + GT_TRACE(gt, "end\n"); } void intel_gt_set_wedged(struct intel_gt *gt) @@ -870,7 +864,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) if (test_bit(I915_WEDGED_ON_INIT, >->reset.flags)) return false; - GEM_TRACE("start\n"); + GT_TRACE(gt, "start\n"); /* * Before unwedging, make sure that all pending operations @@ -932,7 +926,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) */ intel_engines_reset_default_submission(gt); - GEM_TRACE("end\n"); + GT_TRACE(gt, "end\n"); smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ clear_bit(I915_WEDGED, >->reset.flags); @@ -1007,7 +1001,7 @@ void intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t awake; int ret; - GEM_TRACE("flags=%lx\n", gt->reset.flags); + GT_TRACE(gt, "flags=%lx\n", gt->reset.flags); might_sleep(); GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, >->reset.flags)); @@ -1236,7 +1230,7 @@ void intel_gt_handle_error(struct intel_gt *gt, engine_mask &= INTEL_INFO(gt->i915)->engine_mask; if (flags & I915_ERROR_CAPTURE) { - i915_capture_error_state(gt->i915, engine_mask, msg); + i915_capture_error_state(gt->i915); intel_gt_clear_error_registers(gt, engine_mask); } @@ -1329,10 +1323,10 @@ int intel_gt_terminally_wedged(struct intel_gt *gt) if (!intel_gt_is_wedged(gt)) return 0; - /* Reset still in progress? Maybe we will recover? */ - if (!test_bit(I915_RESET_BACKOFF, >->reset.flags)) + if (intel_gt_has_init_error(gt)) return -EIO; + /* Reset still in progress? Maybe we will recover? 
*/ if (wait_event_interruptible(gt->reset.queue, !test_bit(I915_RESET_BACKOFF, >->reset.flags))) @@ -1354,6 +1348,9 @@ void intel_gt_init_reset(struct intel_gt *gt) init_waitqueue_head(>->reset.queue); mutex_init(>->reset.mutex); init_srcu_struct(>->reset.backoff_srcu); + + /* no GPU until we are ready! */ + __set_bit(I915_WEDGED, >->reset.flags); } void intel_gt_fini_reset(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 81f872f9ef03..bc44fe8e5ffa 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -33,6 +33,7 @@ #include "gem/i915_gem_context.h" +#include "gen6_ppgtt.h" #include "i915_drv.h" #include "i915_trace.h" #include "intel_context.h" @@ -1328,26 +1329,12 @@ static int ring_context_alloc(struct intel_context *ce) static int ring_context_pin(struct intel_context *ce) { - int err; - - err = intel_context_active_acquire(ce); - if (err) - return err; - - err = __context_pin_ppgtt(ce); - if (err) - goto err_active; - - return 0; - -err_active: - intel_context_active_release(ce); - return err; + return __context_pin_ppgtt(ce); } static void ring_context_reset(struct intel_context *ce) { - intel_ring_reset(ce->ring, 0); + intel_ring_reset(ce->ring, ce->ring->emit); } static const struct intel_context_ops ring_context_ops = { @@ -1394,7 +1381,7 @@ static int load_pd_dir(struct i915_request *rq, intel_ring_advance(rq, cs); - return 0; + return rq->engine->emit_flush(rq, EMIT_FLUSH); } static inline int mi_set_context(struct i915_request *rq, u32 flags) @@ -1408,14 +1395,6 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) int len; u32 *cs; - flags |= MI_MM_SPACE_GTT; - if (IS_HASWELL(i915)) - /* These flags are for resource streamer on HSW+ */ - flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN; - else - /* We need to save the extended state for powersaving modes */ - flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN; - len = 4; if (IS_GEN(i915, 7)) len += 2 + (num_engines ? 
4 * num_engines + 6 : 0); @@ -1592,7 +1571,7 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm) if (ret) return ret; - return rq->engine->emit_flush(rq, EMIT_FLUSH); + return rq->engine->emit_flush(rq, EMIT_INVALIDATE); } static int switch_context(struct i915_request *rq) @@ -1607,15 +1586,21 @@ static int switch_context(struct i915_request *rq) return ret; if (ce->state) { - u32 hw_flags; + u32 flags; GEM_BUG_ON(rq->engine->id != RCS0); - hw_flags = 0; - if (!test_bit(CONTEXT_VALID_BIT, &ce->flags)) - hw_flags = MI_RESTORE_INHIBIT; + /* For resource streamer on HSW+ and power context elsewhere */ + BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN); + BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN); + + flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT; + if (test_bit(CONTEXT_VALID_BIT, &ce->flags)) + flags |= MI_RESTORE_EXT_STATE_EN; + else + flags |= MI_RESTORE_INHIBIT; - ret = mi_set_context(rq, hw_flags); + ret = mi_set_context(rq, flags); if (ret) return ret; } @@ -1842,8 +1827,6 @@ static void setup_common(struct intel_engine_cs *engine) setup_irq(engine); - engine->release = ring_release; - engine->resume = xcs_resume; engine->reset.prepare = reset_prepare; engine->reset.rewind = reset_rewind; @@ -2009,6 +1992,9 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine) GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma); + /* Finally, take ownership and responsibility for cleanup! */ + engine->release = ring_release; + return 0; err_ring: diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index f232036c3c7a..d2a3d935d186 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -777,7 +777,7 @@ void intel_rps_boost(struct i915_request *rq) spin_lock_irqsave(&rq->lock, flags); if (!i915_request_has_waitboost(rq) && !dma_fence_is_signaled_locked(&rq->fence)) { - rq->flags |= I915_REQUEST_WAITBOOST; + set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags); if (!atomic_fetch_inc(&rps->num_waiters) && READ_ONCE(rps->cur_freq) < rps->boost_freq) diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index ee5dc4fbdeb9..87716529cd2f 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -348,7 +348,6 @@ void intel_timeline_enter(struct intel_timeline *tl) * use atomic to manipulate tl->active_count. */ lockdep_assert_held(&tl->mutex); - GEM_BUG_ON(!atomic_read(&tl->pin_count)); if (atomic_add_unless(&tl->active_count, 1, 0)) return; diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 195ccf7db272..4e292d4bf7b9 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -254,7 +254,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine, /* WaDisableDopClockGating:bdw * - * Also see the related UCGTCL1 write in broadwell_init_clock_gating() + * Also see the related UCGTCL1 write in bdw_init_clock_gating() * to disable EUTC clock gating. 
*/ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 4e1eafa94be9..a560b7eee2cd 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -149,7 +149,11 @@ static int mock_context_alloc(struct intel_context *ce) static int mock_context_pin(struct intel_context *ce) { - return intel_context_active_acquire(ce); + return 0; +} + +static void mock_context_reset(struct intel_context *ce) +{ } static const struct intel_context_ops mock_context_ops = { @@ -161,6 +165,7 @@ static const struct intel_context_ops mock_context_ops = { .enter = intel_context_enter_engine, .exit = intel_context_exit_engine, + .reset = mock_context_reset, .destroy = mock_context_destroy, }; diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 5dbda2a74272..3e5e6c86e843 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -1312,7 +1312,7 @@ static int igt_reset_evict_ppgtt(void *arg) if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL) return 0; - ppgtt = i915_ppgtt_create(gt->i915); + ppgtt = i915_ppgtt_create(gt); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -1498,7 +1498,7 @@ static int igt_handle_error(void *arg) struct intel_engine_cs *engine = gt->engine[RCS0]; struct hang h; struct i915_request *rq; - struct i915_gpu_state *error; + struct i915_gpu_coredump *error; int err; /* Check that we can issue a global GPU and engine reset */ diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 9ec9833c9c7b..15cda024e3e4 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -527,13 +527,19 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine) return rq; } -static void wait_for_submit(struct intel_engine_cs *engine, - struct i915_request *rq) +static int wait_for_submit(struct intel_engine_cs *engine, + struct i915_request *rq, + unsigned long timeout) { + timeout += jiffies; do { cond_resched(); intel_engine_flush_submission(engine); - } while (!i915_request_is_active(rq)); + if (i915_request_is_active(rq)) + return 0; + } while (time_before(jiffies, timeout)); + + return -ETIME; } static long timeslice_threshold(const struct intel_engine_cs *engine) @@ -601,7 +607,12 @@ static int live_timeslice_queue(void *arg) goto err_heartbeat; } engine->schedule(rq, &attr); - wait_for_submit(engine, rq); + err = wait_for_submit(engine, rq, HZ / 2); + if (err) { + pr_err("%s: Timed out trying to submit semaphores\n", + engine->name); + goto err_rq; + } /* ELSP[1]: nop request */ nop = nop_request(engine); @@ -609,8 +620,13 @@ static int live_timeslice_queue(void *arg) err = PTR_ERR(nop); goto err_rq; } - wait_for_submit(engine, nop); + err = wait_for_submit(engine, nop, HZ / 2); i915_request_put(nop); + if (err) { + pr_err("%s: Timed out trying to submit nop\n", + engine->name); + goto err_rq; + } GEM_BUG_ON(i915_request_completed(rq)); GEM_BUG_ON(execlists_active(&engine->execlists) != rq); @@ -1137,7 +1153,7 @@ static int live_nopreempt(void *arg) } /* Low priority client, but unpreemptable! 
*/ - rq_a->flags |= I915_REQUEST_NOPREEMPT; + __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags); i915_request_add(rq_a); if (!igt_wait_for_spinner(&a.spin, rq_a)) { @@ -3362,7 +3378,7 @@ static int live_lrc_layout(void *arg) struct intel_gt *gt = arg; struct intel_engine_cs *engine; enum intel_engine_id id; - u32 *mem; + u32 *lrc; int err; /* @@ -3370,13 +3386,13 @@ static int live_lrc_layout(void *arg) * match the layout saved by HW. */ - mem = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!mem) + lrc = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!lrc) return -ENOMEM; err = 0; for_each_engine(engine, gt, id) { - u32 *hw, *lrc; + u32 *hw; int dw; if (!engine->default_state) @@ -3390,8 +3406,7 @@ static int live_lrc_layout(void *arg) } hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); - lrc = memset(mem, 0, PAGE_SIZE); - execlists_init_reg_state(lrc, + execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE), engine->kernel_context, engine, engine->kernel_context->ring, @@ -3406,6 +3421,13 @@ static int live_lrc_layout(void *arg) continue; } + if (lrc[dw] == 0) { + pr_debug("%s: skipped instruction %x at dword %d\n", + engine->name, lri, dw); + dw++; + continue; + } + if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { pr_err("%s: Expected LRI command at dword %d, found %08x\n", engine->name, dw, lri); @@ -3454,7 +3476,7 @@ static int live_lrc_layout(void *arg) break; } - kfree(mem); + kfree(lrc); return err; } diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile b/drivers/gpu/drm/i915/gt/uc/Makefile deleted file mode 100644 index bec94d434cb6..000000000000 --- a/drivers/gpu/drm/i915/gt/uc/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# For building individual subdir files on the command line -subdir-ccflags-y += -I$(srctree)/$(src)/../.. - -# Extra header tests -header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 3ffc6267f96e..64934a876a50 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -12,6 +12,9 @@ #include "i915_drv.h" +static const struct intel_uc_ops uc_ops_off; +static const struct intel_uc_ops uc_ops_on; + /* Reset GuC providing us with fresh state for both GuC and HuC. 
*/ static int __intel_uc_reset_hw(struct intel_uc *uc) @@ -89,6 +92,11 @@ void intel_uc_init_early(struct intel_uc *uc) intel_huc_init_early(&uc->huc); __confirm_options(uc); + + if (intel_uc_uses_guc(uc)) + uc->ops = &uc_ops_on; + else + uc->ops = &uc_ops_off; } void intel_uc_driver_late_release(struct intel_uc *uc) @@ -245,12 +253,11 @@ static void guc_disable_communication(struct intel_guc *guc) DRM_INFO("GuC communication disabled\n"); } -void intel_uc_fetch_firmwares(struct intel_uc *uc) +static void __uc_fetch_firmwares(struct intel_uc *uc) { int err; - if (!intel_uc_uses_guc(uc)) - return; + GEM_BUG_ON(!intel_uc_uses_guc(uc)); err = intel_uc_fw_fetch(&uc->guc.fw); if (err) @@ -260,20 +267,19 @@ void intel_uc_fetch_firmwares(struct intel_uc *uc) intel_uc_fw_fetch(&uc->huc.fw); } -void intel_uc_cleanup_firmwares(struct intel_uc *uc) +static void __uc_cleanup_firmwares(struct intel_uc *uc) { intel_uc_fw_cleanup_fetch(&uc->huc.fw); intel_uc_fw_cleanup_fetch(&uc->guc.fw); } -void intel_uc_init(struct intel_uc *uc) +static void __uc_init(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; struct intel_huc *huc = &uc->huc; int ret; - if (!intel_uc_uses_guc(uc)) - return; + GEM_BUG_ON(!intel_uc_uses_guc(uc)); /* XXX: GuC submission is unavailable for now */ GEM_BUG_ON(intel_uc_supports_guc_submission(uc)); @@ -288,7 +294,7 @@ void intel_uc_init(struct intel_uc *uc) intel_huc_init(huc); } -void intel_uc_fini(struct intel_uc *uc) +static void __uc_fini(struct intel_uc *uc) { intel_huc_fini(&uc->huc); intel_guc_fini(&uc->guc); @@ -309,14 +315,6 @@ static int __uc_sanitize(struct intel_uc *uc) return __intel_uc_reset_hw(uc); } -void intel_uc_sanitize(struct intel_uc *uc) -{ - if (!intel_uc_supports_guc(uc)) - return; - - __uc_sanitize(uc); -} - /* Initialize and verify the uC regs related to uC positioning in WOPCM */ static int uc_init_wopcm(struct intel_uc *uc) { @@ -380,13 +378,8 @@ static bool uc_is_wopcm_locked(struct intel_uc *uc) (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID); } -int intel_uc_init_hw(struct intel_uc *uc) +static int __uc_check_hw(struct intel_uc *uc) { - struct drm_i915_private *i915 = uc_to_gt(uc)->i915; - struct intel_guc *guc = &uc->guc; - struct intel_huc *huc = &uc->huc; - int ret, attempts; - if (!intel_uc_supports_guc(uc)) return 0; @@ -395,11 +388,24 @@ int intel_uc_init_hw(struct intel_uc *uc) * before on this system after reboot, otherwise we risk GPU hangs. * To check if GuC was loaded before we look at WOPCM registers. */ - if (!intel_uc_uses_guc(uc) && !uc_is_wopcm_locked(uc)) - return 0; + if (uc_is_wopcm_locked(uc)) + return -EIO; + + return 0; +} + +static int __uc_init_hw(struct intel_uc *uc) +{ + struct drm_i915_private *i915 = uc_to_gt(uc)->i915; + struct intel_guc *guc = &uc->guc; + struct intel_huc *huc = &uc->huc; + int ret, attempts; + + GEM_BUG_ON(!intel_uc_supports_guc(uc)); + GEM_BUG_ON(!intel_uc_uses_guc(uc)); if (!intel_uc_fw_is_available(&guc->fw)) { - ret = uc_is_wopcm_locked(uc) || + ret = __uc_check_hw(uc) || intel_uc_fw_is_overridden(&guc->fw) || intel_uc_supports_guc_submission(uc) ? 
intel_uc_fw_status_to_error(guc->fw.status) : 0; @@ -495,7 +501,7 @@ err_out: return -EIO; } -void intel_uc_fini_hw(struct intel_uc *uc) +static void __uc_fini_hw(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; @@ -595,3 +601,20 @@ int intel_uc_runtime_resume(struct intel_uc *uc) */ return __uc_resume(uc, true); } + +static const struct intel_uc_ops uc_ops_off = { + .init_hw = __uc_check_hw, +}; + +static const struct intel_uc_ops uc_ops_on = { + .sanitize = __uc_sanitize, + + .init_fw = __uc_fetch_firmwares, + .fini_fw = __uc_cleanup_firmwares, + + .init = __uc_init, + .fini = __uc_fini, + + .init_hw = __uc_init_hw, + .fini_hw = __uc_fini_hw, +}; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index 527995c21196..49c913524686 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -10,7 +10,20 @@ #include "intel_huc.h" #include "i915_params.h" +struct intel_uc; + +struct intel_uc_ops { + int (*sanitize)(struct intel_uc *uc); + void (*init_fw)(struct intel_uc *uc); + void (*fini_fw)(struct intel_uc *uc); + void (*init)(struct intel_uc *uc); + void (*fini)(struct intel_uc *uc); + int (*init_hw)(struct intel_uc *uc); + void (*fini_hw)(struct intel_uc *uc); +}; + struct intel_uc { + struct intel_uc_ops const *ops; struct intel_guc guc; struct intel_huc huc; @@ -21,13 +34,6 @@ struct intel_uc { void intel_uc_init_early(struct intel_uc *uc); void intel_uc_driver_late_release(struct intel_uc *uc); void intel_uc_init_mmio(struct intel_uc *uc); -void intel_uc_fetch_firmwares(struct intel_uc *uc); -void intel_uc_cleanup_firmwares(struct intel_uc *uc); -void intel_uc_sanitize(struct intel_uc *uc); -void intel_uc_init(struct intel_uc *uc); -int intel_uc_init_hw(struct intel_uc *uc); -void intel_uc_fini_hw(struct intel_uc *uc); -void intel_uc_fini(struct intel_uc *uc); void intel_uc_reset_prepare(struct intel_uc *uc); void intel_uc_suspend(struct intel_uc *uc); void intel_uc_runtime_suspend(struct intel_uc *uc); @@ -64,4 +70,20 @@ static inline bool intel_uc_uses_huc(struct intel_uc *uc) return intel_huc_is_enabled(&uc->huc); } +#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \ +static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \ +{ \ + if (uc->ops->_OPS) \ + return uc->ops->_OPS(uc); \ + return _RET; \ +} +intel_uc_ops_function(sanitize, sanitize, int, 0); +intel_uc_ops_function(fetch_firmwares, init_fw, void, ); +intel_uc_ops_function(cleanup_firmwares, fini_fw, void, ); +intel_uc_ops_function(init, init, void, ); +intel_uc_ops_function(fini, fini, void, ); +intel_uc_ops_function(init_hw, init_hw, int, 0); +intel_uc_ops_function(fini_hw, fini_hw, void, ); +#undef intel_uc_ops_function + #endif diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index bb9fe6bf5275..21af822a79e0 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2675,7 +2675,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) return 0; } -static int init_broadwell_mmio_info(struct intel_gvt *gvt) +static int init_bdw_mmio_info(struct intel_gvt *gvt) { struct drm_i915_private *dev_priv = gvt->dev_priv; int ret; @@ -3364,20 +3364,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) goto err; if (IS_BROADWELL(dev_priv)) { - ret = init_broadwell_mmio_info(gvt); + ret = init_bdw_mmio_info(gvt); if (ret) goto err; } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { - ret = init_broadwell_mmio_info(gvt); + 
ret = init_bdw_mmio_info(gvt); if (ret) goto err; ret = init_skl_mmio_info(gvt); if (ret) goto err; } else if (IS_BROXTON(dev_priv)) { - ret = init_broadwell_mmio_info(gvt); + ret = init_bdw_mmio_info(gvt); if (ret) goto err; ret = init_skl_mmio_info(gvt); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index b3299f88e24e..685d1e04a5ff 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1224,7 +1224,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) enum intel_engine_id i; int ret; - ppgtt = i915_ppgtt_create(i915); + ppgtt = i915_ppgtt_create(&i915->gt); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index cfe09964622b..f3da5c06f331 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -605,12 +605,15 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, struct intel_engine_cs *engine) { intel_engine_mask_t tmp, mask = engine->mask; + struct llist_node *pos = NULL, *next; struct intel_gt *gt = engine->gt; - struct llist_node *pos, *next; int err; GEM_BUG_ON(i915_active_is_idle(ref)); - GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers)); + + /* Wait until the previous preallocation is completed */ + while (!llist_empty(&ref->preallocated_barriers)) + cond_resched(); /* * Preallocate a node for each physical engine supporting the target @@ -653,16 +656,24 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN)); GEM_BUG_ON(barrier_to_engine(node) != engine); - llist_add(barrier_to_ll(node), &ref->preallocated_barriers); + next = barrier_to_ll(node); + next->next = pos; + if (!pos) + pos = next; intel_engine_pm_get(engine); } + GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers)); + llist_add_batch(next, pos, &ref->preallocated_barriers); + return 0; unwind: - llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { + while (pos) { struct active_node *node = barrier_from_ll(pos); + pos = pos->next; + atomic_dec(&ref->count); intel_engine_pm_put(barrier_to_engine(node)); diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c index e9d4200ce3bc..66883af64ca1 100644 --- a/drivers/gpu/drm/i915/i915_buddy.c +++ b/drivers/gpu/drm/i915/i915_buddy.c @@ -262,8 +262,10 @@ void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects) { struct i915_buddy_block *block, *on; - list_for_each_entry_safe(block, on, objects, link) + list_for_each_entry_safe(block, on, objects, link) { i915_buddy_free(mm, block); + cond_resched(); + } INIT_LIST_HEAD(objects); } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d28468eaed57..d5a9b8a964c2 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -321,16 +321,15 @@ static void print_context_stats(struct seq_file *m, for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { - intel_context_lock_pinned(ce); - if (intel_context_is_pinned(ce)) { + if (intel_context_pin_if_active(ce)) { rcu_read_lock(); if (ce->state) per_file_stats(0, ce->state->obj, &kstats); per_file_stats(0, ce->ring->vma->obj, &kstats); rcu_read_unlock(); + intel_context_unpin(ce); } - intel_context_unlock_pinned(ce); } i915_gem_context_unlock_engines(ctx); @@ -367,12 +366,16 @@ static void print_context_stats(struct seq_file *m, static int 
i915_gem_object_info(struct seq_file *m, void *data) { struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_memory_region *mr; + enum intel_region_id id; seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n", i915->mm.shrink_count, atomic_read(&i915->mm.free_count), i915->mm.shrink_memory); - + for_each_memory_region(mr, i915, id) + seq_printf(m, "%s: total:%pa, available:%pa bytes\n", + mr->name, &mr->total, &mr->avail); seq_putc(m, '\n'); print_context_stats(m, i915); @@ -682,7 +685,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) static ssize_t gpu_state_read(struct file *file, char __user *ubuf, size_t count, loff_t *pos) { - struct i915_gpu_state *error; + struct i915_gpu_coredump *error; ssize_t ret; void *buf; @@ -695,7 +698,7 @@ static ssize_t gpu_state_read(struct file *file, char __user *ubuf, if (!buf) return -ENOMEM; - ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count); + ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count); if (ret <= 0) goto out; @@ -711,19 +714,19 @@ out: static int gpu_state_release(struct inode *inode, struct file *file) { - i915_gpu_state_put(file->private_data); + i915_gpu_coredump_put(file->private_data); return 0; } static int i915_gpu_info_open(struct inode *inode, struct file *file) { struct drm_i915_private *i915 = inode->i_private; - struct i915_gpu_state *gpu; + struct i915_gpu_coredump *gpu; intel_wakeref_t wakeref; gpu = NULL; with_intel_runtime_pm(&i915->runtime_pm, wakeref) - gpu = i915_capture_gpu_state(i915); + gpu = i915_gpu_coredump(i915); if (IS_ERR(gpu)) return PTR_ERR(gpu); @@ -745,7 +748,7 @@ i915_error_state_write(struct file *filp, size_t cnt, loff_t *ppos) { - struct i915_gpu_state *error = filp->private_data; + struct i915_gpu_coredump *error = filp->private_data; if (!error) return 0; @@ -758,7 +761,7 @@ i915_error_state_write(struct file *filp, static int i915_error_state_open(struct inode *inode, struct file *file) { - struct i915_gpu_state *error; + struct i915_gpu_coredump *error; error = i915_first_error_state(inode->i_private); if (IS_ERR(error)) @@ -1001,7 +1004,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) return ret; } -static int ironlake_drpc_info(struct seq_file *m) +static int ilk_drpc_info(struct seq_file *m) { struct drm_i915_private *i915 = node_to_i915(m->private); struct intel_uncore *uncore = &i915->uncore; @@ -1209,7 +1212,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused) else if (INTEL_GEN(dev_priv) >= 6) err = gen6_drpc_info(m); else - err = ironlake_drpc_info(m); + err = ilk_drpc_info(m); } return err; @@ -1509,15 +1512,14 @@ static int i915_context_status(struct seq_file *m, void *unused) for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { - intel_context_lock_pinned(ce); - if (intel_context_is_pinned(ce)) { + if (intel_context_pin_if_active(ce)) { seq_printf(m, "%s: ", ce->engine->name); if (ce->state) describe_obj(m, ce->state->obj); describe_ctx_ring(m, ce->ring); seq_putc(m, '\n'); + intel_context_unpin(ce); } - intel_context_unlock_pinned(ce); } i915_gem_context_unlock_engines(ctx); @@ -1977,7 +1979,7 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) struct drm_connector *connector = m->private; struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_dp *intel_dp = - enc_to_intel_dp(&intel_attached_encoder(connector)->base); + enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector))); int ret; if (!CAN_PSR(dev_priv)) { @@ 
-2389,7 +2391,7 @@ static void intel_dp_info(struct seq_file *m, struct intel_connector *intel_connector) { struct intel_encoder *intel_encoder = intel_connector->encoder; - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); @@ -2409,7 +2411,7 @@ static void intel_dp_mst_info(struct seq_file *m, { struct intel_encoder *intel_encoder = intel_connector->encoder; struct intel_dp_mst_encoder *intel_mst = - enc_to_mst(&intel_encoder->base); + enc_to_mst(intel_encoder); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, @@ -2422,7 +2424,7 @@ static void intel_hdmi_info(struct seq_file *m, struct intel_connector *intel_connector) { struct intel_encoder *intel_encoder = intel_connector->encoder; - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder); seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); if (intel_connector->hdcp.shim) { @@ -3012,11 +3014,11 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; - intel_encoder = intel_attached_encoder(connector); + intel_encoder = intel_attached_encoder(to_intel_connector(connector)); if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) continue; - intel_dig_port = enc_to_dig_port(&intel_encoder->base); + intel_dig_port = enc_to_dig_port(intel_encoder); if (!intel_dig_port->dp.can_mst) continue; @@ -3066,7 +3068,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file, continue; if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); status = kstrtoint(input_buffer, 10, &val); if (status < 0) break; @@ -3075,9 +3077,9 @@ static ssize_t i915_displayport_test_active_write(struct file *file, * testing code, only accept an actual value of 1 here */ if (val == 1) - intel_dp->compliance.test_active = 1; + intel_dp->compliance.test_active = true; else - intel_dp->compliance.test_active = 0; + intel_dp->compliance.test_active = false; } } drm_connector_list_iter_end(&conn_iter); @@ -3110,7 +3112,7 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data) continue; if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); if (intel_dp->compliance.test_active) seq_puts(m, "1"); else @@ -3160,7 +3162,7 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data) continue; if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); if (intel_dp->compliance.test_type == DP_TEST_LINK_EDID_READ) seq_printf(m, "%lx", @@ -3204,7 +3206,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data) continue; if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); seq_printf(m, "%02lx", intel_dp->compliance.test_type); } else seq_puts(m, "0"); @@ -3815,8 +3817,8 @@ static void 
gen9_sseu_device_status(struct drm_i915_private *dev_priv, #undef SS_MAX } -static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, - struct sseu_dev_info *sseu) +static void bdw_sseu_device_status(struct drm_i915_private *dev_priv, + struct sseu_dev_info *sseu) { const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); @@ -3901,7 +3903,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused) if (IS_CHERRYVIEW(dev_priv)) cherryview_sseu_device_status(dev_priv, &sseu); else if (IS_BROADWELL(dev_priv)) - broadwell_sseu_device_status(dev_priv, &sseu); + bdw_sseu_device_status(dev_priv, &sseu); else if (IS_GEN(dev_priv, 9)) gen9_sseu_device_status(dev_priv, &sseu); else if (INTEL_GEN(dev_priv) >= 10) @@ -4142,14 +4144,14 @@ static int i915_drrs_ctl_set(void *data, u64 val) drm_connector_mask(connector))) continue; - encoder = intel_attached_encoder(connector); + encoder = intel_attached_encoder(to_intel_connector(connector)); if (encoder->type != INTEL_OUTPUT_EDP) continue; DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n", val ? "en" : "dis", val); - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(encoder); if (val) intel_edp_drrs_enable(intel_dp, crtc_state); @@ -4353,7 +4355,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct intel_dp *intel_dp = - enc_to_intel_dp(&intel_attached_encoder(connector)->base); + enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector))); u8 buf[16]; ssize_t err; int i; @@ -4388,7 +4390,7 @@ static int i915_panel_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct intel_dp *intel_dp = - enc_to_intel_dp(&intel_attached_encoder(connector)->base); + enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector))); if (connector->status != connector_status_connected) return -ENODEV; @@ -4466,7 +4468,7 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data) } else if (ret) { break; } - intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base); + intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector))); crtc_state = to_intel_crtc_state(crtc->state); seq_printf(m, "DSC_Enabled: %s\n", yesno(crtc_state->dsc.compression_enable)); @@ -4493,8 +4495,8 @@ static ssize_t i915_dsc_fec_support_write(struct file *file, int ret; struct drm_connector *connector = ((struct seq_file *)file->private_data)->private; - struct intel_encoder *encoder = intel_attached_encoder(connector); - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (len == 0) return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 59525094d0e3..f7385abdd74b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -469,6 +469,12 @@ static void vlv_free_s0ix_state(struct drm_i915_private *i915) i915->vlv_s0ix_state = NULL; } +static void sanitize_gpu(struct drm_i915_private *i915) +{ + if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) + __intel_gt_reset(&i915->gt, ALL_ENGINES); +} + /** * i915_driver_early_probe - setup state not requiring device access * @dev_priv: device private @@ -602,6 +608,9 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) if (ret) goto err_uncore; + /* As early as 
possible, scrub existing GPU state before clobbering */ + sanitize_gpu(dev_priv); + return 0; err_uncore: @@ -1817,7 +1826,7 @@ static int i915_drm_resume(struct drm_device *dev) disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - intel_gt_sanitize(&dev_priv->gt, true); + sanitize_gpu(dev_priv); ret = i915_ggtt_enable_hw(dev_priv); if (ret) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d05a968227f7..077af22b8340 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -46,6 +46,7 @@ #include <linux/dma-resv.h> #include <linux/shmem_fs.h> #include <linux/stackdepot.h> +#include <linux/xarray.h> #include <drm/intel-gtt.h> #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ @@ -110,8 +111,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20191223" -#define DRIVER_TIMESTAMP 1577120893 +#define DRIVER_DATE "20200114" +#define DRIVER_TIMESTAMP 1579001978 struct drm_i915_gem_object; @@ -201,8 +202,7 @@ struct drm_i915_file_private { struct list_head request_list; } mm; - struct idr context_idr; - struct mutex context_idr_lock; /* guards context_idr */ + struct xarray context_xa; struct idr vm_idr; struct mutex vm_idr_lock; /* guards vm_idr */ @@ -505,6 +505,7 @@ struct i915_psr { bool dc3co_enabled; u32 dc3co_exit_delay; struct delayed_work idle_work; + bool initially_probed; }; #define QUIRK_LVDS_SSC_DISABLE (1<<1) @@ -1252,6 +1253,16 @@ struct drm_i915_private { struct llist_head free_list; struct work_struct free_work; } contexts; + + /* + * We replace the local file with a global mappings as the + * backing storage for the mmap is on the device and not + * on the struct file, and we do not want to prolong the + * lifetime of the local fd. To minimise the number of + * anonymous inodes we create, we use a global singleton to + * share the global mapping. 
+ */ + struct file *mmap_singleton; } gem; u8 pch_ssc_use; @@ -1657,8 +1668,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9)) /* WaRsDisableCoarsePowerGating:skl,cnl */ -#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ - IS_GEN_RANGE(dev_priv, 9, 10) +#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ + (IS_CANNONLAKE(dev_priv) || \ + IS_SKL_GT3(dev_priv) || \ + IS_SKL_GT4(dev_priv)) #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \ @@ -1861,7 +1874,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) } static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, - struct intel_engine_cs *engine) + const struct intel_engine_cs *engine) { return atomic_read(&error->reset_engine_count[engine->uabi_class]); } @@ -1889,7 +1902,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags) static inline struct i915_gem_context * __i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id) { - return idr_find(&file_priv->context_idr, id); + return xa_load(&file_priv->context_xa, id); } static inline struct i915_gem_context * @@ -2015,6 +2028,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data, int remap_io_mapping(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, struct io_mapping *iomap); +int remap_io_sg(struct vm_area_struct *vma, + unsigned long addr, unsigned long size, + struct scatterlist *sgl, resource_size_t iobase); static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9ddcf17230e6..94f993e4c12f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -45,6 +45,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_mman.h" +#include "gem/i915_gem_region.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" @@ -200,7 +201,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, static int i915_gem_create(struct drm_file *file, - struct drm_i915_private *dev_priv, + struct intel_memory_region *mr, u64 *size_p, u32 *handle_p) { @@ -209,12 +210,16 @@ i915_gem_create(struct drm_file *file, u64 size; int ret; - size = round_up(*size_p, PAGE_SIZE); + GEM_BUG_ON(!is_power_of_2(mr->min_page_size)); + size = round_up(*size_p, mr->min_page_size); if (size == 0) return -EINVAL; + /* For most of the ABI (e.g. 
mmap) we think in system pages */ + GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); + /* Allocate the new object */ - obj = i915_gem_object_create_shmem(dev_priv, size); + obj = i915_gem_object_create_region(mr, size, 0); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -234,6 +239,7 @@ i915_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { + enum intel_memory_type mem_type; int cpp = DIV_ROUND_UP(args->bpp, 8); u32 format; @@ -260,7 +266,14 @@ i915_gem_dumb_create(struct drm_file *file, args->pitch = ALIGN(args->pitch, 4096); args->size = args->pitch * args->height; - return i915_gem_create(file, to_i915(dev), + + mem_type = INTEL_MEMORY_SYSTEM; + if (HAS_LMEM(to_i915(dev))) + mem_type = INTEL_MEMORY_LOCAL; + + return i915_gem_create(file, + intel_memory_region_by_type(to_i915(dev), + mem_type), &args->size, &args->handle); } @@ -274,12 +287,14 @@ int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_create *args = data; - i915_gem_flush_free_objects(dev_priv); + i915_gem_flush_free_objects(i915); - return i915_gem_create(file, dev_priv, + return i915_gem_create(file, + intel_memory_region_by_type(i915, + INTEL_MEMORY_SYSTEM), &args->size, &args->handle); } @@ -1172,6 +1187,8 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv) void i915_gem_driver_release(struct drm_i915_private *dev_priv) { + i915_gem_driver_release__contexts(dev_priv); + intel_gt_driver_release(&dev_priv->gt); intel_wa_list_free(&dev_priv->gt_wa_list); @@ -1179,8 +1196,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv) intel_uc_cleanup_firmwares(&dev_priv->gt.uc); i915_gem_cleanup_userptr(dev_priv); - i915_gem_driver_release__contexts(dev_priv); - i915_gem_drain_freed_objects(dev_priv); WARN_ON(!list_empty(&dev_priv->gem.contexts.list)); diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 71efccfde122..d9c34a23cd67 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -412,6 +412,9 @@ int i915_vma_pin_fence(struct i915_vma *vma) { int err; + if (!vma->fence && !i915_gem_object_is_tiled(vma->obj)) + return 0; + /* * Note that we revoke fences on runtime suspend. Therefore the user * must keep the device awake whilst using the fence. diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1efe58ad0ce9..e039eb56900f 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1,26 +1,7 @@ +// SPDX-License-Identifier: MIT /* * Copyright © 2010 Daniel Vetter - * Copyright © 2011-2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * + * Copyright © 2020 Intel Corporation */ #include <linux/slab.h> /* fault-inject.h is not standalone! */ @@ -45,2116 +26,6 @@ #include "i915_trace.h" #include "i915_vgpu.h" -#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) - -#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT) -#define DBG(...) trace_printk(__VA_ARGS__) -#else -#define DBG(...) -#endif - -#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */ - -/** - * DOC: Global GTT views - * - * Background and previous state - * - * Historically objects could exists (be bound) in global GTT space only as - * singular instances with a view representing all of the object's backing pages - * in a linear fashion. This view will be called a normal view. - * - * To support multiple views of the same object, where the number of mapped - * pages is not equal to the backing store, or where the layout of the pages - * is not linear, concept of a GGTT view was added. - * - * One example of an alternative view is a stereo display driven by a single - * image. In this case we would have a framebuffer looking like this - * (2x2 pages): - * - * 12 - * 34 - * - * Above would represent a normal GGTT view as normally mapped for GPU or CPU - * rendering. In contrast, fed to the display engine would be an alternative - * view which could look something like this: - * - * 1212 - * 3434 - * - * In this example both the size and layout of pages in the alternative view is - * different from the normal view. - * - * Implementation and usage - * - * GGTT views are implemented using VMAs and are distinguished via enum - * i915_ggtt_view_type and struct i915_ggtt_view. - * - * A new flavour of core GEM functions which work with GGTT bound objects were - * added with the _ggtt_ infix, and sometimes with _view postfix to avoid - * renaming in large amounts of code. They take the struct i915_ggtt_view - * parameter encapsulating all metadata required to implement a view. - * - * As a helper for callers which are only interested in the normal view, - * globally const i915_ggtt_view_normal singleton instance exists. All old core - * GEM API functions, the ones not taking the view parameter, are operating on, - * or with the normal GGTT view. - * - * Code wanting to add or use a new GGTT view needs to: - * - * 1. Add a new enum with a suitable name. - * 2. Extend the metadata in the i915_ggtt_view structure if required. - * 3. Add support to i915_get_vma_pages(). - * - * New views are required to build a scatter-gather table from within the - * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and - * exists for the lifetime of an VMA. - * - * Core API is designed to have copy semantics which means that passed in - * struct i915_ggtt_view does not need to be persistent (left around after - * calling the core API functions). 
- * - */ - -#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt) - -static int -i915_get_ggtt_vma_pages(struct i915_vma *vma); - -static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) -{ - struct intel_uncore *uncore = ggtt->vm.gt->uncore; - - spin_lock_irq(&uncore->lock); - intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); - intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6); - spin_unlock_irq(&uncore->lock); -} - -static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt) -{ - struct intel_uncore *uncore = ggtt->vm.gt->uncore; - - /* - * Note that as an uncached mmio write, this will flush the - * WCB of the writes into the GGTT before it triggers the invalidate. - */ - intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); -} - -static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) -{ - struct intel_uncore *uncore = ggtt->vm.gt->uncore; - struct drm_i915_private *i915 = ggtt->vm.i915; - - gen8_ggtt_invalidate(ggtt); - - if (INTEL_GEN(i915) >= 12) - intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR, - GEN12_GUC_TLB_INV_CR_INVALIDATE); - else - intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); -} - -static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) -{ - intel_gtt_chipset_flush(); -} - -static int ppgtt_bind_vma(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) -{ - u32 pte_flags; - int err; - - if (flags & I915_VMA_ALLOC) { - err = vma->vm->allocate_va_range(vma->vm, - vma->node.start, vma->size); - if (err) - return err; - - set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)); - } - - /* Applicable to VLV, and gen8+ */ - pte_flags = 0; - if (i915_gem_object_is_readonly(vma->obj)) - pte_flags |= PTE_READ_ONLY; - - GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))); - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - wmb(); - - return 0; -} - -static void ppgtt_unbind_vma(struct i915_vma *vma) -{ - if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) - vma->vm->clear_range(vma->vm, vma->node.start, vma->size); -} - -static int ppgtt_set_pages(struct i915_vma *vma) -{ - GEM_BUG_ON(vma->pages); - - vma->pages = vma->obj->mm.pages; - - vma->page_sizes = vma->obj->mm.page_sizes; - - return 0; -} - -static void clear_pages(struct i915_vma *vma) -{ - GEM_BUG_ON(!vma->pages); - - if (vma->pages != vma->obj->mm.pages) { - sg_free_table(vma->pages); - kfree(vma->pages); - } - vma->pages = NULL; - - memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); -} - -static u64 gen8_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; - - if (unlikely(flags & PTE_READ_ONLY)) - pte &= ~_PAGE_RW; - - switch (level) { - case I915_CACHE_NONE: - pte |= PPAT_UNCACHED; - break; - case I915_CACHE_WT: - pte |= PPAT_DISPLAY_ELLC; - break; - default: - pte |= PPAT_CACHED; - break; - } - - return pte; -} - -static u64 gen8_pde_encode(const dma_addr_t addr, - const enum i915_cache_level level) -{ - u64 pde = _PAGE_PRESENT | _PAGE_RW; - pde |= addr; - if (level != I915_CACHE_NONE) - pde |= PPAT_CACHED_PDE; - else - pde |= PPAT_UNCACHED; - return pde; -} - -static u64 snb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_VALID; - pte |= GEN6_PTE_ADDR_ENCODE(addr); - - switch (level) { - case I915_CACHE_L3_LLC: - case I915_CACHE_LLC: - pte |= GEN6_PTE_CACHE_LLC; - break; - case I915_CACHE_NONE: - pte |= GEN6_PTE_UNCACHED; - break; - default: - 
MISSING_CASE(level); - } - - return pte; -} - -static u64 ivb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_VALID; - pte |= GEN6_PTE_ADDR_ENCODE(addr); - - switch (level) { - case I915_CACHE_L3_LLC: - pte |= GEN7_PTE_CACHE_L3_LLC; - break; - case I915_CACHE_LLC: - pte |= GEN6_PTE_CACHE_LLC; - break; - case I915_CACHE_NONE: - pte |= GEN6_PTE_UNCACHED; - break; - default: - MISSING_CASE(level); - } - - return pte; -} - -static u64 byt_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_VALID; - pte |= GEN6_PTE_ADDR_ENCODE(addr); - - if (!(flags & PTE_READ_ONLY)) - pte |= BYT_PTE_WRITEABLE; - - if (level != I915_CACHE_NONE) - pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; - - return pte; -} - -static u64 hsw_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_VALID; - pte |= HSW_PTE_ADDR_ENCODE(addr); - - if (level != I915_CACHE_NONE) - pte |= HSW_WB_LLC_AGE3; - - return pte; -} - -static u64 iris_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen6_pte_t pte = GEN6_PTE_VALID; - pte |= HSW_PTE_ADDR_ENCODE(addr); - - switch (level) { - case I915_CACHE_NONE: - break; - case I915_CACHE_WT: - pte |= HSW_WT_ELLC_LLC_AGE3; - break; - default: - pte |= HSW_WB_ELLC_LLC_AGE3; - break; - } - - return pte; -} - -static void stash_init(struct pagestash *stash) -{ - pagevec_init(&stash->pvec); - spin_lock_init(&stash->lock); -} - -static struct page *stash_pop_page(struct pagestash *stash) -{ - struct page *page = NULL; - - spin_lock(&stash->lock); - if (likely(stash->pvec.nr)) - page = stash->pvec.pages[--stash->pvec.nr]; - spin_unlock(&stash->lock); - - return page; -} - -static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec) -{ - unsigned int nr; - - spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING); - - nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec)); - memcpy(stash->pvec.pages + stash->pvec.nr, - pvec->pages + pvec->nr - nr, - sizeof(pvec->pages[0]) * nr); - stash->pvec.nr += nr; - - spin_unlock(&stash->lock); - - pvec->nr -= nr; -} - -static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) -{ - struct pagevec stack; - struct page *page; - - if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) - i915_gem_shrink_all(vm->i915); - - page = stash_pop_page(&vm->free_pages); - if (page) - return page; - - if (!vm->pt_kmap_wc) - return alloc_page(gfp); - - /* Look in our global stash of WC pages... */ - page = stash_pop_page(&vm->i915->mm.wc_stash); - if (page) - return page; - - /* - * Otherwise batch allocate pages to amortize cost of set_pages_wc. - * - * We have to be careful as page allocation may trigger the shrinker - * (via direct reclaim) which will fill up the WC stash underneath us. - * So we add our WB pages into a temporary pvec on the stack and merge - * them into the WC stash after all the allocations are complete. 
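/*
 * Standalone model of the batching described in the comment above: allocate
 * a small stack of pages, convert them to write-combining in one call, hand
 * one out and stash the spares so later allocations skip the expensive
 * conversion. alloc_wc_page(), fake_set_pages_wc() and wc_stash are
 * hypothetical stand-ins, not the driver's stash implementation.
 */
#include <stdio.h>
#include <stdlib.h>

#define BATCH   3

static void *wc_stash[16];
static int wc_stash_nr;

static void fake_set_pages_wc(void **pages, int nr)
{
        /* stands in for set_pages_array_wc(): one call for the whole batch */
        (void)pages;
        printf("converted %d pages to WC in a single call\n", nr);
}

static void *alloc_wc_page(void)
{
        void *batch[BATCH];
        int i;

        if (wc_stash_nr)                /* fast path: reuse a stashed WC page */
                return wc_stash[--wc_stash_nr];

        for (i = 0; i < BATCH; i++)     /* slow path: batch the allocation... */
                batch[i] = malloc(4096);
        fake_set_pages_wc(batch, BATCH);        /* ...and convert only once */

        while (i > 1)                   /* keep the spares for later callers */
                wc_stash[wc_stash_nr++] = batch[--i];
        return batch[0];
}

int main(void)
{
        void *a = alloc_wc_page();      /* triggers one batched conversion */
        void *b = alloc_wc_page();      /* served from the stash, no conversion */

        free(a);
        free(b);
        while (wc_stash_nr)
                free(wc_stash[--wc_stash_nr]);
        return 0;
}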
- */ - pagevec_init(&stack); - do { - struct page *page; - - page = alloc_page(gfp); - if (unlikely(!page)) - break; - - stack.pages[stack.nr++] = page; - } while (pagevec_space(&stack)); - - if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) { - page = stack.pages[--stack.nr]; - - /* Merge spare WC pages to the global stash */ - if (stack.nr) - stash_push_pagevec(&vm->i915->mm.wc_stash, &stack); - - /* Push any surplus WC pages onto the local VM stash */ - if (stack.nr) - stash_push_pagevec(&vm->free_pages, &stack); - } - - /* Return unwanted leftovers */ - if (unlikely(stack.nr)) { - WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr)); - __pagevec_release(&stack); - } - - return page; -} - -static void vm_free_pages_release(struct i915_address_space *vm, - bool immediate) -{ - struct pagevec *pvec = &vm->free_pages.pvec; - struct pagevec stack; - - lockdep_assert_held(&vm->free_pages.lock); - GEM_BUG_ON(!pagevec_count(pvec)); - - if (vm->pt_kmap_wc) { - /* - * When we use WC, first fill up the global stash and then - * only if full immediately free the overflow. - */ - stash_push_pagevec(&vm->i915->mm.wc_stash, pvec); - - /* - * As we have made some room in the VM's free_pages, - * we can wait for it to fill again. Unless we are - * inside i915_address_space_fini() and must - * immediately release the pages! - */ - if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1)) - return; - - /* - * We have to drop the lock to allow ourselves to sleep, - * so take a copy of the pvec and clear the stash for - * others to use it as we sleep. - */ - stack = *pvec; - pagevec_reinit(pvec); - spin_unlock(&vm->free_pages.lock); - - pvec = &stack; - set_pages_array_wb(pvec->pages, pvec->nr); - - spin_lock(&vm->free_pages.lock); - } - - __pagevec_release(pvec); -} - -static void vm_free_page(struct i915_address_space *vm, struct page *page) -{ - /* - * On !llc, we need to change the pages back to WB. We only do so - * in bulk, so we rarely need to change the page attributes here, - * but doing so requires a stop_machine() from deep inside arch/x86/mm. - * To make detection of the possible sleep more likely, use an - * unconditional might_sleep() for everybody. 
- */ - might_sleep(); - spin_lock(&vm->free_pages.lock); - while (!pagevec_space(&vm->free_pages.pvec)) - vm_free_pages_release(vm, false); - GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE); - pagevec_add(&vm->free_pages.pvec, page); - spin_unlock(&vm->free_pages.lock); -} - -static void i915_address_space_fini(struct i915_address_space *vm) -{ - spin_lock(&vm->free_pages.lock); - if (pagevec_count(&vm->free_pages.pvec)) - vm_free_pages_release(vm, true); - GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); - spin_unlock(&vm->free_pages.lock); - - drm_mm_takedown(&vm->mm); - - mutex_destroy(&vm->mutex); -} - -void __i915_vm_close(struct i915_address_space *vm) -{ - struct i915_vma *vma, *vn; - - mutex_lock(&vm->mutex); - list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { - struct drm_i915_gem_object *obj = vma->obj; - - /* Keep the obj (and hence the vma) alive as _we_ destroy it */ - if (!kref_get_unless_zero(&obj->base.refcount)) - continue; - - atomic_and(~I915_VMA_PIN_MASK, &vma->flags); - WARN_ON(__i915_vma_unbind(vma)); - __i915_vma_put(vma); - - i915_gem_object_put(obj); - } - GEM_BUG_ON(!list_empty(&vm->bound_list)); - mutex_unlock(&vm->mutex); -} - -static void __i915_vm_release(struct work_struct *work) -{ - struct i915_address_space *vm = - container_of(work, struct i915_address_space, rcu.work); - - vm->cleanup(vm); - i915_address_space_fini(vm); - - kfree(vm); -} - -void i915_vm_release(struct kref *kref) -{ - struct i915_address_space *vm = - container_of(kref, struct i915_address_space, ref); - - GEM_BUG_ON(i915_is_ggtt(vm)); - trace_i915_ppgtt_release(vm); - - queue_rcu_work(vm->i915->wq, &vm->rcu); -} - -static void i915_address_space_init(struct i915_address_space *vm, int subclass) -{ - kref_init(&vm->ref); - INIT_RCU_WORK(&vm->rcu, __i915_vm_release); - atomic_set(&vm->open, 1); - - /* - * The vm->mutex must be reclaim safe (for use in the shrinker). - * Do a dummy acquire now under fs_reclaim so that any allocation - * attempt holding the lock is immediately reported by lockdep. 
- */ - mutex_init(&vm->mutex); - lockdep_set_subclass(&vm->mutex, subclass); - i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); - - GEM_BUG_ON(!vm->total); - drm_mm_init(&vm->mm, 0, vm->total); - vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; - - stash_init(&vm->free_pages); - - INIT_LIST_HEAD(&vm->bound_list); -} - -static int __setup_page_dma(struct i915_address_space *vm, - struct i915_page_dma *p, - gfp_t gfp) -{ - p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL); - if (unlikely(!p->page)) - return -ENOMEM; - - p->daddr = dma_map_page_attrs(vm->dma, - p->page, 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL, - DMA_ATTR_SKIP_CPU_SYNC | - DMA_ATTR_NO_WARN); - if (unlikely(dma_mapping_error(vm->dma, p->daddr))) { - vm_free_page(vm, p->page); - return -ENOMEM; - } - - return 0; -} - -static int setup_page_dma(struct i915_address_space *vm, - struct i915_page_dma *p) -{ - return __setup_page_dma(vm, p, __GFP_HIGHMEM); -} - -static void cleanup_page_dma(struct i915_address_space *vm, - struct i915_page_dma *p) -{ - dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - vm_free_page(vm, p->page); -} - -#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page) - -static void -fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count) -{ - kunmap_atomic(memset64(kmap_atomic(p->page), val, count)); -} - -#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64)) -#define fill32_px(px, v) do { \ - u64 v__ = lower_32_bits(v); \ - fill_px((px), v__ << 32 | v__); \ -} while (0) - -static int -setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) -{ - unsigned long size; - - /* - * In order to utilize 64K pages for an object with a size < 2M, we will - * need to support a 64K scratch page, given that every 16th entry for a - * page-table operating in 64K mode must point to a properly aligned 64K - * region, including any PTEs which happen to point to scratch. - * - * This is only relevant for the 48b PPGTT where we support - * huge-gtt-pages, see also i915_vma_insert(). However, as we share the - * scratch (read-only) between all vm, we create one 64k scratch page - * for all. 
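/*
 * Standalone sketch of the fallback implemented by the surrounding
 * setup_scratch_page(): prefer a 64K scratch page, keep it only if the DMA
 * address comes back naturally aligned, otherwise retry with 4K.
 * fake_dma_map() is a hypothetical stand-in for dma_map_page_attrs(); the
 * real code also frees and remaps the page between attempts.
 */
#include <stdint.h>
#include <stdio.h>

#define SZ_4K   0x1000ull
#define SZ_64K  0x10000ull

static uint64_t fake_dma_map(uint64_t size)
{
        (void)size;
        return 0x7000;          /* 4K aligned, but not 64K aligned */
}

int main(void)
{
        uint64_t size = SZ_64K;

        for (;;) {
                uint64_t addr = fake_dma_map(size);

                if ((addr & (size - 1)) == 0) {
                        printf("scratch: addr %#llx, size %#llx\n",
                               (unsigned long long)addr,
                               (unsigned long long)size);
                        return 0;
                }
                if (size == SZ_4K)
                        return 1;       /* the real code returns -ENOMEM here */
                size = SZ_4K;           /* retry with the small page size */
        }
}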
- */ - size = I915_GTT_PAGE_SIZE_4K; - if (i915_vm_is_4lvl(vm) && - HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) { - size = I915_GTT_PAGE_SIZE_64K; - gfp |= __GFP_NOWARN; - } - gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL; - - do { - unsigned int order = get_order(size); - struct page *page; - dma_addr_t addr; - - page = alloc_pages(gfp, order); - if (unlikely(!page)) - goto skip; - - addr = dma_map_page_attrs(vm->dma, - page, 0, size, - PCI_DMA_BIDIRECTIONAL, - DMA_ATTR_SKIP_CPU_SYNC | - DMA_ATTR_NO_WARN); - if (unlikely(dma_mapping_error(vm->dma, addr))) - goto free_page; - - if (unlikely(!IS_ALIGNED(addr, size))) - goto unmap_page; - - vm->scratch[0].base.page = page; - vm->scratch[0].base.daddr = addr; - vm->scratch_order = order; - return 0; - -unmap_page: - dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL); -free_page: - __free_pages(page, order); -skip: - if (size == I915_GTT_PAGE_SIZE_4K) - return -ENOMEM; - - size = I915_GTT_PAGE_SIZE_4K; - gfp &= ~__GFP_NOWARN; - } while (1); -} - -static void cleanup_scratch_page(struct i915_address_space *vm) -{ - struct i915_page_dma *p = px_base(&vm->scratch[0]); - unsigned int order = vm->scratch_order; - - dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT, - PCI_DMA_BIDIRECTIONAL); - __free_pages(p->page, order); -} - -static void free_scratch(struct i915_address_space *vm) -{ - int i; - - if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */ - return; - - for (i = 1; i <= vm->top; i++) { - if (!px_dma(&vm->scratch[i])) - break; - cleanup_page_dma(vm, px_base(&vm->scratch[i])); - } - - cleanup_scratch_page(vm); -} - -static struct i915_page_table *alloc_pt(struct i915_address_space *vm) -{ - struct i915_page_table *pt; - - pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL); - if (unlikely(!pt)) - return ERR_PTR(-ENOMEM); - - if (unlikely(setup_page_dma(vm, &pt->base))) { - kfree(pt); - return ERR_PTR(-ENOMEM); - } - - atomic_set(&pt->used, 0); - return pt; -} - -static struct i915_page_directory *__alloc_pd(size_t sz) -{ - struct i915_page_directory *pd; - - pd = kzalloc(sz, I915_GFP_ALLOW_FAIL); - if (unlikely(!pd)) - return NULL; - - spin_lock_init(&pd->lock); - return pd; -} - -static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) -{ - struct i915_page_directory *pd; - - pd = __alloc_pd(sizeof(*pd)); - if (unlikely(!pd)) - return ERR_PTR(-ENOMEM); - - if (unlikely(setup_page_dma(vm, px_base(pd)))) { - kfree(pd); - return ERR_PTR(-ENOMEM); - } - - return pd; -} - -static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd) -{ - cleanup_page_dma(vm, pd); - kfree(pd); -} - -#define free_px(vm, px) free_pd(vm, px_base(px)) - -static inline void -write_dma_entry(struct i915_page_dma * const pdma, - const unsigned short idx, - const u64 encoded_entry) -{ - u64 * const vaddr = kmap_atomic(pdma->page); - - vaddr[idx] = encoded_entry; - kunmap_atomic(vaddr); -} - -static inline void -__set_pd_entry(struct i915_page_directory * const pd, - const unsigned short idx, - struct i915_page_dma * const to, - u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) -{ - /* Each thread pre-pins the pd, and we may have a thread per pde. 
*/ - GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry)); - - atomic_inc(px_used(pd)); - pd->entry[idx] = to; - write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC)); -} - -#define set_pd_entry(pd, idx, to) \ - __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode) - -static inline void -clear_pd_entry(struct i915_page_directory * const pd, - const unsigned short idx, - const struct i915_page_scratch * const scratch) -{ - GEM_BUG_ON(atomic_read(px_used(pd)) == 0); - - write_dma_entry(px_base(pd), idx, scratch->encode); - pd->entry[idx] = NULL; - atomic_dec(px_used(pd)); -} - -static bool -release_pd_entry(struct i915_page_directory * const pd, - const unsigned short idx, - struct i915_page_table * const pt, - const struct i915_page_scratch * const scratch) -{ - bool free = false; - - if (atomic_add_unless(&pt->used, -1, 1)) - return false; - - spin_lock(&pd->lock); - if (atomic_dec_and_test(&pt->used)) { - clear_pd_entry(pd, idx, scratch); - free = true; - } - spin_unlock(&pd->lock); - - return free; -} - -static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) -{ - struct drm_i915_private *dev_priv = ppgtt->vm.i915; - enum vgt_g2v_type msg; - int i; - - if (create) - atomic_inc(px_used(ppgtt->pd)); /* never remove */ - else - atomic_dec(px_used(ppgtt->pd)); - - mutex_lock(&dev_priv->vgpu.lock); - - if (i915_vm_is_4lvl(&ppgtt->vm)) { - const u64 daddr = px_dma(ppgtt->pd); - - I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); - I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); - - msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : - VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); - } else { - for (i = 0; i < GEN8_3LVL_PDPES; i++) { - const u64 daddr = i915_page_dir_dma_addr(ppgtt, i); - - I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); - I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); - } - - msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : - VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); - } - - /* g2v_notify atomically (via hv trap) consumes the message packet. 
*/ - I915_WRITE(vgtif_reg(g2v_notify), msg); - - mutex_unlock(&dev_priv->vgpu.lock); -} - -/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */ -#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */ -#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE)) -#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64)) -#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES)) -#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl)) -#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl)) -#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl)) - -static inline unsigned int -gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx) -{ - const int shift = gen8_pd_shift(lvl); - const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); - - GEM_BUG_ON(start >= end); - end += ~mask >> gen8_pd_shift(1); - - *idx = i915_pde_index(start, shift); - if ((start ^ end) & mask) - return GEN8_PDES - *idx; - else - return i915_pde_index(end, shift) - *idx; -} - -static inline bool gen8_pd_contains(u64 start, u64 end, int lvl) -{ - const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); - - GEM_BUG_ON(start >= end); - return (start ^ end) & mask && (start & ~mask) == 0; -} - -static inline unsigned int gen8_pt_count(u64 start, u64 end) -{ - GEM_BUG_ON(start >= end); - if ((start ^ end) >> gen8_pd_shift(1)) - return GEN8_PDES - (start & (GEN8_PDES - 1)); - else - return end - start; -} - -static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm) -{ - unsigned int shift = __gen8_pte_shift(vm->top); - return (vm->total + (1ull << shift) - 1) >> shift; -} - -static inline struct i915_page_directory * -gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx) -{ - struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm); - - if (vm->top == 2) - return ppgtt->pd; - else - return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top)); -} - -static inline struct i915_page_directory * -gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr) -{ - return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT); -} - -static void __gen8_ppgtt_cleanup(struct i915_address_space *vm, - struct i915_page_directory *pd, - int count, int lvl) -{ - if (lvl) { - void **pde = pd->entry; - - do { - if (!*pde) - continue; - - __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1); - } while (pde++, --count); - } - - free_px(vm, pd); -} - -static void gen8_ppgtt_cleanup(struct i915_address_space *vm) -{ - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - - if (intel_vgpu_active(vm->i915)) - gen8_ppgtt_notify_vgt(ppgtt, false); - - __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top); - free_scratch(vm); -} - -static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm, - struct i915_page_directory * const pd, - u64 start, const u64 end, int lvl) -{ - const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; - unsigned int idx, len; - - GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT); - - len = gen8_pd_range(start, end, lvl--, &idx); - DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n", - __func__, vm, lvl + 1, start, end, - idx, len, atomic_read(px_used(pd))); - GEM_BUG_ON(!len || len >= atomic_read(px_used(pd))); - - do { - struct i915_page_table *pt = pd->entry[idx]; - - if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) && - gen8_pd_contains(start, end, lvl)) { - DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n", - __func__, vm, lvl + 1, idx, start, 
end); - clear_pd_entry(pd, idx, scratch); - __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl); - start += (u64)I915_PDES << gen8_pd_shift(lvl); - continue; - } - - if (lvl) { - start = __gen8_ppgtt_clear(vm, as_pd(pt), - start, end, lvl); - } else { - unsigned int count; - u64 *vaddr; - - count = gen8_pt_count(start, end); - DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n", - __func__, vm, lvl, start, end, - gen8_pd_index(start, 0), count, - atomic_read(&pt->used)); - GEM_BUG_ON(!count || count >= atomic_read(&pt->used)); - - vaddr = kmap_atomic_px(pt); - memset64(vaddr + gen8_pd_index(start, 0), - vm->scratch[0].encode, - count); - kunmap_atomic(vaddr); - - atomic_sub(count, &pt->used); - start += count; - } - - if (release_pd_entry(pd, idx, pt, scratch)) - free_px(vm, pt); - } while (idx++, --len); - - return start; -} - -static void gen8_ppgtt_clear(struct i915_address_space *vm, - u64 start, u64 length) -{ - GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); - GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); - GEM_BUG_ON(range_overflows(start, length, vm->total)); - - start >>= GEN8_PTE_SHIFT; - length >>= GEN8_PTE_SHIFT; - GEM_BUG_ON(length == 0); - - __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, - start, start + length, vm->top); -} - -static int __gen8_ppgtt_alloc(struct i915_address_space * const vm, - struct i915_page_directory * const pd, - u64 * const start, const u64 end, int lvl) -{ - const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; - struct i915_page_table *alloc = NULL; - unsigned int idx, len; - int ret = 0; - - GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT); - - len = gen8_pd_range(*start, end, lvl--, &idx); - DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n", - __func__, vm, lvl + 1, *start, end, - idx, len, atomic_read(px_used(pd))); - GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1)); - - spin_lock(&pd->lock); - GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! 
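/*
 * Worked example of the index split performed by the __gen8_pte_index()
 * and gen8_pd_index() helpers defined above: a byte offset in the ppGTT is
 * divided into a 12-bit page offset plus one 9-bit index per level (512
 * eight-byte entries per 4K table). Standalone illustration with hard-coded
 * constants, not the driver macros.
 */
#include <stdint.h>
#include <stdio.h>

#define PTE_SHIFT       12      /* 4K pages */
#define PD_BITS         9       /* 512 entries per page table/directory */

static unsigned int level_index(uint64_t offset, int lvl)
{
        return (offset >> (PTE_SHIFT + PD_BITS * lvl)) & ((1u << PD_BITS) - 1);
}

int main(void)
{
        const uint64_t offset = 0x00007f1234567000ull;
        int lvl;

        for (lvl = 3; lvl >= 0; lvl--)
                printf("level %d index: %u\n", lvl, level_index(offset, lvl));
        printf("page offset: %#llx\n",
               (unsigned long long)(offset & ((1u << PTE_SHIFT) - 1)));
        return 0;
}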
*/ - do { - struct i915_page_table *pt = pd->entry[idx]; - - if (!pt) { - spin_unlock(&pd->lock); - - DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n", - __func__, vm, lvl + 1, idx); - - pt = fetch_and_zero(&alloc); - if (lvl) { - if (!pt) { - pt = &alloc_pd(vm)->pt; - if (IS_ERR(pt)) { - ret = PTR_ERR(pt); - goto out; - } - } - - fill_px(pt, vm->scratch[lvl].encode); - } else { - if (!pt) { - pt = alloc_pt(vm); - if (IS_ERR(pt)) { - ret = PTR_ERR(pt); - goto out; - } - } - - if (intel_vgpu_active(vm->i915) || - gen8_pt_count(*start, end) < I915_PDES) - fill_px(pt, vm->scratch[lvl].encode); - } - - spin_lock(&pd->lock); - if (likely(!pd->entry[idx])) - set_pd_entry(pd, idx, pt); - else - alloc = pt, pt = pd->entry[idx]; - } - - if (lvl) { - atomic_inc(&pt->used); - spin_unlock(&pd->lock); - - ret = __gen8_ppgtt_alloc(vm, as_pd(pt), - start, end, lvl); - if (unlikely(ret)) { - if (release_pd_entry(pd, idx, pt, scratch)) - free_px(vm, pt); - goto out; - } - - spin_lock(&pd->lock); - atomic_dec(&pt->used); - GEM_BUG_ON(!atomic_read(&pt->used)); - } else { - unsigned int count = gen8_pt_count(*start, end); - - DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n", - __func__, vm, lvl, *start, end, - gen8_pd_index(*start, 0), count, - atomic_read(&pt->used)); - - atomic_add(count, &pt->used); - /* All other pdes may be simultaneously removed */ - GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES); - *start += count; - } - } while (idx++, --len); - spin_unlock(&pd->lock); -out: - if (alloc) - free_px(vm, alloc); - return ret; -} - -static int gen8_ppgtt_alloc(struct i915_address_space *vm, - u64 start, u64 length) -{ - u64 from; - int err; - - GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); - GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); - GEM_BUG_ON(range_overflows(start, length, vm->total)); - - start >>= GEN8_PTE_SHIFT; - length >>= GEN8_PTE_SHIFT; - GEM_BUG_ON(length == 0); - from = start; - - err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd, - &start, start + length, vm->top); - if (unlikely(err && from != start)) - __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, - from, start, vm->top); - - return err; -} - -static inline struct sgt_dma { - struct scatterlist *sg; - dma_addr_t dma, max; -} sgt_dma(struct i915_vma *vma) { - struct scatterlist *sg = vma->pages->sgl; - dma_addr_t addr = sg_dma_address(sg); - return (struct sgt_dma) { sg, addr, addr + sg->length }; -} - -static __always_inline u64 -gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, - struct i915_page_directory *pdp, - struct sgt_dma *iter, - u64 idx, - enum i915_cache_level cache_level, - u32 flags) -{ - struct i915_page_directory *pd; - const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); - gen8_pte_t *vaddr; - - pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2)); - vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); - do { - vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; - - iter->dma += I915_GTT_PAGE_SIZE; - if (iter->dma >= iter->max) { - iter->sg = __sg_next(iter->sg); - if (!iter->sg) { - idx = 0; - break; - } - - iter->dma = sg_dma_address(iter->sg); - iter->max = iter->dma + iter->sg->length; - } - - if (gen8_pd_index(++idx, 0) == 0) { - if (gen8_pd_index(idx, 1) == 0) { - /* Limited by sg length for 3lvl */ - if (gen8_pd_index(idx, 2) == 0) - break; - - pd = pdp->entry[gen8_pd_index(idx, 2)]; - } - - kunmap_atomic(vaddr); - vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); - } - } while (1); - 
kunmap_atomic(vaddr); - - return idx; -} - -static void gen8_ppgtt_insert_huge(struct i915_vma *vma, - struct sgt_dma *iter, - enum i915_cache_level cache_level, - u32 flags) -{ - const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); - u64 start = vma->node.start; - dma_addr_t rem = iter->sg->length; - - GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm)); - - do { - struct i915_page_directory * const pdp = - gen8_pdp_for_page_address(vma->vm, start); - struct i915_page_directory * const pd = - i915_pd_entry(pdp, __gen8_pte_index(start, 2)); - gen8_pte_t encode = pte_encode; - unsigned int maybe_64K = -1; - unsigned int page_size; - gen8_pte_t *vaddr; - u16 index; - - if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M && - IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) && - rem >= I915_GTT_PAGE_SIZE_2M && - !__gen8_pte_index(start, 0)) { - index = __gen8_pte_index(start, 1); - encode |= GEN8_PDE_PS_2M; - page_size = I915_GTT_PAGE_SIZE_2M; - - vaddr = kmap_atomic_px(pd); - } else { - struct i915_page_table *pt = - i915_pt_entry(pd, __gen8_pte_index(start, 1)); - - index = __gen8_pte_index(start, 0); - page_size = I915_GTT_PAGE_SIZE; - - if (!index && - vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K && - IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && - (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || - rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)) - maybe_64K = __gen8_pte_index(start, 1); - - vaddr = kmap_atomic_px(pt); - } - - do { - GEM_BUG_ON(iter->sg->length < page_size); - vaddr[index++] = encode | iter->dma; - - start += page_size; - iter->dma += page_size; - rem -= page_size; - if (iter->dma >= iter->max) { - iter->sg = __sg_next(iter->sg); - if (!iter->sg) - break; - - rem = iter->sg->length; - iter->dma = sg_dma_address(iter->sg); - iter->max = iter->dma + rem; - - if (maybe_64K != -1 && index < I915_PDES && - !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && - (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || - rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))) - maybe_64K = -1; - - if (unlikely(!IS_ALIGNED(iter->dma, page_size))) - break; - } - } while (rem >= page_size && index < I915_PDES); - - kunmap_atomic(vaddr); - - /* - * Is it safe to mark the 2M block as 64K? -- Either we have - * filled whole page-table with 64K entries, or filled part of - * it and have reached the end of the sg table and we have - * enough padding. - */ - if (maybe_64K != -1 && - (index == I915_PDES || - (i915_vm_has_scratch_64K(vma->vm) && - !iter->sg && IS_ALIGNED(vma->node.start + - vma->node.size, - I915_GTT_PAGE_SIZE_2M)))) { - vaddr = kmap_atomic_px(pd); - vaddr[maybe_64K] |= GEN8_PDE_IPS_64K; - kunmap_atomic(vaddr); - page_size = I915_GTT_PAGE_SIZE_64K; - - /* - * We write all 4K page entries, even when using 64K - * pages. In order to verify that the HW isn't cheating - * by using the 4K PTE instead of the 64K PTE, we want - * to remove all the surplus entries. If the HW skipped - * the 64K PTE, it will read/write into the scratch page - * instead - which we detect as missing results during - * selftests. 
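/*
 * Simplified standalone sketch of the page-size choice made by the
 * surrounding gen8_ppgtt_insert_huge(): use a 2M entry when the GTT offset,
 * the DMA address and the remaining length all line up, fall back to 64K
 * when a whole 64K chunk fits, and otherwise use 4K. It deliberately ignores
 * the sg page-size mask and the partial-table padding case handled above;
 * pick_page_size() is a hypothetical helper, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>

#define SZ_4K   (1u << 12)
#define SZ_64K  (1u << 16)
#define SZ_2M   (1u << 21)

#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

static uint32_t pick_page_size(uint64_t gtt_offset, uint64_t dma, uint64_t rem)
{
        if (IS_ALIGNED(gtt_offset, SZ_2M) && IS_ALIGNED(dma, SZ_2M) &&
            rem >= SZ_2M)
                return SZ_2M;
        if (IS_ALIGNED(gtt_offset, SZ_64K) && IS_ALIGNED(dma, SZ_64K) &&
            rem >= SZ_64K)
                return SZ_64K;
        return SZ_4K;
}

int main(void)
{
        /* everything 2M aligned with 4M left to map -> 2M entries */
        printf("%#x\n", pick_page_size(0x200000, 0x40200000, 4 << 20));
        /* only 64K aligned with 1M left -> 64K entries */
        printf("%#x\n", pick_page_size(0x210000, 0x40210000, 1 << 20));
        /* 4K aligned only -> plain 4K PTEs */
        printf("%#x\n", pick_page_size(0x211000, 0x40211000, 8 << 10));
        return 0;
}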
- */ - if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { - u16 i; - - encode = vma->vm->scratch[0].encode; - vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K)); - - for (i = 1; i < index; i += 16) - memset64(vaddr + i, encode, 15); - - kunmap_atomic(vaddr); - } - } - - vma->page_sizes.gtt |= page_size; - } while (iter->sg); -} - -static void gen8_ppgtt_insert(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) -{ - struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm); - struct sgt_dma iter = sgt_dma(vma); - - if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { - gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags); - } else { - u64 idx = vma->node.start >> GEN8_PTE_SHIFT; - - do { - struct i915_page_directory * const pdp = - gen8_pdp_for_page_index(vm, idx); - - idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx, - cache_level, flags); - } while (idx); - - vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; - } -} - -static int gen8_init_scratch(struct i915_address_space *vm) -{ - int ret; - int i; - - /* - * If everybody agrees to not to write into the scratch page, - * we can reuse it for all vm, keeping contexts and processes separate. - */ - if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) { - struct i915_address_space *clone = vm->gt->vm; - - GEM_BUG_ON(!clone->has_read_only); - - vm->scratch_order = clone->scratch_order; - memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch)); - px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */ - return 0; - } - - ret = setup_scratch_page(vm, __GFP_HIGHMEM); - if (ret) - return ret; - - vm->scratch[0].encode = - gen8_pte_encode(px_dma(&vm->scratch[0]), - I915_CACHE_LLC, vm->has_read_only); - - for (i = 1; i <= vm->top; i++) { - if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i])))) - goto free_scratch; - - fill_px(&vm->scratch[i], vm->scratch[i - 1].encode); - vm->scratch[i].encode = - gen8_pde_encode(px_dma(&vm->scratch[i]), - I915_CACHE_LLC); - } - - return 0; - -free_scratch: - free_scratch(vm); - return -ENOMEM; -} - -static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) -{ - struct i915_address_space *vm = &ppgtt->vm; - struct i915_page_directory *pd = ppgtt->pd; - unsigned int idx; - - GEM_BUG_ON(vm->top != 2); - GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES); - - for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) { - struct i915_page_directory *pde; - - pde = alloc_pd(vm); - if (IS_ERR(pde)) - return PTR_ERR(pde); - - fill_px(pde, vm->scratch[1].encode); - set_pd_entry(pd, idx, pde); - atomic_inc(px_used(pde)); /* keep pinned */ - } - wmb(); - - return 0; -} - -static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - - ppgtt->vm.gt = gt; - ppgtt->vm.i915 = i915; - ppgtt->vm.dma = &i915->drm.pdev->dev; - ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); - - i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); - - ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; - ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; - ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; - ppgtt->vm.vma_ops.clear_pages = clear_pages; -} - -static struct i915_page_directory * -gen8_alloc_top_pd(struct i915_address_space *vm) -{ - const unsigned int count = gen8_pd_top_count(vm); - struct i915_page_directory *pd; - - GEM_BUG_ON(count > ARRAY_SIZE(pd->entry)); - - pd = __alloc_pd(offsetof(typeof(*pd), entry[count])); - if (unlikely(!pd)) - return ERR_PTR(-ENOMEM); - - if (unlikely(setup_page_dma(vm, px_base(pd)))) { - kfree(pd); 
- return ERR_PTR(-ENOMEM); - } - - fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count); - atomic_inc(px_used(pd)); /* mark as pinned */ - return pd; -} - -/* - * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers - * with a net effect resembling a 2-level page table in normal x86 terms. Each - * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address - * space. - * - */ -static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) -{ - struct i915_ppgtt *ppgtt; - int err; - - ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); - if (!ppgtt) - return ERR_PTR(-ENOMEM); - - ppgtt_init(ppgtt, &i915->gt); - ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2; - - /* - * From bdw, there is hw support for read-only pages in the PPGTT. - * - * Gen11 has HSDES#:1807136187 unresolved. Disable ro support - * for now. - * - * Gen12 has inherited the same read-only fault issue from gen11. - */ - ppgtt->vm.has_read_only = !IS_GEN_RANGE(i915, 11, 12); - - /* There are only few exceptions for gen >=6. chv and bxt. - * And we are not sure about the latter so play safe for now. - */ - if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915)) - ppgtt->vm.pt_kmap_wc = true; - - err = gen8_init_scratch(&ppgtt->vm); - if (err) - goto err_free; - - ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm); - if (IS_ERR(ppgtt->pd)) { - err = PTR_ERR(ppgtt->pd); - goto err_free_scratch; - } - - if (!i915_vm_is_4lvl(&ppgtt->vm)) { - err = gen8_preallocate_top_level_pdp(ppgtt); - if (err) - goto err_free_pd; - } - - ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND; - ppgtt->vm.insert_entries = gen8_ppgtt_insert; - ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc; - ppgtt->vm.clear_range = gen8_ppgtt_clear; - - if (intel_vgpu_active(i915)) - gen8_ppgtt_notify_vgt(ppgtt, true); - - ppgtt->vm.cleanup = gen8_ppgtt_cleanup; - - return ppgtt; - -err_free_pd: - __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd, - gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top); -err_free_scratch: - free_scratch(&ppgtt->vm); -err_free: - kfree(ppgtt); - return ERR_PTR(err); -} - -/* Write pde (index) from the page directory @pd to the page table @pt */ -static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt, - const unsigned int pde, - const struct i915_page_table *pt) -{ - /* Caller needs to make sure the write completes if necessary */ - iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID, - ppgtt->pd_addr + pde); -} - -static void gen7_ppgtt_enable(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - struct intel_uncore *uncore = gt->uncore; - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 ecochk; - - intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B); - - ecochk = intel_uncore_read(uncore, GAM_ECOCHK); - if (IS_HASWELL(i915)) { - ecochk |= ECOCHK_PPGTT_WB_HSW; - } else { - ecochk |= ECOCHK_PPGTT_LLC_IVB; - ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; - } - intel_uncore_write(uncore, GAM_ECOCHK, ecochk); - - for_each_engine(engine, gt, id) { - /* GFX_MODE is per-ring on gen7+ */ - ENGINE_WRITE(engine, - RING_MODE_GEN7, - _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); - } -} - -static void gen6_ppgtt_enable(struct intel_gt *gt) -{ - struct intel_uncore *uncore = gt->uncore; - - intel_uncore_rmw(uncore, - GAC_ECO_BITS, - 0, - ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B); - - intel_uncore_rmw(uncore, - GAB_CTL, - 0, - GAB_CTL_CONT_AFTER_PAGEFAULT); - - intel_uncore_rmw(uncore, - GAM_ECOCHK, - 0, - ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); - - if (HAS_PPGTT(uncore->i915)) /* 
may be disabled for VT-d */ - intel_uncore_write(uncore, - GFX_MODE, - _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); -} - -/* PPGTT support for Sandybdrige/Gen6 and later */ -static void gen6_ppgtt_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ - struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - const unsigned int first_entry = start / I915_GTT_PAGE_SIZE; - const gen6_pte_t scratch_pte = vm->scratch[0].encode; - unsigned int pde = first_entry / GEN6_PTES; - unsigned int pte = first_entry % GEN6_PTES; - unsigned int num_entries = length / I915_GTT_PAGE_SIZE; - - while (num_entries) { - struct i915_page_table * const pt = - i915_pt_entry(ppgtt->base.pd, pde++); - const unsigned int count = min(num_entries, GEN6_PTES - pte); - gen6_pte_t *vaddr; - - GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1])); - - num_entries -= count; - - GEM_BUG_ON(count > atomic_read(&pt->used)); - if (!atomic_sub_return(count, &pt->used)) - ppgtt->scan_for_unused_pt = true; - - /* - * Note that the hw doesn't support removing PDE on the fly - * (they are cached inside the context with no means to - * invalidate the cache), so we can only reset the PTE - * entries back to scratch. - */ - - vaddr = kmap_atomic_px(pt); - memset32(vaddr + pte, scratch_pte, count); - kunmap_atomic(vaddr); - - pte = 0; - } -} - -static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) -{ - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - struct i915_page_directory * const pd = ppgtt->pd; - unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE; - unsigned act_pt = first_entry / GEN6_PTES; - unsigned act_pte = first_entry % GEN6_PTES; - const u32 pte_encode = vm->pte_encode(0, cache_level, flags); - struct sgt_dma iter = sgt_dma(vma); - gen6_pte_t *vaddr; - - GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]); - - vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); - do { - vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); - - iter.dma += I915_GTT_PAGE_SIZE; - if (iter.dma == iter.max) { - iter.sg = __sg_next(iter.sg); - if (!iter.sg) - break; - - iter.dma = sg_dma_address(iter.sg); - iter.max = iter.dma + iter.sg->length; - } - - if (++act_pte == GEN6_PTES) { - kunmap_atomic(vaddr); - vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt)); - act_pte = 0; - } - } while (1); - kunmap_atomic(vaddr); - - vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; -} - -static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end) -{ - struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_table *pt; - unsigned int pde; - - start = round_down(start, SZ_64K); - end = round_up(end, SZ_64K) - start; - - mutex_lock(&ppgtt->flush); - - gen6_for_each_pde(pt, pd, start, end, pde) - gen6_write_pde(ppgtt, pde, pt); - - mb(); - ioread32(ppgtt->pd_addr + pde - 1); - gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt); - mb(); - - mutex_unlock(&ppgtt->flush); -} - -static int gen6_alloc_va_range(struct i915_address_space *vm, - u64 start, u64 length) -{ - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_table *pt, *alloc = NULL; - intel_wakeref_t wakeref; - u64 from = start; - unsigned int pde; - int ret = 0; - - wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); - - spin_lock(&pd->lock); - gen6_for_each_pde(pt, pd, start, length, pde) { - const unsigned int count = gen6_pte_count(start, length); - - if (px_base(pt) 
== px_base(&vm->scratch[1])) { - spin_unlock(&pd->lock); - - pt = fetch_and_zero(&alloc); - if (!pt) - pt = alloc_pt(vm); - if (IS_ERR(pt)) { - ret = PTR_ERR(pt); - goto unwind_out; - } - - fill32_px(pt, vm->scratch[0].encode); - - spin_lock(&pd->lock); - if (pd->entry[pde] == &vm->scratch[1]) { - pd->entry[pde] = pt; - } else { - alloc = pt; - pt = pd->entry[pde]; - } - } - - atomic_add(count, &pt->used); - } - spin_unlock(&pd->lock); - - if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) - gen6_flush_pd(ppgtt, from, start); - - goto out; - -unwind_out: - gen6_ppgtt_clear_range(vm, from, start - from); -out: - if (alloc) - free_px(vm, alloc); - intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); - return ret; -} - -static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) -{ - struct i915_address_space * const vm = &ppgtt->base.vm; - struct i915_page_directory * const pd = ppgtt->base.pd; - int ret; - - ret = setup_scratch_page(vm, __GFP_HIGHMEM); - if (ret) - return ret; - - vm->scratch[0].encode = - vm->pte_encode(px_dma(&vm->scratch[0]), - I915_CACHE_NONE, PTE_READ_ONLY); - - if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) { - cleanup_scratch_page(vm); - return -ENOMEM; - } - - fill32_px(&vm->scratch[1], vm->scratch[0].encode); - memset_p(pd->entry, &vm->scratch[1], I915_PDES); - - return 0; -} - -static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) -{ - struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_dma * const scratch = - px_base(&ppgtt->base.vm.scratch[1]); - struct i915_page_table *pt; - u32 pde; - - gen6_for_all_pdes(pt, pd, pde) - if (px_base(pt) != scratch) - free_px(&ppgtt->base.vm, pt); -} - -static void gen6_ppgtt_cleanup(struct i915_address_space *vm) -{ - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - - __i915_vma_put(ppgtt->vma); - - gen6_ppgtt_free_pd(ppgtt); - free_scratch(vm); - - mutex_destroy(&ppgtt->flush); - mutex_destroy(&ppgtt->pin_mutex); - kfree(ppgtt->base.pd); -} - -static int pd_vma_set_pages(struct i915_vma *vma) -{ - vma->pages = ERR_PTR(-ENODEV); - return 0; -} - -static void pd_vma_clear_pages(struct i915_vma *vma) -{ - GEM_BUG_ON(!vma->pages); - - vma->pages = NULL; -} - -static int pd_vma_bind(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 unused) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); - struct gen6_ppgtt *ppgtt = vma->private; - u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; - - px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); - ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; - - gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total); - return 0; -} - -static void pd_vma_unbind(struct i915_vma *vma) -{ - struct gen6_ppgtt *ppgtt = vma->private; - struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_dma * const scratch = - px_base(&ppgtt->base.vm.scratch[1]); - struct i915_page_table *pt; - unsigned int pde; - - if (!ppgtt->scan_for_unused_pt) - return; - - /* Free all no longer used page tables */ - gen6_for_all_pdes(pt, ppgtt->base.pd, pde) { - if (px_base(pt) == scratch || atomic_read(&pt->used)) - continue; - - free_px(&ppgtt->base.vm, pt); - pd->entry[pde] = scratch; - } - - ppgtt->scan_for_unused_pt = false; -} - -static const struct i915_vma_ops pd_vma_ops = { - .set_pages = pd_vma_set_pages, - .clear_pages = pd_vma_clear_pages, - .bind_vma = pd_vma_bind, - .unbind_vma = pd_vma_unbind, -}; - -static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) -{ - 
struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt; - struct i915_vma *vma; - - GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); - GEM_BUG_ON(size > ggtt->vm.total); - - vma = i915_vma_alloc(); - if (!vma) - return ERR_PTR(-ENOMEM); - - i915_active_init(&vma->active, NULL, NULL); - - kref_init(&vma->ref); - mutex_init(&vma->pages_mutex); - vma->vm = i915_vm_get(&ggtt->vm); - vma->ops = &pd_vma_ops; - vma->private = ppgtt; - - vma->size = size; - vma->fence_size = size; - atomic_set(&vma->flags, I915_VMA_GGTT); - vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ - - INIT_LIST_HEAD(&vma->obj_link); - INIT_LIST_HEAD(&vma->closed_link); - - return vma; -} - -int gen6_ppgtt_pin(struct i915_ppgtt *base) -{ - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); - int err = 0; - - GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open)); - - /* - * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt - * which will be pinned into every active context. - * (When vma->pin_count becomes atomic, I expect we will naturally - * need a larger, unpacked, type and kill this redundancy.) - */ - if (atomic_add_unless(&ppgtt->pin_count, 1, 0)) - return 0; - - if (mutex_lock_interruptible(&ppgtt->pin_mutex)) - return -EINTR; - - /* - * PPGTT PDEs reside in the GGTT and consists of 512 entries. The - * allocator works in address space sizes, so it's multiplied by page - * size. We allocate at the top of the GTT to avoid fragmentation. - */ - if (!atomic_read(&ppgtt->pin_count)) { - err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH); - } - if (!err) - atomic_inc(&ppgtt->pin_count); - mutex_unlock(&ppgtt->pin_mutex); - - return err; -} - -void gen6_ppgtt_unpin(struct i915_ppgtt *base) -{ - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); - - GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); - if (atomic_dec_and_test(&ppgtt->pin_count)) - i915_vma_unpin(ppgtt->vma); -} - -void gen6_ppgtt_unpin_all(struct i915_ppgtt *base) -{ - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); - - if (!atomic_read(&ppgtt->pin_count)) - return; - - i915_vma_unpin(ppgtt->vma); - atomic_set(&ppgtt->pin_count, 0); -} - -static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) -{ - struct i915_ggtt * const ggtt = &i915->ggtt; - struct gen6_ppgtt *ppgtt; - int err; - - ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); - if (!ppgtt) - return ERR_PTR(-ENOMEM); - - mutex_init(&ppgtt->flush); - mutex_init(&ppgtt->pin_mutex); - - ppgtt_init(&ppgtt->base, &i915->gt); - ppgtt->base.vm.top = 1; - - ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND; - ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; - ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; - ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; - ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; - - ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; - - ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd)); - if (!ppgtt->base.pd) { - err = -ENOMEM; - goto err_free; - } - - err = gen6_ppgtt_init_scratch(ppgtt); - if (err) - goto err_pd; - - ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE); - if (IS_ERR(ppgtt->vma)) { - err = PTR_ERR(ppgtt->vma); - goto err_scratch; - } - - return &ppgtt->base; - -err_scratch: - free_scratch(&ppgtt->base.vm); -err_pd: - kfree(ppgtt->base.pd); -err_free: - mutex_destroy(&ppgtt->pin_mutex); - kfree(ppgtt); - return ERR_PTR(err); -} - -static void gtt_write_workarounds(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - struct intel_uncore *uncore = gt->uncore; - - /* This function is for gtt related 
workarounds. This function is - * called on driver load and after a GPU reset, so you can place - * workarounds here even if they get overwritten by GPU reset. - */ - /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */ - if (IS_BROADWELL(i915)) - intel_uncore_write(uncore, - GEN8_L3_LRA_1_GPGPU, - GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); - else if (IS_CHERRYVIEW(i915)) - intel_uncore_write(uncore, - GEN8_L3_LRA_1_GPGPU, - GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); - else if (IS_GEN9_LP(i915)) - intel_uncore_write(uncore, - GEN8_L3_LRA_1_GPGPU, - GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); - else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11) - intel_uncore_write(uncore, - GEN8_L3_LRA_1_GPGPU, - GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); - - /* - * To support 64K PTEs we need to first enable the use of the - * Intermediate-Page-Size(IPS) bit of the PDE field via some magical - * mmio, otherwise the page-walker will simply ignore the IPS bit. This - * shouldn't be needed after GEN10. - * - * 64K pages were first introduced from BDW+, although technically they - * only *work* from gen9+. For pre-BDW we instead have the option for - * 32K pages, but we don't currently have any support for it in our - * driver. - */ - if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) && - INTEL_GEN(i915) <= 10) - intel_uncore_rmw(uncore, - GEN8_GAMW_ECO_DEV_RW_IA, - 0, - GAMW_ECO_ENABLE_64K_IPS_FIELD); - - if (IS_GEN_RANGE(i915, 8, 11)) { - bool can_use_gtt_cache = true; - - /* - * According to the BSpec if we use 2M/1G pages then we also - * need to disable the GTT cache. At least on BDW we can see - * visual corruption when using 2M pages, and not disabling the - * GTT cache. - */ - if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M)) - can_use_gtt_cache = false; - - /* WaGttCachingOffByDefault */ - intel_uncore_write(uncore, - HSW_GTT_CACHE_EN, - can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0); - WARN_ON_ONCE(can_use_gtt_cache && - intel_uncore_read(uncore, - HSW_GTT_CACHE_EN) == 0); - } -} - -int i915_ppgtt_init_hw(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - - gtt_write_workarounds(gt); - - if (IS_GEN(i915, 6)) - gen6_ppgtt_enable(gt); - else if (IS_GEN(i915, 7)) - gen7_ppgtt_enable(gt); - - return 0; -} - -static struct i915_ppgtt * -__ppgtt_create(struct drm_i915_private *i915) -{ - if (INTEL_GEN(i915) < 8) - return gen6_ppgtt_create(i915); - else - return gen8_ppgtt_create(i915); -} - -struct i915_ppgtt * -i915_ppgtt_create(struct drm_i915_private *i915) -{ - struct i915_ppgtt *ppgtt; - - ppgtt = __ppgtt_create(i915); - if (IS_ERR(ppgtt)) - return ppgtt; - - trace_i915_ppgtt_create(&ppgtt->vm); - - return ppgtt; -} - -/* Certain Gen5 chipsets require require idling the GPU before - * unmapping anything from the GTT when VT-d is enabled. - */ -static bool needs_idle_maps(struct drm_i915_private *dev_priv) -{ - /* Query intel_iommu to see if we need the workaround. Presumably that - * was loaded first. - */ - return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active(); -} - -static void ggtt_suspend_mappings(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *i915 = ggtt->vm.i915; - - /* Don't bother messing with faults pre GEN6 as we have little - * documentation supporting that it's a good idea. 
- */ - if (INTEL_GEN(i915) < 6) - return; - - intel_gt_check_and_clear_faults(ggtt->vm.gt); - - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - - ggtt->invalidate(ggtt); -} - -void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915) -{ - ggtt_suspend_mappings(&i915->ggtt); -} - int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { @@ -2181,368 +52,6 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, return -ENOSPC; } -static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) -{ - writeq(pte, addr); -} - -static void gen8_ggtt_insert_page(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level level, - u32 unused) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen8_pte_t __iomem *pte = - (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; - - gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); - - ggtt->invalidate(ggtt); -} - -static void gen8_ggtt_insert_entries(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level level, - u32 flags) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - struct sgt_iter sgt_iter; - gen8_pte_t __iomem *gtt_entries; - const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0); - dma_addr_t addr; - - /* - * Note that we ignore PTE_READ_ONLY here. The caller must be careful - * not to allow the user to override access to a read only page. - */ - - gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; - gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE; - for_each_sgt_daddr(addr, sgt_iter, vma->pages) - gen8_set_pte(gtt_entries++, pte_encode | addr); - - /* - * We want to flush the TLBs only after we're certain all the PTE - * updates have finished. - */ - ggtt->invalidate(ggtt); -} - -static void gen6_ggtt_insert_page(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level level, - u32 flags) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen6_pte_t __iomem *pte = - (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; - - iowrite32(vm->pte_encode(addr, level, flags), pte); - - ggtt->invalidate(ggtt); -} - -/* - * Binds an object into the global gtt with the specified cache level. The object - * will be accessible to the GPU via commands whose operands reference offsets - * within the global GTT as well as accessible by the GPU through the GMADR - * mapped BAR (dev_priv->mm.gtt->gtt). - */ -static void gen6_ggtt_insert_entries(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level level, - u32 flags) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; - unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE; - struct sgt_iter iter; - dma_addr_t addr; - for_each_sgt_daddr(addr, iter, vma->pages) - iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); - - /* - * We want to flush the TLBs only after we're certain all the PTE - * updates have finished. 
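/*
 * Minimal model of the pattern used by the ggtt insert_entries helpers
 * here: write every PTE for the binding first, then perform a single TLB
 * invalidate once all updates are in place. gsm[] and fake_invalidate()
 * are hypothetical stand-ins for the GGTT PTE window and ggtt->invalidate().
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_PTES        4

static uint64_t gsm[NUM_PTES];          /* stands in for the mapped GTT */

static void fake_invalidate(void)
{
        printf("TLB invalidated once, after %d PTE writes\n", NUM_PTES);
}

int main(void)
{
        uint64_t dma = 0x80000000ull;
        int i;

        for (i = 0; i < NUM_PTES; i++)  /* batch all the PTE updates... */
                gsm[i] = dma + (uint64_t)i * 0x1000;

        fake_invalidate();              /* ...then flush the TLB exactly once */
        return 0;
}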
- */ - ggtt->invalidate(ggtt); -} - -static void nop_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ -} - -static void gen8_ggtt_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - unsigned first_entry = start / I915_GTT_PAGE_SIZE; - unsigned num_entries = length / I915_GTT_PAGE_SIZE; - const gen8_pte_t scratch_pte = vm->scratch[0].encode; - gen8_pte_t __iomem *gtt_base = - (gen8_pte_t __iomem *)ggtt->gsm + first_entry; - const int max_entries = ggtt_total_entries(ggtt) - first_entry; - int i; - - if (WARN(num_entries > max_entries, - "First entry = %d; Num entries = %d (max=%d)\n", - first_entry, num_entries, max_entries)) - num_entries = max_entries; - - for (i = 0; i < num_entries; i++) - gen8_set_pte(&gtt_base[i], scratch_pte); -} - -static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) -{ - struct drm_i915_private *dev_priv = vm->i915; - - /* - * Make sure the internal GAM fifo has been cleared of all GTT - * writes before exiting stop_machine(). This guarantees that - * any aperture accesses waiting to start in another process - * cannot back up behind the GTT writes causing a hang. - * The register can be any arbitrary GAM register. - */ - POSTING_READ(GFX_FLSH_CNTL_GEN6); -} - -struct insert_page { - struct i915_address_space *vm; - dma_addr_t addr; - u64 offset; - enum i915_cache_level level; -}; - -static int bxt_vtd_ggtt_insert_page__cb(void *_arg) -{ - struct insert_page *arg = _arg; - - gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); - bxt_vtd_ggtt_wa(arg->vm); - - return 0; -} - -static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level level, - u32 unused) -{ - struct insert_page arg = { vm, addr, offset, level }; - - stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); -} - -struct insert_entries { - struct i915_address_space *vm; - struct i915_vma *vma; - enum i915_cache_level level; - u32 flags; -}; - -static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) -{ - struct insert_entries *arg = _arg; - - gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); - bxt_vtd_ggtt_wa(arg->vm); - - return 0; -} - -static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level level, - u32 flags) -{ - struct insert_entries arg = { vm, vma, level, flags }; - - stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); -} - -struct clear_range { - struct i915_address_space *vm; - u64 start; - u64 length; -}; - -static int bxt_vtd_ggtt_clear_range__cb(void *_arg) -{ - struct clear_range *arg = _arg; - - gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); - bxt_vtd_ggtt_wa(arg->vm); - - return 0; -} - -static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm, - u64 start, - u64 length) -{ - struct clear_range arg = { vm, start, length }; - - stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL); -} - -static void gen6_ggtt_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - unsigned first_entry = start / I915_GTT_PAGE_SIZE; - unsigned num_entries = length / I915_GTT_PAGE_SIZE; - gen6_pte_t scratch_pte, __iomem *gtt_base = - (gen6_pte_t __iomem *)ggtt->gsm + first_entry; - const int max_entries = ggtt_total_entries(ggtt) - first_entry; - int i; - - if (WARN(num_entries > max_entries, - "First entry = %d; Num entries = %d (max=%d)\n", -
first_entry, num_entries, max_entries)) - num_entries = max_entries; - - scratch_pte = vm->scratch[0].encode; - for (i = 0; i < num_entries; i++) - iowrite32(scratch_pte, &gtt_base[i]); -} - -static void i915_ggtt_insert_page(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level cache_level, - u32 unused) -{ - unsigned int flags = (cache_level == I915_CACHE_NONE) ? - AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; - - intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); -} - -static void i915_ggtt_insert_entries(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 unused) -{ - unsigned int flags = (cache_level == I915_CACHE_NONE) ? - AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; - - intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT, - flags); -} - -static void i915_ggtt_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ - intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); -} - -static int ggtt_bind_vma(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) -{ - struct drm_i915_private *i915 = vma->vm->i915; - struct drm_i915_gem_object *obj = vma->obj; - intel_wakeref_t wakeref; - u32 pte_flags; - - /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ - pte_flags = 0; - if (i915_gem_object_is_readonly(obj)) - pte_flags |= PTE_READ_ONLY; - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - - vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; - - /* - * Without aliasing PPGTT there's no difference between - * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally - * upgrade to both bound if we bind either to avoid double-binding. - */ - atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); - - return 0; -} - -static void ggtt_unbind_vma(struct i915_vma *vma) -{ - struct drm_i915_private *i915 = vma->vm->i915; - intel_wakeref_t wakeref; - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - vma->vm->clear_range(vma->vm, vma->node.start, vma->size); -} - -static int aliasing_gtt_bind_vma(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) -{ - struct drm_i915_private *i915 = vma->vm->i915; - u32 pte_flags; - int ret; - - /* Currently applicable only to VLV */ - pte_flags = 0; - if (i915_gem_object_is_readonly(vma->obj)) - pte_flags |= PTE_READ_ONLY; - - if (flags & I915_VMA_LOCAL_BIND) { - struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias; - - if (flags & I915_VMA_ALLOC) { - ret = alias->vm.allocate_va_range(&alias->vm, - vma->node.start, - vma->size); - if (ret) - return ret; - - set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)); - } - - GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, - __i915_vma_flags(vma))); - alias->vm.insert_entries(&alias->vm, vma, - cache_level, pte_flags); - } - - if (flags & I915_VMA_GLOBAL_BIND) { - intel_wakeref_t wakeref; - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - vma->vm->insert_entries(vma->vm, vma, - cache_level, pte_flags); - } - } - - return 0; -} - -static void aliasing_gtt_unbind_vma(struct i915_vma *vma) -{ - struct drm_i915_private *i915 = vma->vm->i915; - - if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) { - struct i915_address_space *vm = vma->vm; - intel_wakeref_t wakeref; - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - vm->clear_range(vm, vma->node.start, vma->size); - } - - if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) { - struct i915_address_space 
*vm = - &i915_vm_to_ggtt(vma->vm)->alias->vm; - - vm->clear_range(vm, vma->node.start, vma->size); - } -} - void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { @@ -2563,1070 +72,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL); } -static int ggtt_set_pages(struct i915_vma *vma) -{ - int ret; - - GEM_BUG_ON(vma->pages); - - ret = i915_get_ggtt_vma_pages(vma); - if (ret) - return ret; - - vma->page_sizes = vma->obj->mm.page_sizes; - - return 0; -} - -static void i915_ggtt_color_adjust(const struct drm_mm_node *node, - unsigned long color, - u64 *start, - u64 *end) -{ - if (i915_node_color_differs(node, color)) - *start += I915_GTT_PAGE_SIZE; - - /* Also leave a space between the unallocated reserved node after the - * GTT and any objects within the GTT, i.e. we use the color adjustment - * to insert a guard page to prevent prefetches crossing over the - * GTT boundary. - */ - node = list_next_entry(node, node_list); - if (node->color != color) - *end -= I915_GTT_PAGE_SIZE; -} - -static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) -{ - struct i915_ppgtt *ppgtt; - int err; - - ppgtt = i915_ppgtt_create(ggtt->vm.i915); - if (IS_ERR(ppgtt)) - return PTR_ERR(ppgtt); - - if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { - err = -ENODEV; - goto err_ppgtt; - } - - /* - * Note we only pre-allocate as far as the end of the global - * GTT. On 48b / 4-level page-tables, the difference is very, - * very significant! We have to preallocate as GVT/vgpu does - * not like the page directory disappearing. - */ - err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total); - if (err) - goto err_ppgtt; - - ggtt->alias = ppgtt; - ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; - - GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); - ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; - - GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); - ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; - - return 0; - -err_ppgtt: - i915_vm_put(&ppgtt->vm); - return err; -} - -static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt) -{ - struct i915_ppgtt *ppgtt; - - ppgtt = fetch_and_zero(&ggtt->alias); - if (!ppgtt) - return; - - i915_vm_put(&ppgtt->vm); - - ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; - ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; -} - -static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) -{ - u64 size; - int ret; - - if (!USES_GUC(ggtt->vm.i915)) - return 0; - - GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); - size = ggtt->vm.total - GUC_GGTT_TOP; - - ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, - GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE, - PIN_NOEVICT); - if (ret) - DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n"); - - return ret; -} - -static void ggtt_release_guc_top(struct i915_ggtt *ggtt) -{ - if (drm_mm_node_allocated(&ggtt->uc_fw)) - drm_mm_remove_node(&ggtt->uc_fw); -} - -static void cleanup_init_ggtt(struct i915_ggtt *ggtt) -{ - ggtt_release_guc_top(ggtt); - if (drm_mm_node_allocated(&ggtt->error_capture)) - drm_mm_remove_node(&ggtt->error_capture); -} - -static int init_ggtt(struct i915_ggtt *ggtt) -{ - /* Let GEM Manage all of the aperture. - * - * However, leave one page at the end still bound to the scratch page. 
- * There are a number of places where the hardware apparently prefetches - * past the end of the object, and we've seen multiple hangs with the - * GPU head pointer stuck in a batchbuffer bound at the last page of the - * aperture. One page should be enough to keep any prefetching inside - * of the aperture. - */ - unsigned long hole_start, hole_end; - struct drm_mm_node *entry; - int ret; - - /* - * GuC requires all resources that we're sharing with it to be placed in - * non-WOPCM memory. If GuC is not present or not in use we still need a - * small bias as ring wraparound at offset 0 sometimes hangs. No idea - * why. - */ - ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, - intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); - - ret = intel_vgt_balloon(ggtt); - if (ret) - return ret; - - if (ggtt->mappable_end) { - /* Reserve a mappable slot for our lockless error capture */ - ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture, - PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, - 0, ggtt->mappable_end, - DRM_MM_INSERT_LOW); - if (ret) - return ret; - } - - /* - * The upper portion of the GuC address space has a sizeable hole - * (several MB) that is inaccessible by GuC. Reserve this range within - * GGTT as it can comfortably hold GuC/HuC firmware images. - */ - ret = ggtt_reserve_guc_top(ggtt); - if (ret) - goto err; - - /* Clear any non-preallocated blocks */ - drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { - DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", - hole_start, hole_end); - ggtt->vm.clear_range(&ggtt->vm, hole_start, - hole_end - hole_start); - } - - /* And finally clear the reserved guard page */ - ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); - - return 0; - -err: - cleanup_init_ggtt(ggtt); - return ret; -} - -int i915_init_ggtt(struct drm_i915_private *i915) -{ - int ret; - - ret = init_ggtt(&i915->ggtt); - if (ret) - return ret; - - if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) { - ret = init_aliasing_ppgtt(&i915->ggtt); - if (ret) - cleanup_init_ggtt(&i915->ggtt); - } - - return 0; -} - -static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) -{ - struct i915_vma *vma, *vn; - - atomic_set(&ggtt->vm.open, 0); - - rcu_barrier(); /* flush the RCU'ed__i915_vm_release */ - flush_workqueue(ggtt->vm.i915->wq); - - mutex_lock(&ggtt->vm.mutex); - - list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) - WARN_ON(__i915_vma_unbind(vma)); - - if (drm_mm_node_allocated(&ggtt->error_capture)) - drm_mm_remove_node(&ggtt->error_capture); - - ggtt_release_guc_top(ggtt); - intel_vgt_deballoon(ggtt); - - ggtt->vm.cleanup(&ggtt->vm); - - mutex_unlock(&ggtt->vm.mutex); - i915_address_space_fini(&ggtt->vm); - - arch_phys_wc_del(ggtt->mtrr); - - if (ggtt->iomap.size) - io_mapping_fini(&ggtt->iomap); -} - -/** - * i915_ggtt_driver_release - Clean up GGTT hardware initialization - * @i915: i915 device - */ -void i915_ggtt_driver_release(struct drm_i915_private *i915) -{ - struct pagevec *pvec; - - fini_aliasing_ppgtt(&i915->ggtt); - - ggtt_cleanup_hw(&i915->ggtt); - - pvec = &i915->mm.wc_stash.pvec; - if (pvec->nr) { - set_pages_array_wb(pvec->pages, pvec->nr); - __pagevec_release(pvec); - } -} - -static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) -{ - snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; - snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; - return snb_gmch_ctl << 20; -} - -static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) -{ - bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; - bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; - if (bdw_gmch_ctl) - 
bdw_gmch_ctl = 1 << bdw_gmch_ctl; - -#ifdef CONFIG_X86_32 - /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */ - if (bdw_gmch_ctl > 4) - bdw_gmch_ctl = 4; -#endif - - return bdw_gmch_ctl << 20; -} - -static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) -{ - gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; - gmch_ctrl &= SNB_GMCH_GGMS_MASK; - - if (gmch_ctrl) - return 1 << (20 + gmch_ctrl); - - return 0; -} - -static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) -{ - struct drm_i915_private *dev_priv = ggtt->vm.i915; - struct pci_dev *pdev = dev_priv->drm.pdev; - phys_addr_t phys_addr; - int ret; - - /* For Modern GENs the PTEs and register space are split in the BAR */ - phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2; - - /* - * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range - * will be dropped. For WC mappings in general we have 64 byte burst - * writes when the WC buffer is flushed, so we can't use it, but have to - * resort to an uncached mapping. The WC issue is easily caught by the - * readback check when writing GTT PTE entries. - */ - if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) - ggtt->gsm = ioremap_nocache(phys_addr, size); - else - ggtt->gsm = ioremap_wc(phys_addr, size); - if (!ggtt->gsm) { - DRM_ERROR("Failed to map the ggtt page table\n"); - return -ENOMEM; - } - - ret = setup_scratch_page(&ggtt->vm, GFP_DMA32); - if (ret) { - DRM_ERROR("Scratch setup failed\n"); - /* iounmap will also get called at remove, but meh */ - iounmap(ggtt->gsm); - return ret; - } - - ggtt->vm.scratch[0].encode = - ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]), - I915_CACHE_NONE, 0); - - return 0; -} - -static void tgl_setup_private_ppat(struct intel_uncore *uncore) -{ - /* TGL doesn't support LLC or AGE settings */ - intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB); - intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC); - intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT); - intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC); - intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB); - intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB); - intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB); - intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB); -} - -static void cnl_setup_private_ppat(struct intel_uncore *uncore) -{ - intel_uncore_write(uncore, - GEN10_PAT_INDEX(0), - GEN8_PPAT_WB | GEN8_PPAT_LLC); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(1), - GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(2), - GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(3), - GEN8_PPAT_UC); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(4), - GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(5), - GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(6), - GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); - intel_uncore_write(uncore, - GEN10_PAT_INDEX(7), - GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); -} - -/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability - * bits. When using advanced contexts each context stores its own PAT, but - * writing this data shouldn't be harmful even in those cases. 
*/ -static void bdw_setup_private_ppat(struct intel_uncore *uncore) -{ - u64 pat; - - pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ - GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ - GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ - GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ - GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | - GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | - GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | - GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); - - intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); - intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); -} - -static void chv_setup_private_ppat(struct intel_uncore *uncore) -{ - u64 pat; - - /* - * Map WB on BDW to snooped on CHV. - * - * Only the snoop bit has meaning for CHV, the rest is - * ignored. - * - * The hardware will never snoop for certain types of accesses: - * - CPU GTT (GMADR->GGTT->no snoop->memory) - * - PPGTT page tables - * - some other special cycles - * - * As with BDW, we also need to consider the following for GT accesses: - * "For GGTT, there is NO pat_sel[2:0] from the entry, - * so RTL will always use the value corresponding to - * pat_sel = 000". - * Which means we must set the snoop bit in PAT entry 0 - * in order to keep the global status page working. - */ - - pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | - GEN8_PPAT(1, 0) | - GEN8_PPAT(2, 0) | - GEN8_PPAT(3, 0) | - GEN8_PPAT(4, CHV_PPAT_SNOOP) | - GEN8_PPAT(5, CHV_PPAT_SNOOP) | - GEN8_PPAT(6, CHV_PPAT_SNOOP) | - GEN8_PPAT(7, CHV_PPAT_SNOOP); - - intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); - intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); -} - -static void gen6_gmch_remove(struct i915_address_space *vm) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - - iounmap(ggtt->gsm); - cleanup_scratch_page(vm); -} - -static void setup_private_pat(struct intel_uncore *uncore) -{ - struct drm_i915_private *i915 = uncore->i915; - - GEM_BUG_ON(INTEL_GEN(i915) < 8); - - if (INTEL_GEN(i915) >= 12) - tgl_setup_private_ppat(uncore); - else if (INTEL_GEN(i915) >= 10) - cnl_setup_private_ppat(uncore); - else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915)) - chv_setup_private_ppat(uncore); - else - bdw_setup_private_ppat(uncore); -} - -static struct resource pci_resource(struct pci_dev *pdev, int bar) -{ - return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar), - pci_resource_len(pdev, bar)); -} - -static int gen8_gmch_probe(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *dev_priv = ggtt->vm.i915; - struct pci_dev *pdev = dev_priv->drm.pdev; - unsigned int size; - u16 snb_gmch_ctl; - int err; - - /* TODO: We're not aware of mappable constraints on gen8 yet */ - if (!IS_DGFX(dev_priv)) { - ggtt->gmadr = pci_resource(pdev, 2); - ggtt->mappable_end = resource_size(&ggtt->gmadr); - } - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); - if (err) - DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); - - pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - if (IS_CHERRYVIEW(dev_priv)) - size = chv_get_total_gtt_size(snb_gmch_ctl); - else - size = gen8_get_total_gtt_size(snb_gmch_ctl); - - ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; - ggtt->vm.cleanup = 
gen6_gmch_remove; - ggtt->vm.insert_page = gen8_ggtt_insert_page; - ggtt->vm.clear_range = nop_clear_range; - if (intel_scanout_needs_vtd_wa(dev_priv)) - ggtt->vm.clear_range = gen8_ggtt_clear_range; - - ggtt->vm.insert_entries = gen8_ggtt_insert_entries; - - /* Serialize GTT updates with aperture access on BXT if VT-d is on. */ - if (intel_ggtt_update_needs_vtd_wa(dev_priv) || - IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) { - ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; - ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; - if (ggtt->vm.clear_range != nop_clear_range) - ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; - } - - ggtt->invalidate = gen8_ggtt_invalidate; - - ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; - ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - ggtt->vm.vma_ops.set_pages = ggtt_set_pages; - ggtt->vm.vma_ops.clear_pages = clear_pages; - - ggtt->vm.pte_encode = gen8_pte_encode; - - setup_private_pat(ggtt->vm.gt->uncore); - - return ggtt_probe_common(ggtt, size); -} - -static int gen6_gmch_probe(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *dev_priv = ggtt->vm.i915; - struct pci_dev *pdev = dev_priv->drm.pdev; - unsigned int size; - u16 snb_gmch_ctl; - int err; - - ggtt->gmadr = - (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2), - pci_resource_len(pdev, 2)); - ggtt->mappable_end = resource_size(&ggtt->gmadr); - - /* 64/512MB is the current min/max we actually know of, but this is just - * a coarse sanity check. - */ - if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { - DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end); - return -ENXIO; - } - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); - if (err) - DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); - pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - - size = gen6_get_total_gtt_size(snb_gmch_ctl); - ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; - - ggtt->vm.clear_range = nop_clear_range; - if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv)) - ggtt->vm.clear_range = gen6_ggtt_clear_range; - ggtt->vm.insert_page = gen6_ggtt_insert_page; - ggtt->vm.insert_entries = gen6_ggtt_insert_entries; - ggtt->vm.cleanup = gen6_gmch_remove; - - ggtt->invalidate = gen6_ggtt_invalidate; - - if (HAS_EDRAM(dev_priv)) - ggtt->vm.pte_encode = iris_pte_encode; - else if (IS_HASWELL(dev_priv)) - ggtt->vm.pte_encode = hsw_pte_encode; - else if (IS_VALLEYVIEW(dev_priv)) - ggtt->vm.pte_encode = byt_pte_encode; - else if (INTEL_GEN(dev_priv) >= 7) - ggtt->vm.pte_encode = ivb_pte_encode; - else - ggtt->vm.pte_encode = snb_pte_encode; - - ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; - ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - ggtt->vm.vma_ops.set_pages = ggtt_set_pages; - ggtt->vm.vma_ops.clear_pages = clear_pages; - - return ggtt_probe_common(ggtt, size); -} - -static void i915_gmch_remove(struct i915_address_space *vm) -{ - intel_gmch_remove(); -} - -static int i915_gmch_probe(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *dev_priv = ggtt->vm.i915; - phys_addr_t gmadr_base; - int ret; - - ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); - if (!ret) { - DRM_ERROR("failed to set up gmch\n"); - return -EIO; - } - - intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); - - ggtt->gmadr = - (struct resource) DEFINE_RES_MEM(gmadr_base, - ggtt->mappable_end); - - ggtt->do_idle_maps = 
needs_idle_maps(dev_priv); - ggtt->vm.insert_page = i915_ggtt_insert_page; - ggtt->vm.insert_entries = i915_ggtt_insert_entries; - ggtt->vm.clear_range = i915_ggtt_clear_range; - ggtt->vm.cleanup = i915_gmch_remove; - - ggtt->invalidate = gmch_ggtt_invalidate; - - ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; - ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - ggtt->vm.vma_ops.set_pages = ggtt_set_pages; - ggtt->vm.vma_ops.clear_pages = clear_pages; - - if (unlikely(ggtt->do_idle_maps)) - dev_notice(dev_priv->drm.dev, - "Applying Ironlake quirks for intel_iommu\n"); - - return 0; -} - -static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - int ret; - - ggtt->vm.gt = gt; - ggtt->vm.i915 = i915; - ggtt->vm.dma = &i915->drm.pdev->dev; - - if (INTEL_GEN(i915) <= 5) - ret = i915_gmch_probe(ggtt); - else if (INTEL_GEN(i915) < 8) - ret = gen6_gmch_probe(ggtt); - else - ret = gen8_gmch_probe(ggtt); - if (ret) - return ret; - - if ((ggtt->vm.total - 1) >> 32) { - DRM_ERROR("We never expected a Global GTT with more than 32bits" - " of address space! Found %lldM!\n", - ggtt->vm.total >> 20); - ggtt->vm.total = 1ULL << 32; - ggtt->mappable_end = - min_t(u64, ggtt->mappable_end, ggtt->vm.total); - } - - if (ggtt->mappable_end > ggtt->vm.total) { - DRM_ERROR("mappable aperture extends past end of GGTT," - " aperture=%pa, total=%llx\n", - &ggtt->mappable_end, ggtt->vm.total); - ggtt->mappable_end = ggtt->vm.total; - } - - /* GMADR is the PCI mmio aperture into the global GTT. */ - DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20); - DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); - DRM_DEBUG_DRIVER("DSM size = %lluM\n", - (u64)resource_size(&intel_graphics_stolen_res) >> 20); - - return 0; -} - -/** - * i915_ggtt_probe_hw - Probe GGTT hardware location - * @i915: i915 device - */ -int i915_ggtt_probe_hw(struct drm_i915_private *i915) -{ - int ret; - - ret = ggtt_probe_hw(&i915->ggtt, &i915->gt); - if (ret) - return ret; - - if (intel_vtd_active()) - dev_info(i915->drm.dev, "VT-d active for gfx access\n"); - - return 0; -} - -static int ggtt_init_hw(struct i915_ggtt *ggtt) -{ - struct drm_i915_private *i915 = ggtt->vm.i915; - - i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); - - ggtt->vm.is_ggtt = true; - - /* Only VLV supports read-only GGTT mappings */ - ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); - - if (!HAS_LLC(i915) && !HAS_PPGTT(i915)) - ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; - - if (ggtt->mappable_end) { - if (!io_mapping_init_wc(&ggtt->iomap, - ggtt->gmadr.start, - ggtt->mappable_end)) { - ggtt->vm.cleanup(&ggtt->vm); - return -EIO; - } - - ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, - ggtt->mappable_end); - } - - i915_ggtt_init_fences(ggtt); - - return 0; -} - -/** - * i915_ggtt_init_hw - Initialize GGTT hardware - * @dev_priv: i915 device - */ -int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) -{ - int ret; - - stash_init(&dev_priv->mm.wc_stash); - - /* Note that we use page colouring to enforce a guard page at the - * end of the address space. This is required as the CS may prefetch - * beyond the end of the batch buffer, across the page boundary, - * and beyond the end of the GTT if we do not provide a guard. 
- */ - ret = ggtt_init_hw(&dev_priv->ggtt); - if (ret) - return ret; - - return 0; -} - -int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) -{ - if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt()) - return -EIO; - - return 0; -} - -void i915_ggtt_enable_guc(struct i915_ggtt *ggtt) -{ - GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate); - - ggtt->invalidate = guc_ggtt_invalidate; - - ggtt->invalidate(ggtt); -} - -void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) -{ - /* XXX Temporary pardon for error unload */ - if (ggtt->invalidate == gen8_ggtt_invalidate) - return; - - /* We should only be called after i915_ggtt_enable_guc() */ - GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate); - - ggtt->invalidate = gen8_ggtt_invalidate; - - ggtt->invalidate(ggtt); -} - -static void ggtt_restore_mappings(struct i915_ggtt *ggtt) -{ - struct i915_vma *vma, *vn; - bool flush = false; - int open; - - intel_gt_check_and_clear_faults(ggtt->vm.gt); - - mutex_lock(&ggtt->vm.mutex); - - /* First fill our portion of the GTT with scratch pages */ - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - - /* Skip rewriting PTE on VMA unbind. */ - open = atomic_xchg(&ggtt->vm.open, 0); - - /* clflush objects bound into the GGTT and rebind them. */ - list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { - struct drm_i915_gem_object *obj = vma->obj; - - if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) - continue; - - if (!__i915_vma_unbind(vma)) - continue; - - clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma)); - WARN_ON(i915_vma_bind(vma, - obj ? obj->cache_level : 0, - PIN_GLOBAL, NULL)); - if (obj) { /* only used during resume => exclusive access */ - flush |= fetch_and_zero(&obj->write_domain); - obj->read_domains |= I915_GEM_DOMAIN_GTT; - } - } - - atomic_set(&ggtt->vm.open, open); - ggtt->invalidate(ggtt); - - mutex_unlock(&ggtt->vm.mutex); - - if (flush) - wbinvd_on_all_cpus(); -} - -void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915) -{ - struct i915_ggtt *ggtt = &i915->ggtt; - - ggtt_restore_mappings(ggtt); - - if (INTEL_GEN(i915) >= 8) - setup_private_pat(ggtt->vm.gt->uncore); -} - -static struct scatterlist * -rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, - unsigned int width, unsigned int height, - unsigned int stride, - struct sg_table *st, struct scatterlist *sg) -{ - unsigned int column, row; - unsigned int src_idx; - - for (column = 0; column < width; column++) { - src_idx = stride * (height - 1) + column + offset; - for (row = 0; row < height; row++) { - st->nents++; - /* We don't need the pages, but need to initialize - * the entries so the sg list can be happily traversed. - * The only thing we need are DMA addresses. - */ - sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); - sg_dma_address(sg) = - i915_gem_object_get_dma_address(obj, src_idx); - sg_dma_len(sg) = I915_GTT_PAGE_SIZE; - sg = sg_next(sg); - src_idx -= stride; - } - } - - return sg; -} - -static noinline struct sg_table * -intel_rotate_pages(struct intel_rotation_info *rot_info, - struct drm_i915_gem_object *obj) -{ - unsigned int size = intel_rotation_info_size(rot_info); - struct sg_table *st; - struct scatterlist *sg; - int ret = -ENOMEM; - int i; - - /* Allocate target SG list. 
*/ - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - goto err_st_alloc; - - ret = sg_alloc_table(st, size, GFP_KERNEL); - if (ret) - goto err_sg_alloc; - - st->nents = 0; - sg = st->sgl; - - for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { - sg = rotate_pages(obj, rot_info->plane[i].offset, - rot_info->plane[i].width, rot_info->plane[i].height, - rot_info->plane[i].stride, st, sg); - } - - return st; - -err_sg_alloc: - kfree(st); -err_st_alloc: - - DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", - obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); - - return ERR_PTR(ret); -} - -static struct scatterlist * -remap_pages(struct drm_i915_gem_object *obj, unsigned int offset, - unsigned int width, unsigned int height, - unsigned int stride, - struct sg_table *st, struct scatterlist *sg) -{ - unsigned int row; - - for (row = 0; row < height; row++) { - unsigned int left = width * I915_GTT_PAGE_SIZE; - - while (left) { - dma_addr_t addr; - unsigned int length; - - /* We don't need the pages, but need to initialize - * the entries so the sg list can be happily traversed. - * The only thing we need are DMA addresses. - */ - - addr = i915_gem_object_get_dma_address_len(obj, offset, &length); - - length = min(left, length); - - st->nents++; - - sg_set_page(sg, NULL, length, 0); - sg_dma_address(sg) = addr; - sg_dma_len(sg) = length; - sg = sg_next(sg); - - offset += length / I915_GTT_PAGE_SIZE; - left -= length; - } - - offset += stride - width; - } - - return sg; -} - -static noinline struct sg_table * -intel_remap_pages(struct intel_remapped_info *rem_info, - struct drm_i915_gem_object *obj) -{ - unsigned int size = intel_remapped_info_size(rem_info); - struct sg_table *st; - struct scatterlist *sg; - int ret = -ENOMEM; - int i; - - /* Allocate target SG list. */ - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - goto err_st_alloc; - - ret = sg_alloc_table(st, size, GFP_KERNEL); - if (ret) - goto err_sg_alloc; - - st->nents = 0; - sg = st->sgl; - - for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { - sg = remap_pages(obj, rem_info->plane[i].offset, - rem_info->plane[i].width, rem_info->plane[i].height, - rem_info->plane[i].stride, st, sg); - } - - i915_sg_trim(st); - - return st; - -err_sg_alloc: - kfree(st); -err_st_alloc: - - DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n", - obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size); - - return ERR_PTR(ret); -} - -static noinline struct sg_table * -intel_partial_pages(const struct i915_ggtt_view *view, - struct drm_i915_gem_object *obj) -{ - struct sg_table *st; - struct scatterlist *sg, *iter; - unsigned int count = view->partial.size; - unsigned int offset; - int ret = -ENOMEM; - - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - goto err_st_alloc; - - ret = sg_alloc_table(st, count, GFP_KERNEL); - if (ret) - goto err_sg_alloc; - - iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset); - GEM_BUG_ON(!iter); - - sg = st->sgl; - st->nents = 0; - do { - unsigned int len; - - len = min(iter->length - (offset << PAGE_SHIFT), - count << PAGE_SHIFT); - sg_set_page(sg, NULL, len, 0); - sg_dma_address(sg) = - sg_dma_address(iter) + (offset << PAGE_SHIFT); - sg_dma_len(sg) = len; - - st->nents++; - count -= len >> PAGE_SHIFT; - if (count == 0) { - sg_mark_end(sg); - i915_sg_trim(st); /* Drop any unused tail entries. 
*/ - - return st; - } - - sg = __sg_next(sg); - iter = __sg_next(iter); - offset = 0; - } while (1); - -err_sg_alloc: - kfree(st); -err_st_alloc: - return ERR_PTR(ret); -} - -static int -i915_get_ggtt_vma_pages(struct i915_vma *vma) -{ - int ret; - - /* The vma->pages are only valid within the lifespan of the borrowed - * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so - * must be the vma->pages. A simple rule is that vma->pages must only - * be accessed when the obj->mm.pages are pinned. - */ - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); - - switch (vma->ggtt_view.type) { - default: - GEM_BUG_ON(vma->ggtt_view.type); - /* fall through */ - case I915_GGTT_VIEW_NORMAL: - vma->pages = vma->obj->mm.pages; - return 0; - - case I915_GGTT_VIEW_ROTATED: - vma->pages = - intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); - break; - - case I915_GGTT_VIEW_REMAPPED: - vma->pages = - intel_remap_pages(&vma->ggtt_view.remapped, vma->obj); - break; - - case I915_GGTT_VIEW_PARTIAL: - vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); - break; - } - - ret = 0; - if (IS_ERR(vma->pages)) { - ret = PTR_ERR(vma->pages); - vma->pages = NULL; - DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", - vma->ggtt_view.type, ret); - } - return ret; -} - /** * i915_gem_gtt_reserve - reserve a node in an address_space (GTT) * @vm: the &struct i915_address_space @@ -3848,6 +293,5 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/mock_gtt.c" #include "selftests/i915_gem_gtt.c" #endif diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 31a4a96ddd0d..f6226df9f972 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -1,639 +1,21 @@ +/* SPDX-License-Identifier: MIT */ /* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - * Please try to maintain the following order within this file unless it makes - * sense to do otherwise. From top to bottom: - * 1. typedefs - * 2. #defines, and macros - * 3. structure definitions - * 4. function prototypes - * - * Within each section, please try to order by generation in ascending order, - * from top to bottom (ie. gen6 on the top, gen8 on the bottom). 
+ * Copyright © 2020 Intel Corporation */ #ifndef __I915_GEM_GTT_H__ #define __I915_GEM_GTT_H__ #include <linux/io-mapping.h> -#include <linux/kref.h> -#include <linux/mm.h> -#include <linux/pagevec.h> -#include <linux/workqueue.h> +#include <linux/types.h> #include <drm/drm_mm.h> -#include "gt/intel_reset.h" -#include "i915_gem_fence_reg.h" -#include "i915_request.h" +#include "gt/intel_gtt.h" #include "i915_scatterlist.h" -#include "i915_selftest.h" -#include "gt/intel_timeline.h" -#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12) -#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16) -#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21) - -#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K -#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M - -#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE - -#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE - -#define I915_FENCE_REG_NONE -1 -#define I915_MAX_NUM_FENCES 32 -/* 32 fences + sign bit for FENCE_REG_NONE */ -#define I915_MAX_NUM_FENCE_BITS 6 - -struct drm_i915_file_private; struct drm_i915_gem_object; -struct i915_vma; -struct intel_gt; - -typedef u32 gen6_pte_t; -typedef u64 gen8_pte_t; - -#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT) - -/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ -#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) -#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) -#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) -#define GEN6_PTE_CACHE_LLC (2 << 1) -#define GEN6_PTE_UNCACHED (1 << 1) -#define GEN6_PTE_VALID (1 << 0) - -#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len))) -#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1) -#define I915_PDES 512 -#define I915_PDE_MASK (I915_PDES - 1) -#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT)) - -#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t)) -#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE) -#define GEN6_PD_ALIGN (PAGE_SIZE * 16) -#define GEN6_PDE_SHIFT 22 -#define GEN6_PDE_VALID (1 << 0) - -#define GEN7_PTE_CACHE_L3_LLC (3 << 1) - -#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) -#define BYT_PTE_WRITEABLE (1 << 1) - -/* Cacheability Control is a 4-bit value. The low three bits are stored in bits - * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE. - */ -#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \ - (((bits) & 0x8) << (11 - 3))) -#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) -#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) -#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8) -#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) -#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7) -#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) -#define HSW_PTE_UNCACHED (0) -#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0)) -#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr) - -/* - * GEN8 32b style address is defined as a 3 level page table: - * 31:30 | 29:21 | 20:12 | 11:0 - * PDPE | PDE | PTE | offset - * The difference as compared to normal x86 3 level page table is the PDPEs are - * programmed via register. 
- * - * GEN8 48b style address is defined as a 4 level page table: - * 47:39 | 38:30 | 29:21 | 20:12 | 11:0 - * PML4E | PDPE | PDE | PTE | offset - */ -#define GEN8_3LVL_PDPES 4 - -#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD) -#define PPAT_CACHED_PDE 0 /* WB LLC */ -#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */ -#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */ - -#define CHV_PPAT_SNOOP (1<<6) -#define GEN8_PPAT_AGE(x) ((x)<<4) -#define GEN8_PPAT_LLCeLLC (3<<2) -#define GEN8_PPAT_LLCELLC (2<<2) -#define GEN8_PPAT_LLC (1<<2) -#define GEN8_PPAT_WB (3<<0) -#define GEN8_PPAT_WT (2<<0) -#define GEN8_PPAT_WC (1<<0) -#define GEN8_PPAT_UC (0<<0) -#define GEN8_PPAT_ELLC_OVERRIDE (0<<2) -#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8)) - -#define GEN8_PDE_IPS_64K BIT(11) -#define GEN8_PDE_PS_2M BIT(7) - -#define for_each_sgt_daddr(__dp, __iter, __sgt) \ - __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE) - -struct intel_remapped_plane_info { - /* in gtt pages */ - unsigned int width, height, stride, offset; -} __packed; - -struct intel_remapped_info { - struct intel_remapped_plane_info plane[2]; - unsigned int unused_mbz; -} __packed; - -struct intel_rotation_info { - struct intel_remapped_plane_info plane[2]; -} __packed; - -struct intel_partial_info { - u64 offset; - unsigned int size; -} __packed; - -enum i915_ggtt_view_type { - I915_GGTT_VIEW_NORMAL = 0, - I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info), - I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info), - I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info), -}; - -static inline void assert_i915_gem_gtt_types(void) -{ - BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int)); - BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int)); - BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int)); - - /* Check that rotation/remapped shares offsets for simplicity */ - BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) != - offsetof(struct intel_rotation_info, plane[0])); - BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) != - offsetofend(struct intel_rotation_info, plane[1])); - - /* As we encode the size of each branch inside the union into its type, - * we have to be careful that each branch has a unique size. - */ - switch ((enum i915_ggtt_view_type)0) { - case I915_GGTT_VIEW_NORMAL: - case I915_GGTT_VIEW_PARTIAL: - case I915_GGTT_VIEW_ROTATED: - case I915_GGTT_VIEW_REMAPPED: - /* gcc complains if these are identical cases */ - break; - } -} - -struct i915_ggtt_view { - enum i915_ggtt_view_type type; - union { - /* Members need to contain no holes/padding */ - struct intel_partial_info partial; - struct intel_rotation_info rotated; - struct intel_remapped_info remapped; - }; -}; - -enum i915_cache_level; - -struct i915_vma; - -struct i915_page_dma { - struct page *page; - union { - dma_addr_t daddr; - - /* For gen6/gen7 only. 
This is the offset in the GGTT - * where the page directory entries for PPGTT begin - */ - u32 ggtt_offset; - }; -}; - -struct i915_page_scratch { - struct i915_page_dma base; - u64 encode; -}; - -struct i915_page_table { - struct i915_page_dma base; - atomic_t used; -}; - -struct i915_page_directory { - struct i915_page_table pt; - spinlock_t lock; - void *entry[512]; -}; - -#define __px_choose_expr(x, type, expr, other) \ - __builtin_choose_expr( \ - __builtin_types_compatible_p(typeof(x), type) || \ - __builtin_types_compatible_p(typeof(x), const type), \ - ({ type __x = (type)(x); expr; }), \ - other) - -#define px_base(px) \ - __px_choose_expr(px, struct i915_page_dma *, __x, \ - __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \ - __px_choose_expr(px, struct i915_page_table *, &__x->base, \ - __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \ - (void)0)))) -#define px_dma(px) (px_base(px)->daddr) - -#define px_pt(px) \ - __px_choose_expr(px, struct i915_page_table *, __x, \ - __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \ - (void)0)) -#define px_used(px) (&px_pt(px)->used) - -struct i915_vma_ops { - /* Map an object into an address space with the given cache flags. */ - int (*bind_vma)(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags); - /* - * Unmap an object from an address space. This usually consists of - * setting the valid PTE entries to a reserved scratch page. - */ - void (*unbind_vma)(struct i915_vma *vma); - - int (*set_pages)(struct i915_vma *vma); - void (*clear_pages)(struct i915_vma *vma); -}; - -struct pagestash { - spinlock_t lock; - struct pagevec pvec; -}; - -struct i915_address_space { - struct kref ref; - struct rcu_work rcu; - - struct drm_mm mm; - struct intel_gt *gt; - struct drm_i915_private *i915; - struct device *dma; - /* Every address space belongs to a struct file - except for the global - * GTT that is owned by the driver (and so @file is set to NULL). In - * principle, no information should leak from one context to another - * (or between files/processes etc) unless explicitly shared by the - * owner. Tracking the owner is important in order to free up per-file - * objects along with the file, to aide resource tracking, and to - * assign blame. - */ - struct drm_i915_file_private *file; - u64 total; /* size addr space maps (ex. 2GB for ggtt) */ - u64 reserved; /* size addr space reserved */ - - unsigned int bind_async_flags; - - /* - * Each active user context has its own address space (in full-ppgtt). - * Since the vm may be shared between multiple contexts, we count how - * many contexts keep us "open". Once open hits zero, we are closed - * and do not allow any new attachments, and proceed to shutdown our - * vma and page directories. - */ - atomic_t open; - - struct mutex mutex; /* protects vma and our lists */ -#define VM_CLASS_GGTT 0 -#define VM_CLASS_PPGTT 1 - - struct i915_page_scratch scratch[4]; - unsigned int scratch_order; - unsigned int top; - - /** - * List of vma currently bound. 
- */ - struct list_head bound_list; - - struct pagestash free_pages; - - /* Global GTT */ - bool is_ggtt:1; - - /* Some systems require uncached updates of the page directories */ - bool pt_kmap_wc:1; - - /* Some systems support read-only mappings for GGTT and/or PPGTT */ - bool has_read_only:1; - - u64 (*pte_encode)(dma_addr_t addr, - enum i915_cache_level level, - u32 flags); /* Create a valid PTE */ -#define PTE_READ_ONLY (1<<0) - - int (*allocate_va_range)(struct i915_address_space *vm, - u64 start, u64 length); - void (*clear_range)(struct i915_address_space *vm, - u64 start, u64 length); - void (*insert_page)(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level cache_level, - u32 flags); - void (*insert_entries)(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags); - void (*cleanup)(struct i915_address_space *vm); - - struct i915_vma_ops vma_ops; - - I915_SELFTEST_DECLARE(struct fault_attr fault_attr); - I915_SELFTEST_DECLARE(bool scrub_64K); -}; - -#define i915_is_ggtt(vm) ((vm)->is_ggtt) - -static inline bool -i915_vm_is_4lvl(const struct i915_address_space *vm) -{ - return (vm->total - 1) >> 32; -} - -static inline bool -i915_vm_has_scratch_64K(struct i915_address_space *vm) -{ - return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K); -} - -static inline bool -i915_vm_has_cache_coloring(struct i915_address_space *vm) -{ - return i915_is_ggtt(vm) && vm->mm.color_adjust; -} - -/* The Graphics Translation Table is the way in which GEN hardware translates a - * Graphics Virtual Address into a Physical Address. In addition to the normal - * collateral associated with any va->pa translations GEN hardware also has a - * portion of the GTT which can be mapped by the CPU and remain both coherent - * and correct (in cases like swizzling). That region is referred to as GMADR in - * the spec. - */ -struct i915_ggtt { - struct i915_address_space vm; - - struct io_mapping iomap; /* Mapping to our CPU mappable region */ - struct resource gmadr; /* GMADR resource */ - resource_size_t mappable_end; /* End offset that we can CPU map */ - - /** "Graphics Stolen Memory" holds the global PTEs */ - void __iomem *gsm; - void (*invalidate)(struct i915_ggtt *ggtt); - - /** PPGTT used for aliasing the PPGTT with the GTT */ - struct i915_ppgtt *alias; - - bool do_idle_maps; - - int mtrr; - - /** Bit 6 swizzling required for X tiling */ - u32 bit_6_swizzle_x; - /** Bit 6 swizzling required for Y tiling */ - u32 bit_6_swizzle_y; - - u32 pin_bias; - - unsigned int num_fences; - struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; - struct list_head fence_list; - - /** List of all objects in gtt_space, currently mmaped by userspace. - * All objects within this list must also be on bound_list. 
- */ - struct list_head userfault_list; - - /* Manual runtime pm autosuspend delay for user GGTT mmaps */ - struct intel_wakeref_auto userfault_wakeref; - - struct drm_mm_node error_capture; - struct drm_mm_node uc_fw; -}; - -struct i915_ppgtt { - struct i915_address_space vm; - - struct i915_page_directory *pd; -}; - -struct gen6_ppgtt { - struct i915_ppgtt base; - - struct mutex flush; - struct i915_vma *vma; - gen6_pte_t __iomem *pd_addr; - - atomic_t pin_count; - struct mutex pin_mutex; - - bool scan_for_unused_pt; -}; - -#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base) - -static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base) -{ - BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base)); - return __to_gen6_ppgtt(base); -} - -/* - * gen6_for_each_pde() iterates over every pde from start until start+length. - * If start and start+length are not perfectly divisible, the macro will round - * down and up as needed. Start=0 and length=2G effectively iterates over - * every PDE in the system. The macro modifies ALL its parameters except 'pd', - * so each of the other parameters should preferably be a simple variable, or - * at most an lvalue with no side-effects! - */ -#define gen6_for_each_pde(pt, pd, start, length, iter) \ - for (iter = gen6_pde_index(start); \ - length > 0 && iter < I915_PDES && \ - (pt = i915_pt_entry(pd, iter), true); \ - ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \ - temp = min(temp - start, length); \ - start += temp, length -= temp; }), ++iter) - -#define gen6_for_all_pdes(pt, pd, iter) \ - for (iter = 0; \ - iter < I915_PDES && \ - (pt = i915_pt_entry(pd, iter), true); \ - ++iter) - -static inline u32 i915_pte_index(u64 address, unsigned int pde_shift) -{ - const u32 mask = NUM_PTE(pde_shift) - 1; - - return (address >> PAGE_SHIFT) & mask; -} - -/* Helper to counts the number of PTEs within the given length. This count - * does not cross a page table boundary, so the max value would be - * GEN6_PTES for GEN6, and GEN8_PTES for GEN8. 
-*/ -static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift) -{ - const u64 mask = ~((1ULL << pde_shift) - 1); - u64 end; - - GEM_BUG_ON(length == 0); - GEM_BUG_ON(offset_in_page(addr | length)); - - end = addr + length; - - if ((addr & mask) != (end & mask)) - return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift); - - return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift); -} - -static inline u32 i915_pde_index(u64 addr, u32 shift) -{ - return (addr >> shift) & I915_PDE_MASK; -} - -static inline u32 gen6_pte_index(u32 addr) -{ - return i915_pte_index(addr, GEN6_PDE_SHIFT); -} - -static inline u32 gen6_pte_count(u32 addr, u32 length) -{ - return i915_pte_count(addr, length, GEN6_PDE_SHIFT); -} - -static inline u32 gen6_pde_index(u32 addr) -{ - return i915_pde_index(addr, GEN6_PDE_SHIFT); -} - -static inline struct i915_page_table * -i915_pt_entry(const struct i915_page_directory * const pd, - const unsigned short n) -{ - return pd->entry[n]; -} - -static inline struct i915_page_directory * -i915_pd_entry(const struct i915_page_directory * const pdp, - const unsigned short n) -{ - return pdp->entry[n]; -} - -static inline dma_addr_t -i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) -{ - struct i915_page_dma *pt = ppgtt->pd->entry[n]; - - return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top])); -} - -static inline struct i915_ggtt * -i915_vm_to_ggtt(struct i915_address_space *vm) -{ - BUILD_BUG_ON(offsetof(struct i915_ggtt, vm)); - GEM_BUG_ON(!i915_is_ggtt(vm)); - return container_of(vm, struct i915_ggtt, vm); -} - -static inline struct i915_ppgtt * -i915_vm_to_ppgtt(struct i915_address_space *vm) -{ - BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm)); - GEM_BUG_ON(i915_is_ggtt(vm)); - return container_of(vm, struct i915_ppgtt, vm); -} - -int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv); -int i915_ggtt_init_hw(struct drm_i915_private *dev_priv); -int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv); -void i915_ggtt_enable_guc(struct i915_ggtt *ggtt); -void i915_ggtt_disable_guc(struct i915_ggtt *ggtt); -int i915_init_ggtt(struct drm_i915_private *dev_priv); -void i915_ggtt_driver_release(struct drm_i915_private *dev_priv); - -static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt) -{ - return ggtt->mappable_end > 0; -} - -int i915_ppgtt_init_hw(struct intel_gt *gt); - -struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv); - -static inline struct i915_address_space * -i915_vm_get(struct i915_address_space *vm) -{ - kref_get(&vm->ref); - return vm; -} - -void i915_vm_release(struct kref *kref); - -static inline void i915_vm_put(struct i915_address_space *vm) -{ - kref_put(&vm->ref, i915_vm_release); -} - -static inline struct i915_address_space * -i915_vm_open(struct i915_address_space *vm) -{ - GEM_BUG_ON(!atomic_read(&vm->open)); - atomic_inc(&vm->open); - return i915_vm_get(vm); -} - -static inline bool -i915_vm_tryopen(struct i915_address_space *vm) -{ - if (atomic_add_unless(&vm->open, 1, 0)) - return i915_vm_get(vm); - - return false; -} - -void __i915_vm_close(struct i915_address_space *vm); - -static inline void -i915_vm_close(struct i915_address_space *vm) -{ - GEM_BUG_ON(!atomic_read(&vm->open)); - if (atomic_dec_and_test(&vm->open)) - __i915_vm_close(vm); - - i915_vm_put(vm); -} - -int gen6_ppgtt_pin(struct i915_ppgtt *base); -void gen6_ppgtt_unpin(struct i915_ppgtt *base); -void gen6_ppgtt_unpin_all(struct i915_ppgtt *base); - -void 
i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv); -void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv); +struct i915_address_space; int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, struct sg_table *pages); @@ -664,6 +46,6 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, #define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */ #define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */ -#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) +#define PIN_OFFSET_MASK I915_GTT_PAGE_MASK #endif diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index fda0977d2059..4c1836f0a991 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -41,6 +41,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_lmem.h" +#include "gt/intel_gt_pm.h" #include "i915_drv.h" #include "i915_gpu_error.h" @@ -232,14 +233,13 @@ static void pool_free(struct pagevec *pv, void *addr) #ifdef CONFIG_DRM_I915_COMPRESS_ERROR -struct compress { +struct i915_vma_compress { struct pagevec pool; struct z_stream_s zstream; void *tmp; - bool wc; }; -static bool compress_init(struct compress *c) +static bool compress_init(struct i915_vma_compress *c) { struct z_stream_s *zstream = &c->zstream; @@ -261,7 +261,7 @@ static bool compress_init(struct compress *c) return true; } -static bool compress_start(struct compress *c) +static bool compress_start(struct i915_vma_compress *c) { struct z_stream_s *zstream = &c->zstream; void *workspace = zstream->workspace; @@ -272,8 +272,8 @@ static bool compress_start(struct compress *c) return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK; } -static void *compress_next_page(struct compress *c, - struct drm_i915_error_object *dst) +static void *compress_next_page(struct i915_vma_compress *c, + struct i915_vma_coredump *dst) { void *page; @@ -287,14 +287,15 @@ static void *compress_next_page(struct compress *c, return dst->pages[dst->page_count++] = page; } -static int compress_page(struct compress *c, +static int compress_page(struct i915_vma_compress *c, void *src, - struct drm_i915_error_object *dst) + struct i915_vma_coredump *dst, + bool wc) { struct z_stream_s *zstream = &c->zstream; zstream->next_in = src; - if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) + if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) zstream->next_in = c->tmp; zstream->avail_in = PAGE_SIZE; @@ -318,8 +319,8 @@ static int compress_page(struct compress *c, return 0; } -static int compress_flush(struct compress *c, - struct drm_i915_error_object *dst) +static int compress_flush(struct i915_vma_compress *c, + struct i915_vma_coredump *dst) { struct z_stream_s *zstream = &c->zstream; @@ -347,12 +348,12 @@ end: return 0; } -static void compress_finish(struct compress *c) +static void compress_finish(struct i915_vma_compress *c) { zlib_deflateEnd(&c->zstream); } -static void compress_fini(struct compress *c) +static void compress_fini(struct i915_vma_compress *c) { kfree(c->zstream.workspace); if (c->tmp) @@ -367,24 +368,24 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) #else -struct compress { +struct i915_vma_compress { struct pagevec pool; - bool wc; }; -static bool compress_init(struct compress *c) +static bool compress_init(struct i915_vma_compress *c) { return pool_init(&c->pool, ALLOW_FAIL) == 0; } -static bool compress_start(struct compress *c) +static bool compress_start(struct i915_vma_compress *c) { 
return true; } -static int compress_page(struct compress *c, +static int compress_page(struct i915_vma_compress *c, void *src, - struct drm_i915_error_object *dst) + struct i915_vma_coredump *dst, + bool wc) { void *ptr; @@ -392,24 +393,24 @@ static int compress_page(struct compress *c, if (!ptr) return -ENOMEM; - if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) + if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) memcpy(ptr, src, PAGE_SIZE); dst->pages[dst->page_count++] = ptr; return 0; } -static int compress_flush(struct compress *c, - struct drm_i915_error_object *dst) +static int compress_flush(struct i915_vma_compress *c, + struct i915_vma_coredump *dst) { return 0; } -static void compress_finish(struct compress *c) +static void compress_finish(struct i915_vma_compress *c) { } -static void compress_fini(struct compress *c) +static void compress_fini(struct i915_vma_compress *c) { pool_fini(&c->pool); } @@ -422,7 +423,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) #endif static void error_print_instdone(struct drm_i915_error_state_buf *m, - const struct drm_i915_error_engine *ee) + const struct intel_engine_coredump *ee) { const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu; int slice; @@ -453,40 +454,56 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, static void error_print_request(struct drm_i915_error_state_buf *m, const char *prefix, - const struct drm_i915_error_request *erq, - const unsigned long epoch) + const struct i915_request_coredump *erq) { if (!erq->seqno) return; - err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n", + err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, start %08x, head %08x, tail %08x\n", prefix, erq->pid, erq->context, erq->seqno, test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "", test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags) ? 
"+" : "", erq->sched_attr.priority, - jiffies_to_msecs(erq->jiffies - epoch), erq->start, erq->head, erq->tail); } static void error_print_context(struct drm_i915_error_state_buf *m, const char *header, - const struct drm_i915_error_context *ctx) + const struct i915_gem_context_coredump *ctx) { err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n", header, ctx->comm, ctx->pid, ctx->sched_attr.priority, ctx->guilty, ctx->active); } +static struct i915_vma_coredump * +__find_vma(struct i915_vma_coredump *vma, const char *name) +{ + while (vma) { + if (strcmp(vma->name, name) == 0) + return vma; + vma = vma->next; + } + + return NULL; +} + +static struct i915_vma_coredump * +find_batch(const struct intel_engine_coredump *ee) +{ + return __find_vma(ee->vma, "batch"); +} + static void error_print_engine(struct drm_i915_error_state_buf *m, - const struct drm_i915_error_engine *ee, - const unsigned long epoch) + const struct intel_engine_coredump *ee) { + struct i915_vma_coredump *batch; int n; err_printf(m, "%s command stream:\n", ee->engine->name); - err_printf(m, " IDLE?: %s\n", yesno(ee->idle)); + err_printf(m, " CCID: 0x%08x\n", ee->ccid); err_printf(m, " START: 0x%08x\n", ee->start); err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head); err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n", @@ -501,9 +518,10 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, error_print_instdone(m, ee); - if (ee->batchbuffer) { - u64 start = ee->batchbuffer->gtt_offset; - u64 end = start + ee->batchbuffer->gtt_size; + batch = find_batch(ee); + if (batch) { + u64 start = batch->gtt_offset; + u64 end = start + batch->gtt_size; err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n", upper_32_bits(start), lower_32_bits(start), @@ -535,13 +553,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, ee->vm_info.pp_dir_base); } } - err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head); - err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail); err_printf(m, " engine reset count: %u\n", ee->reset_count); for (n = 0; n < ee->num_ports; n++) { err_printf(m, " ELSP[%d]:", n); - error_print_request(m, " ", &ee->execlist[n], epoch); + error_print_request(m, " ", &ee->execlist[n]); } error_print_context(m, " Active context: ", &ee->context); @@ -556,38 +572,35 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) va_end(args); } -static void print_error_obj(struct drm_i915_error_state_buf *m, +static void print_error_vma(struct drm_i915_error_state_buf *m, const struct intel_engine_cs *engine, - const char *name, - const struct drm_i915_error_object *obj) + const struct i915_vma_coredump *vma) { char out[ASCII85_BUFSZ]; int page; - if (!obj) + if (!vma) return; - if (name) { - err_printf(m, "%s --- %s = 0x%08x %08x\n", - engine ? engine->name : "global", name, - upper_32_bits(obj->gtt_offset), - lower_32_bits(obj->gtt_offset)); - } + err_printf(m, "%s --- %s = 0x%08x %08x\n", + engine ? 
engine->name : "global", vma->name, + upper_32_bits(vma->gtt_offset), + lower_32_bits(vma->gtt_offset)); - if (obj->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K) - err_printf(m, "gtt_page_sizes = 0x%08x\n", obj->gtt_page_sizes); + if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K) + err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes); err_compression_marker(m); - for (page = 0; page < obj->page_count; page++) { + for (page = 0; page < vma->page_count; page++) { int i, len; len = PAGE_SIZE; - if (page == obj->page_count - 1) - len -= obj->unused; + if (page == vma->page_count - 1) + len -= vma->unused; len = ascii85_encode_len(len); for (i = 0; i < len; i++) - err_puts(m, ascii85_encode(obj->pages[page][i], out)); + err_puts(m, ascii85_encode(vma->pages[page][i], out)); } err_puts(m, "\n"); } @@ -626,18 +639,13 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m, } static void err_print_uc(struct drm_i915_error_state_buf *m, - const struct i915_error_uc *error_uc) + const struct intel_uc_coredump *error_uc) { struct drm_printer p = i915_error_printer(m); - const struct i915_gpu_state *error = - container_of(error_uc, typeof(*error), uc); - - if (!error->device_info.has_gt_uc) - return; intel_uc_fw_dump(&error_uc->guc_fw, &p); intel_uc_fw_dump(&error_uc->huc_fw, &p); - print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log); + print_error_vma(m, NULL, error_uc->guc_log); } static void err_free_sgl(struct scatterlist *sgl) @@ -657,12 +665,69 @@ static void err_free_sgl(struct scatterlist *sgl) } } +static void err_print_gt(struct drm_i915_error_state_buf *m, + struct intel_gt_coredump *gt) +{ + const struct intel_engine_coredump *ee; + int i; + + err_printf(m, "GT awake: %s\n", yesno(gt->awake)); + err_printf(m, "EIR: 0x%08x\n", gt->eir); + err_printf(m, "IER: 0x%08x\n", gt->ier); + for (i = 0; i < gt->ngtier; i++) + err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]); + err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er); + err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake); + err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr); + + for (i = 0; i < gt->nfence; i++) + err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]); + + if (IS_GEN_RANGE(m->i915, 6, 11)) { + err_printf(m, "ERROR: 0x%08x\n", gt->error); + err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg); + } + + if (INTEL_GEN(m->i915) >= 8) + err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", + gt->fault_data1, gt->fault_data0); + + if (IS_GEN(m->i915, 7)) + err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int); + + if (IS_GEN_RANGE(m->i915, 8, 11)) + err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache); + + if (IS_GEN(m->i915, 12)) + err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err); + + if (INTEL_GEN(m->i915) >= 12) { + int i; + + for (i = 0; i < GEN12_SFC_DONE_MAX; i++) + err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, + gt->sfc_done[i]); + + err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done); + } + + for (ee = gt->engine; ee; ee = ee->next) { + const struct i915_vma_coredump *vma; + + error_print_engine(m, ee); + for (vma = ee->vma; vma; vma = vma->next) + print_error_vma(m, ee->engine, vma); + } + + if (gt->uc) + err_print_uc(m, gt->uc); +} + static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, - struct i915_gpu_state *error) + struct i915_gpu_coredump *error) { - const struct drm_i915_error_engine *ee; + const struct intel_engine_coredump *ee; struct timespec64 ts; - int i, j; if (*error->error_msg) err_printf(m, "%s\n", error->error_msg); @@ -682,7 +747,7 @@ static void __err_print_to_sgl(struct 
drm_i915_error_state_buf *m, err_printf(m, "Capture: %lu jiffies; %d ms ago\n", error->capture, jiffies_to_msecs(jiffies - error->capture)); - for (ee = error->engine; ee; ee = ee->next) + for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next) err_printf(m, "Active process (on ring %s): %s [%d]\n", ee->engine->name, ee->context.comm, @@ -708,90 +773,11 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, CSR_VERSION_MINOR(csr->version)); } - err_printf(m, "GT awake: %s\n", yesno(error->awake)); err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock)); err_printf(m, "PM suspended: %s\n", yesno(error->suspended)); - err_printf(m, "EIR: 0x%08x\n", error->eir); - err_printf(m, "IER: 0x%08x\n", error->ier); - for (i = 0; i < error->ngtier; i++) - err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]); - err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); - err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); - err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); - err_printf(m, "CCID: 0x%08x\n", error->ccid); - - for (i = 0; i < error->nfence; i++) - err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); - - if (IS_GEN_RANGE(m->i915, 6, 11)) { - err_printf(m, "ERROR: 0x%08x\n", error->error); - err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); - } - if (INTEL_GEN(m->i915) >= 8) - err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", - error->fault_data1, error->fault_data0); - - if (IS_GEN(m->i915, 7)) - err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); - - if (IS_GEN_RANGE(m->i915, 8, 11)) - err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache); - - if (IS_GEN(m->i915, 12)) - err_printf(m, "AUX_ERR_DBG: 0x%08x\n", error->aux_err); - - if (INTEL_GEN(m->i915) >= 12) { - int i; - - for (i = 0; i < GEN12_SFC_DONE_MAX; i++) - err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, - error->sfc_done[i]); - - err_printf(m, " GAM_DONE: 0x%08x\n", error->gam_done); - } - - for (ee = error->engine; ee; ee = ee->next) - error_print_engine(m, ee, error->capture); - - for (ee = error->engine; ee; ee = ee->next) { - const struct drm_i915_error_object *obj; - - obj = ee->batchbuffer; - if (obj) { - err_puts(m, ee->engine->name); - if (ee->context.pid) - err_printf(m, " (submitted by %s [%d])", - ee->context.comm, - ee->context.pid); - err_printf(m, " --- gtt_offset = 0x%08x %08x\n", - upper_32_bits(obj->gtt_offset), - lower_32_bits(obj->gtt_offset)); - print_error_obj(m, ee->engine, NULL, obj); - } - - for (j = 0; j < ee->user_bo_count; j++) - print_error_obj(m, ee->engine, "user", ee->user_bo[j]); - - if (ee->num_requests) { - err_printf(m, "%s --- %d requests\n", - ee->engine->name, - ee->num_requests); - for (j = 0; j < ee->num_requests; j++) - error_print_request(m, " ", - &ee->requests[j], - error->capture); - } - - print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer); - print_error_obj(m, ee->engine, "HW Status", ee->hws_page); - print_error_obj(m, ee->engine, "HW context", ee->ctx); - print_error_obj(m, ee->engine, "WA context", ee->wa_ctx); - print_error_obj(m, ee->engine, - "WA batchbuffer", ee->wa_batchbuffer); - print_error_obj(m, ee->engine, - "NULL context", ee->default_state); - } + if (error->gt) + err_print_gt(m, error->gt); if (error->overlay) intel_overlay_print_error_state(m, error->overlay); @@ -802,10 +788,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, err_print_capabilities(m, &error->device_info, &error->runtime_info, &error->driver_caps); err_print_params(m, &error->params); - err_print_uc(m, &error->uc); } -static int 
err_print_to_sgl(struct i915_gpu_state *error) +static int err_print_to_sgl(struct i915_gpu_coredump *error) { struct drm_i915_error_state_buf m; @@ -842,8 +827,8 @@ static int err_print_to_sgl(struct i915_gpu_state *error) return 0; } -ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error, - char *buf, loff_t off, size_t rem) +ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error, + char *buf, loff_t off, size_t rem) { struct scatterlist *sg; size_t count; @@ -906,85 +891,88 @@ ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error, return count; } -static void i915_error_object_free(struct drm_i915_error_object *obj) +static void i915_vma_coredump_free(struct i915_vma_coredump *vma) { - int page; + while (vma) { + struct i915_vma_coredump *next = vma->next; + int page; - if (obj == NULL) - return; + for (page = 0; page < vma->page_count; page++) + free_page((unsigned long)vma->pages[page]); - for (page = 0; page < obj->page_count; page++) - free_page((unsigned long)obj->pages[page]); - - kfree(obj); + kfree(vma); + vma = next; + } } - -static void cleanup_params(struct i915_gpu_state *error) +static void cleanup_params(struct i915_gpu_coredump *error) { i915_params_free(&error->params); } -static void cleanup_uc_state(struct i915_gpu_state *error) +static void cleanup_uc(struct intel_uc_coredump *uc) { - struct i915_error_uc *error_uc = &error->uc; + kfree(uc->guc_fw.path); + kfree(uc->huc_fw.path); + i915_vma_coredump_free(uc->guc_log); - kfree(error_uc->guc_fw.path); - kfree(error_uc->huc_fw.path); - i915_error_object_free(error_uc->guc_log); + kfree(uc); } -void __i915_gpu_state_free(struct kref *error_ref) +static void cleanup_gt(struct intel_gt_coredump *gt) { - struct i915_gpu_state *error = - container_of(error_ref, typeof(*error), ref); - long i; + while (gt->engine) { + struct intel_engine_coredump *ee = gt->engine; + + gt->engine = ee->next; + + i915_vma_coredump_free(ee->vma); + kfree(ee); + } - while (error->engine) { - struct drm_i915_error_engine *ee = error->engine; + if (gt->uc) + cleanup_uc(gt->uc); - error->engine = ee->next; + kfree(gt); +} - for (i = 0; i < ee->user_bo_count; i++) - i915_error_object_free(ee->user_bo[i]); - kfree(ee->user_bo); +void __i915_gpu_coredump_free(struct kref *error_ref) +{ + struct i915_gpu_coredump *error = + container_of(error_ref, typeof(*error), ref); - i915_error_object_free(ee->batchbuffer); - i915_error_object_free(ee->wa_batchbuffer); - i915_error_object_free(ee->ringbuffer); - i915_error_object_free(ee->hws_page); - i915_error_object_free(ee->ctx); - i915_error_object_free(ee->wa_ctx); + while (error->gt) { + struct intel_gt_coredump *gt = error->gt; - kfree(ee->requests); - kfree(ee); + error->gt = gt->next; + cleanup_gt(gt); } kfree(error->overlay); kfree(error->display); cleanup_params(error); - cleanup_uc_state(error); err_free_sgl(error->sgl); kfree(error); } -static struct drm_i915_error_object * -i915_error_object_create(struct drm_i915_private *i915, - struct i915_vma *vma, - struct compress *compress) +static struct i915_vma_coredump * +i915_vma_coredump_create(const struct intel_gt *gt, + const struct i915_vma *vma, + const char *name, + struct i915_vma_compress *compress) { - struct i915_ggtt *ggtt = &i915->ggtt; + struct i915_ggtt *ggtt = gt->ggtt; const u64 slot = ggtt->error_capture.start; - struct drm_i915_error_object *dst; + struct i915_vma_coredump *dst; unsigned long num_pages; struct sgt_iter iter; int ret; might_sleep(); - if (!vma || !vma->pages) + if (!vma || 
!vma->pages || !compress) return NULL; num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; @@ -998,6 +986,9 @@ i915_error_object_create(struct drm_i915_private *i915, return NULL; } + strcpy(dst->name, name); + dst->next = NULL; + dst->gtt_offset = vma->node.start; dst->gtt_size = vma->node.size; dst->gtt_page_sizes = vma->page_sizes.gtt; @@ -1005,9 +996,6 @@ i915_error_object_create(struct drm_i915_private *i915, dst->page_count = 0; dst->unused = 0; - compress->wc = i915_gem_object_is_lmem(vma->obj) || - drm_mm_node_allocated(&ggtt->error_capture); - ret = -EINVAL; if (drm_mm_node_allocated(&ggtt->error_capture)) { void __iomem *s; @@ -1016,9 +1004,12 @@ i915_error_object_create(struct drm_i915_private *i915, for_each_sgt_daddr(dma, iter, vma->pages) { ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); + mb(); s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE); - ret = compress_page(compress, (void __force *)s, dst); + ret = compress_page(compress, + (void __force *)s, dst, + true); io_mapping_unmap(s); if (ret) break; @@ -1031,7 +1022,9 @@ i915_error_object_create(struct drm_i915_private *i915, void __iomem *s; s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE); - ret = compress_page(compress, (void __force *)s, dst); + ret = compress_page(compress, + (void __force *)s, dst, + true); io_mapping_unmap(s); if (ret) break; @@ -1045,7 +1038,7 @@ i915_error_object_create(struct drm_i915_private *i915, drm_clflush_pages(&page, 1); s = kmap(page); - ret = compress_page(compress, s, dst); + ret = compress_page(compress, s, dst, false); kunmap(page); drm_clflush_pages(&page, 1); @@ -1066,77 +1059,56 @@ i915_error_object_create(struct drm_i915_private *i915, return dst; } -/* - * Generate a semi-unique error code. The code is not meant to have meaning, The - * code's only purpose is to try to prevent false duplicated bug reports by - * grossly estimating a GPU error state. - * - * TODO Ideally, hashing the batchbuffer would be a very nice way to determine - * the hang if we could strip the GTT offset information from it. - * - * It's only a small step better than a random number in its current form. - */ -static u32 i915_error_generate_code(struct i915_gpu_state *error) -{ - const struct drm_i915_error_engine *ee = error->engine; - - /* - * IPEHR would be an ideal way to detect errors, as it's the gross - * measure of "the command that hung." However, has some very common - * synchronization commands which almost always appear in the case - * strictly a client bug. Use instdone to differentiate those some. - */ - return ee ? 
ee->ipehr ^ ee->instdone.instdone : 0; -} - -static void gem_record_fences(struct i915_gpu_state *error) +static void gt_record_fences(struct intel_gt_coredump *gt) { - struct drm_i915_private *dev_priv = error->i915; - struct intel_uncore *uncore = &dev_priv->uncore; + struct i915_ggtt *ggtt = gt->_gt->ggtt; + struct intel_uncore *uncore = gt->_gt->uncore; int i; - if (INTEL_GEN(dev_priv) >= 6) { - for (i = 0; i < dev_priv->ggtt.num_fences; i++) - error->fence[i] = + if (INTEL_GEN(uncore->i915) >= 6) { + for (i = 0; i < ggtt->num_fences; i++) + gt->fence[i] = intel_uncore_read64(uncore, FENCE_REG_GEN6_LO(i)); - } else if (INTEL_GEN(dev_priv) >= 4) { - for (i = 0; i < dev_priv->ggtt.num_fences; i++) - error->fence[i] = + } else if (INTEL_GEN(uncore->i915) >= 4) { + for (i = 0; i < ggtt->num_fences; i++) + gt->fence[i] = intel_uncore_read64(uncore, FENCE_REG_965_LO(i)); } else { - for (i = 0; i < dev_priv->ggtt.num_fences; i++) - error->fence[i] = + for (i = 0; i < ggtt->num_fences; i++) + gt->fence[i] = intel_uncore_read(uncore, FENCE_REG(i)); } - error->nfence = i; + gt->nfence = i; } -static void error_record_engine_registers(struct i915_gpu_state *error, - struct intel_engine_cs *engine, - struct drm_i915_error_engine *ee) +static void engine_record_registers(struct intel_engine_coredump *ee) { - struct drm_i915_private *dev_priv = engine->i915; + const struct intel_engine_cs *engine = ee->engine; + struct drm_i915_private *i915 = engine->i915; - if (INTEL_GEN(dev_priv) >= 6) { + if (INTEL_GEN(i915) >= 6) { ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL); - if (INTEL_GEN(dev_priv) >= 12) - ee->fault_reg = I915_READ(GEN12_RING_FAULT_REG); - else if (INTEL_GEN(dev_priv) >= 8) - ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG); + if (INTEL_GEN(i915) >= 12) + ee->fault_reg = intel_uncore_read(engine->uncore, + GEN12_RING_FAULT_REG); + else if (INTEL_GEN(i915) >= 8) + ee->fault_reg = intel_uncore_read(engine->uncore, + GEN8_RING_FAULT_REG); else ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine); } - if (INTEL_GEN(dev_priv) >= 4) { + if (INTEL_GEN(i915) >= 4) { ee->faddr = ENGINE_READ(engine, RING_DMA_FADD); ee->ipeir = ENGINE_READ(engine, RING_IPEIR); ee->ipehr = ENGINE_READ(engine, RING_IPEHR); ee->instps = ENGINE_READ(engine, RING_INSTPS); ee->bbaddr = ENGINE_READ(engine, RING_BBADDR); - if (INTEL_GEN(dev_priv) >= 8) { + ee->ccid = ENGINE_READ(engine, CCID); + if (INTEL_GEN(i915) >= 8) { ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32; ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32; } @@ -1155,13 +1127,13 @@ static void error_record_engine_registers(struct i915_gpu_state *error, ee->head = ENGINE_READ(engine, RING_HEAD); ee->tail = ENGINE_READ(engine, RING_TAIL); ee->ctl = ENGINE_READ(engine, RING_CTL); - if (INTEL_GEN(dev_priv) > 2) + if (INTEL_GEN(i915) > 2) ee->mode = ENGINE_READ(engine, RING_MI_MODE); - if (!HWS_NEEDS_PHYSICAL(dev_priv)) { + if (!HWS_NEEDS_PHYSICAL(i915)) { i915_reg_t mmio; - if (IS_GEN(dev_priv, 7)) { + if (IS_GEN(i915, 7)) { switch (engine->id) { default: MISSING_CASE(engine->id); @@ -1186,40 +1158,40 @@ static void error_record_engine_registers(struct i915_gpu_state *error, mmio = RING_HWS_PGA(engine->mmio_base); } - ee->hws = I915_READ(mmio); + ee->hws = intel_uncore_read(engine->uncore, mmio); } - ee->idle = intel_engine_is_idle(engine); - ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error, - engine); + ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine); - if (HAS_PPGTT(dev_priv)) { + if (HAS_PPGTT(i915)) { 
int i; ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7); - if (IS_GEN(dev_priv, 6)) { + if (IS_GEN(i915, 6)) { ee->vm_info.pp_dir_base = ENGINE_READ(engine, RING_PP_DIR_BASE_READ); - } else if (IS_GEN(dev_priv, 7)) { + } else if (IS_GEN(i915, 7)) { ee->vm_info.pp_dir_base = ENGINE_READ(engine, RING_PP_DIR_BASE); - } else if (INTEL_GEN(dev_priv) >= 8) { + } else if (INTEL_GEN(i915) >= 8) { u32 base = engine->mmio_base; for (i = 0; i < 4; i++) { ee->vm_info.pdp[i] = - I915_READ(GEN8_RING_PDP_UDW(base, i)); + intel_uncore_read(engine->uncore, + GEN8_RING_PDP_UDW(base, i)); ee->vm_info.pdp[i] <<= 32; ee->vm_info.pdp[i] |= - I915_READ(GEN8_RING_PDP_LDW(base, i)); + intel_uncore_read(engine->uncore, + GEN8_RING_PDP_LDW(base, i)); } } } } static void record_request(const struct i915_request *request, - struct drm_i915_error_request *erq) + struct i915_request_coredump *erq) { const struct i915_gem_context *ctx; @@ -1227,7 +1199,6 @@ static void record_request(const struct i915_request *request, erq->context = request->fence.context; erq->seqno = request->fence.seqno; erq->sched_attr = request->sched.attr; - erq->jiffies = request->emitted_jiffies; erq->start = i915_ggtt_offset(request->ring->vma); erq->head = request->head; erq->tail = request->tail; @@ -1240,59 +1211,10 @@ static void record_request(const struct i915_request *request, rcu_read_unlock(); } -static void engine_record_requests(struct intel_engine_cs *engine, - struct i915_request *first, - struct drm_i915_error_engine *ee) +static void engine_record_execlists(struct intel_engine_coredump *ee) { - struct i915_request *request; - int count; - - count = 0; - request = first; - list_for_each_entry_from(request, &engine->active.requests, sched.link) - count++; - if (!count) - return; - - ee->requests = kcalloc(count, sizeof(*ee->requests), ATOMIC_MAYFAIL); - if (!ee->requests) - return; - - ee->num_requests = count; - - count = 0; - request = first; - list_for_each_entry_from(request, - &engine->active.requests, sched.link) { - if (count >= ee->num_requests) { - /* - * If the ring request list was changed in - * between the point where the error request - * list was created and dimensioned and this - * point then just exit early to avoid crashes. - * - * We don't need to communicate that the - * request list changed state during error - * state capture and that the error state is - * slightly incorrect as a consequence since we - * are typically only interested in the request - * list state at the point of error state - * capture, not in any changes happening during - * the capture. 
- */ - break; - } - - record_request(request, &ee->requests[count++]); - } - ee->num_requests = count; -} - -static void error_record_engine_execlists(const struct intel_engine_cs *engine, - struct drm_i915_error_engine *ee) -{ - const struct intel_engine_execlists * const execlists = &engine->execlists; - struct i915_request * const *port = execlists->active; + const struct intel_engine_execlists * const el = &ee->engine->execlists; + struct i915_request * const *port = el->active; unsigned int n = 0; while (*port) @@ -1301,7 +1223,7 @@ static void error_record_engine_execlists(const struct intel_engine_cs *engine, ee->num_ports = n; } -static bool record_context(struct drm_i915_error_context *e, +static bool record_context(struct i915_gem_context_coredump *e, const struct i915_request *rq) { struct i915_gem_context *ctx; @@ -1334,23 +1256,24 @@ static bool record_context(struct drm_i915_error_context *e, return capture; } -struct capture_vma { - struct capture_vma *next; - void **slot; +struct intel_engine_capture_vma { + struct intel_engine_capture_vma *next; + struct i915_vma *vma; + char name[16]; }; -static struct capture_vma * -capture_vma(struct capture_vma *next, +static struct intel_engine_capture_vma * +capture_vma(struct intel_engine_capture_vma *next, struct i915_vma *vma, - struct drm_i915_error_object **out) + const char *name, + gfp_t gfp) { - struct capture_vma *c; + struct intel_engine_capture_vma *c; - *out = NULL; if (!vma) return next; - c = kmalloc(sizeof(*c), ATOMIC_MAYFAIL); + c = kmalloc(sizeof(*c), gfp); if (!c) return next; @@ -1359,54 +1282,31 @@ capture_vma(struct capture_vma *next, return next; } - c->slot = (void **)out; - *c->slot = i915_vma_get(vma); + strcpy(c->name, name); + c->vma = i915_vma_get(vma); c->next = next; return c; } -static struct capture_vma * -request_record_user_bo(struct i915_request *request, - struct drm_i915_error_engine *ee, - struct capture_vma *capture) +static struct intel_engine_capture_vma * +capture_user(struct intel_engine_capture_vma *capture, + const struct i915_request *rq, + gfp_t gfp) { struct i915_capture_list *c; - struct drm_i915_error_object **bo; - long count, max; - - max = 0; - for (c = request->capture_list; c; c = c->next) - max++; - if (!max) - return capture; - - bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL); - if (!bo) { - /* If we can't capture everything, try to capture something. 
*/ - max = min_t(long, max, PAGE_SIZE / sizeof(*bo)); - bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL); - } - if (!bo) - return capture; - count = 0; - for (c = request->capture_list; c; c = c->next) { - capture = capture_vma(capture, c->vma, &bo[count]); - if (++count == max) - break; - } - - ee->user_bo = bo; - ee->user_bo_count = count; + for (c = rq->capture_list; c; c = c->next) + capture = capture_vma(capture, c->vma, "user", gfp); return capture; } -static struct drm_i915_error_object * -capture_object(struct drm_i915_private *dev_priv, +static struct i915_vma_coredump * +capture_object(const struct intel_gt *gt, struct drm_i915_gem_object *obj, - struct compress *compress) + const char *name, + struct i915_vma_compress *compress) { if (obj && i915_gem_object_has_pages(obj)) { struct i915_vma fake = { @@ -1416,127 +1316,175 @@ capture_object(struct drm_i915_private *dev_priv, .obj = obj, }; - return i915_error_object_create(dev_priv, &fake, compress); + return i915_vma_coredump_create(gt, &fake, name, compress); } else { return NULL; } } -static void -gem_record_rings(struct i915_gpu_state *error, struct compress *compress) +static void add_vma(struct intel_engine_coredump *ee, + struct i915_vma_coredump *vma) { - struct drm_i915_private *i915 = error->i915; - struct intel_engine_cs *engine; - struct drm_i915_error_engine *ee; + if (vma) { + vma->next = ee->vma; + ee->vma = vma; + } +} + +struct intel_engine_coredump * +intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp) +{ + struct intel_engine_coredump *ee; - ee = kzalloc(sizeof(*ee), GFP_KERNEL); + ee = kzalloc(sizeof(*ee), gfp); if (!ee) - return; + return NULL; - for_each_uabi_engine(engine, i915) { - struct capture_vma *capture = NULL; - struct i915_request *request; - unsigned long flags; + ee->engine = engine; - /* Refill our page pool before entering atomic section */ - pool_refill(&compress->pool, ALLOW_FAIL); + engine_record_registers(ee); + engine_record_execlists(ee); - spin_lock_irqsave(&engine->active.lock, flags); - request = intel_engine_find_active_request(engine); - if (!request) { - spin_unlock_irqrestore(&engine->active.lock, flags); - continue; - } + return ee; +} - error->simulated |= record_context(&ee->context, request); +struct intel_engine_capture_vma * +intel_engine_coredump_add_request(struct intel_engine_coredump *ee, + struct i915_request *rq, + gfp_t gfp) +{ + struct intel_engine_capture_vma *vma = NULL; - /* - * We need to copy these to an anonymous buffer - * as the simplest method to avoid being overwritten - * by userspace. - */ - capture = capture_vma(capture, - request->batch, - &ee->batchbuffer); + ee->simulated |= record_context(&ee->context, rq); + if (ee->simulated) + return NULL; - if (HAS_BROKEN_CS_TLB(i915)) - capture = capture_vma(capture, - engine->gt->scratch, - &ee->wa_batchbuffer); + /* + * We need to copy these to an anonymous buffer + * as the simplest method to avoid being overwritten + * by userspace. 
+ */ + vma = capture_vma(vma, rq->batch, "batch", gfp); + vma = capture_user(vma, rq, gfp); + vma = capture_vma(vma, rq->ring->vma, "ring", gfp); + vma = capture_vma(vma, rq->context->state, "HW context", gfp); - capture = request_record_user_bo(request, ee, capture); + ee->rq_head = rq->head; + ee->rq_post = rq->postfix; + ee->rq_tail = rq->tail; - capture = capture_vma(capture, - request->context->state, - &ee->ctx); + return vma; +} - capture = capture_vma(capture, - request->ring->vma, - &ee->ringbuffer); +void +intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, + struct intel_engine_capture_vma *capture, + struct i915_vma_compress *compress) +{ + const struct intel_engine_cs *engine = ee->engine; - ee->cpu_ring_head = request->ring->head; - ee->cpu_ring_tail = request->ring->tail; + while (capture) { + struct intel_engine_capture_vma *this = capture; + struct i915_vma *vma = this->vma; - ee->rq_head = request->head; - ee->rq_post = request->postfix; - ee->rq_tail = request->tail; + add_vma(ee, + i915_vma_coredump_create(engine->gt, + vma, this->name, + compress)); - engine_record_requests(engine, request, ee); - spin_unlock_irqrestore(&engine->active.lock, flags); + i915_active_release(&vma->active); + i915_vma_put(vma); - error_record_engine_registers(error, engine, ee); - error_record_engine_execlists(engine, ee); + capture = this->next; + kfree(this); + } - while (capture) { - struct capture_vma *this = capture; - struct i915_vma *vma = *this->slot; + add_vma(ee, + i915_vma_coredump_create(engine->gt, + engine->status_page.vma, + "HW Status", + compress)); - *this->slot = - i915_error_object_create(i915, vma, compress); + add_vma(ee, + i915_vma_coredump_create(engine->gt, + engine->wa_ctx.vma, + "WA context", + compress)); - i915_active_release(&vma->active); - i915_vma_put(vma); + add_vma(ee, + capture_object(engine->gt, + engine->default_state, + "NULL context", + compress)); +} - capture = this->next; - kfree(this); - } +static struct intel_engine_coredump * +capture_engine(struct intel_engine_cs *engine, + struct i915_vma_compress *compress) +{ + struct intel_engine_capture_vma *capture = NULL; + struct intel_engine_coredump *ee; + struct i915_request *rq; + unsigned long flags; - ee->hws_page = - i915_error_object_create(i915, - engine->status_page.vma, - compress); + ee = intel_engine_coredump_alloc(engine, GFP_KERNEL); + if (!ee) + return NULL; + + spin_lock_irqsave(&engine->active.lock, flags); + rq = intel_engine_find_active_request(engine); + if (rq) + capture = intel_engine_coredump_add_request(ee, rq, + ATOMIC_MAYFAIL); + spin_unlock_irqrestore(&engine->active.lock, flags); + if (!capture) { + kfree(ee); + return NULL; + } - ee->wa_ctx = - i915_error_object_create(i915, - engine->wa_ctx.vma, - compress); + intel_engine_coredump_add_vma(ee, capture, compress); - ee->default_state = - capture_object(i915, engine->default_state, compress); + return ee; +} - ee->engine = engine; +static void +gt_record_engines(struct intel_gt_coredump *gt, + struct i915_vma_compress *compress) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; - ee->next = error->engine; - error->engine = ee; + for_each_engine(engine, gt->_gt, id) { + struct intel_engine_coredump *ee; - ee = kzalloc(sizeof(*ee), GFP_KERNEL); + /* Refill our page pool before entering atomic section */ + pool_refill(&compress->pool, ALLOW_FAIL); + + ee = capture_engine(engine, compress); if (!ee) - return; - } + continue; - kfree(ee); + gt->simulated |= ee->simulated; + if (ee->simulated) { + 
kfree(ee); + continue; + } + + ee->next = gt->engine; + gt->engine = ee; + } } -static void -capture_uc_state(struct i915_gpu_state *error, struct compress *compress) +static struct intel_uc_coredump * +gt_record_uc(struct intel_gt_coredump *gt, + struct i915_vma_compress *compress) { - struct drm_i915_private *i915 = error->i915; - struct i915_error_uc *error_uc = &error->uc; - struct intel_uc *uc = &i915->gt.uc; + const struct intel_uc *uc = >->_gt->uc; + struct intel_uc_coredump *error_uc; - /* Capturing uC state won't be useful if there is no GuC */ - if (!error->device_info.has_gt_uc) - return; + error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL); + if (!error_uc) + return NULL; memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw)); memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw)); @@ -1547,19 +1495,42 @@ capture_uc_state(struct i915_gpu_state *error, struct compress *compress) */ error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL); error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL); - error_uc->guc_log = i915_error_object_create(i915, - uc->guc.log.vma, - compress); + error_uc->guc_log = + i915_vma_coredump_create(gt->_gt, + uc->guc.log.vma, "GuC log buffer", + compress); + + return error_uc; +} + +static void gt_capture_prepare(struct intel_gt_coredump *gt) +{ + struct i915_ggtt *ggtt = gt->_gt->ggtt; + + mutex_lock(&ggtt->error_mutex); +} + +static void gt_capture_finish(struct intel_gt_coredump *gt) +{ + struct i915_ggtt *ggtt = gt->_gt->ggtt; + + if (drm_mm_node_allocated(&ggtt->error_capture)) + ggtt->vm.clear_range(&ggtt->vm, + ggtt->error_capture.start, + PAGE_SIZE); + + mutex_unlock(&ggtt->error_mutex); } /* Capture all registers which don't fit into another category. */ -static void capture_reg_state(struct i915_gpu_state *error) +static void gt_record_regs(struct intel_gt_coredump *gt) { - struct drm_i915_private *i915 = error->i915; - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = gt->_gt->uncore; + struct drm_i915_private *i915 = uncore->i915; int i; - /* General organization + /* + * General organization * 1. Registers specific to a single generation * 2. Registers which belong to multiple generations * 3. Feature specific registers. 
@@ -1569,138 +1540,162 @@ static void capture_reg_state(struct i915_gpu_state *error) /* 1: Registers specific to a single generation */ if (IS_VALLEYVIEW(i915)) { - error->gtier[0] = intel_uncore_read(uncore, GTIER); - error->ier = intel_uncore_read(uncore, VLV_IER); - error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV); + gt->gtier[0] = intel_uncore_read(uncore, GTIER); + gt->ier = intel_uncore_read(uncore, VLV_IER); + gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV); } if (IS_GEN(i915, 7)) - error->err_int = intel_uncore_read(uncore, GEN7_ERR_INT); + gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT); if (INTEL_GEN(i915) >= 12) { - error->fault_data0 = intel_uncore_read(uncore, - GEN12_FAULT_TLB_DATA0); - error->fault_data1 = intel_uncore_read(uncore, - GEN12_FAULT_TLB_DATA1); + gt->fault_data0 = intel_uncore_read(uncore, + GEN12_FAULT_TLB_DATA0); + gt->fault_data1 = intel_uncore_read(uncore, + GEN12_FAULT_TLB_DATA1); } else if (INTEL_GEN(i915) >= 8) { - error->fault_data0 = intel_uncore_read(uncore, - GEN8_FAULT_TLB_DATA0); - error->fault_data1 = intel_uncore_read(uncore, - GEN8_FAULT_TLB_DATA1); + gt->fault_data0 = intel_uncore_read(uncore, + GEN8_FAULT_TLB_DATA0); + gt->fault_data1 = intel_uncore_read(uncore, + GEN8_FAULT_TLB_DATA1); } if (IS_GEN(i915, 6)) { - error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE); - error->gab_ctl = intel_uncore_read(uncore, GAB_CTL); - error->gfx_mode = intel_uncore_read(uncore, GFX_MODE); + gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE); + gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL); + gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE); } /* 2: Registers which belong to multiple generations */ if (INTEL_GEN(i915) >= 7) - error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT); + gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT); if (INTEL_GEN(i915) >= 6) { - error->derrmr = intel_uncore_read(uncore, DERRMR); + gt->derrmr = intel_uncore_read(uncore, DERRMR); if (INTEL_GEN(i915) < 12) { - error->error = intel_uncore_read(uncore, ERROR_GEN6); - error->done_reg = intel_uncore_read(uncore, DONE_REG); + gt->error = intel_uncore_read(uncore, ERROR_GEN6); + gt->done_reg = intel_uncore_read(uncore, DONE_REG); } } - if (INTEL_GEN(i915) >= 5) - error->ccid = intel_uncore_read(uncore, CCID(RENDER_RING_BASE)); - /* 3: Feature specific registers */ if (IS_GEN_RANGE(i915, 6, 7)) { - error->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK); - error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS); + gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK); + gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS); } if (IS_GEN_RANGE(i915, 8, 11)) - error->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN); + gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN); if (IS_GEN(i915, 12)) - error->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG); + gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG); if (INTEL_GEN(i915) >= 12) { for (i = 0; i < GEN12_SFC_DONE_MAX; i++) { - error->sfc_done[i] = + gt->sfc_done[i] = intel_uncore_read(uncore, GEN12_SFC_DONE(i)); } - error->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE); + gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE); } /* 4: Everything else */ if (INTEL_GEN(i915) >= 11) { - error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); - error->gtier[0] = + gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); + gt->gtier[0] = intel_uncore_read(uncore, GEN11_RENDER_COPY_INTR_ENABLE); - error->gtier[1] = + gt->gtier[1] = 
intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE); - error->gtier[2] = + gt->gtier[2] = intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE); - error->gtier[3] = + gt->gtier[3] = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE); - error->gtier[4] = + gt->gtier[4] = intel_uncore_read(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE); - error->gtier[5] = + gt->gtier[5] = intel_uncore_read(uncore, GEN11_GUNIT_CSME_INTR_ENABLE); - error->ngtier = 6; + gt->ngtier = 6; } else if (INTEL_GEN(i915) >= 8) { - error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); + gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); for (i = 0; i < 4; i++) - error->gtier[i] = intel_uncore_read(uncore, - GEN8_GT_IER(i)); - error->ngtier = 4; + gt->gtier[i] = + intel_uncore_read(uncore, GEN8_GT_IER(i)); + gt->ngtier = 4; } else if (HAS_PCH_SPLIT(i915)) { - error->ier = intel_uncore_read(uncore, DEIER); - error->gtier[0] = intel_uncore_read(uncore, GTIER); - error->ngtier = 1; + gt->ier = intel_uncore_read(uncore, DEIER); + gt->gtier[0] = intel_uncore_read(uncore, GTIER); + gt->ngtier = 1; } else if (IS_GEN(i915, 2)) { - error->ier = intel_uncore_read16(uncore, GEN2_IER); + gt->ier = intel_uncore_read16(uncore, GEN2_IER); } else if (!IS_VALLEYVIEW(i915)) { - error->ier = intel_uncore_read(uncore, GEN2_IER); + gt->ier = intel_uncore_read(uncore, GEN2_IER); } - error->eir = intel_uncore_read(uncore, EIR); - error->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER); + gt->eir = intel_uncore_read(uncore, EIR); + gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER); } -static const char * -error_msg(struct i915_gpu_state *error, - intel_engine_mask_t engines, const char *msg) +/* + * Generate a semi-unique error code. The code is not meant to have meaning, The + * code's only purpose is to try to prevent false duplicated bug reports by + * grossly estimating a GPU error state. + * + * TODO Ideally, hashing the batchbuffer would be a very nice way to determine + * the hang if we could strip the GTT offset information from it. + * + * It's only a small step better than a random number in its current form. + */ +static u32 generate_ecode(const struct intel_engine_coredump *ee) { + /* + * IPEHR would be an ideal way to detect errors, as it's the gross + * measure of "the command that hung." However, has some very common + * synchronization commands which almost always appear in the case + * strictly a client bug. Use instdone to differentiate those some. + */ + return ee ? 
ee->ipehr ^ ee->instdone.instdone : 0; +} + +static const char *error_msg(struct i915_gpu_coredump *error) +{ + struct intel_engine_coredump *first = NULL; + struct intel_gt_coredump *gt; + intel_engine_mask_t engines; int len; + engines = 0; + for (gt = error->gt; gt; gt = gt->next) { + struct intel_engine_coredump *cs; + + if (gt->engine && !first) + first = gt->engine; + + for (cs = gt->engine; cs; cs = cs->next) + engines |= cs->engine->mask; + } + len = scnprintf(error->error_msg, sizeof(error->error_msg), - "GPU HANG: ecode %d:%x:0x%08x", + "GPU HANG: ecode %d:%x:%08x", INTEL_GEN(error->i915), engines, - i915_error_generate_code(error)); - if (error->engine) { + generate_ecode(first)); + if (first) { /* Just show the first executing process, more is confusing */ len += scnprintf(error->error_msg + len, sizeof(error->error_msg) - len, ", in %s [%d]", - error->engine->context.comm, - error->engine->context.pid); + first->context.comm, first->context.pid); } - if (msg) - len += scnprintf(error->error_msg + len, - sizeof(error->error_msg) - len, - ", %s", msg); return error->error_msg; } -static void capture_gen_state(struct i915_gpu_state *error) +static void capture_gen(struct i915_gpu_coredump *error) { struct drm_i915_private *i915 = error->i915; - error->awake = i915->gt.awake; error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count); error->suspended = i915->runtime_pm.suspended; @@ -1711,6 +1706,7 @@ static void capture_gen_state(struct i915_gpu_state *error) error->reset_count = i915_reset_count(&i915->gpu_error); error->suspend_count = i915->suspend_count; + i915_params_copy(&error->params, &i915_modparams); memcpy(&error->device_info, INTEL_INFO(i915), sizeof(error->device_info)); @@ -1720,115 +1716,138 @@ static void capture_gen_state(struct i915_gpu_state *error) error->driver_caps = i915->caps; } -static void capture_params(struct i915_gpu_state *error) +struct i915_gpu_coredump * +i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp) { - i915_params_copy(&error->params, &i915_modparams); + struct i915_gpu_coredump *error; + + if (!i915_modparams.error_capture) + return NULL; + + error = kzalloc(sizeof(*error), gfp); + if (!error) + return NULL; + + kref_init(&error->ref); + error->i915 = i915; + + error->time = ktime_get_real(); + error->boottime = ktime_get_boottime(); + error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time); + error->capture = jiffies; + + capture_gen(error); + + return error; } -static void capture_finish(struct i915_gpu_state *error) +#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x)) + +struct intel_gt_coredump * +intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp) { - struct i915_ggtt *ggtt = &error->i915->ggtt; + struct intel_gt_coredump *gc; - if (drm_mm_node_allocated(&ggtt->error_capture)) { - const u64 slot = ggtt->error_capture.start; + gc = kzalloc(sizeof(*gc), gfp); + if (!gc) + return NULL; + + gc->_gt = gt; + gc->awake = intel_gt_pm_is_awake(gt); - ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); + gt_record_regs(gc); + gt_record_fences(gc); + + return gc; +} + +struct i915_vma_compress * +i915_vma_capture_prepare(struct intel_gt_coredump *gt) +{ + struct i915_vma_compress *compress; + + compress = kmalloc(sizeof(*compress), ALLOW_FAIL); + if (!compress) + return NULL; + + if (!compress_init(compress)) { + kfree(compress); + return NULL; } + + gt_capture_prepare(gt); + + return compress; } -#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x)) +void i915_vma_capture_finish(struct intel_gt_coredump *gt, + struct 
i915_vma_compress *compress) +{ + if (!compress) + return; + + gt_capture_finish(gt); -struct i915_gpu_state * -i915_capture_gpu_state(struct drm_i915_private *i915) + compress_fini(compress); + kfree(compress); +} + +struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915) { - struct i915_gpu_state *error; - struct compress compress; + struct i915_gpu_coredump *error; /* Check if GPU capture has been disabled */ error = READ_ONCE(i915->gpu_error.first_error); if (IS_ERR(error)) return error; - error = kzalloc(sizeof(*error), ALLOW_FAIL); - if (!error) { - i915_disable_error_state(i915, -ENOMEM); + error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL); + if (!error) return ERR_PTR(-ENOMEM); - } - if (!compress_init(&compress)) { - kfree(error); - i915_disable_error_state(i915, -ENOMEM); - return ERR_PTR(-ENOMEM); - } + error->gt = intel_gt_coredump_alloc(&i915->gt, ALLOW_FAIL); + if (error->gt) { + struct i915_vma_compress *compress; - kref_init(&error->ref); - error->i915 = i915; + compress = i915_vma_capture_prepare(error->gt); + if (!compress) { + kfree(error->gt); + kfree(error); + return ERR_PTR(-ENOMEM); + } - error->time = ktime_get_real(); - error->boottime = ktime_get_boottime(); - error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time); - error->capture = jiffies; + gt_record_engines(error->gt, compress); - capture_params(error); - capture_gen_state(error); - capture_uc_state(error, &compress); - capture_reg_state(error); - gem_record_fences(error); - gem_record_rings(error, &compress); + if (INTEL_INFO(i915)->has_gt_uc) + error->gt->uc = gt_record_uc(error->gt, compress); + + i915_vma_capture_finish(error->gt, compress); + + error->simulated |= error->gt->simulated; + } error->overlay = intel_overlay_capture_error_state(i915); error->display = intel_display_capture_error_state(i915); - capture_finish(error); - compress_fini(&compress); - return error; } -/** - * i915_capture_error_state - capture an error record for later analysis - * @i915: i915 device - * @engine_mask: the mask of engines triggering the hang - * @msg: a message to insert into the error capture header - * - * Should be called when an error is detected (either a hang or an error - * interrupt) to capture error state from the time of the error. Fills - * out a structure which becomes available in debugfs for user level tools - * to pick up. 
- */ -void i915_capture_error_state(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask, - const char *msg) +void i915_error_state_store(struct i915_gpu_coredump *error) { + struct drm_i915_private *i915; static bool warned; - struct i915_gpu_state *error; - unsigned long flags; - if (!i915_modparams.error_capture) + if (IS_ERR_OR_NULL(error)) return; - if (READ_ONCE(i915->gpu_error.first_error)) - return; + i915 = error->i915; + dev_info(i915->drm.dev, "%s\n", error_msg(error)); - error = i915_capture_gpu_state(i915); - if (IS_ERR(error)) + if (error->simulated || + cmpxchg(&i915->gpu_error.first_error, NULL, error)) return; - dev_info(i915->drm.dev, "%s\n", error_msg(error, engine_mask, msg)); - - if (!error->simulated) { - spin_lock_irqsave(&i915->gpu_error.lock, flags); - if (!i915->gpu_error.first_error) { - i915->gpu_error.first_error = error; - error = NULL; - } - spin_unlock_irqrestore(&i915->gpu_error.lock, flags); - } - - if (error) { - __i915_gpu_state_free(&error->ref); - return; - } + i915_gpu_coredump_get(error); if (!xchg(&warned, true) && ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) { @@ -1841,15 +1860,38 @@ void i915_capture_error_state(struct drm_i915_private *i915, } } -struct i915_gpu_state * +/** + * i915_capture_error_state - capture an error record for later analysis + * @i915: i915 device + * + * Should be called when an error is detected (either a hang or an error + * interrupt) to capture error state from the time of the error. Fills + * out a structure which becomes available in debugfs for user level tools + * to pick up. + */ +void i915_capture_error_state(struct drm_i915_private *i915) +{ + struct i915_gpu_coredump *error; + + error = i915_gpu_coredump(i915); + if (IS_ERR(error)) { + cmpxchg(&i915->gpu_error.first_error, NULL, error); + return; + } + + i915_error_state_store(error); + i915_gpu_coredump_put(error); +} + +struct i915_gpu_coredump * i915_first_error_state(struct drm_i915_private *i915) { - struct i915_gpu_state *error; + struct i915_gpu_coredump *error; spin_lock_irq(&i915->gpu_error.lock); error = i915->gpu_error.first_error; if (!IS_ERR_OR_NULL(error)) - i915_gpu_state_get(error); + i915_gpu_coredump_get(error); spin_unlock_irq(&i915->gpu_error.lock); return error; @@ -1857,7 +1899,7 @@ i915_first_error_state(struct drm_i915_private *i915) void i915_reset_error_state(struct drm_i915_private *i915) { - struct i915_gpu_state *error; + struct i915_gpu_coredump *error; spin_lock_irq(&i915->gpu_error.lock); error = i915->gpu_error.first_error; @@ -1866,7 +1908,7 @@ void i915_reset_error_state(struct drm_i915_private *i915) spin_unlock_irq(&i915->gpu_error.lock); if (!IS_ERR_OR_NULL(error)) - i915_gpu_state_put(error); + i915_gpu_coredump_put(error); } void i915_disable_error_state(struct drm_i915_private *i915, int err) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 5d2c3372ff99..9109004956bd 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -25,43 +25,100 @@ #include "i915_scheduler.h" struct drm_i915_private; +struct i915_vma_compress; +struct intel_engine_capture_vma; struct intel_overlay_error_state; struct intel_display_error_state; -struct i915_gpu_state { - struct kref ref; - ktime_t time; - ktime_t boottime; - ktime_t uptime; - unsigned long capture; +struct i915_vma_coredump { + struct i915_vma_coredump *next; - struct drm_i915_private *i915; + char name[20]; + + u64 gtt_offset; + u64 gtt_size; + u32 
gtt_page_sizes; + + int num_pages; + int page_count; + int unused; + u32 *pages[0]; +}; + +struct i915_request_coredump { + unsigned long flags; + pid_t pid; + u32 context; + u32 seqno; + u32 start; + u32 head; + u32 tail; + struct i915_sched_attr sched_attr; +}; + +struct intel_engine_coredump { + const struct intel_engine_cs *engine; - char error_msg[128]; bool simulated; - bool awake; - bool wakelock; - bool suspended; - int iommu; u32 reset_count; - u32 suspend_count; - struct intel_device_info device_info; - struct intel_runtime_info runtime_info; - struct intel_driver_caps driver_caps; - struct i915_params params; - struct i915_error_uc { - struct intel_uc_fw guc_fw; - struct intel_uc_fw huc_fw; - struct drm_i915_error_object *guc_log; - } uc; + /* position of active request inside the ring */ + u32 rq_head, rq_post, rq_tail; + + /* Register state */ + u32 ccid; + u32 start; + u32 tail; + u32 head; + u32 ctl; + u32 mode; + u32 hws; + u32 ipeir; + u32 ipehr; + u32 bbstate; + u32 instpm; + u32 instps; + u64 bbaddr; + u64 acthd; + u32 fault_reg; + u64 faddr; + u32 rc_psmi; /* sleep state */ + struct intel_instdone instdone; + + struct i915_gem_context_coredump { + char comm[TASK_COMM_LEN]; + pid_t pid; + int active; + int guilty; + struct i915_sched_attr sched_attr; + } context; + + struct i915_vma_coredump *vma; + + struct i915_request_coredump execlist[EXECLIST_MAX_PORTS]; + unsigned int num_ports; + + struct { + u32 gfx_mode; + union { + u64 pdp[4]; + u32 pp_dir_base; + }; + } vm_info; + + struct intel_engine_coredump *next; +}; + +struct intel_gt_coredump { + const struct intel_gt *_gt; + bool awake; + bool simulated; /* Generic register state */ u32 eir; u32 pgtbl_er; u32 ier; u32 gtier[6], ngtier; - u32 ccid; u32 derrmr; u32 forcewake; u32 error; /* gen6+ */ @@ -80,91 +137,45 @@ struct i915_gpu_state { u32 nfence; u64 fence[I915_MAX_NUM_FENCES]; + + struct intel_engine_coredump *engine; + + struct intel_uc_coredump { + struct intel_uc_fw guc_fw; + struct intel_uc_fw huc_fw; + struct i915_vma_coredump *guc_log; + } *uc; + + struct intel_gt_coredump *next; +}; + +struct i915_gpu_coredump { + struct kref ref; + ktime_t time; + ktime_t boottime; + ktime_t uptime; + unsigned long capture; + + struct drm_i915_private *i915; + + struct intel_gt_coredump *gt; + + char error_msg[128]; + bool simulated; + bool wakelock; + bool suspended; + int iommu; + u32 reset_count; + u32 suspend_count; + + struct intel_device_info device_info; + struct intel_runtime_info runtime_info; + struct intel_driver_caps driver_caps; + struct i915_params params; + struct intel_overlay_error_state *overlay; struct intel_display_error_state *display; - struct drm_i915_error_engine { - const struct intel_engine_cs *engine; - - /* Software tracked state */ - bool idle; - int num_requests; - u32 reset_count; - - /* position of active request inside the ring */ - u32 rq_head, rq_post, rq_tail; - - /* our own tracking of ring head and tail */ - u32 cpu_ring_head; - u32 cpu_ring_tail; - - /* Register state */ - u32 start; - u32 tail; - u32 head; - u32 ctl; - u32 mode; - u32 hws; - u32 ipeir; - u32 ipehr; - u32 bbstate; - u32 instpm; - u32 instps; - u64 bbaddr; - u64 acthd; - u32 fault_reg; - u64 faddr; - u32 rc_psmi; /* sleep state */ - struct intel_instdone instdone; - - struct drm_i915_error_context { - char comm[TASK_COMM_LEN]; - pid_t pid; - int active; - int guilty; - struct i915_sched_attr sched_attr; - } context; - - struct drm_i915_error_object { - u64 gtt_offset; - u64 gtt_size; - u32 gtt_page_sizes; - int 
num_pages; - int page_count; - int unused; - u32 *pages[0]; - } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; - - struct drm_i915_error_object **user_bo; - long user_bo_count; - - struct drm_i915_error_object *wa_ctx; - struct drm_i915_error_object *default_state; - - struct drm_i915_error_request { - unsigned long flags; - long jiffies; - pid_t pid; - u32 context; - u32 seqno; - u32 start; - u32 head; - u32 tail; - struct i915_sched_attr sched_attr; - } *requests, execlist[EXECLIST_MAX_PORTS]; - unsigned int num_ports; - - struct { - u32 gfx_mode; - union { - u64 pdp[4]; - u32 pp_dir_base; - }; - } vm_info; - - struct drm_i915_error_engine *next; - } *engine; - struct scatterlist *sgl, *fit; }; @@ -172,7 +183,7 @@ struct i915_gpu_error { /* For reset and error_state handling. */ spinlock_t lock; /* Protected by the above dev->gpu_error.lock. */ - struct i915_gpu_state *first_error; + struct i915_gpu_coredump *first_error; atomic_t pending_fb_pin; @@ -200,41 +211,115 @@ struct drm_i915_error_state_buf { __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); -struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915); -void i915_capture_error_state(struct drm_i915_private *dev_priv, - intel_engine_mask_t engine_mask, - const char *error_msg); +struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915); +void i915_capture_error_state(struct drm_i915_private *i915); + +struct i915_gpu_coredump * +i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp); + +struct intel_gt_coredump * +intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp); + +struct intel_engine_coredump * +intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp); + +struct intel_engine_capture_vma * +intel_engine_coredump_add_request(struct intel_engine_coredump *ee, + struct i915_request *rq, + gfp_t gfp); -static inline struct i915_gpu_state * -i915_gpu_state_get(struct i915_gpu_state *gpu) +void intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, + struct intel_engine_capture_vma *capture, + struct i915_vma_compress *compress); + +struct i915_vma_compress * +i915_vma_capture_prepare(struct intel_gt_coredump *gt); + +void i915_vma_capture_finish(struct intel_gt_coredump *gt, + struct i915_vma_compress *compress); + +void i915_error_state_store(struct i915_gpu_coredump *error); + +static inline struct i915_gpu_coredump * +i915_gpu_coredump_get(struct i915_gpu_coredump *gpu) { kref_get(&gpu->ref); return gpu; } -ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error, - char *buf, loff_t offset, size_t count); +ssize_t +i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error, + char *buf, loff_t offset, size_t count); -void __i915_gpu_state_free(struct kref *kref); -static inline void i915_gpu_state_put(struct i915_gpu_state *gpu) +void __i915_gpu_coredump_free(struct kref *kref); +static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu) { if (gpu) - kref_put(&gpu->ref, __i915_gpu_state_free); + kref_put(&gpu->ref, __i915_gpu_coredump_free); } -struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); +struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915); void i915_reset_error_state(struct drm_i915_private *i915); void i915_disable_error_state(struct drm_i915_private *i915, int err); #else -static inline void i915_capture_error_state(struct drm_i915_private *dev_priv, - u32 engine_mask, - const char *error_msg) +static 
inline void i915_capture_error_state(struct drm_i915_private *i915) +{ +} + +static inline struct i915_gpu_coredump * +i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp) +{ + return NULL; +} + +static inline struct intel_gt_coredump * +intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp) +{ + return NULL; +} + +static inline struct intel_engine_coredump * +intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp) +{ + return NULL; +} + +static inline struct intel_engine_capture_vma * +intel_engine_coredump_add_request(struct intel_engine_coredump *ee, + struct i915_request *rq, + gfp_t gfp) +{ + return NULL; +} + +static inline void +intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, + struct intel_engine_capture_vma *capture, + struct i915_vma_compress *compress) +{ +} + +static inline struct i915_vma_compress * +i915_vma_capture_prepare(struct intel_gt_coredump *gt) +{ + return NULL; +} + +static inline void +i915_vma_capture_finish(struct intel_gt_coredump *gt, + struct i915_vma_compress *compress) +{ +} + +static inline void +i915_error_state_store(struct drm_i915_private *i915, + struct i915_gpu_coredump *error) { } -static inline struct i915_gpu_state * +static inline struct i915_gpu_coredump * i915_first_error_state(struct drm_i915_private *i915) { return ERR_PTR(-ENODEV); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 42b79f577500..afc6aad9bf8c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -893,7 +893,7 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc) } /** - * ivybridge_parity_work - Workqueue called when a parity error interrupt + * ivb_parity_work - Workqueue called when a parity error interrupt * occurred. * @work: workqueue struct * @@ -901,7 +901,7 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc) * this event, userspace should try to remap the bad rows since statistically * it is likely the same row is more likely to go bad again. */ -static void ivybridge_parity_work(struct work_struct *work) +static void ivb_parity_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), l3_parity.error_work); @@ -2031,7 +2031,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, * 4 - Process the interrupt(s) that had bits set in the IIRs. * 5 - Re-enable Master Interrupt Control. 
*/ -static irqreturn_t ironlake_irq_handler(int irq, void *arg) +static irqreturn_t ilk_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = arg; u32 de_iir, gt_iir, de_ier, sde_ier = 0; @@ -2742,7 +2742,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) /* drm_dma.h hooks */ -static void ironlake_irq_reset(struct drm_i915_private *dev_priv) +static void ilk_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -3225,7 +3225,7 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) spt_hpd_detection_setup(dev_priv); } -static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv) +static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; u32 display_mask, extra_mask; @@ -3899,7 +3899,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) intel_hpd_init_work(dev_priv); - INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); + INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work); for (i = 0; i < MAX_L3_SLICES; ++i) dev_priv->l3_parity.remap_info[i] = NULL; @@ -3980,7 +3980,7 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv) else if (INTEL_GEN(dev_priv) >= 8) return gen8_irq_handler; else - return ironlake_irq_handler; + return ilk_irq_handler; } } @@ -4003,7 +4003,7 @@ static void intel_irq_reset(struct drm_i915_private *dev_priv) else if (INTEL_GEN(dev_priv) >= 8) gen8_irq_reset(dev_priv); else - ironlake_irq_reset(dev_priv); + ilk_irq_reset(dev_priv); } } @@ -4026,7 +4026,7 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv) else if (INTEL_GEN(dev_priv) >= 8) gen8_irq_postinstall(dev_priv); else - ironlake_irq_postinstall(dev_priv); + ilk_irq_postinstall(dev_priv); } } diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c index 318562ce64c0..b6376b25ef63 100644 --- a/drivers/gpu/drm/i915/i915_mm.c +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -33,6 +33,9 @@ struct remap_pfn { struct mm_struct *mm; unsigned long pfn; pgprot_t prot; + + struct sgt_iter sgt; + resource_size_t iobase; }; static int remap_pfn(pte_t *pte, unsigned long addr, void *data) @@ -46,6 +49,35 @@ static int remap_pfn(pte_t *pte, unsigned long addr, void *data) return 0; } +#define use_dma(io) ((io) != -1) + +static inline unsigned long sgt_pfn(const struct remap_pfn *r) +{ + if (use_dma(r->iobase)) + return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT; + else + return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT); +} + +static int remap_sg(pte_t *pte, unsigned long addr, void *data) +{ + struct remap_pfn *r = data; + + if (GEM_WARN_ON(!r->sgt.pfn)) + return -EINVAL; + + /* Special PTE are not associated with any struct page */ + set_pte_at(r->mm, addr, pte, + pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot))); + r->pfn++; /* track insertions in case we need to unwind later */ + + r->sgt.curr += PAGE_SIZE; + if (r->sgt.curr >= r->sgt.max) + r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase)); + + return 0; +} + /** * remap_io_mapping - remap an IO mapping to userspace * @vma: user vma to map to @@ -80,3 +112,40 @@ int remap_io_mapping(struct vm_area_struct *vma, return 0; } + +/** + * remap_io_sg - remap an IO mapping to userspace + * @vma: user vma to map to + * @addr: target user address to start at + * @size: size of map area + * @sgl: Start sg entry + * @iobase: Use stored dma address offset by this address or pfn if -1 + * + * Note: this is only 
safe if the mm semaphore is held when called. + */ +int remap_io_sg(struct vm_area_struct *vma, + unsigned long addr, unsigned long size, + struct scatterlist *sgl, resource_size_t iobase) +{ + struct remap_pfn r = { + .mm = vma->vm_mm, + .prot = vma->vm_page_prot, + .sgt = __sgt_iter(sgl, use_dma(iobase)), + .iobase = iobase, + }; + int err; + + /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); + + if (!use_dma(iobase)) + flush_cache_range(vma, addr, size); + + err = apply_to_page_range(r.mm, addr, size, remap_sg, &r); + if (unlikely(err)) { + zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT); + return err; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 9571611b4b16..83f01401b8b5 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -193,23 +193,23 @@ GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS -static const struct intel_device_info intel_i830_info = { +static const struct intel_device_info i830_info = { I830_FEATURES, PLATFORM(INTEL_I830), }; -static const struct intel_device_info intel_i845g_info = { +static const struct intel_device_info i845g_info = { I845_FEATURES, PLATFORM(INTEL_I845G), }; -static const struct intel_device_info intel_i85x_info = { +static const struct intel_device_info i85x_info = { I830_FEATURES, PLATFORM(INTEL_I85X), .display.has_fbc = 1, }; -static const struct intel_device_info intel_i865g_info = { +static const struct intel_device_info i865g_info = { I845_FEATURES, PLATFORM(INTEL_I865G), }; @@ -228,7 +228,7 @@ static const struct intel_device_info intel_i865g_info = { GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS -static const struct intel_device_info intel_i915g_info = { +static const struct intel_device_info i915g_info = { GEN3_FEATURES, PLATFORM(INTEL_I915G), .has_coherent_ggtt = false, @@ -239,7 +239,7 @@ static const struct intel_device_info intel_i915g_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i915gm_info = { +static const struct intel_device_info i915gm_info = { GEN3_FEATURES, PLATFORM(INTEL_I915GM), .is_mobile = 1, @@ -252,7 +252,7 @@ static const struct intel_device_info intel_i915gm_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i945g_info = { +static const struct intel_device_info i945g_info = { GEN3_FEATURES, PLATFORM(INTEL_I945G), .display.has_hotplug = 1, @@ -263,7 +263,7 @@ static const struct intel_device_info intel_i945g_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i945gm_info = { +static const struct intel_device_info i945gm_info = { GEN3_FEATURES, PLATFORM(INTEL_I945GM), .is_mobile = 1, @@ -277,21 +277,21 @@ static const struct intel_device_info intel_i945gm_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_g33_info = { +static const struct intel_device_info g33_info = { GEN3_FEATURES, PLATFORM(INTEL_G33), .display.has_hotplug = 1, .display.has_overlay = 1, }; -static const struct intel_device_info intel_pineview_g_info = { +static const struct intel_device_info pnv_g_info = { GEN3_FEATURES, PLATFORM(INTEL_PINEVIEW), .display.has_hotplug = 1, .display.has_overlay = 1, }; -static const struct intel_device_info intel_pineview_m_info = { +static const struct intel_device_info pnv_m_info = { GEN3_FEATURES, PLATFORM(INTEL_PINEVIEW), .is_mobile = 1, @@ -314,7 +314,7 @@ static const struct intel_device_info 
intel_pineview_m_info = { GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS -static const struct intel_device_info intel_i965g_info = { +static const struct intel_device_info i965g_info = { GEN4_FEATURES, PLATFORM(INTEL_I965G), .display.has_overlay = 1, @@ -322,7 +322,7 @@ static const struct intel_device_info intel_i965g_info = { .has_snoop = false, }; -static const struct intel_device_info intel_i965gm_info = { +static const struct intel_device_info i965gm_info = { GEN4_FEATURES, PLATFORM(INTEL_I965GM), .is_mobile = 1, @@ -333,14 +333,14 @@ static const struct intel_device_info intel_i965gm_info = { .has_snoop = false, }; -static const struct intel_device_info intel_g45_info = { +static const struct intel_device_info g45_info = { GEN4_FEATURES, PLATFORM(INTEL_G45), .engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; -static const struct intel_device_info intel_gm45_info = { +static const struct intel_device_info gm45_info = { GEN4_FEATURES, PLATFORM(INTEL_GM45), .is_mobile = 1, @@ -365,12 +365,12 @@ static const struct intel_device_info intel_gm45_info = { GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS -static const struct intel_device_info intel_ironlake_d_info = { +static const struct intel_device_info ilk_d_info = { GEN5_FEATURES, PLATFORM(INTEL_IRONLAKE), }; -static const struct intel_device_info intel_ironlake_m_info = { +static const struct intel_device_info ilk_m_info = { GEN5_FEATURES, PLATFORM(INTEL_IRONLAKE), .is_mobile = 1, @@ -400,12 +400,12 @@ static const struct intel_device_info intel_ironlake_m_info = { GEN6_FEATURES, \ PLATFORM(INTEL_SANDYBRIDGE) -static const struct intel_device_info intel_sandybridge_d_gt1_info = { +static const struct intel_device_info snb_d_gt1_info = { SNB_D_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_sandybridge_d_gt2_info = { +static const struct intel_device_info snb_d_gt2_info = { SNB_D_PLATFORM, .gt = 2, }; @@ -416,12 +416,12 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info = { .is_mobile = 1 -static const struct intel_device_info intel_sandybridge_m_gt1_info = { +static const struct intel_device_info snb_m_gt1_info = { SNB_M_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_sandybridge_m_gt2_info = { +static const struct intel_device_info snb_m_gt2_info = { SNB_M_PLATFORM, .gt = 2, }; @@ -450,12 +450,12 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { PLATFORM(INTEL_IVYBRIDGE), \ .has_l3_dpf = 1 -static const struct intel_device_info intel_ivybridge_d_gt1_info = { +static const struct intel_device_info ivb_d_gt1_info = { IVB_D_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_ivybridge_d_gt2_info = { +static const struct intel_device_info ivb_d_gt2_info = { IVB_D_PLATFORM, .gt = 2, }; @@ -466,17 +466,17 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info = { .is_mobile = 1, \ .has_l3_dpf = 1 -static const struct intel_device_info intel_ivybridge_m_gt1_info = { +static const struct intel_device_info ivb_m_gt1_info = { IVB_M_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_ivybridge_m_gt2_info = { +static const struct intel_device_info ivb_m_gt2_info = { IVB_M_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_ivybridge_q_info = { +static const struct intel_device_info ivb_q_info = { GEN7_FEATURES, PLATFORM(INTEL_IVYBRIDGE), .gt = 2, @@ -484,7 +484,7 @@ static const struct intel_device_info intel_ivybridge_q_info = { .has_l3_dpf = 1, }; -static const struct intel_device_info 
intel_valleyview_info = { +static const struct intel_device_info vlv_info = { PLATFORM(INTEL_VALLEYVIEW), GEN(7), .is_lp = 1, @@ -523,17 +523,17 @@ static const struct intel_device_info intel_valleyview_info = { PLATFORM(INTEL_HASWELL), \ .has_l3_dpf = 1 -static const struct intel_device_info intel_haswell_gt1_info = { +static const struct intel_device_info hsw_gt1_info = { HSW_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_haswell_gt2_info = { +static const struct intel_device_info hsw_gt2_info = { HSW_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_haswell_gt3_info = { +static const struct intel_device_info hsw_gt3_info = { HSW_PLATFORM, .gt = 3, }; @@ -551,17 +551,17 @@ static const struct intel_device_info intel_haswell_gt3_info = { GEN8_FEATURES, \ PLATFORM(INTEL_BROADWELL) -static const struct intel_device_info intel_broadwell_gt1_info = { +static const struct intel_device_info bdw_gt1_info = { BDW_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_broadwell_gt2_info = { +static const struct intel_device_info bdw_gt2_info = { BDW_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_broadwell_rsvd_info = { +static const struct intel_device_info bdw_rsvd_info = { BDW_PLATFORM, .gt = 3, /* According to the device ID those devices are GT3, they were @@ -569,14 +569,14 @@ static const struct intel_device_info intel_broadwell_rsvd_info = { */ }; -static const struct intel_device_info intel_broadwell_gt3_info = { +static const struct intel_device_info bdw_gt3_info = { BDW_PLATFORM, .gt = 3, .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; -static const struct intel_device_info intel_cherryview_info = { +static const struct intel_device_info chv_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), @@ -621,12 +621,12 @@ static const struct intel_device_info intel_cherryview_info = { GEN9_FEATURES, \ PLATFORM(INTEL_SKYLAKE) -static const struct intel_device_info intel_skylake_gt1_info = { +static const struct intel_device_info skl_gt1_info = { SKL_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_skylake_gt2_info = { +static const struct intel_device_info skl_gt2_info = { SKL_PLATFORM, .gt = 2, }; @@ -637,12 +637,12 @@ static const struct intel_device_info intel_skylake_gt2_info = { BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1) -static const struct intel_device_info intel_skylake_gt3_info = { +static const struct intel_device_info skl_gt3_info = { SKL_GT3_PLUS_PLATFORM, .gt = 3, }; -static const struct intel_device_info intel_skylake_gt4_info = { +static const struct intel_device_info skl_gt4_info = { SKL_GT3_PLUS_PLATFORM, .gt = 4, }; @@ -679,13 +679,13 @@ static const struct intel_device_info intel_skylake_gt4_info = { GEN9_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_REGIONS -static const struct intel_device_info intel_broxton_info = { +static const struct intel_device_info bxt_info = { GEN9_LP_FEATURES, PLATFORM(INTEL_BROXTON), .ddb_size = 512, }; -static const struct intel_device_info intel_geminilake_info = { +static const struct intel_device_info glk_info = { GEN9_LP_FEATURES, PLATFORM(INTEL_GEMINILAKE), .ddb_size = 1024, @@ -696,17 +696,17 @@ static const struct intel_device_info intel_geminilake_info = { GEN9_FEATURES, \ PLATFORM(INTEL_KABYLAKE) -static const struct intel_device_info intel_kabylake_gt1_info = { +static const struct intel_device_info kbl_gt1_info = { KBL_PLATFORM, .gt = 1, }; -static const struct 
intel_device_info intel_kabylake_gt2_info = { +static const struct intel_device_info kbl_gt2_info = { KBL_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_kabylake_gt3_info = { +static const struct intel_device_info kbl_gt3_info = { KBL_PLATFORM, .gt = 3, .engine_mask = @@ -717,17 +717,17 @@ static const struct intel_device_info intel_kabylake_gt3_info = { GEN9_FEATURES, \ PLATFORM(INTEL_COFFEELAKE) -static const struct intel_device_info intel_coffeelake_gt1_info = { +static const struct intel_device_info cfl_gt1_info = { CFL_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_coffeelake_gt2_info = { +static const struct intel_device_info cfl_gt2_info = { CFL_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_coffeelake_gt3_info = { +static const struct intel_device_info cfl_gt3_info = { CFL_PLATFORM, .gt = 3, .engine_mask = @@ -742,7 +742,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = { .has_coherent_ggtt = false, \ GLK_COLORS -static const struct intel_device_info intel_cannonlake_info = { +static const struct intel_device_info cnl_info = { GEN10_FEATURES, PLATFORM(INTEL_CANNONLAKE), .gt = 2, @@ -777,14 +777,14 @@ static const struct intel_device_info intel_cannonlake_info = { .has_logical_ring_elsq = 1, \ .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 } -static const struct intel_device_info intel_icelake_11_info = { +static const struct intel_device_info icl_info = { GEN11_FEATURES, PLATFORM(INTEL_ICELAKE), .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; -static const struct intel_device_info intel_elkhartlake_info = { +static const struct intel_device_info ehl_info = { GEN11_FEATURES, PLATFORM(INTEL_ELKHARTLAKE), .require_force_probe = 1, @@ -815,7 +815,7 @@ static const struct intel_device_info intel_elkhartlake_info = { .has_global_mocs = 1, \ .display.has_dsb = 1 -static const struct intel_device_info intel_tigerlake_12_info = { +static const struct intel_device_info tgl_info = { GEN12_FEATURES, PLATFORM(INTEL_TIGERLAKE), .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), @@ -840,70 +840,70 @@ static const struct intel_device_info intel_tigerlake_12_info = { * PCI ID matches, otherwise we'll use the wrong info struct above. 
*/ static const struct pci_device_id pciidlist[] = { - INTEL_I830_IDS(&intel_i830_info), - INTEL_I845G_IDS(&intel_i845g_info), - INTEL_I85X_IDS(&intel_i85x_info), - INTEL_I865G_IDS(&intel_i865g_info), - INTEL_I915G_IDS(&intel_i915g_info), - INTEL_I915GM_IDS(&intel_i915gm_info), - INTEL_I945G_IDS(&intel_i945g_info), - INTEL_I945GM_IDS(&intel_i945gm_info), - INTEL_I965G_IDS(&intel_i965g_info), - INTEL_G33_IDS(&intel_g33_info), - INTEL_I965GM_IDS(&intel_i965gm_info), - INTEL_GM45_IDS(&intel_gm45_info), - INTEL_G45_IDS(&intel_g45_info), - INTEL_PINEVIEW_G_IDS(&intel_pineview_g_info), - INTEL_PINEVIEW_M_IDS(&intel_pineview_m_info), - INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), - INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), - INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info), - INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info), - INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info), - INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info), - INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ - INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info), - INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info), - INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info), - INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info), - INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info), - INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info), - INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info), - INTEL_VLV_IDS(&intel_valleyview_info), - INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info), - INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info), - INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info), - INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info), - INTEL_CHV_IDS(&intel_cherryview_info), - INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info), - INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info), - INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), - INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info), - INTEL_BXT_IDS(&intel_broxton_info), - INTEL_GLK_IDS(&intel_geminilake_info), - INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info), - INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info), - INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), - INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), - INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info), - INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), - INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_CFL_H_GT1_IDS(&intel_coffeelake_gt1_info), - INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), - INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info), - INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info), - INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info), - INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_CML_U_GT1_IDS(&intel_coffeelake_gt1_info), - INTEL_CML_U_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_CNL_IDS(&intel_cannonlake_info), - INTEL_ICL_11_IDS(&intel_icelake_11_info), - INTEL_EHL_IDS(&intel_elkhartlake_info), - INTEL_TGL_12_IDS(&intel_tigerlake_12_info), + INTEL_I830_IDS(&i830_info), + INTEL_I845G_IDS(&i845g_info), + INTEL_I85X_IDS(&i85x_info), + INTEL_I865G_IDS(&i865g_info), + INTEL_I915G_IDS(&i915g_info), + INTEL_I915GM_IDS(&i915gm_info), + INTEL_I945G_IDS(&i945g_info), + INTEL_I945GM_IDS(&i945gm_info), + INTEL_I965G_IDS(&i965g_info), + INTEL_G33_IDS(&g33_info), + INTEL_I965GM_IDS(&i965gm_info), + INTEL_GM45_IDS(&gm45_info), + INTEL_G45_IDS(&g45_info), + INTEL_PINEVIEW_G_IDS(&pnv_g_info), + INTEL_PINEVIEW_M_IDS(&pnv_m_info), + INTEL_IRONLAKE_D_IDS(&ilk_d_info), + 
INTEL_IRONLAKE_M_IDS(&ilk_m_info), + INTEL_SNB_D_GT1_IDS(&snb_d_gt1_info), + INTEL_SNB_D_GT2_IDS(&snb_d_gt2_info), + INTEL_SNB_M_GT1_IDS(&snb_m_gt1_info), + INTEL_SNB_M_GT2_IDS(&snb_m_gt2_info), + INTEL_IVB_Q_IDS(&ivb_q_info), /* must be first IVB */ + INTEL_IVB_M_GT1_IDS(&ivb_m_gt1_info), + INTEL_IVB_M_GT2_IDS(&ivb_m_gt2_info), + INTEL_IVB_D_GT1_IDS(&ivb_d_gt1_info), + INTEL_IVB_D_GT2_IDS(&ivb_d_gt2_info), + INTEL_HSW_GT1_IDS(&hsw_gt1_info), + INTEL_HSW_GT2_IDS(&hsw_gt2_info), + INTEL_HSW_GT3_IDS(&hsw_gt3_info), + INTEL_VLV_IDS(&vlv_info), + INTEL_BDW_GT1_IDS(&bdw_gt1_info), + INTEL_BDW_GT2_IDS(&bdw_gt2_info), + INTEL_BDW_GT3_IDS(&bdw_gt3_info), + INTEL_BDW_RSVD_IDS(&bdw_rsvd_info), + INTEL_CHV_IDS(&chv_info), + INTEL_SKL_GT1_IDS(&skl_gt1_info), + INTEL_SKL_GT2_IDS(&skl_gt2_info), + INTEL_SKL_GT3_IDS(&skl_gt3_info), + INTEL_SKL_GT4_IDS(&skl_gt4_info), + INTEL_BXT_IDS(&bxt_info), + INTEL_GLK_IDS(&glk_info), + INTEL_KBL_GT1_IDS(&kbl_gt1_info), + INTEL_KBL_GT2_IDS(&kbl_gt2_info), + INTEL_KBL_GT3_IDS(&kbl_gt3_info), + INTEL_KBL_GT4_IDS(&kbl_gt3_info), + INTEL_AML_KBL_GT2_IDS(&kbl_gt2_info), + INTEL_CFL_S_GT1_IDS(&cfl_gt1_info), + INTEL_CFL_S_GT2_IDS(&cfl_gt2_info), + INTEL_CFL_H_GT1_IDS(&cfl_gt1_info), + INTEL_CFL_H_GT2_IDS(&cfl_gt2_info), + INTEL_CFL_U_GT2_IDS(&cfl_gt2_info), + INTEL_CFL_U_GT3_IDS(&cfl_gt3_info), + INTEL_WHL_U_GT1_IDS(&cfl_gt1_info), + INTEL_WHL_U_GT2_IDS(&cfl_gt2_info), + INTEL_AML_CFL_GT2_IDS(&cfl_gt2_info), + INTEL_WHL_U_GT3_IDS(&cfl_gt3_info), + INTEL_CML_GT1_IDS(&cfl_gt1_info), + INTEL_CML_GT2_IDS(&cfl_gt2_info), + INTEL_CML_U_GT1_IDS(&cfl_gt1_info), + INTEL_CML_U_GT2_IDS(&cfl_gt2_info), + INTEL_CNL_IDS(&cnl_info), + INTEL_ICL_11_IDS(&icl_info), + INTEL_EHL_IDS(&ehl_info), + INTEL_TGL_12_IDS(&tgl_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 84350c7bc711..0f556d80ba36 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2159,8 +2159,6 @@ static int gen8_modify_context(struct intel_context *ce, struct i915_request *rq; int err; - lockdep_assert_held(&ce->pin_mutex); - rq = intel_engine_create_kernel_request(ce->engine); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -2203,17 +2201,14 @@ static int gen8_configure_context(struct i915_gem_context *ctx, if (ce->engine->class != RENDER_CLASS) continue; - err = intel_context_lock_pinned(ce); - if (err) - break; + /* Otherwise OA settings will be set upon first use */ + if (!intel_context_pin_if_active(ce)) + continue; flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu); + err = gen8_modify_context(ce, flex, count); - /* Otherwise OA settings will be set upon first use */ - if (intel_context_is_pinned(ce)) - err = gen8_modify_context(ce, flex, count); - - intel_context_unlock_pinned(ce); + intel_context_unpin(ce); if (err) break; } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index f3ef6700a5f2..28a82c849bac 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -1117,12 +1117,17 @@ void i915_pmu_register(struct drm_i915_private *i915) hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu->timer.function = i915_sample; - if (!is_igp(i915)) + if (!is_igp(i915)) { pmu->name = kasprintf(GFP_KERNEL, - "i915-%s", + "i915_%s", dev_name(i915->drm.dev)); - else + if (pmu->name) { + /* tools/perf reserves colons as special. 
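+			 * As an illustrative example (a sketch, not text from the
+			 * patch): for a discrete device at PCI address 0000:03:00.0,
+			 * dev_name() returns "0000:03:00.0", so the kasprintf() above
+			 * yields "i915_0000:03:00.0" and the strreplace() below
+			 * rewrites it to "i915_0000_03_00.0".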
*/ + strreplace((char *)pmu->name, ':', '_'); + } + } else { pmu->name = "i915"; + } if (!pmu->name) goto err; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index bbfedeb00b7f..6cc55c103f67 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2244,26 +2244,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) MG_DP_MODE_LN1_ACU_PORT1) #define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) #define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6) -#define MG_DP_MODE_CFG_TR2PWR_GATING (1 << 5) -#define MG_DP_MODE_CFG_TRPWR_GATING (1 << 4) -#define MG_DP_MODE_CFG_CLNPWR_GATING (1 << 3) -#define MG_DP_MODE_CFG_DIGPWR_GATING (1 << 2) -#define MG_DP_MODE_CFG_GAONPWR_GATING (1 << 1) - -#define MG_MISC_SUS0_PORT1 0x168814 -#define MG_MISC_SUS0_PORT2 0x169814 -#define MG_MISC_SUS0_PORT3 0x16A814 -#define MG_MISC_SUS0_PORT4 0x16B814 -#define MG_MISC_SUS0(tc_port) \ - _MMIO(_PORT(tc_port, MG_MISC_SUS0_PORT1, MG_MISC_SUS0_PORT2)) -#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK (3 << 14) -#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(x) ((x) << 14) -#define MG_MISC_SUS0_CFG_TR2PWR_GATING (1 << 12) -#define MG_MISC_SUS0_CFG_CL2PWR_GATING (1 << 11) -#define MG_MISC_SUS0_CFG_GAONPWR_GATING (1 << 10) -#define MG_MISC_SUS0_CFG_TRPWR_GATING (1 << 7) -#define MG_MISC_SUS0_CFG_CL1PWR_GATING (1 << 6) -#define MG_MISC_SUS0_CFG_DGPWR_GATING (1 << 5) /* The spec defines this only for BXT PHY0, but lets assume that this * would exist for PHY1 too if it had a second channel. @@ -4177,7 +4157,13 @@ enum { #define CPSSUNIT_CLKGATE_DIS REG_BIT(9) #define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) -#define VFUNIT_CLKGATE_DIS (1 << 20) +#define VFUNIT_CLKGATE_DIS REG_BIT(20) +#define HSUNIT_CLKGATE_DIS REG_BIT(8) +#define VSUNIT_CLKGATE_DIS REG_BIT(3) + +#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4) +#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19) +#define PSDUNIT_CLKGATE_DIS REG_BIT(5) #define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560) #define CGPSF_CLKGATE_DIS (1 << 3) @@ -6808,6 +6794,7 @@ enum { #define PLANE_CTL_TILED_Y (4 << 10) #define PLANE_CTL_TILED_YF (5 << 10) #define PLANE_CTL_FLIP_HORIZONTAL (1 << 8) +#define PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE (1 << 4) /* TGL+ */ #define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */ #define PLANE_CTL_ALPHA_DISABLE (0 << 4) #define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 44a0d1a950c5..be185886e4fc 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -658,7 +658,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) rq->engine = ce->engine; rq->ring = ce->ring; rq->execution_mask = ce->engine->mask; - rq->flags = 0; RCU_INIT_POINTER(rq->timeline, tl); RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 565322640378..031433691a06 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -51,7 +51,7 @@ struct i915_capture_list { #define RQ_TRACE(rq, fmt, ...) do { \ const struct i915_request *rq__ = (rq); \ - ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d" fmt, \ + ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \ rq__->fence.context, rq__->fence.seqno, \ hwsp_seqno(rq__), ##__VA_ARGS__); \ } while (0) @@ -77,6 +77,38 @@ enum { * a request is on the various signal_list. 
*/ I915_FENCE_FLAG_SIGNAL, + + /* + * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted + * + * The execution of some requests should not be interrupted. This is + * a sensitive operation as it makes the request super important, + * blocking other higher priority work. Abuse of this flag will + * lead to quality of service issues. + */ + I915_FENCE_FLAG_NOPREEMPT, + + /* + * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue + * + * A high priority sentinel request may be submitted to clear the + * submission queue. As it will be the only request in-flight, upon + * execution all other active requests will have been preempted and + * unsubmitted. This preemptive pulse is used to re-evaluate the + * in-flight requests, particularly in cases where an active context + * is banned and those active requests need to be cancelled. + */ + I915_FENCE_FLAG_SENTINEL, + + /* + * I915_FENCE_FLAG_BOOST - upclock the gpu for this request + * + * Some requests are more important than others! In particular, a + * request that the user is waiting on is typically required for + * interactive latency, for which we want to minimise by upclocking + * the GPU. Here we track such boost requests on a per-request basis. + */ + I915_FENCE_FLAG_BOOST, }; /** @@ -225,11 +257,6 @@ struct i915_request { /** Time at which this request was emitted, in jiffies. */ unsigned long emitted_jiffies; - unsigned long flags; -#define I915_REQUEST_WAITBOOST BIT(0) -#define I915_REQUEST_NOPREEMPT BIT(1) -#define I915_REQUEST_SENTINEL BIT(2) - /** timeline->request entry for this request */ struct list_head link; @@ -442,18 +469,18 @@ static inline void i915_request_mark_complete(struct i915_request *rq) static inline bool i915_request_has_waitboost(const struct i915_request *rq) { - return rq->flags & I915_REQUEST_WAITBOOST; + return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags); } static inline bool i915_request_has_nopreempt(const struct i915_request *rq) { /* Preemption should only be disabled very rarely */ - return unlikely(rq->flags & I915_REQUEST_NOPREEMPT); + return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags)); } static inline bool i915_request_has_sentinel(const struct i915_request *rq) { - return unlikely(rq->flags & I915_REQUEST_SENTINEL); + return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags)); } static inline struct intel_timeline * diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index ad2b1b833d7b..0cef3130db05 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -498,15 +498,15 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, struct device *kdev = kobj_to_dev(kobj); struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); - struct i915_gpu_state *gpu; + struct i915_gpu_coredump *gpu; ssize_t ret; gpu = i915_first_error_state(i915); if (IS_ERR(gpu)) { ret = PTR_ERR(gpu); } else if (gpu) { - ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); - i915_gpu_state_put(gpu); + ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count); + i915_gpu_coredump_put(gpu); } else { const char *str = "No error state collected\n"; size_t len = strlen(str); diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index cbd783c31adb..17d7c525ea5c 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -423,8 +423,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) void __iomem *ptr; int err; - /* 
Access through the GTT requires the device to be awake. */ - assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm); if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { err = -ENODEV; goto err; @@ -456,6 +454,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) goto err_unpin; i915_vma_set_ggtt_write(vma); + + /* NB Access through the GTT requires the device to be awake. */ return ptr; err_unpin: @@ -858,6 +858,7 @@ static void vma_unbind_pages(struct i915_vma *vma) int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { struct i915_vma_work *work = NULL; + intel_wakeref_t wakeref = 0; unsigned int bound; int err; @@ -883,6 +884,9 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) } } + if (flags & PIN_GLOBAL) + wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); + /* No more allocations allowed once we hold vm->mutex */ err = mutex_lock_interruptible(&vma->vm->mutex); if (err) @@ -946,6 +950,8 @@ err_unlock: err_fence: if (work) dma_fence_work_commit(&work->base); + if (wakeref) + intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); err_pages: vma_put_pages(vma); return err; @@ -1246,11 +1252,16 @@ int __i915_vma_unbind(struct i915_vma *vma) int i915_vma_unbind(struct i915_vma *vma) { struct i915_address_space *vm = vma->vm; + intel_wakeref_t wakeref = 0; int err; if (!drm_mm_node_allocated(&vma->node)) return 0; + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) + /* XXX not always required: nop_clear_range */ + wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); + err = mutex_lock_interruptible(&vm->mutex); if (err) return err; @@ -1258,6 +1269,9 @@ int i915_vma_unbind(struct i915_vma *vma) err = __i915_vma_unbind(vma); mutex_unlock(&vm->mutex); + if (wakeref) + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); + return err; } diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 5fffa3c58908..02b31a62951e 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -30,148 +30,14 @@ #include <drm/drm_mm.h> +#include "gem/i915_gem_object.h" + #include "i915_gem_gtt.h" #include "i915_gem_fence_reg.h" -#include "gem/i915_gem_object.h" #include "i915_active.h" #include "i915_request.h" - -enum i915_cache_level; - -/** - * DOC: Virtual Memory Address - * - * A VMA represents a GEM BO that is bound into an address space. Therefore, a - * VMA's presence cannot be guaranteed before binding, or after unbinding the - * object into/from the address space. - * - * To make things as simple as possible (ie. no refcounting), a VMA's lifetime - * will always be <= an objects lifetime. So object refcounting should cover us. - */ -struct i915_vma { - struct drm_mm_node node; - - struct i915_address_space *vm; - const struct i915_vma_ops *ops; - - struct drm_i915_gem_object *obj; - struct dma_resv *resv; /** Alias of obj->resv */ - - struct sg_table *pages; - void __iomem *iomap; - void *private; /* owned by creator */ - - struct i915_fence_reg *fence; - - u64 size; - u64 display_alignment; - struct i915_page_sizes page_sizes; - - /* mmap-offset associated with fencing for this vma */ - struct i915_mmap_offset *mmo; - - u32 fence_size; - u32 fence_alignment; - - /** - * Count of the number of times this vma has been opened by different - * handles (but same file) for execbuf, i.e. the number of aliases - * that exist in the ctx->handle_vmas LUT for this vma. 
- */ - struct kref ref; - atomic_t open_count; - atomic_t flags; - /** - * How many users have pinned this object in GTT space. - * - * This is a tightly bound, fairly small number of users, so we - * stuff inside the flags field so that we can both check for overflow - * and detect a no-op i915_vma_pin() in a single check, while also - * pinning the vma. - * - * The worst case display setup would have the same vma pinned for - * use on each plane on each crtc, while also building the next atomic - * state and holding a pin for the length of the cleanup queue. In the - * future, the flip queue may be increased from 1. - * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84 - * - * For GEM, the number of concurrent users for pwrite/pread is - * unbounded. For execbuffer, it is currently one but will in future - * be extended to allow multiple clients to pin vma concurrently. - * - * We also use suballocated pages, with each suballocation claiming - * its own pin on the shared vma. At present, this is limited to - * exclusive cachelines of a single page, so a maximum of 64 possible - * users. - */ -#define I915_VMA_PIN_MASK 0x3ff -#define I915_VMA_OVERFLOW 0x200 - - /** Flags and address space this VMA is bound to */ -#define I915_VMA_GLOBAL_BIND_BIT 10 -#define I915_VMA_LOCAL_BIND_BIT 11 - -#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT)) -#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT)) - -#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND) - -#define I915_VMA_ALLOC_BIT 12 -#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT)) - -#define I915_VMA_ERROR_BIT 13 -#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT)) - -#define I915_VMA_GGTT_BIT 14 -#define I915_VMA_CAN_FENCE_BIT 15 -#define I915_VMA_USERFAULT_BIT 16 -#define I915_VMA_GGTT_WRITE_BIT 17 - -#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT)) -#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT)) -#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT)) -#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT)) - - struct i915_active active; - -#define I915_VMA_PAGES_BIAS 24 -#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1) - atomic_t pages_count; /* number of active binds to the pages */ - struct mutex pages_mutex; /* protect acquire/release of backing pages */ - - /** - * Support different GGTT views into the same object. - * This means there can be multiple VMA mappings per object and per VM. - * i915_ggtt_view_type is used to distinguish between those entries. - * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also - * assumed in GEM functions which take no ggtt view parameter. - */ - struct i915_ggtt_view ggtt_view; - - /** This object's place on the active/inactive lists */ - struct list_head vm_link; - - struct list_head obj_link; /* Link in the object's VMA list */ - struct rb_node obj_node; - struct hlist_node obj_hash; - - /** This vma's place in the execbuf reservation list */ - struct list_head exec_link; - struct list_head reloc_link; - - /** This vma's place in the eviction list */ - struct list_head evict_link; - - struct list_head closed_link; - - /** - * Used for performing relocations during execbuffer insertion. 
- */ - unsigned int *exec_flags; - struct hlist_node exec_node; - u32 exec_handle; -}; +#include "i915_vma_types.h" struct i915_vma * i915_vma_instance(struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h new file mode 100644 index 000000000000..e0942efd5236 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_vma_types.h @@ -0,0 +1,294 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __I915_VMA_TYPES_H__ +#define __I915_VMA_TYPES_H__ + +#include <linux/rbtree.h> + +#include <drm/drm_mm.h> + +#include "gem/i915_gem_object_types.h" + +enum i915_cache_level; + +/** + * DOC: Global GTT views + * + * Background and previous state + * + * Historically objects could exists (be bound) in global GTT space only as + * singular instances with a view representing all of the object's backing pages + * in a linear fashion. This view will be called a normal view. + * + * To support multiple views of the same object, where the number of mapped + * pages is not equal to the backing store, or where the layout of the pages + * is not linear, concept of a GGTT view was added. + * + * One example of an alternative view is a stereo display driven by a single + * image. In this case we would have a framebuffer looking like this + * (2x2 pages): + * + * 12 + * 34 + * + * Above would represent a normal GGTT view as normally mapped for GPU or CPU + * rendering. In contrast, fed to the display engine would be an alternative + * view which could look something like this: + * + * 1212 + * 3434 + * + * In this example both the size and layout of pages in the alternative view is + * different from the normal view. + * + * Implementation and usage + * + * GGTT views are implemented using VMAs and are distinguished via enum + * i915_ggtt_view_type and struct i915_ggtt_view. + * + * A new flavour of core GEM functions which work with GGTT bound objects were + * added with the _ggtt_ infix, and sometimes with _view postfix to avoid + * renaming in large amounts of code. They take the struct i915_ggtt_view + * parameter encapsulating all metadata required to implement a view. + * + * As a helper for callers which are only interested in the normal view, + * globally const i915_ggtt_view_normal singleton instance exists. 
All old core + * GEM API functions, the ones not taking the view parameter, are operating on, + * or with the normal GGTT view. + * + * Code wanting to add or use a new GGTT view needs to: + * + * 1. Add a new enum with a suitable name. + * 2. Extend the metadata in the i915_ggtt_view structure if required. + * 3. Add support to i915_get_vma_pages(). + * + * New views are required to build a scatter-gather table from within the + * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and + * exists for the lifetime of an VMA. + * + * Core API is designed to have copy semantics which means that passed in + * struct i915_ggtt_view does not need to be persistent (left around after + * calling the core API functions). + * + */ + +struct intel_remapped_plane_info { + /* in gtt pages */ + unsigned int width, height, stride, offset; +} __packed; + +struct intel_remapped_info { + struct intel_remapped_plane_info plane[2]; + unsigned int unused_mbz; +} __packed; + +struct intel_rotation_info { + struct intel_remapped_plane_info plane[2]; +} __packed; + +struct intel_partial_info { + u64 offset; + unsigned int size; +} __packed; + +enum i915_ggtt_view_type { + I915_GGTT_VIEW_NORMAL = 0, + I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info), + I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info), + I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info), +}; + +static inline void assert_i915_gem_gtt_types(void) +{ + BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int)); + BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int)); + BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int)); + + /* Check that rotation/remapped shares offsets for simplicity */ + BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) != + offsetof(struct intel_rotation_info, plane[0])); + BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) != + offsetofend(struct intel_rotation_info, plane[1])); + + /* As we encode the size of each branch inside the union into its type, + * we have to be careful that each branch has a unique size. + */ + switch ((enum i915_ggtt_view_type)0) { + case I915_GGTT_VIEW_NORMAL: + case I915_GGTT_VIEW_PARTIAL: + case I915_GGTT_VIEW_ROTATED: + case I915_GGTT_VIEW_REMAPPED: + /* gcc complains if these are identical cases */ + break; + } +} + +struct i915_ggtt_view { + enum i915_ggtt_view_type type; + union { + /* Members need to contain no holes/padding */ + struct intel_partial_info partial; + struct intel_rotation_info rotated; + struct intel_remapped_info remapped; + }; +}; + +/** + * DOC: Virtual Memory Address + * + * A VMA represents a GEM BO that is bound into an address space. Therefore, a + * VMA's presence cannot be guaranteed before binding, or after unbinding the + * object into/from the address space. + * + * To make things as simple as possible (ie. no refcounting), a VMA's lifetime + * will always be <= an objects lifetime. So object refcounting should cover us. 
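+ *
+ * For example (an illustration of the rule above, not additional API): a
+ * caller that looks up a VMA with i915_vma_instance(obj, vm, view) may
+ * only use that VMA while it still holds its reference on obj; once the
+ * object reference is dropped, the VMA must be assumed to be gone too.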
+ */ +struct i915_vma { + struct drm_mm_node node; + + struct i915_address_space *vm; + const struct i915_vma_ops *ops; + + struct drm_i915_gem_object *obj; + struct dma_resv *resv; /** Alias of obj->resv */ + + struct sg_table *pages; + void __iomem *iomap; + void *private; /* owned by creator */ + + struct i915_fence_reg *fence; + + u64 size; + u64 display_alignment; + struct i915_page_sizes page_sizes; + + /* mmap-offset associated with fencing for this vma */ + struct i915_mmap_offset *mmo; + + u32 fence_size; + u32 fence_alignment; + + /** + * Count of the number of times this vma has been opened by different + * handles (but same file) for execbuf, i.e. the number of aliases + * that exist in the ctx->handle_vmas LUT for this vma. + */ + struct kref ref; + atomic_t open_count; + atomic_t flags; + /** + * How many users have pinned this object in GTT space. + * + * This is a tightly bound, fairly small number of users, so we + * stuff inside the flags field so that we can both check for overflow + * and detect a no-op i915_vma_pin() in a single check, while also + * pinning the vma. + * + * The worst case display setup would have the same vma pinned for + * use on each plane on each crtc, while also building the next atomic + * state and holding a pin for the length of the cleanup queue. In the + * future, the flip queue may be increased from 1. + * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84 + * + * For GEM, the number of concurrent users for pwrite/pread is + * unbounded. For execbuffer, it is currently one but will in future + * be extended to allow multiple clients to pin vma concurrently. + * + * We also use suballocated pages, with each suballocation claiming + * its own pin on the shared vma. At present, this is limited to + * exclusive cachelines of a single page, so a maximum of 64 possible + * users. + */ +#define I915_VMA_PIN_MASK 0x3ff +#define I915_VMA_OVERFLOW 0x200 + + /** Flags and address space this VMA is bound to */ +#define I915_VMA_GLOBAL_BIND_BIT 10 +#define I915_VMA_LOCAL_BIND_BIT 11 + +#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT)) +#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT)) + +#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND) + +#define I915_VMA_ALLOC_BIT 12 +#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT)) + +#define I915_VMA_ERROR_BIT 13 +#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT)) + +#define I915_VMA_GGTT_BIT 14 +#define I915_VMA_CAN_FENCE_BIT 15 +#define I915_VMA_USERFAULT_BIT 16 +#define I915_VMA_GGTT_WRITE_BIT 17 + +#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT)) +#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT)) +#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT)) +#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT)) + + struct i915_active active; + +#define I915_VMA_PAGES_BIAS 24 +#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1) + atomic_t pages_count; /* number of active binds to the pages */ + struct mutex pages_mutex; /* protect acquire/release of backing pages */ + + /** + * Support different GGTT views into the same object. + * This means there can be multiple VMA mappings per object and per VM. + * i915_ggtt_view_type is used to distinguish between those entries. + * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also + * assumed in GEM functions which take no ggtt view parameter. 
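+	 *
+	 * A small illustrative initialiser (treating offset/size as GTT
+	 * pages is an assumption of this sketch, not stated above):
+	 *
+	 *	struct i915_ggtt_view view = {
+	 *		.type = I915_GGTT_VIEW_PARTIAL,
+	 *		.partial = { .offset = 0, .size = 1 },
+	 *	};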
+ */ + struct i915_ggtt_view ggtt_view; + + /** This object's place on the active/inactive lists */ + struct list_head vm_link; + + struct list_head obj_link; /* Link in the object's VMA list */ + struct rb_node obj_node; + struct hlist_node obj_hash; + + /** This vma's place in the execbuf reservation list */ + struct list_head exec_link; + struct list_head reloc_link; + + /** This vma's place in the eviction list */ + struct list_head evict_link; + + struct list_head closed_link; + + /** + * Used for performing relocations during execbuffer insertion. + */ + unsigned int *exec_flags; + struct hlist_node exec_node; + u32 exec_handle; +}; + +#endif + diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 1acb5db77431..6670a0763be2 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -519,7 +519,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) } } -static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) +static void bdw_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; int s, ss; @@ -600,7 +600,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) sseu->has_eu_pg = 0; } -static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) +static void hsw_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 fuse1; @@ -1021,11 +1021,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) /* Initialize slice/subslice/EU info */ if (IS_HASWELL(dev_priv)) - haswell_sseu_info_init(dev_priv); + hsw_sseu_info_init(dev_priv); else if (IS_CHERRYVIEW(dev_priv)) cherryview_sseu_info_init(dev_priv); else if (IS_BROADWELL(dev_priv)) - broadwell_sseu_info_init(dev_priv); + bdw_sseu_info_init(dev_priv); else if (IS_GEN(dev_priv, 9)) gen9_sseu_info_init(dev_priv); else if (IS_GEN(dev_priv, 10)) @@ -1093,7 +1093,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) * hooked up to an SFC (Scaler & Format Converter) unit. * In TGL each VDBOX has access to an SFC. 
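	 *
	 * Worked example (illustrative only): on an ICL part with VCS0 and
	 * VCS2 enabled, their logical vdbox indices are 0 and 1, so only
	 * VCS0 (logical index 0) gets SFC access, while on Gen12+ every
	 * enabled VDBOX reports SFC access.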
*/ - if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0) + if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0) RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i); } DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n", diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index e24c280e5930..d0d038b3cd79 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -16,6 +16,20 @@ const u32 intel_region_map[] = { [INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0), }; +struct intel_memory_region * +intel_memory_region_by_type(struct drm_i915_private *i915, + enum intel_memory_type mem_type) +{ + struct intel_memory_region *mr; + int id; + + for_each_memory_region(mr, i915, id) + if (mr->type == mem_type) + return mr; + + return NULL; +} + static u64 intel_memory_region_free_pages(struct intel_memory_region *mem, struct list_head *blocks) @@ -37,7 +51,7 @@ __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem, struct list_head *blocks) { mutex_lock(&mem->mm_lock); - intel_memory_region_free_pages(mem, blocks); + mem->avail += intel_memory_region_free_pages(mem, blocks); mutex_unlock(&mem->mm_lock); } @@ -106,6 +120,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem, break; } while (1); + mem->avail -= size; mutex_unlock(&mem->mm_lock); return 0; @@ -164,6 +179,8 @@ intel_memory_region_create(struct drm_i915_private *i915, mem->io_start = io_start; mem->min_page_size = min_page_size; mem->ops = ops; + mem->total = size; + mem->avail = mem->total; mutex_init(&mem->objects.lock); INIT_LIST_HEAD(&mem->objects.list); @@ -185,6 +202,16 @@ err_free: return ERR_PTR(err); } +void intel_memory_region_set_name(struct intel_memory_region *mem, + const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vsnprintf(mem->name, sizeof(mem->name), fmt, ap); + va_end(ap); +} + static void __intel_memory_region_destroy(struct kref *kref) { struct intel_memory_region *mem = diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 238722009677..232490d89a83 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -47,6 +47,10 @@ enum intel_region_id { #define I915_ALLOC_MIN_PAGE_SIZE BIT(0) #define I915_ALLOC_CONTIGUOUS BIT(1) +#define for_each_memory_region(mr, i915, id) \ + for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \ + for_each_if((mr) = (i915)->mm.regions[id]) + /** * Memory regions encoded as type | instance */ @@ -82,10 +86,13 @@ struct intel_memory_region { resource_size_t io_start; resource_size_t min_page_size; + resource_size_t total; + resource_size_t avail; unsigned int type; unsigned int instance; unsigned int id; + char name[8]; dma_addr_t remap_addr; @@ -125,5 +132,12 @@ void intel_memory_region_put(struct intel_memory_region *mem); int intel_memory_regions_hw_probe(struct drm_i915_private *i915); void intel_memory_regions_driver_release(struct drm_i915_private *i915); +struct intel_memory_region * +intel_memory_region_by_type(struct drm_i915_private *i915, + enum intel_memory_type mem_type); + +__printf(2, 3) void +intel_memory_region_set_name(struct intel_memory_region *mem, + const char *fmt, ...); #endif diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index 43b68b5fc562..4ed60e1f01db 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -12,90 +12,91 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) { switch (id) { case INTEL_PCH_IBX_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n"); WARN_ON(!IS_GEN(dev_priv, 5)); return PCH_IBX; case INTEL_PCH_CPT_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found CougarPoint PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n"); WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv)); return PCH_CPT; case INTEL_PCH_PPT_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found PantherPoint PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n"); WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv)); /* PantherPoint is CPT compatible */ return PCH_CPT; case INTEL_PCH_LPT_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found LynxPoint PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n"); WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); return PCH_LPT; case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n"); WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); return PCH_LPT; case INTEL_PCH_WPT_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found WildcatPoint PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n"); WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); /* WildcatPoint is LPT compatible */ return PCH_LPT; case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n"); WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); /* 
WildcatPoint is LPT compatible */ return PCH_LPT; case INTEL_PCH_SPT_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n"); WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); return PCH_SPT; case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n"); WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); return PCH_SPT; case INTEL_PCH_KBP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n"); + drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n"); WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); /* KBP is SPT compatible */ return PCH_SPT; case INTEL_PCH_CNP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n"); + drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n"); WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); return PCH_CNP; case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n"); + drm_dbg_kms(&dev_priv->drm, + "Found Cannon Lake LP PCH (CNP-LP)\n"); WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); return PCH_CNP; case INTEL_PCH_CMP_DEVICE_ID_TYPE: case INTEL_PCH_CMP2_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n"); + drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n"); WARN_ON(!IS_COFFEELAKE(dev_priv)); /* CometPoint is CNP Compatible */ return PCH_CNP; case INTEL_PCH_CMP_V_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Comet Lake V PCH (CMP-V)\n"); + drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n"); WARN_ON(!IS_COFFEELAKE(dev_priv)); /* Comet Lake V PCH is based on KBP, which is SPT compatible */ return PCH_SPT; case INTEL_PCH_ICP_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Ice Lake PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n"); WARN_ON(!IS_ICELAKE(dev_priv)); return PCH_ICP; case INTEL_PCH_MCC_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n"); WARN_ON(!IS_ELKHARTLAKE(dev_priv)); return PCH_MCC; case INTEL_PCH_TGP_DEVICE_ID_TYPE: case INTEL_PCH_TGP2_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n"); WARN_ON(!IS_TIGERLAKE(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: case INTEL_PCH_JSP2_DEVICE_ID_TYPE: - DRM_DEBUG_KMS("Found Jasper Lake PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); WARN_ON(!IS_ELKHARTLAKE(dev_priv)); return PCH_JSP; default: @@ -145,9 +146,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv) id = INTEL_PCH_IBX_DEVICE_ID_TYPE; if (id) - DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id); + drm_dbg_kms(&dev_priv->drm, "Assuming PCH ID %04x\n", id); else - DRM_DEBUG_KMS("Assuming no PCH\n"); + drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n"); return id; } @@ -201,13 +202,14 @@ void intel_detect_pch(struct drm_i915_private *dev_priv) * display. 
*/ if (pch && !HAS_DISPLAY(dev_priv)) { - DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n"); + drm_dbg_kms(&dev_priv->drm, + "Display disabled, reverting to NOP PCH\n"); dev_priv->pch_type = PCH_NOP; dev_priv->pch_id = 0; } if (!pch) - DRM_DEBUG_KMS("No PCH found.\n"); + drm_dbg_kms(&dev_priv->drm, "No PCH found.\n"); pci_dev_put(pch); } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 31ec82337e4f..bd2d30ecc030 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -140,7 +140,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv) } -static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv) +static void pnv_get_mem_freq(struct drm_i915_private *dev_priv) { u32 tmp; @@ -178,7 +178,7 @@ static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv) dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; } -static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv) +static void ilk_get_mem_freq(struct drm_i915_private *dev_priv) { u16 ddrpll, csipll; @@ -199,8 +199,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv) dev_priv->mem_freq = 1600; break; default: - DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", - ddrpll & 0xff); + drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n", + ddrpll & 0xff); dev_priv->mem_freq = 0; break; } @@ -228,8 +228,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv) dev_priv->fsb_freq = 6400; break; default: - DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", - csipll & 0x3ff); + drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n", + csipll & 0x3ff); dev_priv->fsb_freq = 0; break; } @@ -314,7 +314,8 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) - DRM_ERROR("timed out waiting for Punit DDR DVFS request\n"); + drm_err(&dev_priv->drm, + "timed out waiting for Punit DDR DVFS request\n"); vlv_punit_put(dev_priv); } @@ -383,9 +384,9 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl trace_intel_memory_cxsr(dev_priv, was_enabled, enable); - DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n", - enableddisabled(enable), - enableddisabled(was_enabled)); + drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n", + enableddisabled(enable), + enableddisabled(was_enabled)); return was_enabled; } @@ -510,8 +511,8 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, if (i9xx_plane == PLANE_B) size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; - DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n", - dsparb, plane_name(i9xx_plane), size); + drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + dsparb, plane_name(i9xx_plane), size); return size; } @@ -527,8 +528,8 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv, size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; size >>= 1; /* Convert to cachelines */ - DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n", - dsparb, plane_name(i9xx_plane), size); + drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + dsparb, plane_name(i9xx_plane), size); return size; } @@ -542,41 +543,45 @@ static int i845_get_fifo_size(struct drm_i915_private *dev_priv, size = dsparb & 0x7f; size >>= 2; /* Convert to cachelines */ - DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n", - dsparb, plane_name(i9xx_plane), size); + 
drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + dsparb, plane_name(i9xx_plane), size); return size; } /* Pineview has different values for various configs */ -static const struct intel_watermark_params pineview_display_wm = { +static const struct intel_watermark_params pnv_display_wm = { .fifo_size = PINEVIEW_DISPLAY_FIFO, .max_wm = PINEVIEW_MAX_WM, .default_wm = PINEVIEW_DFT_WM, .guard_size = PINEVIEW_GUARD_WM, .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; -static const struct intel_watermark_params pineview_display_hplloff_wm = { + +static const struct intel_watermark_params pnv_display_hplloff_wm = { .fifo_size = PINEVIEW_DISPLAY_FIFO, .max_wm = PINEVIEW_MAX_WM, .default_wm = PINEVIEW_DFT_HPLLOFF_WM, .guard_size = PINEVIEW_GUARD_WM, .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; -static const struct intel_watermark_params pineview_cursor_wm = { + +static const struct intel_watermark_params pnv_cursor_wm = { .fifo_size = PINEVIEW_CURSOR_FIFO, .max_wm = PINEVIEW_CURSOR_MAX_WM, .default_wm = PINEVIEW_CURSOR_DFT_WM, .guard_size = PINEVIEW_CURSOR_GUARD_WM, .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; -static const struct intel_watermark_params pineview_cursor_hplloff_wm = { + +static const struct intel_watermark_params pnv_cursor_hplloff_wm = { .fifo_size = PINEVIEW_CURSOR_FIFO, .max_wm = PINEVIEW_CURSOR_MAX_WM, .default_wm = PINEVIEW_CURSOR_DFT_WM, .guard_size = PINEVIEW_CURSOR_GUARD_WM, .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; + static const struct intel_watermark_params i965_cursor_wm_info = { .fifo_size = I965_CURSOR_FIFO, .max_wm = I965_CURSOR_MAX_WM, @@ -584,6 +589,7 @@ static const struct intel_watermark_params i965_cursor_wm_info = { .guard_size = 2, .cacheline_size = I915_FIFO_LINE_SIZE, }; + static const struct intel_watermark_params i945_wm_info = { .fifo_size = I945_FIFO_SIZE, .max_wm = I915_MAX_WM, @@ -591,6 +597,7 @@ static const struct intel_watermark_params i945_wm_info = { .guard_size = 2, .cacheline_size = I915_FIFO_LINE_SIZE, }; + static const struct intel_watermark_params i915_wm_info = { .fifo_size = I915_FIFO_SIZE, .max_wm = I915_MAX_WM, @@ -598,6 +605,7 @@ static const struct intel_watermark_params i915_wm_info = { .guard_size = 2, .cacheline_size = I915_FIFO_LINE_SIZE, }; + static const struct intel_watermark_params i830_a_wm_info = { .fifo_size = I855GM_FIFO_SIZE, .max_wm = I915_MAX_WM, @@ -605,6 +613,7 @@ static const struct intel_watermark_params i830_a_wm_info = { .guard_size = 2, .cacheline_size = I830_FIFO_LINE_SIZE, }; + static const struct intel_watermark_params i830_bc_wm_info = { .fifo_size = I855GM_FIFO_SIZE, .max_wm = I915_MAX_WM/2, @@ -612,6 +621,7 @@ static const struct intel_watermark_params i830_bc_wm_info = { .guard_size = 2, .cacheline_size = I830_FIFO_LINE_SIZE, }; + static const struct intel_watermark_params i845_wm_info = { .fifo_size = I830_FIFO_SIZE, .max_wm = I915_MAX_WM, @@ -848,7 +858,7 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) return enabled; } -static void pineview_update_wm(struct intel_crtc *unused_crtc) +static void pnv_update_wm(struct intel_crtc *unused_crtc) { struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); struct intel_crtc *crtc; @@ -861,7 +871,8 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc) dev_priv->fsb_freq, dev_priv->mem_freq); if (!latency) { - DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); + drm_dbg_kms(&dev_priv->drm, + "Unknown FSB/MEM found, disable CxSR\n"); intel_set_memory_cxsr(dev_priv, false); return; } @@ 
-876,18 +887,18 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc) int clock = adjusted_mode->crtc_clock; /* Display SR */ - wm = intel_calculate_wm(clock, &pineview_display_wm, - pineview_display_wm.fifo_size, + wm = intel_calculate_wm(clock, &pnv_display_wm, + pnv_display_wm.fifo_size, cpp, latency->display_sr); reg = I915_READ(DSPFW1); reg &= ~DSPFW_SR_MASK; reg |= FW_WM(wm, SR); I915_WRITE(DSPFW1, reg); - DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); + drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); /* cursor SR */ - wm = intel_calculate_wm(clock, &pineview_cursor_wm, - pineview_display_wm.fifo_size, + wm = intel_calculate_wm(clock, &pnv_cursor_wm, + pnv_display_wm.fifo_size, 4, latency->cursor_sr); reg = I915_READ(DSPFW3); reg &= ~DSPFW_CURSOR_SR_MASK; @@ -895,8 +906,8 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc) I915_WRITE(DSPFW3, reg); /* Display HPLL off SR */ - wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, - pineview_display_hplloff_wm.fifo_size, + wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm, + pnv_display_hplloff_wm.fifo_size, cpp, latency->display_hpll_disable); reg = I915_READ(DSPFW3); reg &= ~DSPFW_HPLL_SR_MASK; @@ -904,14 +915,14 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc) I915_WRITE(DSPFW3, reg); /* cursor HPLL off SR */ - wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, - pineview_display_hplloff_wm.fifo_size, + wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm, + pnv_display_hplloff_wm.fifo_size, 4, latency->cursor_hpll_disable); reg = I915_READ(DSPFW3); reg &= ~DSPFW_HPLL_CURSOR_MASK; reg |= FW_WM(wm, HPLL_CURSOR); I915_WRITE(DSPFW3, reg); - DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); + drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg); intel_set_memory_cxsr(dev_priv, true); } else { @@ -1202,6 +1213,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); enum plane_id plane_id = plane->id; bool dirty = false; @@ -1254,16 +1266,18 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, out: if (dirty) { - DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n", - plane->base.name, - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id], - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id], - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]); + drm_dbg_kms(&dev_priv->drm, + "%s watermarks: normal=%d, SR=%d, HPLL=%d\n", + plane->base.name, + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id], + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id], + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]); if (plane_id == PLANE_PRIMARY) - DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n", - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc, - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc); + drm_dbg_kms(&dev_priv->drm, + "FBC watermarks: SR=%d, HPLL=%d\n", + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc, + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc); } return dirty; @@ -1781,6 +1795,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = 
to_i915(crtc_state->uapi.crtc->dev); enum plane_id plane_id = plane->id; int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); int level; @@ -1808,11 +1823,12 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, out: if (dirty) - DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", - plane->base.name, - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id], - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]); + drm_dbg_kms(&dev_priv->drm, + "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", + plane->base.name, + crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], + crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id], + crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]); return dirty; } @@ -2227,8 +2243,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc) if (srwm < 0) srwm = 1; srwm &= 0x1ff; - DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", - entries, srwm); + drm_dbg_kms(&dev_priv->drm, + "self-refresh entries: %d, wm: %d\n", + entries, srwm); entries = intel_wm_method2(clock, htotal, crtc->base.cursor->state->crtc_w, 4, @@ -2241,8 +2258,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc) if (cursor_sr > i965_cursor_wm_info.max_wm) cursor_sr = i965_cursor_wm_info.max_wm; - DRM_DEBUG_KMS("self-refresh watermark: display plane %d " - "cursor %d\n", srwm, cursor_sr); + drm_dbg_kms(&dev_priv->drm, + "self-refresh watermark: display plane %d " + "cursor %d\n", srwm, cursor_sr); cxsr_enabled = true; } else { @@ -2251,8 +2269,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc) intel_set_memory_cxsr(dev_priv, false); } - DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", - srwm); + drm_dbg_kms(&dev_priv->drm, + "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", + srwm); /* 965 has limitations... 
*/ I915_WRITE(DSPFW1, FW_WM(srwm, SR) | @@ -2342,7 +2361,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) planeb_wm = wm_info->max_wm; } - DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); + drm_dbg_kms(&dev_priv->drm, + "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); if (IS_I915GM(dev_priv) && enabled) { struct drm_i915_gem_object *obj; @@ -2384,7 +2404,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) entries = intel_wm_method2(clock, htotal, hdisplay, cpp, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); - DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); + drm_dbg_kms(&dev_priv->drm, + "self-refresh entries: %d\n", entries); srwm = wm_info->fifo_size - entries; if (srwm < 0) srwm = 1; @@ -2396,8 +2417,9 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) I915_WRITE(FW_BLC_SELF, srwm & 0x3f); } - DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", - planea_wm, planeb_wm, cwm, srwm); + drm_dbg_kms(&dev_priv->drm, + "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", + planea_wm, planeb_wm, cwm, srwm); fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); fwater_hi = (cwm & 0x1f); @@ -2433,7 +2455,8 @@ static void i845_update_wm(struct intel_crtc *unused_crtc) fwater_lo = I915_READ(FW_BLC) & ~0xfff; fwater_lo |= (3<<8) | planea_wm; - DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); + drm_dbg_kms(&dev_priv->drm, + "Setting FIFO watermarks - A: %d\n", planea_wm); I915_WRITE(FW_BLC, fwater_lo); } @@ -2832,7 +2855,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, &val, NULL); if (ret) { - DRM_ERROR("SKL Mailbox read error = %d\n", ret); + drm_err(&dev_priv->drm, + "SKL Mailbox read error = %d\n", ret); return; } @@ -2850,7 +2874,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); if (ret) { - DRM_ERROR("SKL Mailbox read error = %d\n", ret); + drm_err(&dev_priv->drm, + "SKL Mailbox read error = %d\n", ret); return; } @@ -2968,8 +2993,9 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv, unsigned int latency = wm[level]; if (latency == 0) { - DRM_DEBUG_KMS("%s WM%d latency not provided\n", - name, level); + drm_dbg_kms(&dev_priv->drm, + "%s WM%d latency not provided\n", + name, level); continue; } @@ -2982,9 +3008,9 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv, else if (level > 0) latency *= 5; - DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n", - name, level, wm[level], - latency / 10, latency % 10); + drm_dbg_kms(&dev_priv->drm, + "%s WM%d latency %u (%u.%u usec)\n", name, level, + wm[level], latency / 10, latency % 10); } } @@ -3018,7 +3044,8 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) if (!changed) return; - DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); + drm_dbg_kms(&dev_priv->drm, + "WM latency values increased to avoid potential underruns\n"); intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); @@ -3046,7 +3073,8 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) dev_priv->wm.spr_latency[3] = 0; dev_priv->wm.cur_latency[3] = 0; - DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n"); + drm_dbg_kms(&dev_priv->drm, + "LP3 watermarks disabled due 
to potential for lost interrupts\n"); intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); @@ -3096,7 +3124,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, /* At least LP0 must be valid */ if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { - DRM_DEBUG_KMS("LP0 watermark invalid\n"); + drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n"); return false; } @@ -3673,7 +3701,7 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv) return; } - DRM_DEBUG_DRIVER("Couldn't read SAGV block time!\n"); + drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n"); } else if (IS_GEN(dev_priv, 11)) { dev_priv->sagv_block_time_us = 10; return; @@ -3713,7 +3741,7 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) if (dev_priv->sagv_status == I915_SAGV_ENABLED) return 0; - DRM_DEBUG_KMS("Enabling SAGV\n"); + drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n"); ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_ENABLE); @@ -3724,11 +3752,11 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) * don't actually have SAGV. */ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { - DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); + drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n"); dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; return 0; } else if (ret < 0) { - DRM_ERROR("Failed to enable SAGV\n"); + drm_err(&dev_priv->drm, "Failed to enable SAGV\n"); return ret; } @@ -3747,7 +3775,7 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) if (dev_priv->sagv_status == I915_SAGV_DISABLED) return 0; - DRM_DEBUG_KMS("Disabling SAGV\n"); + drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n"); /* bspec says to keep retrying for at least 1 ms */ ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_DISABLE, @@ -3758,11 +3786,11 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) * don't actually have SAGV. 
*/ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { - DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); + drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n"); dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; return 0; } else if (ret < 0) { - DRM_ERROR("Failed to disable SAGV (%d)\n", ret); + drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret); return ret; } @@ -4331,9 +4359,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, } if (level < 0) { - DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations"); - DRM_DEBUG_KMS("minimum required %d/%d\n", blocks, - alloc_size); + drm_dbg_kms(&dev_priv->drm, + "Requested display configuration exceeds system DDB limitations"); + drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n", + blocks, alloc_size); return -EINVAL; } @@ -4561,7 +4590,8 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, /* only planar format has two planes */ if (color_plane == 1 && !intel_format_info_is_yuv_semiplanar(format, modifier)) { - DRM_DEBUG_KMS("Non planar format have single plane\n"); + drm_dbg_kms(&dev_priv->drm, + "Non planar format have single plane\n"); return -EINVAL; } @@ -5260,10 +5290,11 @@ skl_print_wm_changes(struct intel_atomic_state *state) if (skl_ddb_entry_equal(old, new)) continue; - DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", - plane->base.base.id, plane->base.name, - old->start, old->end, new->start, new->end, - skl_ddb_entry_size(old), skl_ddb_entry_size(new)); + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", + plane->base.base.id, plane->base.name, + old->start, old->end, new->start, new->end, + skl_ddb_entry_size(old), skl_ddb_entry_size(new)); } for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { @@ -5276,70 +5307,74 @@ skl_print_wm_changes(struct intel_atomic_state *state) if (skl_plane_wm_equals(dev_priv, old_wm, new_wm)) continue; - DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm" - " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n", - plane->base.base.id, plane->base.name, - enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en), - enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en), - enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en), - enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en), - enast(old_wm->trans_wm.plane_en), - enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en), - enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en), - enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en), - enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en), - enast(new_wm->trans_wm.plane_en)); - - DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm" + " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n", + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en), + enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en), + enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en), + enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en), + enast(old_wm->trans_wm.plane_en), + enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en), + enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en), + enast(new_wm->wm[4].plane_en), 
enast(new_wm->wm[5].plane_en), + enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en), + enast(new_wm->trans_wm.plane_en)); + + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n", - plane->base.base.id, plane->base.name, - enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l, - enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, - enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l, - enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l, - enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l, - enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l, - enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l, - enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l, - enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l, - - enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l, - enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l, - enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l, - enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l, - enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l, - enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l, - enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l, - enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l, - enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l); - - DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", - plane->base.base.id, plane->base.name, - old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b, - old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b, - old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b, - old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b, - old_wm->trans_wm.plane_res_b, - new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b, - new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b, - new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b, - new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b, - new_wm->trans_wm.plane_res_b); - - DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", - plane->base.base.id, plane->base.name, - old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, - old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, - old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, - old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, - old_wm->trans_wm.min_ddb_alloc, - new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, - new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, - new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, - new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, - new_wm->trans_wm.min_ddb_alloc); + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l, + enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, + enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l, + enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l, + enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l, + enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l, + enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l, + enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l, + enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l, + + 
enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l, + enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l, + enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l, + enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l, + enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l, + enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l, + enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l, + enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l, + enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l); + + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b, + old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b, + old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b, + old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b, + old_wm->trans_wm.plane_res_b, + new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b, + new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b, + new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b, + new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b, + new_wm->trans_wm.plane_res_b); + + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, + old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, + old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, + old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, + old_wm->trans_wm.min_ddb_alloc, + new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, + new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, + new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, + new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, + new_wm->trans_wm.min_ddb_alloc); } } } @@ -5931,19 +5966,22 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) crtc_state->wm.g4x.optimal = *active; crtc_state->wm.g4x.intermediate = *active; - DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", - pipe_name(pipe), - wm->pipe[pipe].plane[PLANE_PRIMARY], - wm->pipe[pipe].plane[PLANE_CURSOR], - wm->pipe[pipe].plane[PLANE_SPRITE0]); + drm_dbg_kms(&dev_priv->drm, + "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", + pipe_name(pipe), + wm->pipe[pipe].plane[PLANE_PRIMARY], + wm->pipe[pipe].plane[PLANE_CURSOR], + wm->pipe[pipe].plane[PLANE_SPRITE0]); } - DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n", - wm->sr.plane, wm->sr.cursor, wm->sr.fbc); - DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", - wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); - DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n", - yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en)); + drm_dbg_kms(&dev_priv->drm, + "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n", + wm->sr.plane, wm->sr.cursor, wm->sr.fbc); + drm_dbg_kms(&dev_priv->drm, + "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", + wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); + drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n", + yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en)); } void g4x_wm_sanitize(struct drm_i915_private *dev_priv) @@ -6035,8 +6073,9 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) if (wait_for((vlv_punit_read(dev_priv, 
PUNIT_REG_DDR_SETUP2) & FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { - DRM_DEBUG_KMS("Punit not acking DDR DVFS request, " - "assuming DDR DVFS is disabled\n"); + drm_dbg_kms(&dev_priv->drm, + "Punit not acking DDR DVFS request, " + "assuming DDR DVFS is disabled\n"); dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; } else { val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); @@ -6087,16 +6126,18 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) crtc_state->wm.vlv.optimal = *active; crtc_state->wm.vlv.intermediate = *active; - DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", - pipe_name(pipe), - wm->pipe[pipe].plane[PLANE_PRIMARY], - wm->pipe[pipe].plane[PLANE_CURSOR], - wm->pipe[pipe].plane[PLANE_SPRITE0], - wm->pipe[pipe].plane[PLANE_SPRITE1]); + drm_dbg_kms(&dev_priv->drm, + "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", + pipe_name(pipe), + wm->pipe[pipe].plane[PLANE_PRIMARY], + wm->pipe[pipe].plane[PLANE_CURSOR], + wm->pipe[pipe].plane[PLANE_SPRITE0], + wm->pipe[pipe].plane[PLANE_SPRITE1]); } - DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", - wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); + drm_dbg_kms(&dev_priv->drm, + "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", + wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); } void vlv_wm_sanitize(struct drm_i915_private *dev_priv) @@ -6412,8 +6453,9 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv) tmp = I915_READ(MCH_SSKPD); if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) - DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", - tmp); + drm_dbg_kms(&dev_priv->drm, + "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", + tmp); } static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) @@ -6590,6 +6632,17 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) /* WaEnable32PlaneMode:icl */ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE)); + + /* + * Wa_1408615072:icl,ehl (vsunit) + * Wa_1407596294:icl,ehl (hsunit) + */ + intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, + 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); + + /* Wa_1407352427:icl,ehl */ + intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2, + 0, PSDUNIT_CLKGATE_DIS); } static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -6597,6 +6650,10 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) u32 vd_pg_enable = 0; unsigned int i; + /* Wa_1408615072:tgl */ + intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2, + 0, VSUNIT_CLKGATE_DIS_TGL); + /* This is not a WA. 
Enable VD HCP & MFX_ENC powergate */ for (i = 0; i < I915_MAX_VCS; i++) { if (HAS_ENGINE(dev_priv, _VCS(i))) @@ -7113,7 +7170,8 @@ void intel_suspend_hw(struct drm_i915_private *dev_priv) static void nop_init_clock_gating(struct drm_i915_private *dev_priv) { - DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n"); + drm_dbg_kms(&dev_priv->drm, + "No clock gating settings or workarounds applied.\n"); } /** @@ -7180,9 +7238,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv) { /* For cxsr */ if (IS_PINEVIEW(dev_priv)) - i915_pineview_get_mem_freq(dev_priv); + pnv_get_mem_freq(dev_priv); else if (IS_GEN(dev_priv, 5)) - i915_ironlake_get_mem_freq(dev_priv); + ilk_get_mem_freq(dev_priv); if (intel_has_sagv(dev_priv)) skl_setup_sagv_block_time(dev_priv); @@ -7208,8 +7266,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->display.optimize_watermarks = ilk_optimize_watermarks; } else { - DRM_DEBUG_KMS("Failed to read display plane latency. " - "Disable CxSR\n"); + drm_dbg_kms(&dev_priv->drm, + "Failed to read display plane latency. " + "Disable CxSR\n"); } } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_setup_wm_latency(dev_priv); @@ -7229,7 +7288,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->is_ddr3, dev_priv->fsb_freq, dev_priv->mem_freq)) { - DRM_INFO("failed to find known CxSR latency " + drm_info(&dev_priv->drm, + "failed to find known CxSR latency " "(found ddr%s fsb freq %d, mem freq %d), " "disabling CxSR\n", (dev_priv->is_ddr3 == 1) ? "3" : "2", @@ -7238,7 +7298,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv) intel_set_memory_cxsr(dev_priv, false); dev_priv->display.update_wm = NULL; } else - dev_priv->display.update_wm = pineview_update_wm; + dev_priv->display.update_wm = pnv_update_wm; } else if (IS_GEN(dev_priv, 4)) { dev_priv->display.update_wm = i965_update_wm; } else if (IS_GEN(dev_priv, 3)) { @@ -7253,7 +7313,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->display.get_fifo_size = i830_get_fifo_size; } } else { - DRM_ERROR("unexpected fall-through in intel_init_pm\n"); + drm_err(&dev_priv->drm, + "unexpected fall-through in %s\n", __func__); } } diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c index eddb392917aa..14b59b899c9b 100644 --- a/drivers/gpu/drm/i915/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/intel_region_lmem.c @@ -90,6 +90,8 @@ region_lmem_init(struct intel_memory_region *mem) if (ret) io_mapping_fini(&mem->iomap); + intel_memory_region_set_name(mem, "local"); + return ret; } @@ -123,10 +125,12 @@ intel_setup_fake_lmem(struct drm_i915_private *i915) io_start, &intel_region_lmem_ops); if (!IS_ERR(mem)) { - DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region); - DRM_INFO("Intel graphics fake LMEM IO start: %llx\n", - (u64)mem->io_start); - DRM_INFO("Intel graphics fake LMEM size: %llx\n", + drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n", + &mem->region); + drm_info(&i915->drm, + "Intel graphics fake LMEM IO start: %llx\n", + (u64)mem->io_start); + drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n", (u64)resource_size(&mem->region)); } diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index e06b35b844a0..cbfb7171d62d 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -105,8 +105,8 @@ static int vlv_sideband_rw(struct drm_i915_private *i915, if (intel_wait_for_register(uncore, VLV_IOSF_DOORBELL_REQ, 
IOSF_SB_BUSY, 0, 5)) { - DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n", - is_read ? "read" : "write"); + drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n", + is_read ? "read" : "write"); return -EAGAIN; } @@ -129,8 +129,8 @@ static int vlv_sideband_rw(struct drm_i915_private *i915, *val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA); err = 0; } else { - DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n", - is_read ? "read" : "write"); + drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n", + is_read ? "read" : "write"); err = -ETIMEDOUT; } @@ -283,7 +283,8 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg, if (intel_wait_for_register_fw(uncore, SBI_CTL_STAT, SBI_BUSY, 0, 100)) { - DRM_ERROR("timeout waiting for SBI to become ready\n"); + drm_err(&i915->drm, + "timeout waiting for SBI to become ready\n"); return -EBUSY; } @@ -301,12 +302,13 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg, if (__intel_wait_for_register_fw(uncore, SBI_CTL_STAT, SBI_BUSY, 0, 100, 100, &cmd)) { - DRM_ERROR("timeout waiting for SBI to complete read\n"); + drm_err(&i915->drm, + "timeout waiting for SBI to complete read\n"); return -ETIMEDOUT; } if (cmd & SBI_RESPONSE_FAIL) { - DRM_ERROR("error during SBI read of reg %x\n", reg); + drm_err(&i915->drm, "error during SBI read of reg %x\n", reg); return -ENXIO; } @@ -426,8 +428,9 @@ int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, mutex_unlock(&i915->sb_lock); if (err) { - DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n", - mbox, __builtin_return_address(0), err); + drm_dbg(&i915->drm, + "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n", + mbox, __builtin_return_address(0), err); } return err; @@ -447,8 +450,9 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, mutex_unlock(&i915->sb_lock); if (err) { - DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n", - val, mbox, __builtin_return_address(0), err); + drm_dbg(&i915->drm, + "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n", + val, mbox, __builtin_return_address(0), err); } return err; @@ -519,7 +523,8 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, * requests, and for any quirks of the PCODE firmware that delays * the request completion. 
*/ - DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n"); + drm_dbg_kms(&i915->drm, + "PCODE timeout, retrying with preemption disabled\n"); WARN_ON_ONCE(timeout_base_ms > 3); preempt_disable(); ret = wait_for_atomic(COND, 50); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 94a97bf8c021..5f2cf6f43b8b 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -359,7 +359,8 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore) if (wait_for_atomic((n = fifo_free_entries(uncore)) > GT_FIFO_NUM_RESERVED_ENTRIES, GT_FIFO_TIMEOUT_MS)) { - DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n); + drm_dbg(&uncore->i915->drm, + "GT_FIFO timeout, entries: %u\n", n); return; } } @@ -432,7 +433,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore) break; if (--retry_count == 0) { - DRM_ERROR("Timed out waiting for forcewake timers to finish\n"); + drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n"); break; } @@ -490,7 +491,7 @@ gen6_check_for_fifo_debug(struct intel_uncore *uncore) fifodbg = __raw_uncore_read32(uncore, GTFIFODBG); if (unlikely(fifodbg)) { - DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg); + drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg); __raw_uncore_write32(uncore, GTFIFODBG, fifodbg); } @@ -562,7 +563,7 @@ void intel_uncore_resume_early(struct intel_uncore *uncore) unsigned int restore_forcewake; if (intel_uncore_unclaimed_mmio(uncore)) - DRM_DEBUG("unclaimed mmio detected on resume, clearing\n"); + drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n"); if (!intel_uncore_has_forcewake(uncore)) return; @@ -1595,8 +1596,8 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) spin_unlock_irq(&uncore->lock); if (!(ecobus & FORCEWAKE_MT_ENABLE)) { - DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); - DRM_INFO("when using vblank-synced partial screen updates.\n"); + drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n"); + drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n"); fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER); fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); @@ -1683,8 +1684,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore) mmio_size = 2 * 1024 * 1024; uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size); if (uncore->regs == NULL) { - DRM_ERROR("failed to map registers\n"); - + drm_err(&i915->drm, "failed to map registers\n"); return -EIO; } @@ -1807,7 +1807,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) /* clear out unclaimed reg detection bit */ if (intel_uncore_unclaimed_mmio(uncore)) - DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); + drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n"); return 0; @@ -2072,9 +2072,10 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore) if (unlikely(check_for_unclaimed_mmio(uncore))) { if (!i915_modparams.mmio_debug) { - DRM_DEBUG("Unclaimed register detected, " - "enabling oneshot unclaimed register reporting. " - "Please use i915.mmio_debug=N for more information.\n"); + drm_dbg(&uncore->i915->drm, + "Unclaimed register detected, " + "enabling oneshot unclaimed register reporting. 
" + "Please use i915.mmio_debug=N for more information.\n"); i915_modparams.mmio_debug++; } uncore->debug->unclaimed_mmio_check--; diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 59aa1b6f1827..8fbf6f4d3f26 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -95,16 +95,17 @@ static void __intel_wakeref_put_work(struct work_struct *wrk) void __intel_wakeref_init(struct intel_wakeref *wf, struct intel_runtime_pm *rpm, const struct intel_wakeref_ops *ops, - struct lock_class_key *key) + struct intel_wakeref_lockclass *key) { wf->rpm = rpm; wf->ops = ops; - __mutex_init(&wf->mutex, "wakeref", key); + __mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex); atomic_set(&wf->count, 0); wf->wakeref = 0; INIT_WORK(&wf->work, __intel_wakeref_put_work); + lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0); } int intel_wakeref_wait_for_idle(struct intel_wakeref *wf) diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 8d945db94b7a..7d1e676b71ef 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -44,12 +44,17 @@ struct intel_wakeref { struct work_struct work; }; +struct intel_wakeref_lockclass { + struct lock_class_key mutex; + struct lock_class_key work; +}; + void __intel_wakeref_init(struct intel_wakeref *wf, struct intel_runtime_pm *rpm, const struct intel_wakeref_ops *ops, - struct lock_class_key *key); + struct intel_wakeref_lockclass *key); #define intel_wakeref_init(wf, rpm, ops) do { \ - static struct lock_class_key __key; \ + static struct intel_wakeref_lockclass __key; \ \ __intel_wakeref_init((wf), (rpm), (ops), &__key); \ } while (0) diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile deleted file mode 100644 index df028e2b0d64..000000000000 --- a/drivers/gpu/drm/i915/oa/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: MIT - -# For building individual subdir files on the command line -subdir-ccflags-y += -I$(srctree)/$(src)/.. - -# Extra header tests -header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index b37fc53973cc..78f36faf2bbe 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -124,8 +124,6 @@ static void pm_resume(struct drm_i915_private *i915) * that runtime-pm just works. 
*/ with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - intel_gt_sanitize(&i915->gt, false); - i915_gem_restore_gtt_mappings(i915); i915_gem_restore_fences(&i915->ggtt); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 80cde5bda922..b342bef5e7c9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -34,6 +34,7 @@ #include "mock_drm.h" #include "mock_gem_device.h" +#include "mock_gtt.h" #include "igt_flush_test.h" static void cleanup_freed_objects(struct drm_i915_private *i915) @@ -151,7 +152,7 @@ static int igt_ppgtt_alloc(void *arg) if (!HAS_PPGTT(dev_priv)) return 0; - ppgtt = __ppgtt_create(dev_priv); + ppgtt = i915_ppgtt_create(&dev_priv->gt); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -206,8 +207,7 @@ err_ppgtt_cleanup: return err; } -static int lowlevel_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int lowlevel_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -256,7 +256,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, * memory. We expect to hit -ENOMEM. */ - obj = fake_dma_object(i915, BIT_ULL(size)); + obj = fake_dma_object(vm->i915, BIT_ULL(size)); if (IS_ERR(obj)) { kfree(order); break; @@ -291,7 +291,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, mock_vma->node.size = BIT_ULL(size); mock_vma->node.start = addr; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) + with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref) vm->insert_entries(vm, mock_vma, I915_CACHE_NONE, 0); } @@ -303,7 +303,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, intel_wakeref_t wakeref; GEM_BUG_ON(addr + BIT_ULL(size) > vm->total); - with_intel_runtime_pm(&i915->runtime_pm, wakeref) + with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref) vm->clear_range(vm, addr, BIT_ULL(size)); } @@ -312,7 +312,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, kfree(order); - cleanup_freed_objects(i915); + cleanup_freed_objects(vm->i915); } kfree(mock_vma); @@ -340,8 +340,7 @@ static void close_object_list(struct list_head *objects, } } -static int fill_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int fill_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -374,7 +373,7 @@ static int fill_hole(struct drm_i915_private *i915, { } }, *p; - obj = fake_dma_object(i915, full_size); + obj = fake_dma_object(vm->i915, full_size); if (IS_ERR(obj)) break; @@ -542,7 +541,7 @@ static int fill_hole(struct drm_i915_private *i915, } close_object_list(&objects, vm); - cleanup_freed_objects(i915); + cleanup_freed_objects(vm->i915); } return 0; @@ -552,8 +551,7 @@ err: return err; } -static int walk_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int walk_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -575,7 +573,7 @@ static int walk_hole(struct drm_i915_private *i915, u64 addr; int err = 0; - obj = fake_dma_object(i915, size << PAGE_SHIFT); + obj = fake_dma_object(vm->i915, size << PAGE_SHIFT); if (IS_ERR(obj)) break; @@ -630,14 +628,13 @@ err_put: if (err) return err; - cleanup_freed_objects(i915); + cleanup_freed_objects(vm->i915); } return 0; } -static int pot_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int pot_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ 
-651,7 +648,7 @@ static int pot_hole(struct drm_i915_private *i915, if (i915_is_ggtt(vm)) flags |= PIN_GLOBAL; - obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -712,8 +709,7 @@ err_obj: return err; } -static int drunk_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int drunk_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -758,7 +754,7 @@ static int drunk_hole(struct drm_i915_private *i915, * memory. We expect to hit -ENOMEM. */ - obj = fake_dma_object(i915, BIT_ULL(size)); + obj = fake_dma_object(vm->i915, BIT_ULL(size)); if (IS_ERR(obj)) { kfree(order); break; @@ -816,14 +812,13 @@ err_obj: if (err) return err; - cleanup_freed_objects(i915); + cleanup_freed_objects(vm->i915); } return 0; } -static int __shrink_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int __shrink_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -840,7 +835,7 @@ static int __shrink_hole(struct drm_i915_private *i915, u64 size = BIT_ULL(order++); size = min(size, hole_end - addr); - obj = fake_dma_object(i915, size); + obj = fake_dma_object(vm->i915, size); if (IS_ERR(obj)) { err = PTR_ERR(obj); break; @@ -894,12 +889,11 @@ static int __shrink_hole(struct drm_i915_private *i915, } close_object_list(&objects, vm); - cleanup_freed_objects(i915); + cleanup_freed_objects(vm->i915); return err; } -static int shrink_hole(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int shrink_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -911,7 +905,7 @@ static int shrink_hole(struct drm_i915_private *i915, for_each_prime_number_from(prime, 0, ULONG_MAX - 1) { vm->fault_attr.interval = prime; - err = __shrink_hole(i915, vm, hole_start, hole_end, end_time); + err = __shrink_hole(vm, hole_start, hole_end, end_time); if (err) break; } @@ -921,8 +915,7 @@ static int shrink_hole(struct drm_i915_private *i915, return err; } -static int shrink_boom(struct drm_i915_private *i915, - struct i915_address_space *vm, +static int shrink_boom(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { @@ -944,7 +937,7 @@ static int shrink_boom(struct drm_i915_private *i915, unsigned int size = sizes[i]; struct i915_vma *vma; - purge = fake_dma_object(i915, size); + purge = fake_dma_object(vm->i915, size); if (IS_ERR(purge)) return PTR_ERR(purge); @@ -961,7 +954,7 @@ static int shrink_boom(struct drm_i915_private *i915, /* Should now be ripe for purging */ i915_vma_unpin(vma); - explode = fake_dma_object(i915, size); + explode = fake_dma_object(vm->i915, size); if (IS_ERR(explode)) { err = PTR_ERR(explode); goto err_purge; @@ -987,7 +980,7 @@ static int shrink_boom(struct drm_i915_private *i915, i915_gem_object_put(explode); memset(&vm->fault_attr, 0, sizeof(vm->fault_attr)); - cleanup_freed_objects(i915); + cleanup_freed_objects(vm->i915); } return 0; @@ -1001,8 +994,7 @@ err_purge: } static int exercise_ppgtt(struct drm_i915_private *dev_priv, - int (*func)(struct drm_i915_private *i915, - struct i915_address_space *vm, + int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)) { @@ -1018,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, if (IS_ERR(file)) return PTR_ERR(file); - ppgtt = 
i915_ppgtt_create(dev_priv); + ppgtt = i915_ppgtt_create(&dev_priv->gt); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_free; @@ -1026,7 +1018,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, GEM_BUG_ON(offset_in_page(ppgtt->vm.total)); GEM_BUG_ON(!atomic_read(&ppgtt->vm.open)); - err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time); + err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time); i915_vm_put(&ppgtt->vm); @@ -1082,8 +1074,7 @@ static int sort_holes(void *priv, struct list_head *A, struct list_head *B) } static int exercise_ggtt(struct drm_i915_private *i915, - int (*func)(struct drm_i915_private *i915, - struct i915_address_space *vm, + int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)) { @@ -1105,7 +1096,7 @@ restart: if (hole_start >= hole_end) continue; - err = func(i915, &ggtt->vm, hole_start, hole_end, end_time); + err = func(&ggtt->vm, hole_start, hole_end, end_time); if (err) break; @@ -1252,8 +1243,7 @@ static void track_vma_bind(struct i915_vma *vma) } static int exercise_mock(struct drm_i915_private *i915, - int (*func)(struct drm_i915_private *i915, - struct i915_address_space *vm, + int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)) { @@ -1268,7 +1258,7 @@ static int exercise_mock(struct drm_i915_private *i915, return -ENOMEM; vm = i915_gem_context_get_vm_rcu(ctx); - err = func(i915, vm, 0, min(vm->total, limit), end_time); + err = func(vm, 0, min(vm->total, limit), end_time); i915_vm_put(vm); mock_context_close(ctx); diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index 476fba2ed8bb..34138c7bdd15 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -1,5 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* List each unit test as selftest(name, function) + +#ifndef selftest +#define selftest(x, y) +#endif + +/* + * List each unit test as selftest(name, function) * * The name is used as both an enum and expanded as subtest__name to create * a module parameter. It must be unique and legal for a C identifier. diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index aa5a0e7f5d9e..5b39bab4da1d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -1,5 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* List each unit test as selftest(name, function) + +#ifndef selftest +#define selftest(x, y) +#endif + +/* + * List each unit test as selftest(name, function) * * The name is used as both an enum and expanded as subtest__name to create * a module parameter. It must be unique and legal for a C identifier. diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h index f7129a243daa..5a577a1332f5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h @@ -1,5 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* List each unit test as selftest(name, function) + +#ifndef selftest +#define selftest(x, y) +#endif + +/* + * List each unit test as selftest(name, function) * * The name is used as both an enum and expanded as subtest__name to create * a module parameter. It must be unique and legal for a C identifier. 
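
The `#ifndef selftest` fallback added to the three selftest list headers above (i915_live_selftests.h, i915_mock_selftests.h, i915_perf_selftests.h) lets each list compile to nothing when it is included with no selftest() definition in place, while consumers that define the macro before including still get the full expansion. A minimal sketch of that x-macro pattern, split across two small files; the names here (my_selftests.h, test_sanity, test_timers) are made up for illustration and are not the driver's actual consumers:

/* --- my_selftests.h --- list of tests, guarded like the headers above */
#ifndef selftest
#define selftest(x, y)	/* expands to nothing when included standalone */
#endif
selftest(sanity, test_sanity)
selftest(timers, test_timers)

/* --- consumer.c --- expand the same list twice: as an enum and as a table */
#include <stdio.h>

static int test_sanity(void) { return 0; }
static int test_timers(void) { return 0; }

enum {
#define selftest(name, func) name##_id,
#include "my_selftests.h"
#undef selftest
	n_selftests
};

static const struct {
	const char *name;
	int (*run)(void);
} tests[] = {
#define selftest(name, func) { #name, func },
#include "my_selftests.h"
#undef selftest
};

int main(void)
{
	int i, err = 0;

	for (i = 0; i < n_selftests; i++) {
		printf("running %s\n", tests[i].name);
		err |= tests[i].run();
	}
	return err;
}
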
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h index 35cc69a3a1b9..05364eca20f7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.h +++ b/drivers/gpu/drm/i915/selftests/i915_random.h @@ -25,6 +25,7 @@ #ifndef __I915_SELFTESTS_RANDOM_H__ #define __I915_SELFTESTS_RANDOM_H__ +#include <linux/math64.h> #include <linux/random.h> #include "../i915_selftest.h" diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.c b/drivers/gpu/drm/i915/selftests/igt_atomic.c new file mode 100644 index 000000000000..fb506b699095 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_atomic.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2018 Intel Corporation + */ + +#include <linux/preempt.h> +#include <linux/bottom_half.h> +#include <linux/irqflags.h> + +#include "igt_atomic.h" + +static void __preempt_begin(void) +{ + preempt_disable(); +} + +static void __preempt_end(void) +{ + preempt_enable(); +} + +static void __softirq_begin(void) +{ + local_bh_disable(); +} + +static void __softirq_end(void) +{ + local_bh_enable(); +} + +static void __hardirq_begin(void) +{ + local_irq_disable(); +} + +static void __hardirq_end(void) +{ + local_irq_enable(); +} + +const struct igt_atomic_section igt_atomic_phases[] = { + { "preempt", __preempt_begin, __preempt_end }, + { "softirq", __softirq_begin, __softirq_end }, + { "hardirq", __hardirq_begin, __hardirq_end }, + { } +}; diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.h b/drivers/gpu/drm/i915/selftests/igt_atomic.h index 93ec89f487ec..1991798abf4b 100644 --- a/drivers/gpu/drm/i915/selftests/igt_atomic.h +++ b/drivers/gpu/drm/i915/selftests/igt_atomic.h @@ -6,51 +6,12 @@ #ifndef IGT_ATOMIC_H #define IGT_ATOMIC_H -#include <linux/preempt.h> -#include <linux/bottom_half.h> -#include <linux/irqflags.h> - -static void __preempt_begin(void) -{ - preempt_disable(); -} - -static void __preempt_end(void) -{ - preempt_enable(); -} - -static void __softirq_begin(void) -{ - local_bh_disable(); -} - -static void __softirq_end(void) -{ - local_bh_enable(); -} - -static void __hardirq_begin(void) -{ - local_irq_disable(); -} - -static void __hardirq_end(void) -{ - local_irq_enable(); -} - struct igt_atomic_section { const char *name; void (*critical_section_begin)(void); void (*critical_section_end)(void); }; -static const struct igt_atomic_section igt_atomic_phases[] = { - { "preempt", __preempt_begin, __preempt_end }, - { "softirq", __softirq_begin, __softirq_end }, - { "hardirq", __hardirq_begin, __hardirq_end }, - { } -}; +extern const struct igt_atomic_section igt_atomic_phases[]; #endif /* IGT_ATOMIC_H */ diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h index c0e9f99d50de..36ed42736c52 100644 --- a/drivers/gpu/drm/i915/selftests/igt_live_test.h +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h @@ -7,7 +7,7 @@ #ifndef IGT_LIVE_TEST_H #define IGT_LIVE_TEST_H -#include "../i915_gem.h" +#include "gt/intel_engine.h" /* for I915_NUM_ENGINES */ struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 04d0aa7b349e..3ef3620e0da5 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -270,36 +270,31 @@ static int igt_gpu_write_dw(struct intel_context *ce, static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) { - unsigned long n; + unsigned 
long n = obj->base.size >> PAGE_SHIFT; + u32 *ptr; int err; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_wc_domain(obj, false); - i915_gem_object_unlock(obj); - if (err) - return err; - - err = i915_gem_object_pin_pages(obj); + err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT); if (err) return err; - for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { - u32 __iomem *base; - u32 read_val; - - base = i915_gem_object_lmem_io_map_page_atomic(obj, n); + ptr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); - read_val = ioread32(base + dword); - io_mapping_unmap_atomic(base); - if (read_val != val) { - pr_err("n=%lu base[%u]=%u, val=%u\n", - n, dword, read_val, val); + ptr += dword; + while (n--) { + if (*ptr != val) { + pr_err("base[%u]=%08x, val=%08x\n", + dword, *ptr, val); err = -EINVAL; break; } + + ptr += PAGE_SIZE / sizeof(*ptr); } - i915_gem_object_unpin_pages(obj); + i915_gem_object_unpin_map(obj); return err; } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index ac641f5360e1..3b8986983afc 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -58,6 +58,8 @@ static void mock_device_release(struct drm_device *dev) mock_device_flush(i915); intel_gt_driver_remove(&i915->gt); + i915_gem_driver_release__contexts(i915); + i915_gem_drain_workqueue(i915); i915_gem_drain_freed_objects(i915); @@ -184,6 +186,7 @@ struct drm_i915_private *mock_gem_device(void) if (mock_engine_init(i915->engine[RCS0])) goto err_context; + __clear_bit(I915_WEDGED, &i915->gt.reset.flags); intel_engines_driver_register(i915); return i915; diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 20ac3844edec..edc5e3dda8ca 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -55,6 +55,11 @@ static void mock_cleanup(struct i915_address_space *vm) { } +static void mock_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ +} + struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) { struct i915_ppgtt *ppgtt; @@ -70,7 +75,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); - ppgtt->vm.clear_range = nop_clear_range; + ppgtt->vm.clear_range = mock_clear_range; ppgtt->vm.insert_page = mock_insert_page; ppgtt->vm.insert_entries = mock_insert_entries; ppgtt->vm.cleanup = mock_cleanup; @@ -107,7 +112,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) ggtt->mappable_end = resource_size(&ggtt->gmadr); ggtt->vm.total = 4096 * PAGE_SIZE; - ggtt->vm.clear_range = nop_clear_range; + ggtt->vm.clear_range = mock_clear_range; ggtt->vm.insert_page = mock_insert_page; ggtt->vm.insert_entries = mock_insert_entries; ggtt->vm.cleanup = mock_cleanup; diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index 5044dfb8e3d6..b7a82ed5788f 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -20,7 +20,7 @@ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o mediatek-drm-hdmi-objs := mtk_cec.o \ mtk_hdmi.o \ mtk_hdmi_ddc.o \ - mtk_mt2701_hdmi_phy.o \ + mtk_mt2701_hdmi_phy.o \ mtk_mt8173_hdmi_phy.o \ mtk_hdmi_phy.o diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c index 59de2a46aa49..6fb0d6983a4a 100644 
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c @@ -9,6 +9,7 @@ #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk-cmdq.h> #include "mtk_drm_crtc.h" #include "mtk_drm_ddp_comp.h" @@ -45,12 +46,12 @@ static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp) static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { struct mtk_disp_color *color = comp_to_color(comp); - writel(w, comp->regs + DISP_COLOR_WIDTH(color)); - writel(h, comp->regs + DISP_COLOR_HEIGHT(color)); + mtk_ddp_write(cmdq_pkt, w, comp, DISP_COLOR_WIDTH(color)); + mtk_ddp_write(cmdq_pkt, h, comp, DISP_COLOR_HEIGHT(color)); } static void mtk_color_start(struct mtk_ddp_comp *comp) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 4a55bb6e2213..891d80c73e04 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -11,6 +11,7 @@ #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk-cmdq.h> #include "mtk_drm_crtc.h" #include "mtk_drm_ddp_comp.h" @@ -124,14 +125,15 @@ static void mtk_ovl_stop(struct mtk_ddp_comp *comp) static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { if (w != 0 && h != 0) - writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE); - writel_relaxed(0x0, comp->regs + DISP_REG_OVL_ROI_BGCLR); + mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, comp, + DISP_REG_OVL_ROI_SIZE); + mtk_ddp_write_relaxed(cmdq_pkt, 0x0, comp, DISP_REG_OVL_ROI_BGCLR); - writel(0x1, comp->regs + DISP_REG_OVL_RST); - writel(0x0, comp->regs + DISP_REG_OVL_RST); + mtk_ddp_write(cmdq_pkt, 0x1, comp, DISP_REG_OVL_RST); + mtk_ddp_write(cmdq_pkt, 0x0, comp, DISP_REG_OVL_RST); } static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) @@ -175,16 +177,16 @@ static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx, return 0; } -static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) +static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx, + struct cmdq_pkt *cmdq_pkt) { - unsigned int reg; unsigned int gmc_thrshd_l; unsigned int gmc_thrshd_h; unsigned int gmc_value; struct mtk_disp_ovl *ovl = comp_to_ovl(comp); - writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx)); - + mtk_ddp_write(cmdq_pkt, 0x1, comp, + DISP_REG_OVL_RDMA_CTRL(idx)); gmc_thrshd_l = GMC_THRESHOLD_LOW >> (GMC_THRESHOLD_BITS - ovl->data->gmc_bits); gmc_thrshd_h = GMC_THRESHOLD_HIGH >> @@ -194,22 +196,19 @@ static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) else gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 | gmc_thrshd_h << 16 | gmc_thrshd_h << 24; - writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx)); - - reg = readl(comp->regs + DISP_REG_OVL_SRC_CON); - reg = reg | BIT(idx); - writel(reg, comp->regs + DISP_REG_OVL_SRC_CON); + mtk_ddp_write(cmdq_pkt, gmc_value, + comp, DISP_REG_OVL_RDMA_GMC(idx)); + mtk_ddp_write_mask(cmdq_pkt, BIT(idx), comp, + DISP_REG_OVL_SRC_CON, BIT(idx)); } -static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx) +static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx, + struct cmdq_pkt 
*cmdq_pkt) { - unsigned int reg; - - reg = readl(comp->regs + DISP_REG_OVL_SRC_CON); - reg = reg & ~BIT(idx); - writel(reg, comp->regs + DISP_REG_OVL_SRC_CON); - - writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx)); + mtk_ddp_write_mask(cmdq_pkt, 0, comp, + DISP_REG_OVL_SRC_CON, BIT(idx)); + mtk_ddp_write(cmdq_pkt, 0, comp, + DISP_REG_OVL_RDMA_CTRL(idx)); } static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) @@ -249,7 +248,8 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) } static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, - struct mtk_plane_state *state) + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) { struct mtk_disp_ovl *ovl = comp_to_ovl(comp); struct mtk_plane_pending_state *pending = &state->pending; @@ -260,11 +260,13 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, unsigned int src_size = (pending->height << 16) | pending->width; unsigned int con; - if (!pending->enable) - mtk_ovl_layer_off(comp, idx); + if (!pending->enable) { + mtk_ovl_layer_off(comp, idx, cmdq_pkt); + return; + } con = ovl_fmt_convert(ovl, fmt); - if (idx != 0) + if (state->base.fb->format->has_alpha) con |= OVL_CON_AEN | OVL_CON_ALPHA; if (pending->rotation & DRM_MODE_REFLECT_Y) { @@ -277,14 +279,18 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, addr += pending->pitch - 1; } - writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx)); - writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx)); - writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx)); - writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx)); - writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx)); - - if (pending->enable) - mtk_ovl_layer_on(comp, idx); + mtk_ddp_write_relaxed(cmdq_pkt, con, comp, + DISP_REG_OVL_CON(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, + DISP_REG_OVL_PITCH(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, src_size, comp, + DISP_REG_OVL_SRC_SIZE(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, offset, comp, + DISP_REG_OVL_OFFSET(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, + DISP_REG_OVL_ADDR(ovl, idx)); + + mtk_ovl_layer_on(comp, idx, cmdq_pkt); } static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp) @@ -313,8 +319,6 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { .disable_vblank = mtk_ovl_disable_vblank, .supported_rotations = mtk_ovl_supported_rotations, .layer_nr = mtk_ovl_layer_nr, - .layer_on = mtk_ovl_layer_on, - .layer_off = mtk_ovl_layer_off, .layer_check = mtk_ovl_layer_check, .layer_config = mtk_ovl_layer_config, .bgclr_in_on = mtk_ovl_bgclr_in_on, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 405afef31407..0cb848d64206 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -9,6 +9,7 @@ #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk-cmdq.h> #include "mtk_drm_crtc.h" #include "mtk_drm_ddp_comp.h" @@ -125,14 +126,16 @@ static void mtk_rdma_stop(struct mtk_ddp_comp *comp) static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, unsigned int height, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { unsigned int threshold; unsigned int reg; struct mtk_disp_rdma *rdma = comp_to_rdma(comp); - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width); - 
rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height); + mtk_ddp_write_mask(cmdq_pkt, width, comp, + DISP_REG_RDMA_SIZE_CON_0, 0xfff); + mtk_ddp_write_mask(cmdq_pkt, height, comp, + DISP_REG_RDMA_SIZE_CON_1, 0xfffff); /* * Enable FIFO underflow since DSI and DPI can't be blocked. @@ -144,7 +147,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, reg = RDMA_FIFO_UNDERFLOW_EN | RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) | RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold); - writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); + mtk_ddp_write(cmdq_pkt, reg, comp, DISP_REG_RDMA_FIFO_CON); } static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, @@ -190,7 +193,8 @@ static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp) } static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, - struct mtk_plane_state *state) + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) { struct mtk_disp_rdma *rdma = comp_to_rdma(comp); struct mtk_plane_pending_state *pending = &state->pending; @@ -200,24 +204,27 @@ static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, unsigned int con; con = rdma_fmt_convert(rdma, fmt); - writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); + mtk_ddp_write_relaxed(cmdq_pkt, con, comp, DISP_RDMA_MEM_CON); if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, - RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE); - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, - RDMA_MATRIX_INT_MTX_SEL, - RDMA_MATRIX_INT_MTX_BT601_to_RGB); + mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, comp, + DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE); + mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB, + comp, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_INT_MTX_SEL); } else { - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, - RDMA_MATRIX_ENABLE, 0); + mtk_ddp_write_mask(cmdq_pkt, 0, comp, + DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE); } + mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, DISP_RDMA_MEM_START_ADDR); + mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, DISP_RDMA_MEM_SRC_PITCH); + mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, comp, + DISP_RDMA_MEM_GMC_SETTING_0); + mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, comp, + DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY); - writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); - writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); - writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); - rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, - RDMA_MODE_MEMORY, RDMA_MODE_MEMORY); } static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index f80a8ba75977..0dfcd1787e65 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -5,6 +5,7 @@ #include <linux/clk.h> #include <linux/pm_runtime.h> +#include <linux/soc/mediatek/mtk-cmdq.h> #include <asm/barrier.h> #include <soc/mediatek/smi.h> @@ -42,11 +43,20 @@ struct mtk_drm_crtc { struct drm_plane *planes; unsigned int layer_nr; bool pending_planes; + bool pending_async_planes; + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_client *cmdq_client; + u32 cmdq_event; +#endif void __iomem *config_regs; struct mtk_disp_mutex *mutex; unsigned int ddp_comp_nr; struct mtk_ddp_comp **ddp_comp; + + /* lock for display hardware access */ + struct mutex hw_lock; }; struct mtk_crtc_state { @@ -215,11 +225,12 @@ 
struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_ddp_comp *comp; int i, count = 0; + unsigned int local_index = plane - mtk_crtc->planes; for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { comp = mtk_crtc->ddp_comp[i]; - if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) { - *local_layer = plane->index - count; + if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) { + *local_layer = local_index - count; return comp; } count += mtk_ddp_comp_layer_nr(comp); @@ -229,6 +240,13 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, return NULL; } +#if IS_REACHABLE(CONFIG_MTK_CMDQ) +static void ddp_cmdq_cb(struct cmdq_cb_data data) +{ + cmdq_pkt_destroy(data.data); +} +#endif + static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) { struct drm_crtc *crtc = &mtk_crtc->base; @@ -297,7 +315,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) if (i == 1) mtk_ddp_comp_bgclr_in_on(comp); - mtk_ddp_comp_config(comp, width, height, vrefresh, bpc); + mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL); mtk_ddp_comp_start(comp); } @@ -310,7 +328,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) plane_state = to_mtk_plane_state(plane->state); comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - mtk_ddp_comp_layer_config(comp, local_layer, plane_state); + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, NULL); } return 0; @@ -325,6 +345,7 @@ err_pm_runtime_put: static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) { struct drm_device *drm = mtk_crtc->base.dev; + struct drm_crtc *crtc = &mtk_crtc->base; int i; DRM_DEBUG_DRIVER("%s\n", __func__); @@ -350,9 +371,17 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) mtk_disp_mutex_unprepare(mtk_crtc->mutex); pm_runtime_put(drm->dev); + + if (crtc->state->event && !crtc->state->active) { + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irq(&crtc->dev->event_lock); + } } -static void mtk_crtc_ddp_config(struct drm_crtc *crtc) +static void mtk_crtc_ddp_config(struct drm_crtc *crtc, + struct cmdq_pkt *cmdq_handle) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); @@ -368,7 +397,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) if (state->pending_config) { mtk_ddp_comp_config(comp, state->pending_width, state->pending_height, - state->pending_vrefresh, 0); + state->pending_vrefresh, 0, + cmdq_handle); state->pending_config = false; } @@ -386,12 +416,84 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - mtk_ddp_comp_layer_config(comp, local_layer, - plane_state); + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, + cmdq_handle); plane_state->pending.config = false; } mtk_crtc->pending_planes = false; } + + if (mtk_crtc->pending_async_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + if (!plane_state->pending.async_config) + continue; + + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, + &local_layer); + + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, + cmdq_handle); + plane_state->pending.async_config = false; + 
} + mtk_crtc->pending_async_planes = false; + } +} + +static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_pkt *cmdq_handle; +#endif + struct drm_crtc *crtc = &mtk_crtc->base; + struct mtk_drm_private *priv = crtc->dev->dev_private; + unsigned int pending_planes = 0, pending_async_planes = 0; + int i; + + mutex_lock(&mtk_crtc->hw_lock); + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + if (plane_state->pending.dirty) { + plane_state->pending.config = true; + plane_state->pending.dirty = false; + pending_planes |= BIT(i); + } else if (plane_state->pending.async_dirty) { + plane_state->pending.async_config = true; + plane_state->pending.async_dirty = false; + pending_async_planes |= BIT(i); + } + } + if (pending_planes) + mtk_crtc->pending_planes = true; + if (pending_async_planes) + mtk_crtc->pending_async_planes = true; + + if (priv->data->shadow_register) { + mtk_disp_mutex_acquire(mtk_crtc->mutex); + mtk_crtc_ddp_config(crtc, NULL); + mtk_disp_mutex_release(mtk_crtc->mutex); + } +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (mtk_crtc->cmdq_client) { + cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE); + cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); + cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event); + mtk_crtc_ddp_config(crtc, cmdq_handle); + cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle); + } +#endif + mutex_unlock(&mtk_crtc->hw_lock); } int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, @@ -401,7 +503,23 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, struct mtk_ddp_comp *comp; comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - return mtk_ddp_comp_layer_check(comp, local_layer, state); + if (comp) + return mtk_ddp_comp_layer_check(comp, local_layer, state); + return 0; +} + +void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); + const struct drm_plane_helper_funcs *plane_helper_funcs = + plane->helper_private; + + if (!mtk_crtc->enabled) + return; + + plane_helper_funcs->atomic_update(plane, new_state); + mtk_drm_crtc_hw_config(mtk_crtc); } static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, @@ -451,6 +569,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, } mtk_crtc->pending_planes = true; + mtk_drm_crtc_hw_config(mtk_crtc); /* Wait for planes to be disabled */ drm_crtc_wait_one_vblank(crtc); @@ -482,34 +601,16 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_drm_private *priv = crtc->dev->dev_private; - unsigned int pending_planes = 0; int i; if (mtk_crtc->event) mtk_crtc->pending_needs_vblank = true; - for (i = 0; i < mtk_crtc->layer_nr; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - struct mtk_plane_state *plane_state; - - plane_state = to_mtk_plane_state(plane->state); - if (plane_state->pending.dirty) { - plane_state->pending.config = true; - plane_state->pending.dirty = false; - pending_planes |= BIT(i); - } - } - if (pending_planes) - mtk_crtc->pending_planes = true; if (crtc->state->color_mgmt_changed) - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { 
mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state); - - if (priv->data->shadow_register) { - mtk_disp_mutex_acquire(mtk_crtc->mutex); - mtk_crtc_ddp_config(crtc); - mtk_disp_mutex_release(mtk_crtc->mutex); - } + mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state); + } + mtk_drm_crtc_hw_config(mtk_crtc); } static const struct drm_crtc_funcs mtk_crtc_funcs = { @@ -559,8 +660,12 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_drm_private *priv = crtc->dev->dev_private; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (!priv->data->shadow_register && !mtk_crtc->cmdq_client) +#else if (!priv->data->shadow_register) - mtk_crtc_ddp_config(crtc); +#endif + mtk_crtc_ddp_config(crtc, NULL); mtk_drm_finish_page_flip(mtk_crtc); } @@ -627,6 +732,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, int pipe = priv->num_pipes; int ret; int i; + bool has_ctm = false; + uint gamma_lut_size = 0; if (!path) return 0; @@ -677,6 +784,14 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, } mtk_crtc->ddp_comp[i] = comp; + + if (comp->funcs) { + if (comp->funcs->gamma_set) + gamma_lut_size = MTK_LUT_SIZE; + + if (comp->funcs->ctm_set) + has_ctm = true; + } } for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) @@ -697,9 +812,28 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, NULL, pipe); if (ret < 0) return ret; - drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); - drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE); - priv->num_pipes++; + if (gamma_lut_size) + drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size); + drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size); + priv->num_pipes++; + mutex_init(&mtk_crtc->hw_lock); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + mtk_crtc->cmdq_client = + cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base), + 2000); + if (IS_ERR(mtk_crtc->cmdq_client)) { + dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", + drm_crtc_index(&mtk_crtc->base)); + mtk_crtc->cmdq_client = NULL; + } + ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events", + drm_crtc_index(&mtk_crtc->base), + &mtk_crtc->cmdq_event); + if (ret) + dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", + drm_crtc_index(&mtk_crtc->base)); +#endif return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 6afe1c19557a..a2b4677a451c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h @@ -21,5 +21,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, unsigned int path_len); int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, struct mtk_plane_state *state); +void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, + struct drm_plane_state *plane_state); #endif /* MTK_DRM_CRTC_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 7f21307cda75..1f5a112bb034 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -12,7 +12,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> - +#include <linux/soc/mediatek/mtk-cmdq.h> #include "mtk_drm_drv.h" #include "mtk_drm_plane.h" #include "mtk_drm_ddp_comp.h" @@ -37,7 +37,15 @@ #define CCORR_EN BIT(0) #define DISP_CCORR_CFG 0x0020 #define CCORR_RELAY_MODE BIT(0) +#define 
CCORR_ENGINE_EN BIT(1) +#define CCORR_GAMMA_OFF BIT(2) +#define CCORR_WGAMUT_SRC_CLIP BIT(3) #define DISP_CCORR_SIZE 0x0030 +#define DISP_CCORR_COEF_0 0x0080 +#define DISP_CCORR_COEF_1 0x0084 +#define DISP_CCORR_COEF_2 0x0088 +#define DISP_CCORR_COEF_3 0x008C +#define DISP_CCORR_COEF_4 0x0090 #define DISP_DITHER_EN 0x0000 #define DITHER_EN BIT(0) @@ -76,36 +84,84 @@ #define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) #define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0) +void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, unsigned int offset) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) + cmdq_pkt_write(cmdq_pkt, comp->subsys, + comp->regs_pa + offset, value); + else +#endif + writel(value, comp->regs + offset); +} + +void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, + unsigned int offset) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) + cmdq_pkt_write(cmdq_pkt, comp->subsys, + comp->regs_pa + offset, value); + else +#endif + writel_relaxed(value, comp->regs + offset); +} + +void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, + unsigned int value, + struct mtk_ddp_comp *comp, + unsigned int offset, + unsigned int mask) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) { + cmdq_pkt_write_mask(cmdq_pkt, comp->subsys, + comp->regs_pa + offset, value, mask); + } else { +#endif + u32 tmp = readl(comp->regs + offset); + + tmp = (tmp & ~mask) | (value & mask); + writel(tmp, comp->regs + offset); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + } +#endif +} + void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, - unsigned int CFG) + unsigned int CFG, struct cmdq_pkt *cmdq_pkt) { /* If bpc equal to 0, the dithering function didn't be enabled */ if (bpc == 0) return; if (bpc >= MTK_MIN_BPC) { - writel(0, comp->regs + DISP_DITHER_5); - writel(0, comp->regs + DISP_DITHER_7); - writel(DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | - DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | - DITHER_NEW_BIT_MODE, - comp->regs + DISP_DITHER_15); - writel(DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | - DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | - DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | - DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), - comp->regs + DISP_DITHER_16); - writel(DISP_DITHERING, comp->regs + CFG); + mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_5); + mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_7); + mtk_ddp_write(cmdq_pkt, + DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | + DITHER_NEW_BIT_MODE, + comp, DISP_DITHER_15); + mtk_ddp_write(cmdq_pkt, + DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | + DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), + comp, DISP_DITHER_16); + mtk_ddp_write(cmdq_pkt, DISP_DITHERING, comp, CFG); } } static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(w << 16 | h, comp->regs + DISP_OD_SIZE); - writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG); - mtk_dither_set(comp, bpc, DISP_OD_CFG); + mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_OD_SIZE); + mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, comp, DISP_OD_CFG); + mtk_dither_set(comp, bpc, DISP_OD_CFG, cmdq_pkt); } static void mtk_od_start(struct mtk_ddp_comp *comp) @@ -120,9 +176,9 @@ static void mtk_ufoe_start(struct mtk_ddp_comp *comp) static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w, 
unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(h << 16 | w, comp->regs + DISP_AAL_SIZE); + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_AAL_SIZE); } static void mtk_aal_start(struct mtk_ddp_comp *comp) @@ -137,10 +193,10 @@ static void mtk_aal_stop(struct mtk_ddp_comp *comp) static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE); - writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG); + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_CCORR_SIZE); + mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, comp, DISP_CCORR_CFG); } static void mtk_ccorr_start(struct mtk_ddp_comp *comp) @@ -153,12 +209,63 @@ static void mtk_ccorr_stop(struct mtk_ddp_comp *comp) writel_relaxed(0x0, comp->regs + DISP_CCORR_EN); } +/* Converts a DRM S31.32 value to the HW S1.10 format. */ +static u16 mtk_ctm_s31_32_to_s1_10(u64 in) +{ + u16 r; + + /* Sign bit. */ + r = in & BIT_ULL(63) ? BIT(11) : 0; + + if ((in & GENMASK_ULL(62, 33)) > 0) { + /* identity value 0x100000000 -> 0x400, */ + /* if bigger this, set it to max 0x7ff. */ + r |= GENMASK(10, 0); + } else { + /* take the 11 most important bits. */ + r |= (in >> 22) & GENMASK(10, 0); + } + + return r; +} + +static void mtk_ccorr_ctm_set(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state) +{ + struct drm_property_blob *blob = state->ctm; + struct drm_color_ctm *ctm; + const u64 *input; + uint16_t coeffs[9] = { 0 }; + int i; + struct cmdq_pkt *cmdq_pkt = NULL; + + if (!blob) + return; + + ctm = (struct drm_color_ctm *)blob->data; + input = ctm->matrix; + + for (i = 0; i < ARRAY_SIZE(coeffs); i++) + coeffs[i] = mtk_ctm_s31_32_to_s1_10(input[i]); + + mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1], + comp, DISP_CCORR_COEF_0); + mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3], + comp, DISP_CCORR_COEF_1); + mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5], + comp, DISP_CCORR_COEF_2); + mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7], + comp, DISP_CCORR_COEF_3); + mtk_ddp_write(cmdq_pkt, coeffs[8] << 16, + comp, DISP_CCORR_COEF_4); +} + static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE); - writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG); + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_DITHER_SIZE); + mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, comp, DISP_DITHER_CFG); } static void mtk_dither_start(struct mtk_ddp_comp *comp) @@ -173,10 +280,10 @@ static void mtk_dither_stop(struct mtk_ddp_comp *comp) static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE); - mtk_dither_set(comp, bpc, DISP_GAMMA_CFG); + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_GAMMA_SIZE); + mtk_dither_set(comp, bpc, DISP_GAMMA_CFG, cmdq_pkt); } static void mtk_gamma_start(struct mtk_ddp_comp *comp) @@ -223,6 +330,7 @@ static const struct mtk_ddp_comp_funcs ddp_ccorr = { .config = mtk_ccorr_config, .start = mtk_ccorr_start, .stop = mtk_ccorr_stop, + .ctm_set = mtk_ccorr_ctm_set, }; static const struct mtk_ddp_comp_funcs ddp_dither = { @@ -326,6 +434,11 @@ int 
mtk_ddp_comp_init(struct device *dev, struct device_node *node, enum mtk_ddp_comp_type type; struct device_node *larb_node; struct platform_device *larb_pdev; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct resource res; + struct cmdq_client_reg cmdq_reg; + int ret; +#endif if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX) return -EINVAL; @@ -379,6 +492,19 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node, comp->larb_dev = &larb_pdev->dev; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (of_address_to_resource(node, 0, &res) != 0) { + dev_err(dev, "Missing reg in %s node\n", node->full_name); + return -EINVAL; + } + comp->regs_pa = res.start; + + ret = cmdq_dev_get_client_reg(dev, &cmdq_reg, 0); + if (ret) + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); + else + comp->subsys = cmdq_reg.subsys; +#endif return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 2f1e9e75b8da..debe36395fe7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -69,27 +69,29 @@ enum mtk_ddp_comp_id { }; struct mtk_ddp_comp; - +struct cmdq_pkt; struct mtk_ddp_comp_funcs { void (*config)(struct mtk_ddp_comp *comp, unsigned int w, - unsigned int h, unsigned int vrefresh, unsigned int bpc); + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); void (*start)(struct mtk_ddp_comp *comp); void (*stop)(struct mtk_ddp_comp *comp); void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); void (*disable_vblank)(struct mtk_ddp_comp *comp); unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp); unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); - void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); - void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); int (*layer_check)(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state); void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, - struct mtk_plane_state *state); + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt); void (*gamma_set)(struct mtk_ddp_comp *comp, struct drm_crtc_state *state); void (*bgclr_in_on)(struct mtk_ddp_comp *comp); void (*bgclr_in_off)(struct mtk_ddp_comp *comp); + void (*ctm_set)(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state); }; struct mtk_ddp_comp { @@ -99,14 +101,17 @@ struct mtk_ddp_comp { struct device *larb_dev; enum mtk_ddp_comp_id id; const struct mtk_ddp_comp_funcs *funcs; + resource_size_t regs_pa; + u8 subsys; }; static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, - unsigned int vrefresh, unsigned int bpc) + unsigned int vrefresh, unsigned int bpc, + struct cmdq_pkt *cmdq_pkt) { if (comp->funcs && comp->funcs->config) - comp->funcs->config(comp, w, h, vrefresh, bpc); + comp->funcs->config(comp, w, h, vrefresh, bpc, cmdq_pkt); } static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp) @@ -151,20 +156,6 @@ static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) return 0; } -static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, - unsigned int idx) -{ - if (comp->funcs && comp->funcs->layer_on) - comp->funcs->layer_on(comp, idx); -} - -static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp, - unsigned int idx) -{ - if (comp->funcs && comp->funcs->layer_off) - comp->funcs->layer_off(comp, idx); -} - static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, unsigned int 
idx, struct mtk_plane_state *state) @@ -176,10 +167,11 @@ static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, - struct mtk_plane_state *state) + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) { if (comp->funcs && comp->funcs->layer_config) - comp->funcs->layer_config(comp, idx, state); + comp->funcs->layer_config(comp, idx, state, cmdq_pkt); } static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp, @@ -201,6 +193,13 @@ static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp) comp->funcs->bgclr_in_off(comp); } +static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state) +{ + if (comp->funcs && comp->funcs->ctm_set) + comp->funcs->ctm_set(comp, state); +} + int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type); int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, @@ -209,6 +208,13 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp); void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp); void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, - unsigned int CFG); - + unsigned int CFG, struct cmdq_pkt *cmdq_pkt); +enum mtk_ddp_comp_type mtk_ddp_comp_get_type(enum mtk_ddp_comp_id comp_id); +void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, unsigned int offset); +void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, unsigned int offset); +void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, unsigned int offset, + unsigned int mask); #endif /* MTK_DRM_DDP_COMP_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 2b1c122066ea..0563c6813333 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -37,84 +37,9 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 -static void mtk_atomic_schedule(struct mtk_drm_private *private, - struct drm_atomic_state *state) -{ - private->commit.state = state; - schedule_work(&private->commit.work); -} - -static void mtk_atomic_complete(struct mtk_drm_private *private, - struct drm_atomic_state *state) -{ - struct drm_device *drm = private->drm; - - drm_atomic_helper_wait_for_fences(drm, state, false); - - /* - * Mediatek drm supports runtime PM, so plane registers cannot be - * written when their crtc is disabled. - * - * The comment for drm_atomic_helper_commit states: - * For drivers supporting runtime PM the recommended sequence is - * - * drm_atomic_helper_commit_modeset_disables(dev, state); - * drm_atomic_helper_commit_modeset_enables(dev, state); - * drm_atomic_helper_commit_planes(dev, state, - * DRM_PLANE_COMMIT_ACTIVE_ONLY); - * - * See the kerneldoc entries for these three functions for more details. 
- */ - drm_atomic_helper_commit_modeset_disables(drm, state); - drm_atomic_helper_commit_modeset_enables(drm, state); - drm_atomic_helper_commit_planes(drm, state, - DRM_PLANE_COMMIT_ACTIVE_ONLY); - - drm_atomic_helper_wait_for_vblanks(drm, state); - - drm_atomic_helper_cleanup_planes(drm, state); - drm_atomic_state_put(state); -} - -static void mtk_atomic_work(struct work_struct *work) -{ - struct mtk_drm_private *private = container_of(work, - struct mtk_drm_private, commit.work); - - mtk_atomic_complete(private, private->commit.state); -} - -static int mtk_atomic_commit(struct drm_device *drm, - struct drm_atomic_state *state, - bool async) -{ - struct mtk_drm_private *private = drm->dev_private; - int ret; - - ret = drm_atomic_helper_prepare_planes(drm, state); - if (ret) - return ret; - - mutex_lock(&private->commit.lock); - flush_work(&private->commit.work); - - ret = drm_atomic_helper_swap_state(state, true); - if (ret) { - mutex_unlock(&private->commit.lock); - drm_atomic_helper_cleanup_planes(drm, state); - return ret; - } - - drm_atomic_state_get(state); - if (async) - mtk_atomic_schedule(private, state); - else - mtk_atomic_complete(private, state); - - mutex_unlock(&private->commit.lock); - - return 0; -} +static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; static struct drm_framebuffer * mtk_drm_mode_fb_create(struct drm_device *dev, @@ -132,7 +57,7 @@ mtk_drm_mode_fb_create(struct drm_device *dev, static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = { .fb_create = mtk_drm_mode_fb_create, .atomic_check = drm_atomic_helper_check, - .atomic_commit = mtk_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; static const enum mtk_ddp_comp_id mt2701_mtk_ddp_main[] = { @@ -250,6 +175,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) drm->mode_config.max_width = 4096; drm->mode_config.max_height = 4096; drm->mode_config.funcs = &mtk_drm_mode_config_funcs; + drm->mode_config.helper_private = &mtk_drm_mode_config_helpers; ret = component_bind_all(drm->dev, drm); if (ret) @@ -509,8 +435,6 @@ static int mtk_drm_probe(struct platform_device *pdev) if (!private) return -ENOMEM; - mutex_init(&private->commit.lock); - INIT_WORK(&private->commit.work, mtk_atomic_work); private->data = of_device_get_match_data(dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index e03fea12ff59..17bc99b9f5d4 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -43,13 +43,6 @@ struct mtk_drm_private { struct device_node *comp_node[DDP_COMPONENT_ID_MAX]; struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX]; const struct mtk_mmsys_driver_data *data; - - struct { - struct drm_atomic_state *state; - struct work_struct work; - struct mutex lock; - } commit; - struct drm_atomic_state *suspend_state; bool dma_parms_allocated; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index f0b0325381e0..914cc7619cd7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -7,6 +7,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_atomic_uapi.h> #include <drm/drm_plane_helper.h> #include <drm/drm_gem_framebuffer_helper.h> @@ -75,6 +76,50 @@ static void mtk_drm_plane_destroy_state(struct drm_plane *plane, 
kfree(to_mtk_plane_state(state)); } +static int mtk_plane_atomic_async_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_state; + + if (plane != state->crtc->cursor) + return -EINVAL; + + if (!plane->state) + return -EINVAL; + + if (!plane->state->fb) + return -EINVAL; + + if (state->state) + crtc_state = drm_atomic_get_existing_crtc_state(state->state, + state->crtc); + else /* Special case for asynchronous cursor updates. */ + crtc_state = state->crtc->state; + + return drm_atomic_helper_check_plane_state(plane->state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true); +} + +static void mtk_plane_atomic_async_update(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct mtk_plane_state *state = to_mtk_plane_state(plane->state); + + plane->state->crtc_x = new_state->crtc_x; + plane->state->crtc_y = new_state->crtc_y; + plane->state->crtc_h = new_state->crtc_h; + plane->state->crtc_w = new_state->crtc_w; + plane->state->src_x = new_state->src_x; + plane->state->src_y = new_state->src_y; + plane->state->src_h = new_state->src_h; + plane->state->src_w = new_state->src_w; + state->pending.async_dirty = true; + + mtk_drm_crtc_async_update(new_state->crtc, plane, new_state); +} + static const struct drm_plane_funcs mtk_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -163,6 +208,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { .atomic_check = mtk_plane_atomic_check, .atomic_update = mtk_plane_atomic_update, .atomic_disable = mtk_plane_atomic_disable, + .atomic_async_update = mtk_plane_atomic_async_update, + .atomic_async_check = mtk_plane_atomic_async_check, }; int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h index 760885e35b27..d454bece9535 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h @@ -22,6 +22,8 @@ struct mtk_plane_pending_state { unsigned int height; unsigned int rotation; bool dirty; + bool async_dirty; + bool async_config; }; struct mtk_plane_state { diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c index 362495535e69..f607a04d262d 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/arb.c +++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c @@ -54,7 +54,7 @@ static void nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb) { int pagemiss, cas, width, bpp; - int nvclks, mclks, pclks, crtpagemiss; + int nvclks, mclks, crtpagemiss; int found, mclk_extra, mclk_loop, cbs, m1, p1; int mclk_freq, pclk_freq, nvclk_freq; int us_m, us_n, us_p, crtc_drain_rate; @@ -69,7 +69,6 @@ nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb) bpp = arb->bpp; cbs = 128; - pclks = 2; nvclks = 10; mclks = 13 + cas; mclk_extra = 3; diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index 03466f04c741..3a9489ed6544 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -644,16 +644,13 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder, int i; if (nouveau_tv_norm) { - for (i = 0; i < num_tv_norms; i++) { - if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) { - tv_enc->tv_norm = i; - break; - } - } - - if (i == num_tv_norms) + i = match_string(nv17_tv_norm_names, num_tv_norms, + 
nouveau_tv_norm); + if (i < 0) NV_WARN(drm, "Invalid TV norm setting \"%s\"\n", nouveau_tv_norm); + else + tv_enc->tv_norm = i; } drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names); diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c index 5f2de77e0f32..224a34c340fe 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c +++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c @@ -75,12 +75,16 @@ base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) } } -static void -base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +static bool +base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) { - asyw->xlut.i.mode = 7; + if (size != 256 && size != 1024) + return false; + + asyw->xlut.i.mode = size == 1024 ? 4 : 7; asyw->xlut.i.enable = 2; asyw->xlut.i.load = head907d_olut_load; + return true; } static inline u32 @@ -160,6 +164,7 @@ base907c = { .csc_set = base907c_csc_set, .csc_clr = base907c_csc_clr, .olut_core = true, + .ilut_size = 1024, .xlut_set = base907c_xlut_set, .xlut_clr = base907c_xlut_clr, .image_set = base907c_image_set, diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index fd31bff0c920..5fabe2b88eca 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -660,7 +660,6 @@ struct nv50_mstm { struct nouveau_encoder *outp; struct drm_dp_mst_topology_mgr mgr; - struct nv50_msto *msto[4]; bool modified; bool disabled; @@ -726,7 +725,6 @@ nv50_msto_cleanup(struct nv50_msto *msto) drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port); msto->mstc = NULL; - msto->head = NULL; msto->disabled = false; } @@ -872,7 +870,6 @@ nv50_msto_enable(struct drm_encoder *encoder) mstm->outp->update(mstm->outp, head->base.index, armh, proto, nv50_dp_bpc_to_depth(armh->or.bpc)); - msto->head = head; msto->mstc = mstc; mstm->modified = true; } @@ -913,45 +910,40 @@ nv50_msto = { .destroy = nv50_msto_destroy, }; -static int -nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id, - struct nv50_msto **pmsto) +static struct nv50_msto * +nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id) { struct nv50_msto *msto; int ret; - if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL))) - return -ENOMEM; + msto = kzalloc(sizeof(*msto), GFP_KERNEL); + if (!msto) + return ERR_PTR(-ENOMEM); ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto, - DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id); + DRM_MODE_ENCODER_DPMST, "mst-%d", id); if (ret) { - kfree(*pmsto); - *pmsto = NULL; - return ret; + kfree(msto); + return ERR_PTR(ret); } drm_encoder_helper_add(&msto->encoder, &nv50_msto_help); - msto->encoder.possible_crtcs = heads; - return 0; + msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base); + msto->head = head; + return msto; } static struct drm_encoder * nv50_mstc_atomic_best_encoder(struct drm_connector *connector, struct drm_connector_state *connector_state) { - struct nv50_head *head = nv50_head(connector_state->crtc); struct nv50_mstc *mstc = nv50_mstc(connector); + struct drm_crtc *crtc = connector_state->crtc; - return &mstc->mstm->msto[head->base.index]->encoder; -} - -static struct drm_encoder * -nv50_mstc_best_encoder(struct drm_connector *connector) -{ - struct nv50_mstc *mstc = nv50_mstc(connector); + if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc))) + return NULL; - return &mstc->mstm->msto[0]->encoder; + return &nv50_head(crtc)->msto->encoder; } 
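The reworked nv50_mstc_atomic_best_encoder() above only offers the per-head MST encoder when the DP output can actually drive the requested CRTC, by testing the CRTC's bit against the output's head mask and returning NULL otherwise. The following is a stand-alone sketch of that bitmask check only; head_can_drive and dcb_heads are invented names standing in for the nouveau structures, not the driver's API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Analogue of drm_crtc_mask(): each CRTC owns one bit, selected by index. */
static uint32_t crtc_mask(unsigned int crtc_index)
{
	return 1u << crtc_index;
}

/* Assumed stand-in for the dcb->heads test done in atomic_best_encoder(). */
static bool head_can_drive(uint32_t dcb_heads, unsigned int crtc_index)
{
	return (dcb_heads & crtc_mask(crtc_index)) != 0;
}

int main(void)
{
	uint32_t dcb_heads = 0x3;	/* hypothetical output limited to heads 0 and 1 */
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("crtc %u: %s\n", i,
		       head_can_drive(dcb_heads, i) ? "encoder offered" : "NULL (skipped)");
	return 0;
}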
static enum drm_mode_status @@ -1038,7 +1030,6 @@ static const struct drm_connector_helper_funcs nv50_mstc_help = { .get_modes = nv50_mstc_get_modes, .mode_valid = nv50_mstc_mode_valid, - .best_encoder = nv50_mstc_best_encoder, .atomic_best_encoder = nv50_mstc_atomic_best_encoder, .atomic_check = nv50_mstc_atomic_check, .detect_ctx = nv50_mstc_detect, @@ -1071,8 +1062,9 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port, const char *path, struct nv50_mstc **pmstc) { struct drm_device *dev = mstm->outp->base.base.dev; + struct drm_crtc *crtc; struct nv50_mstc *mstc; - int ret, i; + int ret; if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL))) return -ENOMEM; @@ -1092,8 +1084,13 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port, mstc->connector.funcs->reset(&mstc->connector); nouveau_conn_attach_properties(&mstc->connector); - for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++) - drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder); + drm_for_each_crtc(crtc, dev) { + if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc))) + continue; + + drm_connector_attach_encoder(&mstc->connector, + &nv50_head(crtc)->msto->encoder); + } drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0); drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0); @@ -1367,7 +1364,7 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max, const int max_payloads = hweight8(outp->dcb->heads); struct drm_device *dev = outp->base.base.dev; struct nv50_mstm *mstm; - int ret, i; + int ret; u8 dpcd; /* This is a workaround for some monitors not functioning @@ -1390,13 +1387,6 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max, if (ret) return ret; - for (i = 0; i < max_payloads; i++) { - ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name, - i, &mstm->msto[i]); - if (ret) - return ret; - } - return 0; } @@ -1569,17 +1559,24 @@ nv50_sor_func = { .destroy = nv50_sor_destroy, }; +static bool nv50_has_mst(struct nouveau_drm *drm) +{ + struct nvkm_bios *bios = nvxx_bios(&drm->client.device); + u32 data; + u8 ver, hdr, cnt, len; + + data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len); + return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04); +} + static int nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) { struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_drm *drm = nouveau_drm(connector->dev); - struct nvkm_bios *bios = nvxx_bios(&drm->client.device); struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); struct nouveau_encoder *nv_encoder; struct drm_encoder *encoder; - u8 ver, hdr, cnt, len; - u32 data; int type, ret; switch (dcbe->type) { @@ -1624,10 +1621,9 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) } if (nv_connector->type != DCB_CONNECTOR_eDP && - (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) && - ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) { - ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16, - nv_connector->base.base.id, + nv50_has_mst(drm)) { + ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, + 16, nv_connector->base.base.id, &nv_encoder->dp.mstm); if (ret) return ret; @@ -2323,6 +2319,7 @@ nv50_display_create(struct drm_device *dev) struct nv50_disp *disp; struct dcb_output *dcbe; int crtcs, ret, i; + bool has_mst = nv50_has_mst(drm); disp = kzalloc(sizeof(*disp), GFP_KERNEL); if (!disp) 
@@ -2371,11 +2368,37 @@ nv50_display_create(struct drm_device *dev) crtcs = 0x3; for (i = 0; i < fls(crtcs); i++) { + struct nv50_head *head; + if (!(crtcs & (1 << i))) continue; - ret = nv50_head_create(dev, i); - if (ret) + + head = nv50_head_create(dev, i); + if (IS_ERR(head)) { + ret = PTR_ERR(head); goto out; + } + + if (has_mst) { + head->msto = nv50_msto_new(dev, head, i); + if (IS_ERR(head->msto)) { + ret = PTR_ERR(head->msto); + head->msto = NULL; + goto out; + } + + /* + * FIXME: This is a hack to workaround the following + * issues: + * + * https://gitlab.gnome.org/GNOME/mutter/issues/759 + * https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277 + * + * Once these issues are closed, this should be + * removed + */ + head->msto->encoder.possible_crtcs = crtcs; + } } /* create encoder/connector objects based on VBIOS DCB table */ diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h index c0a79531b087..d54fe00ac3a3 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h @@ -4,6 +4,8 @@ #include "nouveau_display.h" +struct nv50_msto; + struct nv50_disp { struct nvif_disp *disp; struct nv50_core *core; diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index c9692df2b76c..d9d64602947d 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -213,6 +213,7 @@ nv50_head_atomic_check_lut(struct nv50_head *head, { struct nv50_disp *disp = nv50_disp(head->base.base.dev); struct drm_property_blob *olut = asyh->state.gamma_lut; + int size; /* Determine whether core output LUT should be enabled. */ if (olut) { @@ -229,14 +230,23 @@ nv50_head_atomic_check_lut(struct nv50_head *head, } } - if (!olut && !head->func->olut_identity) { - asyh->olut.handle = 0; - return 0; + if (!olut) { + if (!head->func->olut_identity) { + asyh->olut.handle = 0; + return 0; + } + size = 0; + } else { + size = drm_color_lut_size(olut); } + if (!head->func->olut(head, asyh, size)) { + DRM_DEBUG_KMS("Invalid olut\n"); + return -EINVAL; + } asyh->olut.handle = disp->core->chan.vram.handle; asyh->olut.buffer = !asyh->olut.buffer; - head->func->olut(head, asyh); + return 0; } @@ -473,7 +483,7 @@ nv50_head_func = { .atomic_destroy_state = nv50_head_atomic_destroy_state, }; -int +struct nv50_head * nv50_head_create(struct drm_device *dev, int index) { struct nouveau_drm *drm = nouveau_drm(dev); @@ -485,7 +495,7 @@ nv50_head_create(struct drm_device *dev, int index) head = kzalloc(sizeof(*head), GFP_KERNEL); if (!head) - return -ENOMEM; + return ERR_PTR(-ENOMEM); head->func = disp->core->func->head; head->base.index = index; @@ -503,27 +513,26 @@ nv50_head_create(struct drm_device *dev, int index) ret = nv50_curs_new(drm, head->base.index, &curs); if (ret) { kfree(head); - return ret; + return ERR_PTR(ret); } crtc = &head->base.base; drm_crtc_init_with_planes(dev, crtc, &base->plane, &curs->plane, &nv50_head_func, "head-%d", head->base.index); drm_crtc_helper_add(crtc, &nv50_head_help); + /* Keep the legacy gamma size at 256 to avoid compatibility issues */ drm_mode_crtc_set_gamma_size(crtc, 256); - if (disp->disp->object.oclass >= GF110_DISP) - drm_crtc_enable_color_mgmt(crtc, 256, true, 256); - else - drm_crtc_enable_color_mgmt(crtc, 0, false, 256); + drm_crtc_enable_color_mgmt(crtc, base->func->ilut_size, + disp->disp->object.oclass >= GF110_DISP, + head->func->olut_size); if (head->func->olut_set) { ret = nv50_lut_init(disp, 
&drm->client.mmu, &head->olut); - if (ret) - goto out; + if (ret) { + nv50_head_destroy(crtc); + return ERR_PTR(ret); + } } -out: - if (ret) - nv50_head_destroy(crtc); - return ret; + return head; } diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h index d1c002f534d4..c32b27cdaefc 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.h +++ b/drivers/gpu/drm/nouveau/dispnv50/head.h @@ -11,17 +11,19 @@ struct nv50_head { const struct nv50_head_func *func; struct nouveau_crtc base; struct nv50_lut olut; + struct nv50_msto *msto; }; -int nv50_head_create(struct drm_device *, int index); +struct nv50_head *nv50_head_create(struct drm_device *, int index); void nv50_head_flush_set(struct nv50_head *, struct nv50_head_atom *); void nv50_head_flush_clr(struct nv50_head *, struct nv50_head_atom *, bool y); struct nv50_head_func { void (*view)(struct nv50_head *, struct nv50_head_atom *); void (*mode)(struct nv50_head *, struct nv50_head_atom *); - void (*olut)(struct nv50_head *, struct nv50_head_atom *); + bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int); bool olut_identity; + int olut_size; void (*olut_set)(struct nv50_head *, struct nv50_head_atom *); void (*olut_clr)(struct nv50_head *); void (*core_calc)(struct nv50_head *, struct nv50_head_atom *); @@ -43,7 +45,7 @@ struct nv50_head_func { extern const struct nv50_head_func head507d; void head507d_view(struct nv50_head *, struct nv50_head_atom *); void head507d_mode(struct nv50_head *, struct nv50_head_atom *); -void head507d_olut(struct nv50_head *, struct nv50_head_atom *); +bool head507d_olut(struct nv50_head *, struct nv50_head_atom *, int); void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *); void head507d_core_clr(struct nv50_head *); int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *, @@ -60,7 +62,7 @@ extern const struct nv50_head_func head827d; extern const struct nv50_head_func head907d; void head907d_view(struct nv50_head *, struct nv50_head_atom *); void head907d_mode(struct nv50_head *, struct nv50_head_atom *); -void head907d_olut(struct nv50_head *, struct nv50_head_atom *); +bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int); void head907d_olut_set(struct nv50_head *, struct nv50_head_atom *); void head907d_olut_clr(struct nv50_head *); void head907d_core_set(struct nv50_head *, struct nv50_head_atom *); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c index 7561be5ca707..66ccf36b56a2 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c @@ -271,15 +271,19 @@ head507d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -void -head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) +bool +head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) { + if (size != 256) + return false; + if (asyh->base.cpp == 1) asyh->olut.mode = 0; else asyh->olut.mode = 1; asyh->olut.load = head507d_olut_load; + return true; } void @@ -328,6 +332,7 @@ head507d = { .view = head507d_view, .mode = head507d_mode, .olut = head507d_olut, + .olut_size = 256, .olut_set = head507d_olut_set, .olut_clr = head507d_olut_clr, .core_calc = head507d_core_calc, diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c index af5e7bd5978b..11877119eea4 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c +++ 
b/drivers/gpu/drm/nouveau/dispnv50/head827d.c @@ -108,6 +108,7 @@ head827d = { .view = head507d_view, .mode = head507d_mode, .olut = head507d_olut, + .olut_size = 256, .olut_set = head827d_olut_set, .olut_clr = head827d_olut_clr, .core_calc = head507d_core_calc, diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c index c2d09dd97b1f..3002ec23d7a6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c @@ -230,11 +230,15 @@ head907d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -void -head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) +bool +head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) { - asyh->olut.mode = 7; + if (size != 256 && size != 1024) + return false; + + asyh->olut.mode = size == 1024 ? 4 : 7; asyh->olut.load = head907d_olut_load; + return true; } void @@ -285,6 +289,7 @@ head907d = { .view = head907d_view, .mode = head907d_mode, .olut = head907d_olut, + .olut_size = 1024, .olut_set = head907d_olut_set, .olut_clr = head907d_olut_clr, .core_calc = head507d_core_calc, diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c index 303df8459ca8..76958cedd51f 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c @@ -83,6 +83,7 @@ head917d = { .view = head907d_view, .mode = head907d_mode, .olut = head907d_olut, + .olut_size = 1024, .olut_set = head907d_olut_set, .olut_clr = head907d_olut_clr, .core_calc = head507d_core_calc, diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c index ef6a99d95a9c..00011ce109a6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c @@ -148,14 +148,18 @@ headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh) } } -static void -headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) +static bool +headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) { + if (size != 256 && size != 1024) + return false; + asyh->olut.mode = 2; - asyh->olut.size = 0; + asyh->olut.size = size == 1024 ? 2 : 0; asyh->olut.range = 0; asyh->olut.output_mode = 1; asyh->olut.load = head907d_olut_load; + return true; } static void @@ -201,6 +205,7 @@ headc37d = { .view = headc37d_view, .mode = headc37d_mode, .olut = headc37d_olut, + .olut_size = 1024, .olut_set = headc37d_olut_set, .olut_clr = headc37d_olut_clr, .curs_layout = head917d_curs_layout, diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c index 32a7f9e85fb0..938d910a1b1e 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c @@ -151,17 +151,20 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -void -headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) +bool +headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) { + if (size != 0 && size != 256 && size != 1024) + return false; + asyh->olut.mode = 2; /* DIRECT10 */ asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */; asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. 
*/ - if (asyh->state.gamma_lut && - asyh->state.gamma_lut->length / sizeof(struct drm_color_lut) == 256) + if (size == 256) asyh->olut.load = headc57d_olut_load_8; else asyh->olut.load = headc57d_olut_load; + return true; } static void @@ -194,6 +197,7 @@ headc57d = { .mode = headc57d_mode, .olut = headc57d_olut, .olut_identity = true, + .olut_size = 1024, .olut_set = headc57d_olut_set, .olut_clr = headc57d_olut_clr, .curs_layout = head917d_curs_layout, diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c index 994def4fd51a..4e95ca5604ab 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/lut.c +++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c @@ -49,7 +49,7 @@ nv50_lut_load(struct nv50_lut *lut, int buffer, struct drm_property_blob *blob, kvfree(in); } } else { - load(in, blob->length / sizeof(*in), mem); + load(in, drm_color_lut_size(blob), mem); } return addr; diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 5193b6257061..890315291b01 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -318,7 +318,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset, return wndw->func->acquire(wndw, asyw, asyh); } -static void +static int nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *armw, struct nv50_wndw_atom *asyw, @@ -340,7 +340,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, */ if (!(ilut = asyh->state.gamma_lut)) { asyw->visible = false; - return; + return 0; } if (wndw->func->ilut) @@ -359,7 +359,10 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, /* Recalculate LUT state. */ memset(&asyw->xlut, 0x00, sizeof(asyw->xlut)); if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) { - wndw->func->ilut(wndw, asyw); + if (!wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut))) { + DRM_DEBUG_KMS("Invalid ilut\n"); + return -EINVAL; + } asyw->xlut.handle = wndw->wndw.vram.handle; asyw->xlut.i.buffer = !asyw->xlut.i.buffer; asyw->set.xlut = true; @@ -384,6 +387,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, /* Can't do an immediate flip while changing the LUT. */ asyh->state.async_flip = false; + return 0; } static int @@ -424,8 +428,11 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) (!armw->visible || asyh->state.color_mgmt_changed || asyw->state.fb->format->format != - armw->state.fb->format->format)) - nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh); + armw->state.fb->format->format)) { + ret = nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh); + if (ret) + return ret; + } /* Calculate new window state. 
*/ if (asyw->visible) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h index c63bd3bdaf06..caf397475918 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h @@ -64,12 +64,13 @@ struct nv50_wndw_func { void (*ntfy_clr)(struct nv50_wndw *); int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset, struct nvif_device *); - void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *); + bool (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *, int); void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *, const struct drm_color_ctm *); void (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *); void (*csc_clr)(struct nv50_wndw *); bool ilut_identity; + int ilut_size; bool olut_core; void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *); void (*xlut_clr)(struct nv50_wndw *); diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c index 0f9402162bde..b92dc3461bbd 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c @@ -71,14 +71,18 @@ wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) } } -static void -wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +static bool +wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) { + if (size != 256 && size != 1024) + return false; + asyw->xlut.i.mode = 2; - asyw->xlut.i.size = 0; + asyw->xlut.i.size = size == 1024 ? 2 : 0; asyw->xlut.i.range = 0; asyw->xlut.i.output_mode = 1; asyw->xlut.i.load = head907d_olut_load; + return true; } void @@ -261,6 +265,7 @@ wndwc37e = { .ntfy_reset = corec37d_ntfy_init, .ntfy_wait_begun = base507c_ntfy_wait_begun, .ilut = wndwc37e_ilut, + .ilut_size = 1024, .xlut_set = wndwc37e_ilut_set, .xlut_clr = wndwc37e_ilut_clr, .csc = base907c_csc, diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c index a311c79e5295..35c9c52fab26 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c @@ -156,19 +156,21 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -static void -wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +static bool +wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) { - u16 size = asyw->ilut->length / sizeof(struct drm_color_lut); + if (size = size ? size : 1024, size != 256 && size != 1024) + return false; + if (size == 256) { asyw->xlut.i.mode = 1; /* DIRECT8. */ } else { asyw->xlut.i.mode = 2; /* DIRECT10. */ - size = 1024; } asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */; asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. 
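
Across the head507d/head907d/headc37d/headc57d and wndwc37e/wndwc57e hunks the pattern is the same: the ->olut()/->ilut() hooks now receive the LUT length obtained with drm_color_lut_size(), return false for sizes the display class cannot load, and derive the transfer mode from whichever size was accepted, which is what lets the atomic checks above fail cleanly with -EINVAL instead of programming a bad LUT. A rough, self-contained sketch of that shape, with the DIRECT8/DIRECT10 numbering borrowed from the wndwc57e hunk and everything else invented:

    #include <stdbool.h>
    #include <stdio.h>

    struct lut_state {
        int mode;       /* DIRECT8 vs DIRECT10, mirroring the wndwc57e hunk */
        int entries;
    };

    /*
     * Same shape as the new ->ilut()/->olut() hooks: take the size reported
     * by drm_color_lut_size(), reject anything the hardware cannot load, and
     * derive the transfer mode from the accepted size.
     */
    static bool pick_lut_mode(struct lut_state *st, int size)
    {
        if (size != 256 && size != 1024)
            return false;

        st->mode = (size == 256) ? 1 /* DIRECT8 */ : 2 /* DIRECT10 */;
        st->entries = size;
        return true;
    }

    int main(void)
    {
        struct lut_state st;
        int sizes[] = { 256, 1024, 4096 };

        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
            if (pick_lut_mode(&st, sizes[i]))
                printf("%d entries -> mode %d\n", sizes[i], st.mode);
            else
                printf("%d entries rejected\n", sizes[i]);
        }
        return 0;
    }
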
*/ asyw->xlut.i.load = wndwc57e_ilut_load; + return true; } static const struct nv50_wndw_func @@ -183,6 +185,7 @@ wndwc57e = { .ntfy_wait_begun = base507c_ntfy_wait_begun, .ilut = wndwc57e_ilut, .ilut_identity = true, + .ilut_size = 1024, .xlut_set = wndwc57e_ilut_set, .xlut_clr = wndwc57e_ilut_clr, .csc = base907c_csc, diff --git a/drivers/gpu/drm/nouveau/include/nvfw/acr.h b/drivers/gpu/drm/nouveau/include/nvfw/acr.h new file mode 100644 index 000000000000..e65d6a8db104 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/acr.h @@ -0,0 +1,152 @@ +#ifndef __NVFW_ACR_H__ +#define __NVFW_ACR_H__ + +struct wpr_header { +#define WPR_HEADER_V0_FALCON_ID_INVALID 0xffffffff + u32 falcon_id; + u32 lsb_offset; + u32 bootstrap_owner; + u32 lazy_bootstrap; +#define WPR_HEADER_V0_STATUS_NONE 0 +#define WPR_HEADER_V0_STATUS_COPY 1 +#define WPR_HEADER_V0_STATUS_VALIDATION_CODE_FAILED 2 +#define WPR_HEADER_V0_STATUS_VALIDATION_DATA_FAILED 3 +#define WPR_HEADER_V0_STATUS_VALIDATION_DONE 4 +#define WPR_HEADER_V0_STATUS_VALIDATION_SKIPPED 5 +#define WPR_HEADER_V0_STATUS_BOOTSTRAP_READY 6 + u32 status; +}; + +void wpr_header_dump(struct nvkm_subdev *, const struct wpr_header *); + +struct wpr_header_v1 { +#define WPR_HEADER_V1_FALCON_ID_INVALID 0xffffffff + u32 falcon_id; + u32 lsb_offset; + u32 bootstrap_owner; + u32 lazy_bootstrap; + u32 bin_version; +#define WPR_HEADER_V1_STATUS_NONE 0 +#define WPR_HEADER_V1_STATUS_COPY 1 +#define WPR_HEADER_V1_STATUS_VALIDATION_CODE_FAILED 2 +#define WPR_HEADER_V1_STATUS_VALIDATION_DATA_FAILED 3 +#define WPR_HEADER_V1_STATUS_VALIDATION_DONE 4 +#define WPR_HEADER_V1_STATUS_VALIDATION_SKIPPED 5 +#define WPR_HEADER_V1_STATUS_BOOTSTRAP_READY 6 +#define WPR_HEADER_V1_STATUS_REVOCATION_CHECK_FAILED 7 + u32 status; +}; + +void wpr_header_v1_dump(struct nvkm_subdev *, const struct wpr_header_v1 *); + +struct lsf_signature { + u8 prd_keys[2][16]; + u8 dbg_keys[2][16]; + u32 b_prd_present; + u32 b_dbg_present; + u32 falcon_id; +}; + +struct lsf_signature_v1 { + u8 prd_keys[2][16]; + u8 dbg_keys[2][16]; + u32 b_prd_present; + u32 b_dbg_present; + u32 falcon_id; + u32 supports_versioning; + u32 version; + u32 depmap_count; + u8 depmap[11/*LSF_LSB_DEPMAP_SIZE*/ * 2 * 4]; + u8 kdf[16]; +}; + +struct lsb_header_tail { + u32 ucode_off; + u32 ucode_size; + u32 data_size; + u32 bl_code_size; + u32 bl_imem_off; + u32 bl_data_off; + u32 bl_data_size; + u32 app_code_off; + u32 app_code_size; + u32 app_data_off; + u32 app_data_size; + u32 flags; +}; + +struct lsb_header { + struct lsf_signature signature; + struct lsb_header_tail tail; +}; + +void lsb_header_dump(struct nvkm_subdev *, struct lsb_header *); + +struct lsb_header_v1 { + struct lsf_signature_v1 signature; + struct lsb_header_tail tail; +}; + +void lsb_header_v1_dump(struct nvkm_subdev *, struct lsb_header_v1 *); + +struct flcn_acr_desc { + union { + u8 reserved_dmem[0x200]; + u32 signatures[4]; + } ucode_reserved_space; + u32 wpr_region_id; + u32 wpr_offset; + u32 mmu_mem_range; + struct { + u32 no_regions; + struct { + u32 start_addr; + u32 end_addr; + u32 region_id; + u32 read_mask; + u32 write_mask; + u32 client_mask; + } region_props[2]; + } regions; + u32 ucode_blob_size; + u64 ucode_blob_base __aligned(8); + struct { + u32 vpr_enabled; + u32 vpr_start; + u32 vpr_end; + u32 hdcp_policies; + } vpr_desc; +}; + +void flcn_acr_desc_dump(struct nvkm_subdev *, struct flcn_acr_desc *); + +struct flcn_acr_desc_v1 { + u8 reserved_dmem[0x200]; + u32 signatures[4]; + u32 wpr_region_id; + u32 wpr_offset; + u32 mmu_memory_range; + 
struct { + u32 no_regions; + struct { + u32 start_addr; + u32 end_addr; + u32 region_id; + u32 read_mask; + u32 write_mask; + u32 client_mask; + u32 shadow_mem_start_addr; + } region_props[2]; + } regions; + u32 ucode_blob_size; + u64 ucode_blob_base __aligned(8); + struct { + u32 vpr_enabled; + u32 vpr_start; + u32 vpr_end; + u32 hdcp_policies; + } vpr_desc; +}; + +void flcn_acr_desc_v1_dump(struct nvkm_subdev *, struct flcn_acr_desc_v1 *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvfw/flcn.h b/drivers/gpu/drm/nouveau/include/nvfw/flcn.h new file mode 100644 index 000000000000..e090f347d220 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/flcn.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVFW_FLCN_H__ +#define __NVFW_FLCN_H__ +#include <core/os.h> +struct nvkm_subdev; + +struct loader_config { + u32 dma_idx; + u32 code_dma_base; + u32 code_size_total; + u32 code_size_to_load; + u32 code_entry_point; + u32 data_dma_base; + u32 data_size; + u32 overlay_dma_base; + u32 argc; + u32 argv; + u32 code_dma_base1; + u32 data_dma_base1; + u32 overlay_dma_base1; +}; + +void +loader_config_dump(struct nvkm_subdev *, const struct loader_config *); + +struct loader_config_v1 { + u32 reserved; + u32 dma_idx; + u64 code_dma_base; + u32 code_size_total; + u32 code_size_to_load; + u32 code_entry_point; + u64 data_dma_base; + u32 data_size; + u64 overlay_dma_base; + u32 argc; + u32 argv; +} __packed; + +void +loader_config_v1_dump(struct nvkm_subdev *, const struct loader_config_v1 *); + +struct flcn_bl_dmem_desc { + u32 reserved[4]; + u32 signature[4]; + u32 ctx_dma; + u32 code_dma_base; + u32 non_sec_code_off; + u32 non_sec_code_size; + u32 sec_code_off; + u32 sec_code_size; + u32 code_entry_point; + u32 data_dma_base; + u32 data_size; + u32 code_dma_base1; + u32 data_dma_base1; +}; + +void +flcn_bl_dmem_desc_dump(struct nvkm_subdev *, const struct flcn_bl_dmem_desc *); + +struct flcn_bl_dmem_desc_v1 { + u32 reserved[4]; + u32 signature[4]; + u32 ctx_dma; + u64 code_dma_base; + u32 non_sec_code_off; + u32 non_sec_code_size; + u32 sec_code_off; + u32 sec_code_size; + u32 code_entry_point; + u64 data_dma_base; + u32 data_size; +} __packed; + +void flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *, + const struct flcn_bl_dmem_desc_v1 *); + +struct flcn_bl_dmem_desc_v2 { + u32 reserved[4]; + u32 signature[4]; + u32 ctx_dma; + u64 code_dma_base; + u32 non_sec_code_off; + u32 non_sec_code_size; + u32 sec_code_off; + u32 sec_code_size; + u32 code_entry_point; + u64 data_dma_base; + u32 data_size; + u32 argc; + u32 argv; +} __packed; + +void flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *, + const struct flcn_bl_dmem_desc_v2 *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvfw/fw.h b/drivers/gpu/drm/nouveau/include/nvfw/fw.h new file mode 100644 index 000000000000..a7cf1188c9d6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/fw.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVFW_FW_H__ +#define __NVFW_FW_H__ +#include <core/os.h> +struct nvkm_subdev; + +struct nvfw_bin_hdr { + u32 bin_magic; + u32 bin_ver; + u32 bin_size; + u32 header_offset; + u32 data_offset; + u32 data_size; +}; + +const struct nvfw_bin_hdr *nvfw_bin_hdr(struct nvkm_subdev *, const void *); + +struct nvfw_bl_desc { + u32 start_tag; + u32 dmem_load_off; + u32 code_off; + u32 code_size; + u32 data_off; + u32 data_size; +}; + +const struct nvfw_bl_desc *nvfw_bl_desc(struct nvkm_subdev *, const void *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvfw/hs.h 
b/drivers/gpu/drm/nouveau/include/nvfw/hs.h new file mode 100644 index 000000000000..64d0d32200c2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/hs.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVFW_HS_H__ +#define __NVFW_HS_H__ +#include <core/os.h> +struct nvkm_subdev; + +struct nvfw_hs_header { + u32 sig_dbg_offset; + u32 sig_dbg_size; + u32 sig_prod_offset; + u32 sig_prod_size; + u32 patch_loc; + u32 patch_sig; + u32 hdr_offset; + u32 hdr_size; +}; + +const struct nvfw_hs_header *nvfw_hs_header(struct nvkm_subdev *, const void *); + +struct nvfw_hs_load_header { + u32 non_sec_code_off; + u32 non_sec_code_size; + u32 data_dma_base; + u32 data_size; + u32 num_apps; + u32 apps[0]; +}; + +const struct nvfw_hs_load_header * +nvfw_hs_load_header(struct nvkm_subdev *, const void *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvfw/ls.h b/drivers/gpu/drm/nouveau/include/nvfw/ls.h new file mode 100644 index 000000000000..f63692a2a16c --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/ls.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVFW_LS_H__ +#define __NVFW_LS_H__ +#include <core/os.h> +struct nvkm_subdev; + +struct nvfw_ls_desc_head { + u32 descriptor_size; + u32 image_size; + u32 tools_version; + u32 app_version; + char date[64]; + u32 bootloader_start_offset; + u32 bootloader_size; + u32 bootloader_imem_offset; + u32 bootloader_entry_point; + u32 app_start_offset; + u32 app_size; + u32 app_imem_offset; + u32 app_imem_entry; + u32 app_dmem_offset; + u32 app_resident_code_offset; + u32 app_resident_code_size; + u32 app_resident_data_offset; + u32 app_resident_data_size; +}; + +struct nvfw_ls_desc { + struct nvfw_ls_desc_head head; + u32 nb_overlays; + struct { + u32 start; + u32 size; + } load_ovl[64]; + u32 compressed; +}; + +const struct nvfw_ls_desc *nvfw_ls_desc(struct nvkm_subdev *, const void *); + +struct nvfw_ls_desc_v1 { + struct nvfw_ls_desc_head head; + u32 nb_imem_overlays; + u32 nb_dmem_overlays; + struct { + u32 start; + u32 size; + } load_ovl[64]; + u32 compressed; +}; + +const struct nvfw_ls_desc_v1 * +nvfw_ls_desc_v1(struct nvkm_subdev *, const void *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvfw/pmu.h b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h new file mode 100644 index 000000000000..452ed7d03827 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h @@ -0,0 +1,98 @@ +#ifndef __NVFW_PMU_H__ +#define __NVFW_PMU_H__ + +struct nv_pmu_args { + u32 reserved; + u32 freq_hz; + u32 trace_size; + u32 trace_dma_base; + u16 trace_dma_base1; + u8 trace_dma_offset; + u32 trace_dma_idx; + bool secure_mode; + bool raise_priv_sec; + struct { + u32 dma_base; + u16 dma_base1; + u8 dma_offset; + u16 fb_size; + u8 dma_idx; + } gc6_ctx; + u8 pad; +}; + +#define NV_PMU_UNIT_INIT 0x07 +#define NV_PMU_UNIT_ACR 0x0a + +struct nv_pmu_init_msg { + struct nv_falcon_msg hdr; +#define NV_PMU_INIT_MSG_INIT 0x00 + u8 msg_type; + + u8 pad; + u16 os_debug_entry_point; + + struct { + u16 size; + u16 offset; + u8 index; + u8 pad; + } queue_info[5]; + + u16 sw_managed_area_offset; + u16 sw_managed_area_size; +}; + +struct nv_pmu_acr_cmd { + struct nv_falcon_cmd hdr; +#define NV_PMU_ACR_CMD_INIT_WPR_REGION 0x00 +#define NV_PMU_ACR_CMD_BOOTSTRAP_FALCON 0x01 +#define NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS 0x03 + u8 cmd_type; +}; + +struct nv_pmu_acr_msg { + struct nv_falcon_cmd hdr; + u8 msg_type; +}; + +struct nv_pmu_acr_init_wpr_region_cmd { + struct nv_pmu_acr_cmd cmd; + u32 region_id; + u32 wpr_offset; +}; + +struct 
nv_pmu_acr_init_wpr_region_msg { + struct nv_pmu_acr_msg msg; + u32 error_code; +}; + +struct nv_pmu_acr_bootstrap_falcon_cmd { + struct nv_pmu_acr_cmd cmd; +#define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0x00000000 +#define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO 0x00000001 + u32 flags; + u32 falcon_id; +}; + +struct nv_pmu_acr_bootstrap_falcon_msg { + struct nv_pmu_acr_msg msg; + u32 falcon_id; +}; + +struct nv_pmu_acr_bootstrap_multiple_falcons_cmd { + struct nv_pmu_acr_cmd cmd; +#define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES 0x00000000 +#define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_NO 0x00000001 + u32 flags; + u32 falcon_mask; + u32 use_va_mask; + u32 wpr_lo; + u32 wpr_hi; +}; + +struct nv_pmu_acr_bootstrap_multiple_falcons_msg { + struct nv_pmu_acr_msg msg; + u32 falcon_mask; +}; +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvfw/sec2.h b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h new file mode 100644 index 000000000000..03496558b775 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h @@ -0,0 +1,60 @@ +#ifndef __NVFW_SEC2_H__ +#define __NVFW_SEC2_H__ + +struct nv_sec2_args { + u32 freq_hz; + u32 falc_trace_size; + u32 falc_trace_dma_base; + u32 falc_trace_dma_idx; + bool secure_mode; +}; + +#define NV_SEC2_UNIT_INIT 0x01 +#define NV_SEC2_UNIT_ACR 0x08 + +struct nv_sec2_init_msg { + struct nv_falcon_msg hdr; +#define NV_SEC2_INIT_MSG_INIT 0x00 + u8 msg_type; + + u8 num_queues; + u16 os_debug_entry_point; + + struct { + u32 offset; + u16 size; + u8 index; +#define NV_SEC2_INIT_MSG_QUEUE_ID_CMDQ 0x00 +#define NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ 0x01 + u8 id; + } queue_info[2]; + + u32 sw_managed_area_offset; + u16 sw_managed_area_size; +}; + +struct nv_sec2_acr_cmd { + struct nv_falcon_cmd hdr; +#define NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON 0x00 + u8 cmd_type; +}; + +struct nv_sec2_acr_msg { + struct nv_falcon_cmd hdr; + u8 msg_type; +}; + +struct nv_sec2_acr_bootstrap_falcon_cmd { + struct nv_sec2_acr_cmd cmd; +#define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0x00000000 +#define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO 0x00000001 + u32 flags; + u32 falcon_id; +}; + +struct nv_sec2_acr_bootstrap_falcon_msg { + struct nv_sec2_acr_msg msg; + u32 error_code; + u32 falcon_id; +}; +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index f704ae600e94..30659747ffe8 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -166,6 +166,8 @@ #define VOLTA_A /* cl9097.h */ 0x0000c397 +#define TURING_A /* cl9097.h */ 0x0000c597 + #define NV74_BSP 0x000074b0 #define GT212_MSVLD 0x000085b1 @@ -207,6 +209,7 @@ #define PASCAL_COMPUTE_A 0x0000c0c0 #define PASCAL_COMPUTE_B 0x0000c1c0 #define VOLTA_COMPUTE_A 0x0000c3c0 +#define TURING_COMPUTE_A 0x0000c5c0 #define NV74_CIPHER 0x000074c1 #endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0008.h b/drivers/gpu/drm/nouveau/include/nvif/if0008.h index 8450127420f5..c21d09f04f1d 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/if0008.h +++ b/drivers/gpu/drm/nouveau/include/nvif/if0008.h @@ -35,7 +35,7 @@ struct nvif_mmu_type_v0 { struct nvif_mmu_kind_v0 { __u8 version; - __u8 pad01[1]; + __u8 kind_inv; __u16 count; __u8 data[]; }; diff --git a/drivers/gpu/drm/nouveau/include/nvif/mmu.h b/drivers/gpu/drm/nouveau/include/nvif/mmu.h index 747ecf67e403..cec1e88a0a05 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/mmu.h +++ b/drivers/gpu/drm/nouveau/include/nvif/mmu.h @@ -7,6 +7,7 @@ struct 
nvif_mmu { u8 dmabits; u8 heap_nr; u8 type_nr; + u8 kind_inv; u16 kind_nr; s32 mem; @@ -36,9 +37,8 @@ void nvif_mmu_fini(struct nvif_mmu *); static inline bool nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind) { - const u8 invalid = mmu->kind_nr - 1; if (kind) { - if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid) + if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv) return false; } return true; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index 6d55cd0476aa..5c007ce62fc3 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h @@ -23,13 +23,13 @@ enum nvkm_devidx { NVKM_SUBDEV_MMU, NVKM_SUBDEV_BAR, NVKM_SUBDEV_FAULT, + NVKM_SUBDEV_ACR, NVKM_SUBDEV_PMU, NVKM_SUBDEV_VOLT, NVKM_SUBDEV_ICCSENSE, NVKM_SUBDEV_THERM, NVKM_SUBDEV_CLK, NVKM_SUBDEV_GSP, - NVKM_SUBDEV_SECBOOT, NVKM_ENGINE_BSP, @@ -129,6 +129,7 @@ struct nvkm_device { struct notifier_block nb; } acpi; + struct nvkm_acr *acr; struct nvkm_bar *bar; struct nvkm_bios *bios; struct nvkm_bus *bus; @@ -149,7 +150,6 @@ struct nvkm_device { struct nvkm_subdev *mxm; struct nvkm_pci *pci; struct nvkm_pmu *pmu; - struct nvkm_secboot *secboot; struct nvkm_therm *therm; struct nvkm_timer *timer; struct nvkm_top *top; @@ -169,7 +169,7 @@ struct nvkm_device { struct nvkm_engine *mspdec; struct nvkm_engine *msppp; struct nvkm_engine *msvld; - struct nvkm_engine *nvenc[3]; + struct nvkm_nvenc *nvenc[3]; struct nvkm_nvdec *nvdec[3]; struct nvkm_pm *pm; struct nvkm_engine *sec; @@ -202,6 +202,7 @@ struct nvkm_device_quirk { struct nvkm_device_chip { const char *name; + int (*acr )(struct nvkm_device *, int idx, struct nvkm_acr **); int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **); int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **); int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **); @@ -222,7 +223,6 @@ struct nvkm_device_chip { int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **); int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **); int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **); - int (*secboot )(struct nvkm_device *, int idx, struct nvkm_secboot **); int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **); int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **); int (*top )(struct nvkm_device *, int idx, struct nvkm_top **); @@ -242,7 +242,7 @@ struct nvkm_device_chip { int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **); + int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_nvenc **); int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **); int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **); int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h new file mode 100644 index 000000000000..daa8e4bfb6bf --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h @@ -0,0 +1,77 @@ +#ifndef __NVKM_FALCON_H__ +#define __NVKM_FALCON_H__ +#include <engine/falcon.h> + +int nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *owner, + const char *name, u32 addr, struct nvkm_falcon *); +void 
nvkm_falcon_dtor(struct nvkm_falcon *); + +void nvkm_falcon_v1_load_imem(struct nvkm_falcon *, + void *, u32, u32, u16, u8, bool); +void nvkm_falcon_v1_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8); +void nvkm_falcon_v1_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *); +void nvkm_falcon_v1_bind_context(struct nvkm_falcon *, struct nvkm_memory *); +int nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *, u32); +int nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *, u32); +void nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *, u32 start_addr); +void nvkm_falcon_v1_start(struct nvkm_falcon *); +int nvkm_falcon_v1_enable(struct nvkm_falcon *); +void nvkm_falcon_v1_disable(struct nvkm_falcon *); + +void gp102_sec2_flcn_bind_context(struct nvkm_falcon *, struct nvkm_memory *); +int gp102_sec2_flcn_enable(struct nvkm_falcon *); + +#define FLCN_PRINTK(t,f,fmt,a...) do { \ + if (nvkm_subdev_name[(f)->owner->index] != (f)->name) \ + nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \ + else \ + nvkm_##t((f)->owner, fmt"\n", ##a); \ +} while(0) +#define FLCN_DBG(f,fmt,a...) FLCN_PRINTK(debug, (f), fmt, ##a) +#define FLCN_ERR(f,fmt,a...) FLCN_PRINTK(error, (f), fmt, ##a) + +/** + * struct nv_falcon_msg - header for all messages + * + * @unit_id: id of firmware process that sent the message + * @size: total size of message + * @ctrl_flags: control flags + * @seq_id: used to match a message from its corresponding command + */ +struct nv_falcon_msg { + u8 unit_id; + u8 size; + u8 ctrl_flags; + u8 seq_id; +}; + +#define nv_falcon_cmd nv_falcon_msg +#define NV_FALCON_CMD_UNIT_ID_REWIND 0x00 + +struct nvkm_falcon_qmgr; +int nvkm_falcon_qmgr_new(struct nvkm_falcon *, struct nvkm_falcon_qmgr **); +void nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **); + +typedef int +(*nvkm_falcon_qmgr_callback)(void *priv, struct nv_falcon_msg *); + +struct nvkm_falcon_cmdq; +int nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *, const char *name, + struct nvkm_falcon_cmdq **); +void nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **); +void nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *, + u32 index, u32 offset, u32 size); +void nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *); +int nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *, struct nv_falcon_cmd *, + nvkm_falcon_qmgr_callback, void *priv, + unsigned long timeout_jiffies); + +struct nvkm_falcon_msgq; +int nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *, const char *name, + struct nvkm_falcon_msgq **); +void nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **); +void nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *, + u32 index, u32 offset, u32 size); +int nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *, void *, u32 size); +void nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h index 383370c32428..d14b7fb07368 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h @@ -1,12 +1,55 @@ /* SPDX-License-Identifier: MIT */ #ifndef __NVKM_FIRMWARE_H__ #define __NVKM_FIRMWARE_H__ +#include <core/option.h> #include <core/subdev.h> -int nvkm_firmware_get_version(const struct nvkm_subdev *, const char *fwname, - int min_version, int max_version, - const struct firmware **); -int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname, +int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname, int ver, const struct firmware **); void 
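
The new <core/falcon.h> centralises the queue plumbing used by firmware-driven falcons: every command and message starts with an nv_falcon_msg header, and the seq_id field is what lets the queue manager match a reply back to the command handed to nvkm_falcon_cmdq_send() before the caller's callback runs. A loose userspace illustration of that seq_id round trip, with the firmware side faked as a single echo slot and only the header layout taken from the definitions above:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Header layout copied from the nv_falcon_msg definition above. */
    struct nv_falcon_msg {
        uint8_t unit_id;
        uint8_t size;
        uint8_t ctrl_flags;
        uint8_t seq_id;
    };

    struct fake_cmd {
        struct nv_falcon_msg hdr;
        uint32_t payload;
    };

    /* Pretend firmware: echoes the header back so seq_id can be matched. */
    static struct nv_falcon_msg fake_reply;

    static void fake_send(const struct fake_cmd *cmd)
    {
        fake_reply = cmd->hdr;  /* firmware answers with the same seq_id */
    }

    int main(void)
    {
        struct fake_cmd cmd;
        static uint8_t next_seq;

        memset(&cmd, 0, sizeof(cmd));
        cmd.hdr.unit_id = 0x0a;     /* e.g. NV_PMU_UNIT_ACR */
        cmd.hdr.size = sizeof(cmd);
        cmd.hdr.seq_id = next_seq++;
        cmd.payload = 42;

        fake_send(&cmd);

        /* The queue manager does the equivalent of this match before it
         * invokes the completion callback given to nvkm_falcon_cmdq_send(). */
        if (fake_reply.seq_id == cmd.hdr.seq_id)
            printf("reply matched command seq %u\n", fake_reply.seq_id);
        return 0;
    }
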
nvkm_firmware_put(const struct firmware *); + +int nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *path, + const char *name, int ver, struct nvkm_blob *); +int nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *path, + const char *name, int ver, + const struct firmware **); + +#define nvkm_firmware_load(s,l,o,p...) ({ \ + struct nvkm_subdev *_s = (s); \ + const char *_opts = (o); \ + char _option[32]; \ + typeof(l[0]) *_list = (l), *_next, *_fwif = NULL; \ + int _ver, _fwv, _ret = 0; \ + \ + snprintf(_option, sizeof(_option), "Nv%sFw", _opts); \ + _ver = nvkm_longopt(_s->device->cfgopt, _option, -2); \ + if (_ver >= -1) { \ + for (_next = _list; !_fwif && _next->load; _next++) { \ + if (_next->version == _ver) \ + _fwif = _next; \ + } \ + _ret = _fwif ? 0 : -EINVAL; \ + } \ + \ + if (_ret == 0) { \ + snprintf(_option, sizeof(_option), "Nv%sFwVer", _opts); \ + _fwv = _fwif ? _fwif->version : -1; \ + _ver = nvkm_longopt(_s->device->cfgopt, _option, _fwv); \ + for (_next = _fwif ? _fwif : _list; _next->load; _next++) { \ + _fwv = (_ver >= 0) ? _ver : _next->version; \ + _ret = _next->load(p, _fwv, _next); \ + if (_ret == 0 || _ver >= 0) { \ + _fwif = _next; \ + break; \ + } \ + } \ + } \ + \ + if (_ret) { \ + nvkm_error(_s, "failed to load firmware\n"); \ + _fwif = ERR_PTR(_ret); \ + } \ + \ + _fwif; \ +}) #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h index b23bf6109f2d..74d3f1a809d7 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h @@ -84,6 +84,22 @@ void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *, nvkm_wo32((o), __a + 4, upper_32_bits(__d)); \ } while(0) +#define nvkm_robj(o,a,p,s) do { \ + u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p); \ + while (_size--) { \ + *(_data++) = nvkm_ro32((o), _addr); \ + _addr += 4; \ + } \ +} while(0) + +#define nvkm_wobj(o,a,p,s) do { \ + u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p); \ + while (_size--) { \ + nvkm_wo32((o), _addr, *(_data++)); \ + _addr += 4; \ + } \ +} while(0) + #define nvkm_fill(t,s,o,a,d,c) do { \ u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s; \ u##t __iomem *_m = nvkm_kmap(o); \ diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h b/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h deleted file mode 100644 index bf3e532665fb..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
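
The nvkm_firmware_load() macro above picks an entry from a newest-first table of { version, load } pairs, optionally overridden by the Nv<X>Fw and Nv<X>FwVer config options. Stripped of the option handling, the selection reduces to roughly the loop below; the fwif table contents and loader functions are invented for illustration:

    #include <stdio.h>

    struct fwif {
        int version;
        int (*load)(int ver);   /* an entry with a NULL load ends the table */
    };

    static int load_v1(int ver) { printf("loading v%d blob\n", ver); return 0; }
    static int load_v0(int ver) { printf("loading v%d blob\n", ver); return 0; }

    int main(void)
    {
        /* Newest first, like the real fwif tables: try v1, fall back to v0. */
        static const struct fwif list[] = {
            { 1, load_v1 },
            { 0, load_v0 },
            { 0, NULL },
        };
        const struct fwif *fwif;

        for (fwif = list; fwif->load; fwif++) {
            if (fwif->load(fwif->version) == 0) {
                printf("selected fwif version %d\n", fwif->version);
                break;
            }
        }
        return fwif->load ? 0 : 1;
    }
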
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_CORE_MSGQUEUE_H -#define __NVKM_CORE_MSGQUEUE_H -#include <subdev/secboot.h> -struct nvkm_msgqueue; - -/* Hopefully we will never have firmware arguments larger than that... */ -#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100 - -int nvkm_msgqueue_new(u32, struct nvkm_falcon *, const struct nvkm_secboot *, - struct nvkm_msgqueue **); -void nvkm_msgqueue_del(struct nvkm_msgqueue **); -void nvkm_msgqueue_recv(struct nvkm_msgqueue *); -int nvkm_msgqueue_reinit(struct nvkm_msgqueue *); - -/* useful if we run a NVIDIA-signed firmware */ -void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *); - -/* interface to ACR unit running on falcon (NVIDIA signed firmware) */ -int nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *, unsigned long); - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h index 029a416197db..d7ba3205207f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h @@ -21,4 +21,17 @@ iowrite32_native(lower_32_bits(_v), &_p[0]); \ iowrite32_native(upper_32_bits(_v), &_p[1]); \ } while(0) + +struct nvkm_blob { + void *data; + u32 size; +}; + +static inline void +nvkm_blob_dtor(struct nvkm_blob *blob) +{ + kfree(blob->data); + blob->data = NULL; + blob->size = 0; +} #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h index 23b582d696c6..27c1f868552c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: MIT */ -#ifndef __NVKM_FALCON_H__ -#define __NVKM_FALCON_H__ +#ifndef __NVKM_FLCNEN_H__ +#define __NVKM_FLCNEN_H__ #define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine) #include <core/engine.h> struct nvkm_fifo_chan; @@ -23,12 +23,13 @@ struct nvkm_falcon { struct mutex mutex; struct mutex dmem_mutex; + bool oneinit; + const struct nvkm_subdev *user; u8 version; u8 secret; bool debug; - bool has_emem; struct nvkm_memory *core; bool external; @@ -76,9 +77,14 @@ struct nvkm_falcon_func { } data; void (*init)(struct nvkm_falcon *); void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *); + + u32 debug; + u32 fbif; + void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool); void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8); void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *); + u32 emem_addr; void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *); int (*wait_for_halt)(struct nvkm_falcon *, u32); int (*clear_interrupt)(struct nvkm_falcon *, u32); @@ -86,6 +92,13 @@ struct nvkm_falcon_func { void (*start)(struct nvkm_falcon *); int (*enable)(struct nvkm_falcon *falcon); void (*disable)(struct nvkm_falcon *falcon); + int (*reset)(struct nvkm_falcon *); + + struct { + u32 head; + u32 tail; + u32 stride; + } cmdq, msgq; struct nvkm_sclass sclass[]; }; @@ -122,5 +135,4 @@ int nvkm_falcon_clear_interrupt(struct nvkm_falcon *, u32); int nvkm_falcon_enable(struct nvkm_falcon *); void nvkm_falcon_disable(struct nvkm_falcon *); int nvkm_falcon_reset(struct nvkm_falcon *); - #endif diff --git 
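
nvkm_blob, added to <core/os.h>, is simply an owned copy of firmware bytes plus its size; nvkm_firmware_load_blob() later in the series fills one via kmemdup(), and nvkm_blob_dtor() releases it and resets the fields so it can be torn down safely more than once. A standalone approximation using the C library, with a fixed source buffer standing in for the requested firmware:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Mirrors the nvkm_blob added to <core/os.h>: an owned buffer plus size. */
    struct blob {
        void *data;
        uint32_t size;
    };

    /* Equivalent of nvkm_firmware_load_blob()'s kmemdup() step. */
    static int blob_init(struct blob *blob, const void *src, uint32_t size)
    {
        blob->data = malloc(size);
        if (!blob->data)
            return -1;
        memcpy(blob->data, src, size);
        blob->size = size;
        return 0;
    }

    /* Equivalent of nvkm_blob_dtor(): free and reset so a second dtor is safe. */
    static void blob_dtor(struct blob *blob)
    {
        free(blob->data);
        blob->data = NULL;
        blob->size = 0;
    }

    int main(void)
    {
        static const uint8_t fw[] = { 0xde, 0xad, 0xbe, 0xef };
        struct blob blob;

        if (blob_init(&blob, fw, sizeof(fw)))
            return 1;
        printf("blob holds %u byte(s)\n", (unsigned)blob.size);
        blob_dtor(&blob);
        return 0;
    }
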
a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h index 2cde36f3c064..1530c81f86a2 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h @@ -50,6 +50,8 @@ int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **); +int gp108_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **); +int tu102_gr_new(struct nvkm_device *, int, struct nvkm_gr **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h index 7c7d7f0abfcc..1b3183e31606 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h @@ -3,13 +3,13 @@ #define __NVKM_NVDEC_H__ #define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine) #include <core/engine.h> +#include <core/falcon.h> struct nvkm_nvdec { + const struct nvkm_nvdec_func *func; struct nvkm_engine engine; - u32 addr; - - struct nvkm_falcon *falcon; + struct nvkm_falcon falcon; }; -int gp102_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **); +int gm107_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h index 21624046d0a1..33e6ba8adc8d 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h @@ -1,5 +1,15 @@ /* SPDX-License-Identifier: MIT */ #ifndef __NVKM_NVENC_H__ #define __NVKM_NVENC_H__ +#define nvkm_nvenc(p) container_of((p), struct nvkm_nvenc, engine) #include <core/engine.h> +#include <core/falcon.h> + +struct nvkm_nvenc { + const struct nvkm_nvenc_func *func; + struct nvkm_engine engine; + struct nvkm_falcon falcon; +}; + +int gm107_nvenc_new(struct nvkm_device *, int, struct nvkm_nvenc **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h index 33078f86c779..34dc765648d5 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h @@ -1,17 +1,24 @@ /* SPDX-License-Identifier: MIT */ #ifndef __NVKM_SEC2_H__ #define __NVKM_SEC2_H__ +#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine) #include <core/engine.h> +#include <core/falcon.h> struct nvkm_sec2 { + const struct nvkm_sec2_func *func; struct nvkm_engine engine; - u32 addr; + struct nvkm_falcon falcon; + + struct nvkm_falcon_qmgr *qmgr; + struct nvkm_falcon_cmdq *cmdq; + struct nvkm_falcon_msgq *msgq; - struct nvkm_falcon *falcon; - struct nvkm_msgqueue *queue; struct work_struct work; + bool initmsg_received; }; int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **); +int gp108_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **); int tu102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h new file mode 100644 index 000000000000..5d9c3a966de6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h @@ -0,0 +1,126 @@ +/* 
SPDX-License-Identifier: MIT */ +#ifndef __NVKM_ACR_H__ +#define __NVKM_ACR_H__ +#define nvkm_acr(p) container_of((p), struct nvkm_acr, subdev) +#include <core/subdev.h> +#include <core/falcon.h> + +enum nvkm_acr_lsf_id { + NVKM_ACR_LSF_PMU = 0, + NVKM_ACR_LSF_GSPLITE = 1, + NVKM_ACR_LSF_FECS = 2, + NVKM_ACR_LSF_GPCCS = 3, + NVKM_ACR_LSF_NVDEC = 4, + NVKM_ACR_LSF_SEC2 = 7, + NVKM_ACR_LSF_MINION = 10, + NVKM_ACR_LSF_NUM +}; + +static inline const char * +nvkm_acr_lsf_id(enum nvkm_acr_lsf_id id) +{ + switch (id) { + case NVKM_ACR_LSF_PMU : return "pmu"; + case NVKM_ACR_LSF_GSPLITE: return "gsplite"; + case NVKM_ACR_LSF_FECS : return "fecs"; + case NVKM_ACR_LSF_GPCCS : return "gpccs"; + case NVKM_ACR_LSF_NVDEC : return "nvdec"; + case NVKM_ACR_LSF_SEC2 : return "sec2"; + case NVKM_ACR_LSF_MINION : return "minion"; + default: + return "unknown"; + } +} + +struct nvkm_acr { + const struct nvkm_acr_func *func; + struct nvkm_subdev subdev; + + struct list_head hsfw, hsf; + struct list_head lsfw, lsf; + + struct nvkm_memory *wpr; + u64 wpr_start; + u64 wpr_end; + u64 shadow_start; + + struct nvkm_memory *inst; + struct nvkm_vmm *vmm; + + bool done; + + const struct firmware *wpr_fw; + bool wpr_comp; + u64 wpr_prev; +}; + +bool nvkm_acr_managed_falcon(struct nvkm_device *, enum nvkm_acr_lsf_id); +int nvkm_acr_bootstrap_falcons(struct nvkm_device *, unsigned long mask); + +int gm200_acr_new(struct nvkm_device *, int, struct nvkm_acr **); +int gm20b_acr_new(struct nvkm_device *, int, struct nvkm_acr **); +int gp102_acr_new(struct nvkm_device *, int, struct nvkm_acr **); +int gp108_acr_new(struct nvkm_device *, int, struct nvkm_acr **); +int gp10b_acr_new(struct nvkm_device *, int, struct nvkm_acr **); +int tu102_acr_new(struct nvkm_device *, int, struct nvkm_acr **); + +struct nvkm_acr_lsfw { + const struct nvkm_acr_lsf_func *func; + struct nvkm_falcon *falcon; + enum nvkm_acr_lsf_id id; + + struct list_head head; + + struct nvkm_blob img; + + const struct firmware *sig; + + u32 bootloader_size; + u32 bootloader_imem_offset; + + u32 app_size; + u32 app_start_offset; + u32 app_imem_entry; + u32 app_resident_code_offset; + u32 app_resident_code_size; + u32 app_resident_data_offset; + u32 app_resident_data_size; + + u32 ucode_size; + u32 data_size; + + struct { + u32 lsb; + u32 img; + u32 bld; + } offset; + u32 bl_data_size; +}; + +struct nvkm_acr_lsf_func { +/* The (currently) map directly to LSB header flags. 
*/ +#define NVKM_ACR_LSF_LOAD_CODE_AT_0 0x00000001 +#define NVKM_ACR_LSF_DMACTL_REQ_CTX 0x00000004 +#define NVKM_ACR_LSF_FORCE_PRIV_LOAD 0x00000008 + u32 flags; + u32 bld_size; + void (*bld_write)(struct nvkm_acr *, u32 bld, struct nvkm_acr_lsfw *); + void (*bld_patch)(struct nvkm_acr *, u32 bld, s64 adjust); + int (*boot)(struct nvkm_falcon *); + int (*bootstrap_falcon)(struct nvkm_falcon *, enum nvkm_acr_lsf_id); + int (*bootstrap_multiple_falcons)(struct nvkm_falcon *, u32 mask); +}; + +int +nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *, struct nvkm_falcon *, + enum nvkm_acr_lsf_id, const char *path, + int ver, const struct nvkm_acr_lsf_func *); +int +nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *, struct nvkm_falcon *, + enum nvkm_acr_lsf_id, const char *path, + int ver, const struct nvkm_acr_lsf_func *); +int +nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *, struct nvkm_falcon *, + enum nvkm_acr_lsf_id, const char *path, + int ver, const struct nvkm_acr_lsf_func *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h index 97322f95b3ee..a513c16ab105 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h @@ -31,6 +31,7 @@ struct nvkm_fault_data { }; int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **); +int gp10b_fault_new(struct nvkm_device *, int, struct nvkm_fault **); int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **); int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index 239ad222b95a..34b56b10218a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h @@ -33,6 +33,8 @@ struct nvkm_fb { const struct nvkm_fb_func *func; struct nvkm_subdev subdev; + struct nvkm_blob vpr_scrubber; + struct nvkm_ram *ram; struct nvkm_mm tags; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 4c672a5c4cd5..06db67610a50 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -2,12 +2,11 @@ #define __NVKM_GSP_H__ #define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev) #include <core/subdev.h> +#include <core/falcon.h> struct nvkm_gsp { struct nvkm_subdev subdev; - u32 addr; - - struct nvkm_falcon *falcon; + struct nvkm_falcon falcon; }; int gv100_gsp_new(struct nvkm_device *, int, struct nvkm_gsp **); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h index 644d527c3b96..d76f60d7d29a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h @@ -40,4 +40,5 @@ int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); +int gp10b_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h index 4752006880f3..da553089d2d8 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h +++ 
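
nvkm_acr_bootstrap_falcons() takes a bitmask of nvkm_acr_lsf_id values, so a caller wanting the GR falcons rebooted would presumably pass bits for FECS and GPCCS; the actual call sites are outside this excerpt, so treat the mask-building below as an assumption. The IDs and name strings are copied from the header above, the rest is illustrative:

    #include <stdio.h>

    /* IDs and name helper copied from the new <subdev/acr.h>. */
    enum lsf_id {
        LSF_PMU = 0, LSF_GSPLITE = 1, LSF_FECS = 2, LSF_GPCCS = 3,
        LSF_NVDEC = 4, LSF_SEC2 = 7, LSF_MINION = 10, LSF_NUM
    };

    static const char *lsf_name(enum lsf_id id)
    {
        switch (id) {
        case LSF_PMU:     return "pmu";
        case LSF_GSPLITE: return "gsplite";
        case LSF_FECS:    return "fecs";
        case LSF_GPCCS:   return "gpccs";
        case LSF_NVDEC:   return "nvdec";
        case LSF_SEC2:    return "sec2";
        case LSF_MINION:  return "minion";
        default:          return "unknown";
        }
    }

    int main(void)
    {
        /* e.g. what a GR reset path might hand to nvkm_acr_bootstrap_falcons() */
        unsigned long mask = (1ul << LSF_FECS) | (1ul << LSF_GPCCS);

        for (int id = 0; id < LSF_NUM; id++) {
            if (mask & (1ul << id))
                printf("bootstrap %s (id %d)\n", lsf_name(id), id);
        }
        return 0;
    }
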
b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h @@ -2,13 +2,20 @@ #ifndef __NVKM_PMU_H__ #define __NVKM_PMU_H__ #include <core/subdev.h> -#include <engine/falcon.h> +#include <core/falcon.h> struct nvkm_pmu { const struct nvkm_pmu_func *func; struct nvkm_subdev subdev; - struct nvkm_falcon *falcon; - struct nvkm_msgqueue *queue; + struct nvkm_falcon falcon; + + struct nvkm_falcon_qmgr *qmgr; + struct nvkm_falcon_cmdq *hpq; + struct nvkm_falcon_cmdq *lpq; + struct nvkm_falcon_msgq *msgq; + bool initmsg_received; + + struct completion wpr_ready; struct { u32 base; @@ -43,6 +50,7 @@ int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); +int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); /* interface to MEMX process running on PMU */ struct nvkm_memx; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index f8015e0318d7..1b62ccc57aef 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1162,7 +1162,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, void nouveau_bo_move_init(struct nouveau_drm *drm) { - static const struct { + static const struct _method_table { const char *name; int engine; s32 oclass; @@ -1192,7 +1192,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm) { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init }, {}, { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init }, - }, *mthd = _methods; + }; + const struct _method_table *mthd = _methods; const char *name = "CPU"; int ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index fa1439941596..0ad5d87b5a8e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -635,10 +635,10 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm, unsigned long c, i; int ret = -ENOMEM; - args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL); + args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL); if (!args.src) goto out; - args.dst = kcalloc(max, sizeof(args.dst), GFP_KERNEL); + args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL); if (!args.dst) goto out_free_src; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 2cd83849600f..b65ae817eabf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -715,7 +715,6 @@ fail_nvkm: void nouveau_drm_device_remove(struct drm_device *dev) { - struct pci_dev *pdev = dev->pdev; struct nouveau_drm *drm = nouveau_drm(dev); struct nvkm_client *client; struct nvkm_device *device; @@ -727,7 +726,6 @@ nouveau_drm_device_remove(struct drm_device *dev) device = nvkm_device_find(client->device); nouveau_drm_device_fini(dev); - pci_disable_device(pdev); drm_dev_put(dev); nvkm_device_del(&device); } @@ -738,6 +736,7 @@ nouveau_drm_remove(struct pci_dev *pdev) struct drm_device *dev = pci_get_drvdata(pdev); nouveau_drm_device_remove(dev); + pci_disable_device(pdev); } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 9118df035b28..70bb6bb97af8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -156,7 +156,7 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify) fence = 
list_entry(fctx->pending.next, typeof(*fence), head); chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); - if (nouveau_fence_update(fence->channel, fctx)) + if (nouveau_fence_update(chan, fctx)) ret = NVIF_NOTIFY_DROP; } spin_unlock_irqrestore(&fctx->lock, flags); diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c index d445c6f3fece..1c3104d20571 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c +++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c @@ -741,7 +741,7 @@ nouveau_hwmon_init(struct drm_device *dev) special_groups[i++] = &pwm_fan_sensor_group; } - special_groups[i] = 0; + special_groups[i] = NULL; hwmon_dev = hwmon_device_register_with_info(dev->dev, "nouveau", dev, &nouveau_chip_info, special_groups); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 77a0c6ad3cef..7ca0a2498532 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -63,14 +63,12 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_drm *drm = nouveau_bdev(bo->bdev); - struct nouveau_mem *mem; int ret; if (drm->client.device.info.ram_size == 0) return -ENOMEM; ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg); - mem = nouveau_mem(reg); if (ret) return ret; @@ -103,11 +101,9 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_drm *drm = nouveau_bdev(bo->bdev); - struct nouveau_mem *mem; int ret; ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg); - mem = nouveau_mem(reg); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c index 5641bda2046d..47efc408efa6 100644 --- a/drivers/gpu/drm/nouveau/nvif/mmu.c +++ b/drivers/gpu/drm/nouveau/nvif/mmu.c @@ -121,6 +121,7 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu) kind, argc); if (ret == 0) memcpy(mmu->kind, kind->data, kind->count); + mmu->kind_inv = kind->kind_inv; kfree(kind); } diff --git a/drivers/gpu/drm/nouveau/nvkm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/Kbuild index b53de9ba8c73..db3ade125fa9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/Kbuild @@ -1,5 +1,6 @@ # SPDX-License-Identifier: MIT include $(src)/nvkm/core/Kbuild +include $(src)/nvkm/nvfw/Kbuild include $(src)/nvkm/falcon/Kbuild include $(src)/nvkm/subdev/Kbuild include $(src)/nvkm/engine/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c index 092acdec2c39..8b25367917ca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c @@ -22,6 +22,40 @@ #include <core/device.h> #include <core/firmware.h> +int +nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *base, + const char *name, int ver, const struct firmware **pfw) +{ + char path[64]; + int ret; + + snprintf(path, sizeof(path), "%s%s", base, name); + ret = nvkm_firmware_get(subdev, path, ver, pfw); + if (ret < 0) + return ret; + + return 0; +} + +int +nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *base, + const char *name, int ver, struct nvkm_blob *blob) +{ + const struct firmware *fw; + int ret; + + ret = nvkm_firmware_load_name(subdev, base, name, ver, &fw); + if (ret == 0) { + blob->data = kmemdup(fw->data, fw->size, GFP_KERNEL); + blob->size = fw->size; + 
nvkm_firmware_put(fw); + if (!blob->data) + return -ENOMEM; + } + + return ret; +} + /** * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory * @subdev subdevice that will use that firmware @@ -32,9 +66,8 @@ * Firmware files released by NVIDIA will always follow this format. */ int -nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname, - int min_version, int max_version, - const struct firmware **fw) +nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver, + const struct firmware **fw) { struct nvkm_device *device = subdev->device; char f[64]; @@ -50,31 +83,21 @@ nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname, cname[i] = tolower(cname[i]); } - for (i = max_version; i >= min_version; i--) { - if (i != 0) - snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, i); - else - snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname); - - if (!firmware_request_nowarn(fw, f, device->dev)) { - nvkm_debug(subdev, "firmware \"%s\" loaded\n", f); - return i; - } + if (ver != 0) + snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, ver); + else + snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname); - nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f); + if (!firmware_request_nowarn(fw, f, device->dev)) { + nvkm_debug(subdev, "firmware \"%s\" loaded - %zu byte(s)\n", + f, (*fw)->size); + return 0; } - nvkm_error(subdev, "failed to load firmware \"%s\"", fwname); + nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f); return -ENOENT; } -int -nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, - const struct firmware **fw) -{ - return nvkm_firmware_get_version(subdev, fwname, 0, 0, fw); -} - /** * nvkm_firmware_put - release firmware loaded with nvkm_firmware_get */ diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c index 245990de1e90..79a8f9d305c5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c @@ -30,6 +30,7 @@ static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR]; const char * nvkm_subdev_name[NVKM_SUBDEV_NR] = { + [NVKM_SUBDEV_ACR ] = "acr", [NVKM_SUBDEV_BAR ] = "bar", [NVKM_SUBDEV_VBIOS ] = "bios", [NVKM_SUBDEV_BUS ] = "bus", @@ -50,7 +51,6 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = { [NVKM_SUBDEV_MXM ] = "mxm", [NVKM_SUBDEV_PCI ] = "pci", [NVKM_SUBDEV_PMU ] = "pmu", - [NVKM_SUBDEV_SECBOOT ] = "secboot", [NVKM_SUBDEV_THERM ] = "therm", [NVKM_SUBDEV_TIMER ] = "tmr", [NVKM_SUBDEV_TOP ] = "top", diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index c3c7159f3411..c7d700916eae 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -1987,6 +1987,8 @@ nv117_chipset = { .dma = gf119_dma_new, .fifo = gm107_fifo_new, .gr = gm107_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sw = gf100_sw_new, }; @@ -2027,6 +2029,7 @@ nv118_chipset = { static const struct nvkm_device_chip nv120_chipset = { .name = "GM200", + .acr = gm200_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2045,7 +2048,6 @@ nv120_chipset = { .pci = gk104_pci_new, .pmu = gm107_pmu_new, .therm = gm200_therm_new, - .secboot = gm200_secboot_new, .timer = gk20a_timer_new, .top = gk104_top_new, .volt = gk104_volt_new, @@ -2056,12 +2058,16 @@ nv120_chipset = { .dma = gf119_dma_new, .fifo = 
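
With the min/max version loop removed, nvkm_firmware_get() now formats exactly one candidate path of the form nvidia/<chip>/<name>[-<ver>].bin, lowercasing the chipset name first, and leaves version fallback to callers such as the nvkm_firmware_load() tables. A small sketch of just that formatting step; the chipset and firmware names used here are placeholders:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char cname[16] = "GP102";   /* device->chip->name in the real code */
        const char *fwname = "acr/ucode_load";  /* placeholder firmware name */
        char path[64];
        int ver = 1;

        /* Chipset names are lowercased before building the path. */
        for (size_t i = 0; i < strlen(cname); i++)
            cname[i] = tolower((unsigned char)cname[i]);

        if (ver != 0)
            snprintf(path, sizeof(path), "nvidia/%s/%s-%d.bin", cname, fwname, ver);
        else
            snprintf(path, sizeof(path), "nvidia/%s/%s.bin", cname, fwname);

        printf("%s\n", path);   /* nvidia/gp102/acr/ucode_load-1.bin */
        return 0;
    }
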
gm200_fifo_new, .gr = gm200_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, .sw = gf100_sw_new, }; static const struct nvkm_device_chip nv124_chipset = { .name = "GM204", + .acr = gm200_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2080,7 +2086,6 @@ nv124_chipset = { .pci = gk104_pci_new, .pmu = gm107_pmu_new, .therm = gm200_therm_new, - .secboot = gm200_secboot_new, .timer = gk20a_timer_new, .top = gk104_top_new, .volt = gk104_volt_new, @@ -2091,12 +2096,16 @@ nv124_chipset = { .dma = gf119_dma_new, .fifo = gm200_fifo_new, .gr = gm200_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, .sw = gf100_sw_new, }; static const struct nvkm_device_chip nv126_chipset = { .name = "GM206", + .acr = gm200_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2115,7 +2124,6 @@ nv126_chipset = { .pci = gk104_pci_new, .pmu = gm107_pmu_new, .therm = gm200_therm_new, - .secboot = gm200_secboot_new, .timer = gk20a_timer_new, .top = gk104_top_new, .volt = gk104_volt_new, @@ -2126,12 +2134,15 @@ nv126_chipset = { .dma = gf119_dma_new, .fifo = gm200_fifo_new, .gr = gm200_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sw = gf100_sw_new, }; static const struct nvkm_device_chip nv12b_chipset = { .name = "GM20B", + .acr = gm20b_acr_new, .bar = gm20b_bar_new, .bus = gf100_bus_new, .clk = gm20b_clk_new, @@ -2143,7 +2154,6 @@ nv12b_chipset = { .mc = gk20a_mc_new, .mmu = gm20b_mmu_new, .pmu = gm20b_pmu_new, - .secboot = gm20b_secboot_new, .timer = gk20a_timer_new, .top = gk104_top_new, .ce[2] = gm200_ce_new, @@ -2157,6 +2167,7 @@ nv12b_chipset = { static const struct nvkm_device_chip nv130_chipset = { .name = "GP100", + .acr = gm200_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2172,7 +2183,6 @@ nv130_chipset = { .mc = gp100_mc_new, .mmu = gp100_mmu_new, .therm = gp100_therm_new, - .secboot = gm200_secboot_new, .pci = gp100_pci_new, .pmu = gp100_pmu_new, .timer = gk20a_timer_new, @@ -2187,12 +2197,17 @@ nv130_chipset = { .disp = gp100_disp_new, .fifo = gp100_fifo_new, .gr = gp100_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, + .nvenc[2] = gm107_nvenc_new, .sw = gf100_sw_new, }; static const struct nvkm_device_chip nv132_chipset = { .name = "GP102", + .acr = gp102_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2208,7 +2223,6 @@ nv132_chipset = { .mc = gp100_mc_new, .mmu = gp100_mmu_new, .therm = gp100_therm_new, - .secboot = gp102_secboot_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2221,7 +2235,9 @@ nv132_chipset = { .dma = gf119_dma_new, .fifo = gp100_fifo_new, .gr = gp102_gr_new, - .nvdec[0] = gp102_nvdec_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, .sec2 = gp102_sec2_new, .sw = gf100_sw_new, }; @@ -2229,6 +2245,7 @@ nv132_chipset = { static const struct nvkm_device_chip nv134_chipset = { .name = "GP104", + .acr = gp102_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2244,7 +2261,6 @@ nv134_chipset = { .mc = gp100_mc_new, .mmu = gp100_mmu_new, .therm = gp100_therm_new, - .secboot = gp102_secboot_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2257,7 +2273,9 @@ nv134_chipset = { .dma = gf119_dma_new, .fifo = gp100_fifo_new, .gr = gp104_gr_new, - .nvdec[0] = 
gp102_nvdec_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, .sec2 = gp102_sec2_new, .sw = gf100_sw_new, }; @@ -2265,6 +2283,7 @@ nv134_chipset = { static const struct nvkm_device_chip nv136_chipset = { .name = "GP106", + .acr = gp102_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2280,7 +2299,6 @@ nv136_chipset = { .mc = gp100_mc_new, .mmu = gp100_mmu_new, .therm = gp100_therm_new, - .secboot = gp102_secboot_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2293,7 +2311,8 @@ nv136_chipset = { .dma = gf119_dma_new, .fifo = gp100_fifo_new, .gr = gp104_gr_new, - .nvdec[0] = gp102_nvdec_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sec2 = gp102_sec2_new, .sw = gf100_sw_new, }; @@ -2301,6 +2320,7 @@ nv136_chipset = { static const struct nvkm_device_chip nv137_chipset = { .name = "GP107", + .acr = gp102_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2316,7 +2336,6 @@ nv137_chipset = { .mc = gp100_mc_new, .mmu = gp100_mmu_new, .therm = gp100_therm_new, - .secboot = gp102_secboot_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2329,7 +2348,9 @@ nv137_chipset = { .dma = gf119_dma_new, .fifo = gp100_fifo_new, .gr = gp107_gr_new, - .nvdec[0] = gp102_nvdec_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, .sec2 = gp102_sec2_new, .sw = gf100_sw_new, }; @@ -2337,6 +2358,7 @@ nv137_chipset = { static const struct nvkm_device_chip nv138_chipset = { .name = "GP108", + .acr = gp108_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2352,7 +2374,6 @@ nv138_chipset = { .mc = gp100_mc_new, .mmu = gp100_mmu_new, .therm = gp100_therm_new, - .secboot = gp108_secboot_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2364,30 +2385,30 @@ nv138_chipset = { .disp = gp102_disp_new, .dma = gf119_dma_new, .fifo = gp100_fifo_new, - .gr = gp107_gr_new, - .nvdec[0] = gp102_nvdec_new, - .sec2 = gp102_sec2_new, + .gr = gp108_gr_new, + .nvdec[0] = gm107_nvdec_new, + .sec2 = gp108_sec2_new, .sw = gf100_sw_new, }; static const struct nvkm_device_chip nv13b_chipset = { .name = "GP10B", + .acr = gp10b_acr_new, .bar = gm20b_bar_new, .bus = gf100_bus_new, - .fault = gp100_fault_new, + .fault = gp10b_fault_new, .fb = gp10b_fb_new, .fuse = gm107_fuse_new, .ibus = gp10b_ibus_new, .imem = gk20a_instmem_new, - .ltc = gp102_ltc_new, + .ltc = gp10b_ltc_new, .mc = gp10b_mc_new, .mmu = gp10b_mmu_new, - .secboot = gp10b_secboot_new, - .pmu = gm20b_pmu_new, + .pmu = gp10b_pmu_new, .timer = gk20a_timer_new, .top = gk104_top_new, - .ce[2] = gp102_ce_new, + .ce[0] = gp100_ce_new, .dma = gf119_dma_new, .fifo = gp10b_fifo_new, .gr = gp10b_gr_new, @@ -2397,6 +2418,7 @@ nv13b_chipset = { static const struct nvkm_device_chip nv140_chipset = { .name = "GV100", + .acr = gp108_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2414,7 +2436,6 @@ nv140_chipset = { .mmu = gv100_mmu_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, - .secboot = gp108_secboot_new, .therm = gp100_therm_new, .timer = gk20a_timer_new, .top = gk104_top_new, @@ -2431,13 +2452,17 @@ nv140_chipset = { .dma = gv100_dma_new, .fifo = gv100_fifo_new, .gr = gv100_gr_new, - .nvdec[0] = gp102_nvdec_new, - .sec2 = gp102_sec2_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, + .nvenc[2] = gm107_nvenc_new, + .sec2 = 
gp108_sec2_new, }; static const struct nvkm_device_chip nv162_chipset = { .name = "TU102", + .acr = tu102_acr_new, .bar = tu102_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2466,13 +2491,16 @@ nv162_chipset = { .disp = tu102_disp_new, .dma = gv100_dma_new, .fifo = tu102_fifo_new, - .nvdec[0] = gp102_nvdec_new, + .gr = tu102_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sec2 = tu102_sec2_new, }; static const struct nvkm_device_chip nv164_chipset = { .name = "TU104", + .acr = tu102_acr_new, .bar = tu102_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2501,13 +2529,17 @@ nv164_chipset = { .disp = tu102_disp_new, .dma = gv100_dma_new, .fifo = tu102_fifo_new, - .nvdec[0] = gp102_nvdec_new, + .gr = tu102_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvdec[1] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sec2 = tu102_sec2_new, }; static const struct nvkm_device_chip nv166_chipset = { .name = "TU106", + .acr = tu102_acr_new, .bar = tu102_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2536,7 +2568,11 @@ nv166_chipset = { .disp = tu102_disp_new, .dma = gv100_dma_new, .fifo = tu102_fifo_new, - .nvdec[0] = gp102_nvdec_new, + .gr = tu102_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvdec[1] = gm107_nvdec_new, + .nvdec[2] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sec2 = tu102_sec2_new, }; @@ -2571,7 +2607,8 @@ nv167_chipset = { .disp = tu102_disp_new, .dma = gv100_dma_new, .fifo = tu102_fifo_new, - .nvdec[0] = gp102_nvdec_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sec2 = tu102_sec2_new, }; @@ -2606,7 +2643,8 @@ nv168_chipset = { .disp = tu102_disp_new, .dma = gv100_dma_new, .fifo = tu102_fifo_new, - .nvdec[0] = gp102_nvdec_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sec2 = tu102_sec2_new, }; @@ -2638,6 +2676,7 @@ nvkm_device_subdev(struct nvkm_device *device, int index) switch (index) { #define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break + _(ACR , device->acr , &device->acr->subdev); _(BAR , device->bar , &device->bar->subdev); _(VBIOS , device->bios , &device->bios->subdev); _(BUS , device->bus , &device->bus->subdev); @@ -2658,7 +2697,6 @@ nvkm_device_subdev(struct nvkm_device *device, int index) _(MXM , device->mxm , device->mxm); _(PCI , device->pci , &device->pci->subdev); _(PMU , device->pmu , &device->pmu->subdev); - _(SECBOOT , device->secboot , &device->secboot->subdev); _(THERM , device->therm , &device->therm->subdev); _(TIMER , device->timer , &device->timer->subdev); _(TOP , device->top , &device->top->subdev); @@ -2703,9 +2741,9 @@ nvkm_device_engine(struct nvkm_device *device, int index) _(MSPDEC , device->mspdec , device->mspdec); _(MSPPP , device->msppp , device->msppp); _(MSVLD , device->msvld , device->msvld); - _(NVENC0 , device->nvenc[0], device->nvenc[0]); - _(NVENC1 , device->nvenc[1], device->nvenc[1]); - _(NVENC2 , device->nvenc[2], device->nvenc[2]); + _(NVENC0 , device->nvenc[0], &device->nvenc[0]->engine); + _(NVENC1 , device->nvenc[1], &device->nvenc[1]->engine); + _(NVENC2 , device->nvenc[2], &device->nvenc[2]->engine); _(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine); _(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine); _(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine); @@ -3144,6 +3182,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, } \ break switch (i) { + _(NVKM_SUBDEV_ACR , acr); _(NVKM_SUBDEV_BAR , bar); _(NVKM_SUBDEV_VBIOS , bios); _(NVKM_SUBDEV_BUS , bus); @@ -3164,7 +3203,6 @@ nvkm_device_ctor(const struct 
nvkm_device_func *func, _(NVKM_SUBDEV_MXM , mxm); _(NVKM_SUBDEV_PCI , pci); _(NVKM_SUBDEV_PMU , pmu); - _(NVKM_SUBDEV_SECBOOT , secboot); _(NVKM_SUBDEV_THERM , therm); _(NVKM_SUBDEV_TIMER , timer); _(NVKM_SUBDEV_TOP , top); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h index d8be2f77ac66..54eab5e04230 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h @@ -3,6 +3,7 @@ #define __NVKM_DEVICE_PRIV_H__ #include <core/device.h> +#include <subdev/acr.h> #include <subdev/bar.h> #include <subdev/bios.h> #include <subdev/bus.h> @@ -27,7 +28,6 @@ #include <subdev/timer.h> #include <subdev/top.h> #include <subdev/volt.h> -#include <subdev/secboot.h> #include <engine/bsp.h> #include <engine/ce.h> diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index 0e372a190d3f..d0d52c1d4aee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -52,18 +52,18 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev) clk_set_rate(tdev->clk_pwr, 204000000); udelay(10); - reset_control_assert(tdev->rst); - udelay(10); - if (!tdev->pdev->dev.pm_domain) { + reset_control_assert(tdev->rst); + udelay(10); + ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D); if (ret) goto err_clamp; udelay(10); - } - reset_control_deassert(tdev->rst); - udelay(10); + reset_control_deassert(tdev->rst); + udelay(10); + } return 0; @@ -279,6 +279,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, struct nvkm_device **pdevice) { struct nvkm_device_tegra *tdev; + unsigned long rate; int ret; if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL))) @@ -307,6 +308,17 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, goto free; } + rate = clk_get_rate(tdev->clk); + if (rate == 0) { + ret = clk_set_rate(tdev->clk, ULONG_MAX); + if (ret < 0) + goto free; + + rate = clk_get_rate(tdev->clk); + + dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate); + } + if (func->require_ref_clk) tdev->clk_ref = devm_clk_get(&pdev->dev, "ref"); if (IS_ERR(tdev->clk_ref)) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c index 818d21bd28d3..3800aeb507d0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c @@ -365,7 +365,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps) * and it's better to have a failed modeset than that. */ for (cfg = nvkm_dp_rates; cfg->rate; cfg++) { - if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) { + if (cfg->nr <= outp_nr && cfg->bw <= outp_bw) { /* Try to respect sink limits too when selecting * lowest link configuration. 
*/ diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild index 73724a8cb861..558c86fd8e82 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild @@ -36,8 +36,10 @@ nvkm-y += nvkm/engine/gr/gp100.o nvkm-y += nvkm/engine/gr/gp102.o nvkm-y += nvkm/engine/gr/gp104.o nvkm-y += nvkm/engine/gr/gp107.o +nvkm-y += nvkm/engine/gr/gp108.o nvkm-y += nvkm/engine/gr/gp10b.o nvkm-y += nvkm/engine/gr/gv100.o +nvkm-y += nvkm/engine/gr/tu102.o nvkm-y += nvkm/engine/gr/ctxnv40.o nvkm-y += nvkm/engine/gr/ctxnv50.o @@ -60,3 +62,4 @@ nvkm-y += nvkm/engine/gr/ctxgp102.o nvkm-y += nvkm/engine/gr/ctxgp104.o nvkm-y += nvkm/engine/gr/ctxgp107.o nvkm-y += nvkm/engine/gr/ctxgv100.o +nvkm-y += nvkm/engine/gr/ctxtu102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c index 85f2d1e950e8..297915719bf2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c @@ -1324,10 +1324,8 @@ gf100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm) void gf100_grctx_generate_floorsweep(struct gf100_gr *gr) { - struct nvkm_device *device = gr->base.engine.subdev.device; const struct gf100_grctx_func *func = gr->func->grctx; - int gpc, sm, i, j; - u32 data; + int sm; for (sm = 0; sm < gr->sm_nr; sm++) { func->sm_id(gr, gr->sm[sm].gpc, gr->sm[sm].tpc, sm); @@ -1335,12 +1333,9 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr) func->tpc_nr(gr, gr->sm[sm].gpc); } - for (gpc = 0, i = 0; i < 4; i++) { - for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++) - data |= gr->tpc_nr[gpc] << (j * 4); - nvkm_wr32(device, 0x406028 + (i * 4), data); - nvkm_wr32(device, 0x405870 + (i * 4), data); - } + gf100_gr_init_num_tpc_per_gpc(gr, false, true); + if (!func->skip_pd_num_tpc_per_gpc) + gf100_gr_init_num_tpc_per_gpc(gr, true, false); if (func->r4060a8) func->r4060a8(gr); @@ -1374,7 +1369,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) nvkm_mc_unk260(device, 0); - if (!gr->fuc_sw_ctx) { + if (!gr->sw_ctx) { gf100_gr_mmio(gr, grctx->hub); gf100_gr_mmio(gr, grctx->gpc_0); gf100_gr_mmio(gr, grctx->zcull); @@ -1382,7 +1377,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) gf100_gr_mmio(gr, grctx->tpc); gf100_gr_mmio(gr, grctx->ppc); } else { - gf100_gr_mmio(gr, gr->fuc_sw_ctx); + gf100_gr_mmio(gr, gr->sw_ctx); } gf100_gr_wait_idle(gr); @@ -1401,8 +1396,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) gf100_gr_wait_idle(gr); if (grctx->r400088) grctx->r400088(gr, false); - if (gr->fuc_bundle) - gf100_gr_icmd(gr, gr->fuc_bundle); + if (gr->bundle) + gf100_gr_icmd(gr, gr->bundle); else gf100_gr_icmd(gr, grctx->icmd); if (grctx->sw_veid_bundle_init) @@ -1411,8 +1406,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) nvkm_wr32(device, 0x404154, idle_timeout); - if (gr->fuc_method) - gf100_gr_mthd(gr, gr->fuc_method); + if (gr->method) + gf100_gr_mthd(gr, gr->method); else gf100_gr_mthd(gr, grctx->mthd); nvkm_mc_unk260(device, 1); @@ -1431,6 +1426,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) grctx->r419a3c(gr); if (grctx->r408840) grctx->r408840(gr); + if (grctx->r419c0c) + grctx->r419c0c(gr); } #define CB_RESERVED 0x80000 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h index 
478b4723d0f9..32bbddc0993e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h @@ -57,6 +57,7 @@ struct gf100_grctx_func { /* floorsweeping */ void (*sm_id)(struct gf100_gr *, int gpc, int tpc, int sm); void (*tpc_nr)(struct gf100_gr *, int gpc); + bool skip_pd_num_tpc_per_gpc; void (*r4060a8)(struct gf100_gr *); void (*rop_mapping)(struct gf100_gr *); void (*alpha_beta_tables)(struct gf100_gr *); @@ -76,6 +77,7 @@ struct gf100_grctx_func { void (*r418e94)(struct gf100_gr *); void (*r419a3c)(struct gf100_gr *); void (*r408840)(struct gf100_gr *); + void (*r419c0c)(struct gf100_gr *); }; extern const struct gf100_grctx_func gf100_grctx; @@ -153,6 +155,14 @@ extern const struct gf100_grctx_func gp107_grctx; extern const struct gf100_grctx_func gv100_grctx; +extern const struct gf100_grctx_func tu102_grctx; +void gv100_grctx_unkn88c(struct gf100_gr *, bool); +void gv100_grctx_generate_unkn(struct gf100_gr *); +extern const struct gf100_gr_init gv100_grctx_init_sw_veid_bundle_init_0[]; +void gv100_grctx_generate_attrib(struct gf100_grctx *); +void gv100_grctx_generate_rop_mapping(struct gf100_gr *); +void gv100_grctx_generate_r400088(struct gf100_gr *, bool); + /* context init value lists */ extern const struct gf100_gr_pack gf100_grctx_pack_icmd[]; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c index 896d473dcc0f..c0d36bc601f9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c @@ -32,7 +32,7 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) u32 idle_timeout; int i; - gf100_gr_mmio(gr, gr->fuc_sw_ctx); + gf100_gr_mmio(gr, gr->sw_ctx); gf100_gr_wait_idle(gr); @@ -56,10 +56,10 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) nvkm_wr32(device, 0x404154, idle_timeout); gf100_gr_wait_idle(gr); - gf100_gr_mthd(gr, gr->fuc_method); + gf100_gr_mthd(gr, gr->method); gf100_gr_wait_idle(gr); - gf100_gr_icmd(gr, gr->fuc_bundle); + gf100_gr_icmd(gr, gr->bundle); grctx->pagepool(info); grctx->bundle(info); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c index a1d9e114ebeb..6b92f8aa18a3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c @@ -29,7 +29,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) u32 idle_timeout; int i, tmp; - gf100_gr_mmio(gr, gr->fuc_sw_ctx); + gf100_gr_mmio(gr, gr->sw_ctx); gf100_gr_wait_idle(gr); @@ -59,10 +59,10 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) nvkm_wr32(device, 0x404154, idle_timeout); gf100_gr_wait_idle(gr); - gf100_gr_mthd(gr, gr->fuc_method); + gf100_gr_mthd(gr, gr->method); gf100_gr_wait_idle(gr); - gf100_gr_icmd(gr, gr->fuc_bundle); + gf100_gr_icmd(gr, gr->bundle); grctx->pagepool(info); grctx->bundle(info); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c index 0990765ef191..39553d55d3f3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c @@ -25,7 +25,7 @@ * PGRAPH context implementation ******************************************************************************/ -static const struct gf100_gr_init +const struct gf100_gr_init gv100_grctx_init_sw_veid_bundle_init_0[] = { { 0x00001000, 64, 
0x00100000, 0x00000008 }, { 0x00000941, 64, 0x00100000, 0x00000000 }, @@ -58,7 +58,7 @@ gv100_grctx_pack_sw_veid_bundle_init[] = { {} }; -static void +void gv100_grctx_generate_attrib(struct gf100_grctx *info) { struct gf100_gr *gr = info->gr; @@ -67,14 +67,14 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info) const u32 attrib = grctx->attrib_nr; const u32 gfxp = grctx->gfxp_nr; const int s = 12; - const int max_batches = 0xffff; u32 size = grctx->alpha_nr_max * gr->tpc_total; u32 ao = 0; u32 bo = ao + size; int gpc, ppc, b, n = 0; - size += grctx->gfxp_nr * gr->tpc_total; - size = ((size * 0x20) + 128) & ~127; + for (gpc = 0; gpc < gr->gpc_nr; gpc++) + size += grctx->gfxp_nr * gr->ppc_nr[gpc] * gr->ppc_tpc_max; + size = ((size * 0x20) + 127) & ~127; b = mmio_vram(info, size, (1 << s), false); mmio_refn(info, 0x418810, 0x80000000, s, b); @@ -84,13 +84,12 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info) mmio_wr32(info, 0x419e04, 0x80000000 | size >> 7); mmio_wr32(info, 0x405830, attrib); mmio_wr32(info, 0x40585c, alpha); - mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); for (gpc = 0; gpc < gr->gpc_nr; gpc++) { for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) { const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc]; - const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc]; - const u32 gs = gfxp * gr->ppc_tpc_nr[gpc][ppc]; + const u32 bs = attrib * gr->ppc_tpc_max; + const u32 gs = gfxp * gr->ppc_tpc_max; const u32 u = 0x418ea0 + (n * 0x04); const u32 o = PPC_UNIT(gpc, ppc, 0); if (!(gr->ppc_mask[gpc] & (1 << ppc))) @@ -110,7 +109,7 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info) mmio_wr32(info, 0x41befc, 0x00000100); } -static void +void gv100_grctx_generate_rop_mapping(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; @@ -147,7 +146,7 @@ gv100_grctx_generate_rop_mapping(struct gf100_gr *gr) gr->screen_tile_row_offset); } -static void +void gv100_grctx_generate_r400088(struct gf100_gr *gr, bool on) { struct nvkm_device *device = gr->base.engine.subdev.device; @@ -163,7 +162,7 @@ gv100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm) nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm); } -static void +void gv100_grctx_generate_unkn(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; @@ -174,7 +173,7 @@ gv100_grctx_generate_unkn(struct gf100_gr *gr) nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008); } -static void +void gv100_grctx_unkn88c(struct gf100_gr *gr, bool on) { struct nvkm_device *device = gr->base.engine.subdev.device; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c new file mode 100644 index 000000000000..2299ca07d04a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c @@ -0,0 +1,95 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "ctxgf100.h" + +static void +tu102_grctx_generate_r419c0c(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + nvkm_mask(device, 0x419c0c, 0x80000000, 0x80000000); + nvkm_mask(device, 0x40584c, 0x00000008, 0x00000000); + nvkm_mask(device, 0x400080, 0x00000000, 0x00000000); +} + +static void +tu102_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x608), sm); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm); +} + +static const struct gf100_gr_init +tu102_grctx_init_unknown_bundle_init_0[] = { + { 0x00001000, 1, 0x00000001, 0x00000004 }, + { 0x00002020, 64, 0x00000001, 0x00000000 }, + { 0x0001e100, 1, 0x00000001, 0x00000001 }, + {} +}; + +static const struct gf100_gr_pack +tu102_grctx_pack_sw_veid_bundle_init[] = { + { gv100_grctx_init_sw_veid_bundle_init_0 }, + { tu102_grctx_init_unknown_bundle_init_0 }, + {} +}; + +static void +tu102_grctx_generate_attrib(struct gf100_grctx *info) +{ + const u64 size = 0x80000; /*XXX: educated guess */ + const int s = 8; + const int b = mmio_vram(info, size, (1 << s), true); + + gv100_grctx_generate_attrib(info); + + mmio_refn(info, 0x408070, 0x00000000, s, b); + mmio_wr32(info, 0x408074, size >> s); /*XXX: guess */ + mmio_refn(info, 0x419034, 0x00000000, s, b); + mmio_wr32(info, 0x408078, 0x00000000); +} + +const struct gf100_grctx_func +tu102_grctx = { + .unkn88c = gv100_grctx_unkn88c, + .main = gf100_grctx_generate_main, + .unkn = gv100_grctx_generate_unkn, + .sw_veid_bundle_init = tu102_grctx_pack_sw_veid_bundle_init, + .bundle = gm107_grctx_generate_bundle, + .bundle_size = 0x3000, + .bundle_min_gpm_fifo_depth = 0x180, + .bundle_token_limit = 0xa80, + .pagepool = gp100_grctx_generate_pagepool, + .pagepool_size = 0x20000, + .attrib = tu102_grctx_generate_attrib, + .attrib_nr_max = 0x800, + .attrib_nr = 0x700, + .alpha_nr_max = 0xc00, + .alpha_nr = 0x800, + .gfxp_nr = 0xfa8, + .sm_id = tu102_grctx_generate_sm_id, + .skip_pd_num_tpc_per_gpc = true, + .rop_mapping = gv100_grctx_generate_rop_mapping, + .r406500 = gm200_grctx_generate_r406500, + .r400088 = gv100_grctx_generate_r400088, + .r419c0c = tu102_grctx_generate_r419c0c, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h index c24f35ad56a6..ae2d5b6891cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h @@ -441,7 +441,7 @@ static uint32_t gk208_grhub_code[] = { 0x020014fe, 0x12004002, 0xbd0002f6, - 0x05c94104, + 0x05ca4104, 0xbd0010fe, 0x07004024, 0xbd0002f6, @@ -460,423 +460,423 @@ static uint32_t gk208_grhub_code[] = { 0x01039204, 0x03090080, 0xbd0003f6, - 0x87044204, - 0xf6040040, - 0x04bd0002, - 0x00400402, - 0x0002f603, - 0x31f404bd, - 0x96048e10, - 0x00657e40, - 0xc7feb200, - 0x01b590f1, - 0x1ff4f003, - 0x01020fb5, - 0x041fbb01, - 0x800112b6, - 0xf6010300, - 
0x04bd0001, - 0x01040080, + 0x87048204, + 0x04004000, + 0xbd0002f6, + 0x40040204, + 0x02f60300, + 0xf404bd00, + 0x048e1031, + 0x657e4096, + 0xfeb20000, + 0xb590f1c7, + 0xf4f00301, + 0x020fb51f, + 0x1fbb0101, + 0x0112b604, + 0x01030080, 0xbd0001f6, - 0x01004104, - 0xac7e020f, - 0xbb7e0006, - 0x100f0006, - 0x0006fd7e, - 0x98000e98, - 0x207e010f, - 0x14950001, - 0xc0008008, - 0x0004f601, - 0x008004bd, - 0x04f601c1, - 0xb704bd00, - 0xbb130030, - 0xf5b6001f, - 0xd3008002, - 0x000ff601, - 0x15b604bd, - 0x0110b608, - 0xb20814b6, - 0x02687e1f, - 0x001fbb00, - 0x84020398, -/* 0x041f: init_gpc */ - 0xb8502000, - 0x0008044e, - 0x8f7e1fb2, + 0x04008004, + 0x0001f601, + 0x004104bd, + 0x7e020f01, + 0x7e0006ad, + 0x0f0006bc, + 0x06fe7e10, + 0x000e9800, + 0x7e010f98, + 0x95000120, + 0x00800814, + 0x04f601c0, + 0x8004bd00, + 0xf601c100, + 0x04bd0004, + 0x130030b7, + 0xb6001fbb, + 0x008002f5, + 0x0ff601d3, + 0xb604bd00, + 0x10b60815, + 0x0814b601, + 0x687e1fb2, + 0x1fbb0002, + 0x02039800, + 0x50200084, +/* 0x0420: init_gpc */ + 0x08044eb8, + 0x7e1fb200, + 0xb800008f, + 0x00010c4e, + 0x8f7ef4bd, 0x4eb80000, - 0xbd00010c, - 0x008f7ef4, - 0x044eb800, - 0x8f7e0001, + 0x7e000104, + 0xb800008f, + 0x0001004e, + 0x8f7e020f, 0x4eb80000, - 0x0f000100, - 0x008f7e02, - 0x004eb800, -/* 0x044e: init_gpc_wait */ +/* 0x044f: init_gpc_wait */ + 0x7e000800, + 0xc8000065, + 0x0bf41fff, + 0x044eb8f9, 0x657e0008, - 0xffc80000, - 0xf90bf41f, - 0x08044eb8, - 0x00657e00, - 0x001fbb00, - 0x800040b7, - 0xf40132b6, - 0x000fb41b, - 0x0006fd7e, - 0xac7e000f, - 0x00800006, - 0x01f60201, - 0xbd04bd00, - 0x1f19f014, - 0x02300080, - 0xbd0001f6, -/* 0x0491: wait */ - 0x0028f404, -/* 0x0497: main */ - 0x0d0031f4, - 0x00377e10, - 0xf401f400, - 0x4001e4b1, - 0x00c71bf5, - 0x99f094bd, - 0x37008004, - 0x0009f602, - 0x008104bd, - 0x11cf02c0, - 0xc1008200, - 0x0022cf02, - 0xf41f13c8, - 0x23c8770b, - 0x550bf41f, - 0x12b220f9, - 0x99f094bd, - 0x37008007, - 0x0009f602, - 0x32f404bd, - 0x0231f401, - 0x0008807e, - 0x99f094bd, - 0x17008007, - 0x0009f602, - 0x20fc04bd, - 0x99f094bd, - 0x37008006, - 0x0009f602, - 0x31f404bd, - 0x08807e01, + 0x1fbb0000, + 0x0040b700, + 0x0132b680, + 0x0fb41bf4, + 0x06fe7e00, + 0x7e000f00, + 0x800006ad, + 0xf6020100, + 0x04bd0001, + 0x19f014bd, + 0x3000801f, + 0x0001f602, +/* 0x0492: wait */ + 0x28f404bd, + 0x0031f400, +/* 0x0498: main */ + 0x377e100d, + 0x01f40000, + 0x01e4b1f4, + 0xc71bf540, 0xf094bd00, - 0x00800699, + 0x00800499, + 0x09f60237, + 0x8104bd00, + 0xcf02c000, + 0x00820011, + 0x22cf02c1, + 0x1f13c800, + 0xc8770bf4, + 0x0bf41f23, + 0xb220f955, + 0xf094bd12, + 0x00800799, + 0x09f60237, + 0xf404bd00, + 0x31f40132, + 0x08817e02, + 0xf094bd00, + 0x00800799, 0x09f60217, + 0xfc04bd00, + 0xf094bd20, + 0x00800699, + 0x09f60237, 0xf404bd00, -/* 0x0522: chsw_prev_no_next */ - 0x20f92f0e, - 0x32f412b2, - 0x0232f401, - 0x0008807e, - 0x008020fc, - 0x02f602c0, + 0x817e0131, + 0x94bd0008, + 0x800699f0, + 0xf6021700, + 0x04bd0009, +/* 0x0523: chsw_prev_no_next */ + 0xf92f0ef4, + 0xf412b220, + 0x32f40132, + 0x08817e02, + 0x8020fc00, + 0xf602c000, + 0x04bd0002, +/* 0x053f: chsw_no_prev */ + 0xc8130ef4, + 0x0bf41f23, + 0x0131f40d, + 0x7e0232f4, +/* 0x054f: chsw_done */ + 0x02000881, + 0xc3008001, + 0x0002f602, + 0x94bd04bd, + 0x800499f0, + 0xf6021700, + 0x04bd0009, + 0xff300ef5, +/* 0x056c: main_not_ctx_switch */ + 0xf401e4b0, + 0xf2b20c1b, + 0x0008217e, +/* 0x057b: main_not_ctx_chan */ + 0xb0400ef4, + 0x1bf402e4, + 0xf094bd2c, + 0x00800799, + 0x09f60237, 0xf404bd00, -/* 0x053e: chsw_no_prev */ - 0x23c8130e, - 0x0d0bf41f, - 
0xf40131f4, - 0x807e0232, -/* 0x054e: chsw_done */ - 0x01020008, - 0x02c30080, - 0xbd0002f6, - 0xf094bd04, - 0x00800499, + 0x32f40132, + 0x08817e02, + 0xf094bd00, + 0x00800799, 0x09f60217, - 0xf504bd00, -/* 0x056b: main_not_ctx_switch */ - 0xb0ff300e, - 0x1bf401e4, - 0x7ef2b20c, - 0xf4000820, -/* 0x057a: main_not_ctx_chan */ - 0xe4b0400e, - 0x2c1bf402, - 0x99f094bd, - 0x37008007, - 0x0009f602, - 0x32f404bd, - 0x0232f401, - 0x0008807e, - 0x99f094bd, - 0x17008007, - 0x0009f602, - 0x0ef404bd, -/* 0x05a9: main_not_ctx_save */ - 0x10ef9411, - 0x7e01f5f0, - 0xf50002f8, -/* 0x05b7: main_done */ - 0xbdfee40e, - 0x1f29f024, - 0x02300080, - 0xbd0002f6, - 0xd20ef504, -/* 0x05c9: ih */ - 0xf900f9fe, - 0x0188fe80, - 0x90f980f9, - 0xb0f9a0f9, - 0xe0f9d0f9, - 0x04bdf0f9, - 0xcf02004a, - 0xabc400aa, - 0x230bf404, - 0x004e100d, - 0x00eecf1a, - 0xcf19004f, - 0x047e00ff, - 0xb0b70000, - 0x010e0400, - 0xf61d0040, - 0x04bd000e, -/* 0x060c: ih_no_fifo */ - 0x0100abe4, - 0x0d0c0bf4, - 0x40014e10, - 0x0000047e, -/* 0x061c: ih_no_ctxsw */ - 0x0400abe4, - 0x8e560bf4, - 0x7e400708, + 0xf404bd00, +/* 0x05aa: main_not_ctx_save */ + 0xef94110e, + 0x01f5f010, + 0x0002f87e, + 0xfee40ef5, +/* 0x05b8: main_done */ + 0x29f024bd, + 0x3000801f, + 0x0002f602, + 0x0ef504bd, +/* 0x05ca: ih */ + 0x00f9fed2, + 0x88fe80f9, + 0xf980f901, + 0xf9a0f990, + 0xf9d0f9b0, + 0xbdf0f9e0, + 0x02004a04, + 0xc400aacf, + 0x0bf404ab, + 0x4e100d23, + 0xeecf1a00, + 0x19004f00, + 0x7e00ffcf, + 0xb7000004, + 0x0e0400b0, + 0x1d004001, + 0xbd000ef6, +/* 0x060d: ih_no_fifo */ + 0x00abe404, + 0x0c0bf401, + 0x014e100d, + 0x00047e40, +/* 0x061d: ih_no_ctxsw */ + 0x00abe400, + 0x560bf404, + 0x4007088e, + 0x0000657e, + 0x0080ffb2, + 0x0ff60204, + 0x8e04bd00, + 0x7e400704, 0xb2000065, - 0x040080ff, + 0x030080ff, 0x000ff602, - 0x048e04bd, - 0x657e4007, - 0xffb20000, - 0x02030080, - 0xbd000ff6, - 0x50fec704, - 0x8f02ee94, - 0xbb400700, - 0x657e00ef, - 0x00800000, - 0x0ff60202, + 0xfec704bd, + 0x02ee9450, + 0x4007008f, + 0x7e00efbb, + 0x80000065, + 0xf6020200, + 0x04bd000f, + 0xf87e030f, + 0x004b0002, + 0x8ebfb201, + 0x7e400144, +/* 0x0677: ih_no_fwmthd */ + 0x4b00008f, + 0xb0bd0504, + 0xf4b4abff, + 0x00800c0b, + 0x0bf60307, +/* 0x068b: ih_no_other */ + 0x4004bd00, + 0x0af60100, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x00fc80fc, + 0xf80032f4, +/* 0x06ad: ctx_4170s */ + 0x10f5f001, + 0x708effb2, + 0x8f7e4041, + 0x00f80000, +/* 0x06bc: ctx_4170w */ + 0x4041708e, + 0x0000657e, + 0xf4f0ffb2, + 0xf31bf410, +/* 0x06ce: ctx_redswitch */ + 0x004e00f8, + 0x40e5f002, + 0xf020e5f0, + 0x008010e5, + 0x0ef60185, 0x0f04bd00, - 0x02f87e03, - 0x01004b00, - 0x448ebfb2, - 0x8f7e4001, -/* 0x0676: ih_no_fwmthd */ - 0x044b0000, - 0xffb0bd05, - 0x0bf4b4ab, - 0x0700800c, - 0x000bf603, -/* 0x068a: ih_no_other */ - 0x004004bd, - 0x000af601, - 0xf0fc04bd, - 0xd0fce0fc, - 0xa0fcb0fc, - 0x80fc90fc, - 0xfc0088fe, - 0xf400fc80, - 0x01f80032, -/* 0x06ac: ctx_4170s */ - 0xb210f5f0, - 0x41708eff, +/* 0x06e5: ctx_redswitch_delay */ + 0x01f2b608, + 0xf1fd1bf4, + 0xf10400e5, + 0x800100e5, + 0xf6018500, + 0x04bd000e, +/* 0x06fe: ctx_86c */ + 0x008000f8, + 0x0ff60223, + 0xb204bd00, + 0x8a148eff, 0x008f7e40, -/* 0x06bb: ctx_4170w */ - 0x8e00f800, - 0x7e404170, - 0xb2000065, - 0x10f4f0ff, - 0xf8f31bf4, -/* 0x06cd: ctx_redswitch */ - 0x02004e00, - 0xf040e5f0, - 0xe5f020e5, - 0x85008010, - 0x000ef601, - 0x080f04bd, -/* 0x06e4: ctx_redswitch_delay */ - 0xf401f2b6, - 0xe5f1fd1b, - 0xe5f10400, - 0x00800100, - 0x0ef60185, - 0xf804bd00, -/* 0x06fd: ctx_86c */ - 0x23008000, + 
0x8effb200, + 0x7e41a88c, + 0xf800008f, +/* 0x071d: ctx_mem */ + 0x84008000, 0x000ff602, - 0xffb204bd, - 0x408a148e, - 0x00008f7e, - 0x8c8effb2, - 0x8f7e41a8, - 0x00f80000, -/* 0x071c: ctx_mem */ - 0x02840080, - 0xbd000ff6, -/* 0x0725: ctx_mem_wait */ - 0x84008f04, - 0x00ffcf02, - 0xf405fffd, - 0x00f8f61b, -/* 0x0734: ctx_load */ - 0x99f094bd, - 0x37008005, - 0x0009f602, - 0x0c0a04bd, - 0x0000b87e, - 0x0080f4bd, - 0x0ff60289, - 0x8004bd00, - 0xf602c100, - 0x04bd0002, - 0x02830080, +/* 0x0726: ctx_mem_wait */ + 0x008f04bd, + 0xffcf0284, + 0x05fffd00, + 0xf8f61bf4, +/* 0x0735: ctx_load */ + 0xf094bd00, + 0x00800599, + 0x09f60237, + 0x0a04bd00, + 0x00b87e0c, + 0x80f4bd00, + 0xf6028900, + 0x04bd000f, + 0x02c10080, 0xbd0002f6, - 0x7e070f04, - 0x8000071c, - 0xf602c000, - 0x04bd0002, - 0xf0000bfe, - 0x24b61f2a, - 0x0220b604, - 0x99f094bd, - 0x37008008, - 0x0009f602, - 0x008004bd, - 0x02f60281, - 0xd204bd00, - 0x80000000, - 0x800225f0, - 0xf6028800, - 0x04bd0002, - 0x00421001, - 0x0223f002, - 0xf80512fa, - 0xf094bd03, + 0x83008004, + 0x0002f602, + 0x070f04bd, + 0x00071d7e, + 0x02c00080, + 0xbd0002f6, + 0x000bfe04, + 0xb61f2af0, + 0x20b60424, + 0xf094bd02, 0x00800899, - 0x09f60217, - 0x9804bd00, - 0x14b68101, - 0x80029818, - 0xfd0825b6, - 0x01b50512, - 0xf094bd16, - 0x00800999, 0x09f60237, 0x8004bd00, 0xf6028100, - 0x04bd0001, - 0x00800102, - 0x02f60288, - 0x4104bd00, - 0x13f00100, - 0x0501fa06, + 0x04bd0002, + 0x000000d2, + 0x0225f080, + 0x02880080, + 0xbd0002f6, + 0x42100104, + 0x23f00200, + 0x0512fa02, 0x94bd03f8, - 0x800999f0, + 0x800899f0, 0xf6021700, 0x04bd0009, - 0x99f094bd, - 0x17008005, - 0x0009f602, - 0x00f804bd, -/* 0x0820: ctx_chan */ - 0x0007347e, - 0xb87e0c0a, - 0x050f0000, - 0x00071c7e, -/* 0x0832: ctx_mmio_exec */ - 0x039800f8, - 0x81008041, - 0x0003f602, - 0x34bd04bd, -/* 0x0840: ctx_mmio_loop */ - 0xf4ff34c4, - 0x00450e1b, - 0x0653f002, - 0xf80535fa, -/* 0x0851: ctx_mmio_pull */ - 0x804e9803, - 0x7e814f98, - 0xb600008f, - 0x12b60830, - 0xdf1bf401, -/* 0x0864: ctx_mmio_done */ - 0x80160398, - 0xf6028100, - 0x04bd0003, - 0x414000b5, - 0x13f00100, - 0x0601fa06, - 0x00f803f8, -/* 0x0880: ctx_xfer */ - 0x0080040e, - 0x0ef60302, -/* 0x088b: ctx_xfer_idle */ - 0x8e04bd00, - 0xcf030000, - 0xe4f100ee, - 0x1bf42000, - 0x0611f4f5, -/* 0x089f: ctx_xfer_pre */ - 0x0f0c02f4, - 0x06fd7e10, - 0x1b11f400, -/* 0x08a8: ctx_xfer_pre_load */ - 0xac7e020f, - 0xbb7e0006, - 0xcd7e0006, - 0xf4bd0006, - 0x0006ac7e, - 0x0007347e, -/* 0x08c0: ctx_xfer_exec */ - 0xbd160198, - 0x05008024, - 0x0002f601, - 0x1fb204bd, - 0x41a5008e, - 0x00008f7e, - 0xf001fcf0, - 0x24b6022c, - 0x05f2fd01, - 0x048effb2, - 0x8f7e41a5, - 0x167e0000, - 0x24bd0002, - 0x0247fc80, - 0xbd0002f6, - 0x012cf004, - 0x800320b6, - 0xf6024afc, + 0xb6810198, + 0x02981814, + 0x0825b680, + 0xb50512fd, + 0x94bd1601, + 0x800999f0, + 0xf6023700, + 0x04bd0009, + 0x02810080, + 0xbd0001f6, + 0x80010204, + 0xf6028800, 0x04bd0002, - 0xf001acf0, - 0x000b06a5, - 0x98000c98, - 0x000e010d, - 0x00013d7e, - 0xec7e080a, - 0x0a7e0000, - 0x01f40002, - 0x7e0c0a12, + 0xf0010041, + 0x01fa0613, + 0xbd03f805, + 0x0999f094, + 0x02170080, + 0xbd0009f6, + 0xf094bd04, + 0x00800599, + 0x09f60217, + 0xf804bd00, +/* 0x0821: ctx_chan */ + 0x07357e00, + 0x7e0c0a00, 0x0f0000b8, - 0x071c7e05, - 0x2d02f400, -/* 0x093c: ctx_xfer_post */ - 0xac7e020f, - 0xf4bd0006, - 0x0006fd7e, - 0x0002277e, - 0x0006bb7e, - 0xac7ef4bd, + 0x071d7e05, +/* 0x0833: ctx_mmio_exec */ + 0x9800f800, + 0x00804103, + 0x03f60281, + 0xbd04bd00, +/* 0x0841: ctx_mmio_loop */ + 0xff34c434, + 0x450e1bf4, + 
0x53f00200, + 0x0535fa06, +/* 0x0852: ctx_mmio_pull */ + 0x4e9803f8, + 0x814f9880, + 0x00008f7e, + 0xb60830b6, + 0x1bf40112, +/* 0x0865: ctx_mmio_done */ + 0x160398df, + 0x02810080, + 0xbd0003f6, + 0x4000b504, + 0xf0010041, + 0x01fa0613, + 0xf803f806, +/* 0x0881: ctx_xfer */ + 0x80040e00, + 0xf6030200, + 0x04bd000e, +/* 0x088c: ctx_xfer_idle */ + 0x0300008e, + 0xf100eecf, + 0xf42000e4, + 0x11f4f51b, + 0x0c02f406, +/* 0x08a0: ctx_xfer_pre */ + 0xfe7e100f, 0x11f40006, - 0x40019810, - 0xf40511fd, - 0x327e070b, -/* 0x0966: ctx_xfer_no_post_mmio */ -/* 0x0966: ctx_xfer_done */ - 0x00f80008, +/* 0x08a9: ctx_xfer_pre_load */ + 0x7e020f1b, + 0x7e0006ad, + 0x7e0006bc, + 0xbd0006ce, + 0x06ad7ef4, + 0x07357e00, +/* 0x08c1: ctx_xfer_exec */ + 0x16019800, + 0x008024bd, + 0x02f60105, + 0xb204bd00, + 0xa5008e1f, + 0x008f7e41, + 0x01fcf000, + 0xb6022cf0, + 0xf2fd0124, + 0x8effb205, + 0x7e41a504, + 0x7e00008f, + 0xbd000216, + 0x47fc8024, + 0x0002f602, + 0x2cf004bd, + 0x0320b601, + 0x024afc80, + 0xbd0002f6, + 0x01acf004, + 0x0b06a5f0, + 0x000c9800, + 0x0e010d98, + 0x013d7e00, + 0x7e080a00, + 0x7e0000ec, + 0xf400020a, + 0x0c0a1201, + 0x0000b87e, + 0x1d7e050f, + 0x02f40007, +/* 0x093d: ctx_xfer_post */ + 0x7e020f2d, + 0xbd0006ad, + 0x06fe7ef4, + 0x02277e00, + 0x06bc7e00, + 0x7ef4bd00, + 0xf40006ad, + 0x01981011, + 0x0511fd40, + 0x7e070bf4, +/* 0x0967: ctx_xfer_no_post_mmio */ +/* 0x0967: ctx_xfer_done */ + 0xf8000833, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h index 649a442b4390..449dae753203 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h @@ -441,7 +441,7 @@ static uint32_t gm107_grhub_code[] = { 0x020014fe, 0x12004002, 0xbd0002f6, - 0x05c94104, + 0x05ca4104, 0xbd0010fe, 0x07004024, 0xbd0002f6, @@ -460,423 +460,423 @@ static uint32_t gm107_grhub_code[] = { 0x01039204, 0x03090080, 0xbd0003f6, - 0x87044204, - 0xf6040040, - 0x04bd0002, - 0x00400402, - 0x0002f603, - 0x31f404bd, - 0x96048e10, - 0x00657e40, - 0xc7feb200, - 0x01b590f1, - 0x1ff4f003, - 0x01020fb5, - 0x041fbb01, - 0x800112b6, - 0xf6010300, - 0x04bd0001, - 0x01040080, + 0x87048204, + 0x04004000, + 0xbd0002f6, + 0x40040204, + 0x02f60300, + 0xf404bd00, + 0x048e1031, + 0x657e4096, + 0xfeb20000, + 0xb590f1c7, + 0xf4f00301, + 0x020fb51f, + 0x1fbb0101, + 0x0112b604, + 0x01030080, 0xbd0001f6, - 0x01004104, - 0xac7e020f, - 0xbb7e0006, - 0x100f0006, - 0x0006fd7e, - 0x98000e98, - 0x207e010f, - 0x14950001, - 0xc0008008, - 0x0004f601, - 0x008004bd, - 0x04f601c1, - 0xb704bd00, - 0xbb130030, - 0xf5b6001f, - 0xd3008002, - 0x000ff601, - 0x15b604bd, - 0x0110b608, - 0xb20814b6, - 0x02687e1f, - 0x001fbb00, - 0x84020398, -/* 0x041f: init_gpc */ - 0xb8502000, - 0x0008044e, - 0x8f7e1fb2, + 0x04008004, + 0x0001f601, + 0x004104bd, + 0x7e020f01, + 0x7e0006ad, + 0x0f0006bc, + 0x06fe7e10, + 0x000e9800, + 0x7e010f98, + 0x95000120, + 0x00800814, + 0x04f601c0, + 0x8004bd00, + 0xf601c100, + 0x04bd0004, + 0x130030b7, + 0xb6001fbb, + 0x008002f5, + 0x0ff601d3, + 0xb604bd00, + 0x10b60815, + 0x0814b601, + 0x687e1fb2, + 0x1fbb0002, + 0x02039800, + 0x50200084, +/* 0x0420: init_gpc */ + 0x08044eb8, + 0x7e1fb200, + 0xb800008f, + 0x00010c4e, + 0x8f7ef4bd, 0x4eb80000, - 0xbd00010c, - 0x008f7ef4, - 0x044eb800, - 0x8f7e0001, + 0x7e000104, + 0xb800008f, + 0x0001004e, + 0x8f7e020f, 0x4eb80000, - 0x0f000100, - 0x008f7e02, - 0x004eb800, -/* 0x044e: init_gpc_wait */ +/* 0x044f: 
init_gpc_wait */ + 0x7e000800, + 0xc8000065, + 0x0bf41fff, + 0x044eb8f9, 0x657e0008, - 0xffc80000, - 0xf90bf41f, - 0x08044eb8, - 0x00657e00, - 0x001fbb00, - 0x800040b7, - 0xf40132b6, - 0x000fb41b, - 0x0006fd7e, - 0xac7e000f, - 0x00800006, - 0x01f60201, - 0xbd04bd00, - 0x1f19f014, - 0x02300080, - 0xbd0001f6, -/* 0x0491: wait */ - 0x0028f404, -/* 0x0497: main */ - 0x0d0031f4, - 0x00377e10, - 0xf401f400, - 0x4001e4b1, - 0x00c71bf5, - 0x99f094bd, - 0x37008004, - 0x0009f602, - 0x008104bd, - 0x11cf02c0, - 0xc1008200, - 0x0022cf02, - 0xf41f13c8, - 0x23c8770b, - 0x550bf41f, - 0x12b220f9, - 0x99f094bd, - 0x37008007, - 0x0009f602, - 0x32f404bd, - 0x0231f401, - 0x0008807e, - 0x99f094bd, - 0x17008007, - 0x0009f602, - 0x20fc04bd, - 0x99f094bd, - 0x37008006, - 0x0009f602, - 0x31f404bd, - 0x08807e01, + 0x1fbb0000, + 0x0040b700, + 0x0132b680, + 0x0fb41bf4, + 0x06fe7e00, + 0x7e000f00, + 0x800006ad, + 0xf6020100, + 0x04bd0001, + 0x19f014bd, + 0x3000801f, + 0x0001f602, +/* 0x0492: wait */ + 0x28f404bd, + 0x0031f400, +/* 0x0498: main */ + 0x377e100d, + 0x01f40000, + 0x01e4b1f4, + 0xc71bf540, 0xf094bd00, - 0x00800699, + 0x00800499, + 0x09f60237, + 0x8104bd00, + 0xcf02c000, + 0x00820011, + 0x22cf02c1, + 0x1f13c800, + 0xc8770bf4, + 0x0bf41f23, + 0xb220f955, + 0xf094bd12, + 0x00800799, + 0x09f60237, + 0xf404bd00, + 0x31f40132, + 0x08817e02, + 0xf094bd00, + 0x00800799, 0x09f60217, + 0xfc04bd00, + 0xf094bd20, + 0x00800699, + 0x09f60237, 0xf404bd00, -/* 0x0522: chsw_prev_no_next */ - 0x20f92f0e, - 0x32f412b2, - 0x0232f401, - 0x0008807e, - 0x008020fc, - 0x02f602c0, + 0x817e0131, + 0x94bd0008, + 0x800699f0, + 0xf6021700, + 0x04bd0009, +/* 0x0523: chsw_prev_no_next */ + 0xf92f0ef4, + 0xf412b220, + 0x32f40132, + 0x08817e02, + 0x8020fc00, + 0xf602c000, + 0x04bd0002, +/* 0x053f: chsw_no_prev */ + 0xc8130ef4, + 0x0bf41f23, + 0x0131f40d, + 0x7e0232f4, +/* 0x054f: chsw_done */ + 0x02000881, + 0xc3008001, + 0x0002f602, + 0x94bd04bd, + 0x800499f0, + 0xf6021700, + 0x04bd0009, + 0xff300ef5, +/* 0x056c: main_not_ctx_switch */ + 0xf401e4b0, + 0xf2b20c1b, + 0x0008217e, +/* 0x057b: main_not_ctx_chan */ + 0xb0400ef4, + 0x1bf402e4, + 0xf094bd2c, + 0x00800799, + 0x09f60237, 0xf404bd00, -/* 0x053e: chsw_no_prev */ - 0x23c8130e, - 0x0d0bf41f, - 0xf40131f4, - 0x807e0232, -/* 0x054e: chsw_done */ - 0x01020008, - 0x02c30080, - 0xbd0002f6, - 0xf094bd04, - 0x00800499, + 0x32f40132, + 0x08817e02, + 0xf094bd00, + 0x00800799, 0x09f60217, - 0xf504bd00, -/* 0x056b: main_not_ctx_switch */ - 0xb0ff300e, - 0x1bf401e4, - 0x7ef2b20c, - 0xf4000820, -/* 0x057a: main_not_ctx_chan */ - 0xe4b0400e, - 0x2c1bf402, - 0x99f094bd, - 0x37008007, - 0x0009f602, - 0x32f404bd, - 0x0232f401, - 0x0008807e, - 0x99f094bd, - 0x17008007, - 0x0009f602, - 0x0ef404bd, -/* 0x05a9: main_not_ctx_save */ - 0x10ef9411, - 0x7e01f5f0, - 0xf50002f8, -/* 0x05b7: main_done */ - 0xbdfee40e, - 0x1f29f024, - 0x02300080, - 0xbd0002f6, - 0xd20ef504, -/* 0x05c9: ih */ - 0xf900f9fe, - 0x0188fe80, - 0x90f980f9, - 0xb0f9a0f9, - 0xe0f9d0f9, - 0x04bdf0f9, - 0xcf02004a, - 0xabc400aa, - 0x230bf404, - 0x004e100d, - 0x00eecf1a, - 0xcf19004f, - 0x047e00ff, - 0xb0b70000, - 0x010e0400, - 0xf61d0040, - 0x04bd000e, -/* 0x060c: ih_no_fifo */ - 0x0100abe4, - 0x0d0c0bf4, - 0x40014e10, - 0x0000047e, -/* 0x061c: ih_no_ctxsw */ - 0x0400abe4, - 0x8e560bf4, - 0x7e400708, + 0xf404bd00, +/* 0x05aa: main_not_ctx_save */ + 0xef94110e, + 0x01f5f010, + 0x0002f87e, + 0xfee40ef5, +/* 0x05b8: main_done */ + 0x29f024bd, + 0x3000801f, + 0x0002f602, + 0x0ef504bd, +/* 0x05ca: ih */ + 0x00f9fed2, + 0x88fe80f9, + 0xf980f901, + 
0xf9a0f990, + 0xf9d0f9b0, + 0xbdf0f9e0, + 0x02004a04, + 0xc400aacf, + 0x0bf404ab, + 0x4e100d23, + 0xeecf1a00, + 0x19004f00, + 0x7e00ffcf, + 0xb7000004, + 0x0e0400b0, + 0x1d004001, + 0xbd000ef6, +/* 0x060d: ih_no_fifo */ + 0x00abe404, + 0x0c0bf401, + 0x014e100d, + 0x00047e40, +/* 0x061d: ih_no_ctxsw */ + 0x00abe400, + 0x560bf404, + 0x4007088e, + 0x0000657e, + 0x0080ffb2, + 0x0ff60204, + 0x8e04bd00, + 0x7e400704, 0xb2000065, - 0x040080ff, + 0x030080ff, 0x000ff602, - 0x048e04bd, - 0x657e4007, - 0xffb20000, - 0x02030080, - 0xbd000ff6, - 0x50fec704, - 0x8f02ee94, - 0xbb400700, - 0x657e00ef, - 0x00800000, - 0x0ff60202, + 0xfec704bd, + 0x02ee9450, + 0x4007008f, + 0x7e00efbb, + 0x80000065, + 0xf6020200, + 0x04bd000f, + 0xf87e030f, + 0x004b0002, + 0x8ebfb201, + 0x7e400144, +/* 0x0677: ih_no_fwmthd */ + 0x4b00008f, + 0xb0bd0504, + 0xf4b4abff, + 0x00800c0b, + 0x0bf60307, +/* 0x068b: ih_no_other */ + 0x4004bd00, + 0x0af60100, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x00fc80fc, + 0xf80032f4, +/* 0x06ad: ctx_4170s */ + 0x10f5f001, + 0x708effb2, + 0x8f7e4041, + 0x00f80000, +/* 0x06bc: ctx_4170w */ + 0x4041708e, + 0x0000657e, + 0xf4f0ffb2, + 0xf31bf410, +/* 0x06ce: ctx_redswitch */ + 0x004e00f8, + 0x40e5f002, + 0xf020e5f0, + 0x008010e5, + 0x0ef60185, 0x0f04bd00, - 0x02f87e03, - 0x01004b00, - 0x448ebfb2, - 0x8f7e4001, -/* 0x0676: ih_no_fwmthd */ - 0x044b0000, - 0xffb0bd05, - 0x0bf4b4ab, - 0x0700800c, - 0x000bf603, -/* 0x068a: ih_no_other */ - 0x004004bd, - 0x000af601, - 0xf0fc04bd, - 0xd0fce0fc, - 0xa0fcb0fc, - 0x80fc90fc, - 0xfc0088fe, - 0xf400fc80, - 0x01f80032, -/* 0x06ac: ctx_4170s */ - 0xb210f5f0, - 0x41708eff, +/* 0x06e5: ctx_redswitch_delay */ + 0x01f2b608, + 0xf1fd1bf4, + 0xf10400e5, + 0x800100e5, + 0xf6018500, + 0x04bd000e, +/* 0x06fe: ctx_86c */ + 0x008000f8, + 0x0ff60223, + 0xb204bd00, + 0x8a148eff, 0x008f7e40, -/* 0x06bb: ctx_4170w */ - 0x8e00f800, - 0x7e404170, - 0xb2000065, - 0x10f4f0ff, - 0xf8f31bf4, -/* 0x06cd: ctx_redswitch */ - 0x02004e00, - 0xf040e5f0, - 0xe5f020e5, - 0x85008010, - 0x000ef601, - 0x080f04bd, -/* 0x06e4: ctx_redswitch_delay */ - 0xf401f2b6, - 0xe5f1fd1b, - 0xe5f10400, - 0x00800100, - 0x0ef60185, - 0xf804bd00, -/* 0x06fd: ctx_86c */ - 0x23008000, + 0x8effb200, + 0x7e41a88c, + 0xf800008f, +/* 0x071d: ctx_mem */ + 0x84008000, 0x000ff602, - 0xffb204bd, - 0x408a148e, - 0x00008f7e, - 0x8c8effb2, - 0x8f7e41a8, - 0x00f80000, -/* 0x071c: ctx_mem */ - 0x02840080, - 0xbd000ff6, -/* 0x0725: ctx_mem_wait */ - 0x84008f04, - 0x00ffcf02, - 0xf405fffd, - 0x00f8f61b, -/* 0x0734: ctx_load */ - 0x99f094bd, - 0x37008005, - 0x0009f602, - 0x0c0a04bd, - 0x0000b87e, - 0x0080f4bd, - 0x0ff60289, - 0x8004bd00, - 0xf602c100, - 0x04bd0002, - 0x02830080, +/* 0x0726: ctx_mem_wait */ + 0x008f04bd, + 0xffcf0284, + 0x05fffd00, + 0xf8f61bf4, +/* 0x0735: ctx_load */ + 0xf094bd00, + 0x00800599, + 0x09f60237, + 0x0a04bd00, + 0x00b87e0c, + 0x80f4bd00, + 0xf6028900, + 0x04bd000f, + 0x02c10080, 0xbd0002f6, - 0x7e070f04, - 0x8000071c, - 0xf602c000, - 0x04bd0002, - 0xf0000bfe, - 0x24b61f2a, - 0x0220b604, - 0x99f094bd, - 0x37008008, - 0x0009f602, - 0x008004bd, - 0x02f60281, - 0xd204bd00, - 0x80000000, - 0x800225f0, - 0xf6028800, - 0x04bd0002, - 0x00421001, - 0x0223f002, - 0xf80512fa, - 0xf094bd03, + 0x83008004, + 0x0002f602, + 0x070f04bd, + 0x00071d7e, + 0x02c00080, + 0xbd0002f6, + 0x000bfe04, + 0xb61f2af0, + 0x20b60424, + 0xf094bd02, 0x00800899, - 0x09f60217, - 0x9804bd00, - 0x14b68101, - 0x80029818, - 0xfd0825b6, - 0x01b50512, - 0xf094bd16, - 0x00800999, 0x09f60237, 0x8004bd00, 
0xf6028100, - 0x04bd0001, - 0x00800102, - 0x02f60288, - 0x4104bd00, - 0x13f00100, - 0x0501fa06, + 0x04bd0002, + 0x000000d2, + 0x0225f080, + 0x02880080, + 0xbd0002f6, + 0x42100104, + 0x23f00200, + 0x0512fa02, 0x94bd03f8, - 0x800999f0, + 0x800899f0, 0xf6021700, 0x04bd0009, - 0x99f094bd, - 0x17008005, - 0x0009f602, - 0x00f804bd, -/* 0x0820: ctx_chan */ - 0x0007347e, - 0xb87e0c0a, - 0x050f0000, - 0x00071c7e, -/* 0x0832: ctx_mmio_exec */ - 0x039800f8, - 0x81008041, - 0x0003f602, - 0x34bd04bd, -/* 0x0840: ctx_mmio_loop */ - 0xf4ff34c4, - 0x00450e1b, - 0x0653f002, - 0xf80535fa, -/* 0x0851: ctx_mmio_pull */ - 0x804e9803, - 0x7e814f98, - 0xb600008f, - 0x12b60830, - 0xdf1bf401, -/* 0x0864: ctx_mmio_done */ - 0x80160398, - 0xf6028100, - 0x04bd0003, - 0x414000b5, - 0x13f00100, - 0x0601fa06, - 0x00f803f8, -/* 0x0880: ctx_xfer */ - 0x0080040e, - 0x0ef60302, -/* 0x088b: ctx_xfer_idle */ - 0x8e04bd00, - 0xcf030000, - 0xe4f100ee, - 0x1bf42000, - 0x0611f4f5, -/* 0x089f: ctx_xfer_pre */ - 0x0f0c02f4, - 0x06fd7e10, - 0x1b11f400, -/* 0x08a8: ctx_xfer_pre_load */ - 0xac7e020f, - 0xbb7e0006, - 0xcd7e0006, - 0xf4bd0006, - 0x0006ac7e, - 0x0007347e, -/* 0x08c0: ctx_xfer_exec */ - 0xbd160198, - 0x05008024, - 0x0002f601, - 0x1fb204bd, - 0x41a5008e, - 0x00008f7e, - 0xf001fcf0, - 0x24b6022c, - 0x05f2fd01, - 0x048effb2, - 0x8f7e41a5, - 0x167e0000, - 0x24bd0002, - 0x0247fc80, - 0xbd0002f6, - 0x012cf004, - 0x800320b6, - 0xf6024afc, + 0xb6810198, + 0x02981814, + 0x0825b680, + 0xb50512fd, + 0x94bd1601, + 0x800999f0, + 0xf6023700, + 0x04bd0009, + 0x02810080, + 0xbd0001f6, + 0x80010204, + 0xf6028800, 0x04bd0002, - 0xf001acf0, - 0x000b06a5, - 0x98000c98, - 0x000e010d, - 0x00013d7e, - 0xec7e080a, - 0x0a7e0000, - 0x01f40002, - 0x7e0c0a12, + 0xf0010041, + 0x01fa0613, + 0xbd03f805, + 0x0999f094, + 0x02170080, + 0xbd0009f6, + 0xf094bd04, + 0x00800599, + 0x09f60217, + 0xf804bd00, +/* 0x0821: ctx_chan */ + 0x07357e00, + 0x7e0c0a00, 0x0f0000b8, - 0x071c7e05, - 0x2d02f400, -/* 0x093c: ctx_xfer_post */ - 0xac7e020f, - 0xf4bd0006, - 0x0006fd7e, - 0x0002277e, - 0x0006bb7e, - 0xac7ef4bd, + 0x071d7e05, +/* 0x0833: ctx_mmio_exec */ + 0x9800f800, + 0x00804103, + 0x03f60281, + 0xbd04bd00, +/* 0x0841: ctx_mmio_loop */ + 0xff34c434, + 0x450e1bf4, + 0x53f00200, + 0x0535fa06, +/* 0x0852: ctx_mmio_pull */ + 0x4e9803f8, + 0x814f9880, + 0x00008f7e, + 0xb60830b6, + 0x1bf40112, +/* 0x0865: ctx_mmio_done */ + 0x160398df, + 0x02810080, + 0xbd0003f6, + 0x4000b504, + 0xf0010041, + 0x01fa0613, + 0xf803f806, +/* 0x0881: ctx_xfer */ + 0x80040e00, + 0xf6030200, + 0x04bd000e, +/* 0x088c: ctx_xfer_idle */ + 0x0300008e, + 0xf100eecf, + 0xf42000e4, + 0x11f4f51b, + 0x0c02f406, +/* 0x08a0: ctx_xfer_pre */ + 0xfe7e100f, 0x11f40006, - 0x40019810, - 0xf40511fd, - 0x327e070b, -/* 0x0966: ctx_xfer_no_post_mmio */ -/* 0x0966: ctx_xfer_done */ - 0x00f80008, +/* 0x08a9: ctx_xfer_pre_load */ + 0x7e020f1b, + 0x7e0006ad, + 0x7e0006bc, + 0xbd0006ce, + 0x06ad7ef4, + 0x07357e00, +/* 0x08c1: ctx_xfer_exec */ + 0x16019800, + 0x008024bd, + 0x02f60105, + 0xb204bd00, + 0xa5008e1f, + 0x008f7e41, + 0x01fcf000, + 0xb6022cf0, + 0xf2fd0124, + 0x8effb205, + 0x7e41a504, + 0x7e00008f, + 0xbd000216, + 0x47fc8024, + 0x0002f602, + 0x2cf004bd, + 0x0320b601, + 0x024afc80, + 0xbd0002f6, + 0x01acf004, + 0x0b06a5f0, + 0x000c9800, + 0x0e010d98, + 0x013d7e00, + 0x7e080a00, + 0x7e0000ec, + 0xf400020a, + 0x0c0a1201, + 0x0000b87e, + 0x1d7e050f, + 0x02f40007, +/* 0x093d: ctx_xfer_post */ + 0x7e020f2d, + 0xbd0006ad, + 0x06fe7ef4, + 0x02277e00, + 0x06bc7e00, + 0x7ef4bd00, + 0xf40006ad, + 0x01981011, + 
0x0511fd40, + 0x7e070bf4, +/* 0x0967: ctx_xfer_no_post_mmio */ +/* 0x0967: ctx_xfer_done */ + 0xf8000833, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index c578deb5867a..dd8f85b8b3a7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -26,9 +26,9 @@ #include "fuc/os.h" #include <core/client.h> -#include <core/option.h> #include <core/firmware.h> -#include <subdev/secboot.h> +#include <core/option.h> +#include <subdev/acr.h> #include <subdev/fb.h> #include <subdev/mc.h> #include <subdev/pmu.h> @@ -1636,7 +1636,7 @@ gf100_gr_intr(struct nvkm_gr *base) static void gf100_gr_init_fw(struct nvkm_falcon *falcon, - struct gf100_gr_fuc *code, struct gf100_gr_fuc *data) + struct nvkm_blob *code, struct nvkm_blob *data) { nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0); nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false); @@ -1690,26 +1690,30 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr) { struct nvkm_subdev *subdev = &gr->base.engine.subdev; struct nvkm_device *device = subdev->device; - struct nvkm_secboot *sb = device->secboot; - u32 secboot_mask = 0; + u32 lsf_mask = 0; int ret; /* load fuc microcode */ nvkm_mc_unk260(device, 0); /* securely-managed falcons must be reset using secure boot */ - if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS)) - secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS); - else - gf100_gr_init_fw(gr->fecs.falcon, &gr->fuc409c, &gr->fuc409d); - if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS)) - secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS); - else - gf100_gr_init_fw(gr->gpccs.falcon, &gr->fuc41ac, &gr->fuc41ad); + if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS)) { + gf100_gr_init_fw(&gr->fecs.falcon, &gr->fecs.inst, + &gr->fecs.data); + } else { + lsf_mask |= BIT(NVKM_ACR_LSF_FECS); + } - if (secboot_mask != 0) { - int ret = nvkm_secboot_reset(sb, secboot_mask); + if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_GPCCS)) { + gf100_gr_init_fw(&gr->gpccs.falcon, &gr->gpccs.inst, + &gr->gpccs.data); + } else { + lsf_mask |= BIT(NVKM_ACR_LSF_GPCCS); + } + + if (lsf_mask) { + ret = nvkm_acr_bootstrap_falcons(device, lsf_mask); if (ret) return ret; } @@ -1721,8 +1725,8 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr) nvkm_wr32(device, 0x41a10c, 0x00000000); nvkm_wr32(device, 0x40910c, 0x00000000); - nvkm_falcon_start(gr->gpccs.falcon); - nvkm_falcon_start(gr->fecs.falcon); + nvkm_falcon_start(&gr->gpccs.falcon); + nvkm_falcon_start(&gr->fecs.falcon); if (nvkm_msec(device, 2000, if (nvkm_rd32(device, 0x409800) & 0x00000001) @@ -1784,18 +1788,18 @@ gf100_gr_init_ctxctl_int(struct gf100_gr *gr) /* load HUB microcode */ nvkm_mc_unk260(device, 0); - nvkm_falcon_load_dmem(gr->fecs.falcon, + nvkm_falcon_load_dmem(&gr->fecs.falcon, gr->func->fecs.ucode->data.data, 0x0, gr->func->fecs.ucode->data.size, 0); - nvkm_falcon_load_imem(gr->fecs.falcon, + nvkm_falcon_load_imem(&gr->fecs.falcon, gr->func->fecs.ucode->code.data, 0x0, gr->func->fecs.ucode->code.size, 0, 0, false); /* load GPC microcode */ - nvkm_falcon_load_dmem(gr->gpccs.falcon, + nvkm_falcon_load_dmem(&gr->gpccs.falcon, gr->func->gpccs.ucode->data.data, 0x0, gr->func->gpccs.ucode->data.size, 0); - nvkm_falcon_load_imem(gr->gpccs.falcon, + nvkm_falcon_load_imem(&gr->gpccs.falcon, gr->func->gpccs.ucode->code.data, 0x0, gr->func->gpccs.ucode->code.size, 0, 0, false); nvkm_mc_unk260(device, 1); @@ -1941,17 
+1945,6 @@ gf100_gr_oneinit(struct nvkm_gr *base) struct nvkm_subdev *subdev = &gr->base.engine.subdev; struct nvkm_device *device = subdev->device; int i, j; - int ret; - - ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs.falcon); - if (ret) - return ret; - - mutex_init(&gr->fecs.mutex); - - ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs.falcon); - if (ret) - return ret; nvkm_pmu_pgob(device->pmu, false); @@ -1992,11 +1985,11 @@ gf100_gr_init_(struct nvkm_gr *base) nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false); - ret = nvkm_falcon_get(gr->fecs.falcon, subdev); + ret = nvkm_falcon_get(&gr->fecs.falcon, subdev); if (ret) return ret; - ret = nvkm_falcon_get(gr->gpccs.falcon, subdev); + ret = nvkm_falcon_get(&gr->gpccs.falcon, subdev); if (ret) return ret; @@ -2004,49 +1997,34 @@ gf100_gr_init_(struct nvkm_gr *base) } static int -gf100_gr_fini_(struct nvkm_gr *base, bool suspend) +gf100_gr_fini(struct nvkm_gr *base, bool suspend) { struct gf100_gr *gr = gf100_gr(base); struct nvkm_subdev *subdev = &gr->base.engine.subdev; - nvkm_falcon_put(gr->gpccs.falcon, subdev); - nvkm_falcon_put(gr->fecs.falcon, subdev); + nvkm_falcon_put(&gr->gpccs.falcon, subdev); + nvkm_falcon_put(&gr->fecs.falcon, subdev); return 0; } -void -gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc) -{ - kfree(fuc->data); - fuc->data = NULL; -} - -static void -gf100_gr_dtor_init(struct gf100_gr_pack *pack) -{ - vfree(pack); -} - void * gf100_gr_dtor(struct nvkm_gr *base) { struct gf100_gr *gr = gf100_gr(base); - if (gr->func->dtor) - gr->func->dtor(gr); kfree(gr->data); - nvkm_falcon_del(&gr->gpccs.falcon); - nvkm_falcon_del(&gr->fecs.falcon); + nvkm_falcon_dtor(&gr->gpccs.falcon); + nvkm_falcon_dtor(&gr->fecs.falcon); - gf100_gr_dtor_fw(&gr->fuc409c); - gf100_gr_dtor_fw(&gr->fuc409d); - gf100_gr_dtor_fw(&gr->fuc41ac); - gf100_gr_dtor_fw(&gr->fuc41ad); + nvkm_blob_dtor(&gr->fecs.inst); + nvkm_blob_dtor(&gr->fecs.data); + nvkm_blob_dtor(&gr->gpccs.inst); + nvkm_blob_dtor(&gr->gpccs.data); - gf100_gr_dtor_init(gr->fuc_bundle); - gf100_gr_dtor_init(gr->fuc_method); - gf100_gr_dtor_init(gr->fuc_sw_ctx); - gf100_gr_dtor_init(gr->fuc_sw_nonctx); + vfree(gr->bundle); + vfree(gr->method); + vfree(gr->sw_ctx); + vfree(gr->sw_nonctx); return gr; } @@ -2056,7 +2034,7 @@ gf100_gr_ = { .dtor = gf100_gr_dtor, .oneinit = gf100_gr_oneinit, .init = gf100_gr_init_, - .fini = gf100_gr_fini_, + .fini = gf100_gr_fini, .intr = gf100_gr_intr, .units = gf100_gr_units, .chan_new = gf100_gr_chan_new, @@ -2067,87 +2045,24 @@ gf100_gr_ = { .ctxsw.inst = gf100_gr_ctxsw_inst, }; -int -gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname, - struct gf100_gr_fuc *fuc, int ret) -{ - struct nvkm_subdev *subdev = &gr->base.engine.subdev; - struct nvkm_device *device = subdev->device; - const struct firmware *fw; - char f[32]; - - /* see if this firmware has a legacy path */ - if (!strcmp(fwname, "fecs_inst")) - fwname = "fuc409c"; - else if (!strcmp(fwname, "fecs_data")) - fwname = "fuc409d"; - else if (!strcmp(fwname, "gpccs_inst")) - fwname = "fuc41ac"; - else if (!strcmp(fwname, "gpccs_data")) - fwname = "fuc41ad"; - else { - /* nope, let's just return the error we got */ - nvkm_error(subdev, "failed to load %s\n", fwname); - return ret; - } - - /* yes, try to load from the legacy path */ - nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname); - - snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname); - ret = request_firmware(&fw, f, device->dev); - if (ret) { - snprintf(f, sizeof(f), 
"nouveau/%s", fwname); - ret = request_firmware(&fw, f, device->dev); - if (ret) { - nvkm_error(subdev, "failed to load %s\n", fwname); - return ret; - } - } - - fuc->size = fw->size; - fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL); - release_firmware(fw); - return (fuc->data != NULL) ? 0 : -ENOMEM; -} - -int -gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname, - struct gf100_gr_fuc *fuc) -{ - const struct firmware *fw; - int ret; - - ret = nvkm_firmware_get(&gr->base.engine.subdev, fwname, &fw); - if (ret) { - ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret); - if (ret) - return -ENODEV; - return 0; - } - - fuc->size = fw->size; - fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL); - nvkm_firmware_put(fw); - return (fuc->data != NULL) ? 0 : -ENOMEM; -} - -int -gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device, - int index, struct gf100_gr *gr) -{ - gr->func = func; - gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW", - func->fecs.ucode == NULL); - - return nvkm_gr_ctor(&gf100_gr_, device, index, - gr->firmware || func->fecs.ucode != NULL, - &gr->base); -} +static const struct nvkm_falcon_func +gf100_gr_flcn = { + .fbif = 0x600, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, +}; int -gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device, - int index, struct nvkm_gr **pgr) +gf100_gr_new_(const struct gf100_gr_fwif *fwif, + struct nvkm_device *device, int index, struct nvkm_gr **pgr) { struct gf100_gr *gr; int ret; @@ -2156,22 +2071,49 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device, return -ENOMEM; *pgr = &gr->base; - ret = gf100_gr_ctor(func, device, index, gr); + ret = nvkm_gr_ctor(&gf100_gr_, device, index, true, &gr->base); if (ret) return ret; - if (gr->firmware) { - if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) || - gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) || - gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) || - gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad)) - return -ENODEV; - } + fwif = nvkm_firmware_load(&gr->base.engine.subdev, fwif, "Gr", gr); + if (IS_ERR(fwif)) + return -ENODEV; + + gr->func = fwif->func; + + ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev, + "fecs", 0x409000, &gr->fecs.falcon); + if (ret) + return ret; + + mutex_init(&gr->fecs.mutex); + + ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev, + "gpccs", 0x41a000, &gr->gpccs.falcon); + if (ret) + return ret; return 0; } void +gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *gr, bool pd, bool ds) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + int gpc, i, j; + u32 data; + + for (gpc = 0, i = 0; i < 4; i++) { + for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++) + data |= gr->tpc_nr[gpc] << (j * 4); + if (pd) + nvkm_wr32(device, 0x406028 + (i * 4), data); + if (ds) + nvkm_wr32(device, 0x405870 + (i * 4), data); + } +} + +void gf100_gr_init_400054(struct gf100_gr *gr) { nvkm_wr32(gr->base.engine.subdev.device, 0x400054, 0x34ce3464); @@ -2295,8 +2237,8 @@ gf100_gr_init(struct gf100_gr *gr) gr->func->init_gpc_mmu(gr); - if (gr->fuc_sw_nonctx) - gf100_gr_mmio(gr, 
gr->fuc_sw_nonctx); + if (gr->sw_nonctx) + gf100_gr_mmio(gr, gr->sw_nonctx); else gf100_gr_mmio(gr, gr->func->mmio); @@ -2320,6 +2262,8 @@ gf100_gr_init(struct gf100_gr *gr) gr->func->init_bios_2(gr); if (gr->func->init_swdx_pes_mask) gr->func->init_swdx_pes_mask(gr); + if (gr->func->init_fs) + gr->func->init_fs(gr); nvkm_wr32(device, 0x400500, 0x00010001); @@ -2338,8 +2282,8 @@ gf100_gr_init(struct gf100_gr *gr) if (gr->func->init_40601c) gr->func->init_40601c(gr); - nvkm_wr32(device, 0x404490, 0xc0000000); nvkm_wr32(device, 0x406018, 0xc0000000); + nvkm_wr32(device, 0x404490, 0xc0000000); if (gr->func->init_sked_hww_esr) gr->func->init_sked_hww_esr(gr); @@ -2454,7 +2398,66 @@ gf100_gr = { }; int +gf100_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) +{ + gr->firmware = false; + return 0; +} + +static int +gf100_gr_load_fw(struct gf100_gr *gr, const char *name, + struct nvkm_blob *blob) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_device *device = subdev->device; + const struct firmware *fw; + char f[32]; + int ret; + + snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, name); + ret = request_firmware(&fw, f, device->dev); + if (ret) { + snprintf(f, sizeof(f), "nouveau/%s", name); + ret = request_firmware(&fw, f, device->dev); + if (ret) { + nvkm_error(subdev, "failed to load %s\n", name); + return ret; + } + } + + blob->size = fw->size; + blob->data = kmemdup(fw->data, blob->size, GFP_KERNEL); + release_firmware(fw); + return (blob->data != NULL) ? 0 : -ENOMEM; +} + +int +gf100_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + + if (!nvkm_boolopt(device->cfgopt, "NvGrUseFW", false)) + return -EINVAL; + + if (gf100_gr_load_fw(gr, "fuc409c", &gr->fecs.inst) || + gf100_gr_load_fw(gr, "fuc409d", &gr->fecs.data) || + gf100_gr_load_fw(gr, "fuc41ac", &gr->gpccs.inst) || + gf100_gr_load_fw(gr, "fuc41ad", &gr->gpccs.data)) + return -ENOENT; + + gr->firmware = true; + return 0; +} + +static const struct gf100_gr_fwif +gf100_gr_fwif[] = { + { -1, gf100_gr_load, &gf100_gr }, + { -1, gf100_gr_nofw, &gf100_gr }, + {} +}; + +int gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gf100_gr, device, index, pgr); + return gf100_gr_new_(gf100_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index fafdd0bbea9b..4c67b254c413 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h @@ -31,6 +31,8 @@ #include <subdev/mmu.h> #include <engine/falcon.h> +struct nvkm_acr_lsfw; + #define GPC_MAX 32 #define TPC_MAX_PER_GPC 8 #define TPC_MAX (GPC_MAX * TPC_MAX_PER_GPC) @@ -55,11 +57,6 @@ struct gf100_gr_mmio { int buffer; }; -struct gf100_gr_fuc { - u32 *data; - u32 size; -}; - struct gf100_gr_zbc_color { u32 format; u32 ds[4]; @@ -83,29 +80,30 @@ struct gf100_gr { struct nvkm_gr base; struct { - struct nvkm_falcon *falcon; + struct nvkm_falcon falcon; + struct nvkm_blob inst; + struct nvkm_blob data; + struct mutex mutex; u32 disable; } fecs; struct { - struct nvkm_falcon *falcon; + struct nvkm_falcon falcon; + struct nvkm_blob inst; + struct nvkm_blob data; } gpccs; - struct gf100_gr_fuc fuc409c; - struct gf100_gr_fuc fuc409d; - struct gf100_gr_fuc fuc41ac; - struct gf100_gr_fuc fuc41ad; bool firmware; /* * Used if the register packs are loaded from NVIDIA fw instead of * 
using hardcoded arrays. To be allocated with vzalloc(). */ - struct gf100_gr_pack *fuc_sw_nonctx; - struct gf100_gr_pack *fuc_sw_ctx; - struct gf100_gr_pack *fuc_bundle; - struct gf100_gr_pack *fuc_method; + struct gf100_gr_pack *sw_nonctx; + struct gf100_gr_pack *sw_ctx; + struct gf100_gr_pack *bundle; + struct gf100_gr_pack *method; struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT]; struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT]; @@ -140,12 +138,6 @@ struct gf100_gr { u32 size_pm; }; -int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *, - int, struct gf100_gr *); -int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *, - int, struct nvkm_gr **); -void *gf100_gr_dtor(struct nvkm_gr *); - int gf100_gr_fecs_bind_pointer(struct gf100_gr *, u32 inst); struct gf100_gr_func_zbc { @@ -157,7 +149,6 @@ struct gf100_gr_func_zbc { }; struct gf100_gr_func { - void (*dtor)(struct gf100_gr *); void (*oneinit_tiles)(struct gf100_gr *); void (*oneinit_sm_id)(struct gf100_gr *); int (*init)(struct gf100_gr *); @@ -171,6 +162,7 @@ struct gf100_gr_func { void (*init_rop_active_fbps)(struct gf100_gr *); void (*init_bios_2)(struct gf100_gr *); void (*init_swdx_pes_mask)(struct gf100_gr *); + void (*init_fs)(struct gf100_gr *); void (*init_fecs_exceptions)(struct gf100_gr *); void (*init_ds_hww_esr_2)(struct gf100_gr *); void (*init_40601c)(struct gf100_gr *); @@ -217,6 +209,7 @@ void gf100_gr_init_419eb4(struct gf100_gr *); void gf100_gr_init_tex_hww_esr(struct gf100_gr *, int, int); void gf100_gr_init_shader_exceptions(struct gf100_gr *, int, int); void gf100_gr_init_400054(struct gf100_gr *); +void gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *, bool, bool); extern const struct gf100_gr_func_zbc gf100_gr_zbc; void gf117_gr_init_zcull(struct gf100_gr *); @@ -249,6 +242,13 @@ void gp100_gr_zbc_clear_depth(struct gf100_gr *, int); void gp102_gr_init_swdx_pes_mask(struct gf100_gr *); extern const struct gf100_gr_func_zbc gp102_gr_zbc; +extern const struct gf100_gr_func gp107_gr; + +void gv100_gr_init_419bd8(struct gf100_gr *); +void gv100_gr_init_504430(struct gf100_gr *, int, int); +void gv100_gr_init_shader_exceptions(struct gf100_gr *, int, int); +void gv100_gr_trap_mp(struct gf100_gr *, int, int); + #define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object) #include <core/object.h> @@ -269,9 +269,6 @@ struct gf100_gr_chan { void gf100_gr_ctxctl_debug(struct gf100_gr *); -void gf100_gr_dtor_fw(struct gf100_gr_fuc *); -int gf100_gr_ctor_fw(struct gf100_gr *, const char *, - struct gf100_gr_fuc *); u64 gf100_gr_units(struct nvkm_gr *); void gf100_gr_zbc_init(struct gf100_gr *); @@ -294,8 +291,8 @@ struct gf100_gr_pack { for (init = pack->init; init && init->count; init++) struct gf100_gr_ucode { - struct gf100_gr_fuc code; - struct gf100_gr_fuc data; + struct nvkm_blob code; + struct nvkm_blob data; }; extern struct gf100_gr_ucode gf100_gr_fecs_ucode; @@ -310,17 +307,6 @@ void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *); void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *); int gf100_gr_init_ctxctl(struct gf100_gr *); -/* external bundles loading functions */ -int gk20a_gr_av_to_init(struct gf100_gr *, const char *, - struct gf100_gr_pack **); -int gk20a_gr_aiv_to_init(struct gf100_gr *, const char *, - struct gf100_gr_pack **); -int gk20a_gr_av_to_method(struct gf100_gr *, const char *, - struct gf100_gr_pack **); - -int gm200_gr_new_(const struct gf100_gr_func *, struct nvkm_device *, int, - struct nvkm_gr **); 
- /* register init value lists */ extern const struct gf100_gr_init gf100_gr_init_main_0[]; @@ -403,4 +389,31 @@ extern const struct gf100_gr_init gm107_gr_init_cbm_0[]; void gm107_gr_init_bios(struct gf100_gr *); void gm200_gr_init_gpc_mmu(struct gf100_gr *); + +struct gf100_gr_fwif { + int version; + int (*load)(struct gf100_gr *, int ver, const struct gf100_gr_fwif *); + const struct gf100_gr_func *func; + const struct nvkm_acr_lsf_func *fecs; + const struct nvkm_acr_lsf_func *gpccs; +}; + +int gf100_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *); +int gf100_gr_nofw(struct gf100_gr *, int, const struct gf100_gr_fwif *); + +int gk20a_gr_load_sw(struct gf100_gr *, const char *path, int ver); + +int gm200_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *); +extern const struct nvkm_acr_lsf_func gm200_gr_gpccs_acr; +extern const struct nvkm_acr_lsf_func gm200_gr_fecs_acr; + +extern const struct nvkm_acr_lsf_func gm20b_gr_fecs_acr; +void gm20b_gr_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); +void gm20b_gr_acr_bld_patch(struct nvkm_acr *, u32, s64); + +extern const struct nvkm_acr_lsf_func gp108_gr_gpccs_acr; +extern const struct nvkm_acr_lsf_func gp108_gr_fecs_acr; + +int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, int, + struct nvkm_gr **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c index 42c2fd9fc04e..0536fe8b2b92 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c @@ -144,8 +144,15 @@ gf104_gr = { } }; +static const struct gf100_gr_fwif +gf104_gr_fwif[] = { + { -1, gf100_gr_load, &gf104_gr }, + { -1, gf100_gr_nofw, &gf104_gr }, + {} +}; + int gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gf104_gr, device, index, pgr); + return gf100_gr_new_(gf104_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c index 4731a460adc7..14284b06112f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c @@ -143,8 +143,15 @@ gf108_gr = { } }; +const struct gf100_gr_fwif +gf108_gr_fwif[] = { + { -1, gf100_gr_load, &gf108_gr }, + { -1, gf100_gr_nofw, &gf108_gr }, + {} +}; + int gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gf108_gr, device, index, pgr); + return gf100_gr_new_(gf108_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c index cdf759c8cd7f..280752551a3a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c @@ -119,8 +119,15 @@ gf110_gr = { } }; +static const struct gf100_gr_fwif +gf110_gr_fwif[] = { + { -1, gf100_gr_load, &gf110_gr }, + { -1, gf100_gr_nofw, &gf110_gr }, + {} +}; + int gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gf110_gr, device, index, pgr); + return gf100_gr_new_(gf110_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c index a4158f84c649..235c3fbe4b95 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c @@ -184,8 +184,15 @@ gf117_gr = { } }; +static const struct gf100_gr_fwif 
+gf117_gr_fwif[] = { + { -1, gf100_gr_load, &gf117_gr }, + { -1, gf100_gr_nofw, &gf117_gr }, + {} +}; + int gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gf117_gr, device, index, pgr); + return gf100_gr_new_(gf117_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c index 4197844870b3..7eac385ece97 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c @@ -210,8 +210,15 @@ gf119_gr = { } }; +static const struct gf100_gr_fwif +gf119_gr_fwif[] = { + { -1, gf100_gr_load, &gf119_gr }, + { -1, gf100_gr_nofw, &gf119_gr }, + {} +}; + int gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gf119_gr, device, index, pgr); + return gf100_gr_new_(gf119_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c index 477fee3e3715..89f51d76082b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c @@ -489,8 +489,15 @@ gk104_gr = { } }; +static const struct gf100_gr_fwif +gk104_gr_fwif[] = { + { -1, gf100_gr_load, &gk104_gr }, + { -1, gf100_gr_nofw, &gk104_gr }, + {} +}; + int gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gk104_gr, device, index, pgr); + return gf100_gr_new_(gk104_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c index 7cd628c84e07..735f05e54d62 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c @@ -385,8 +385,15 @@ gk110_gr = { } }; +static const struct gf100_gr_fwif +gk110_gr_fwif[] = { + { -1, gf100_gr_load, &gk110_gr }, + { -1, gf100_gr_nofw, &gk110_gr }, + {} +}; + int gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gk110_gr, device, index, pgr); + return gf100_gr_new_(gk110_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c index a38faa215635..adc971be8f3b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c @@ -136,8 +136,15 @@ gk110b_gr = { } }; +static const struct gf100_gr_fwif +gk110b_gr_fwif[] = { + { -1, gf100_gr_load, &gk110b_gr }, + { -1, gf100_gr_nofw, &gk110b_gr }, + {} +}; + int gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gk110b_gr, device, index, pgr); + return gf100_gr_new_(gk110b_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c index 58456660e603..aa0eff6795ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c @@ -194,8 +194,15 @@ gk208_gr = { } }; +static const struct gf100_gr_fwif +gk208_gr_fwif[] = { + { -1, gf100_gr_load, &gk208_gr }, + { -1, gf100_gr_nofw, &gk208_gr }, + {} +}; + int gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gk208_gr, device, index, pgr); + return gf100_gr_new_(gk208_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c 
b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c index 500cb08dd608..4209b24a46d7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c @@ -22,6 +22,7 @@ #include "gf100.h" #include "ctxgf100.h" +#include <core/firmware.h> #include <subdev/timer.h> #include <nvif/class.h> @@ -33,21 +34,22 @@ struct gk20a_fw_av }; int -gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name, - struct gf100_gr_pack **ppack) +gk20a_gr_av_to_init(struct gf100_gr *gr, const char *path, const char *name, + int ver, struct gf100_gr_pack **ppack) { - struct gf100_gr_fuc fuc; + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_blob blob; struct gf100_gr_init *init; struct gf100_gr_pack *pack; int nent; int ret; int i; - ret = gf100_gr_ctor_fw(gr, fw_name, &fuc); + ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob); if (ret) return ret; - nent = (fuc.size / sizeof(struct gk20a_fw_av)); + nent = (blob.size / sizeof(struct gk20a_fw_av)); pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1))); if (!pack) { ret = -ENOMEM; @@ -59,7 +61,7 @@ gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name, for (i = 0; i < nent; i++) { struct gf100_gr_init *ent = &init[i]; - struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i]; + struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i]; ent->addr = av->addr; ent->data = av->data; @@ -70,7 +72,7 @@ gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name, *ppack = pack; end: - gf100_gr_dtor_fw(&fuc); + nvkm_blob_dtor(&blob); return ret; } @@ -82,21 +84,22 @@ struct gk20a_fw_aiv }; int -gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name, - struct gf100_gr_pack **ppack) +gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *path, const char *name, + int ver, struct gf100_gr_pack **ppack) { - struct gf100_gr_fuc fuc; + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_blob blob; struct gf100_gr_init *init; struct gf100_gr_pack *pack; int nent; int ret; int i; - ret = gf100_gr_ctor_fw(gr, fw_name, &fuc); + ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob); if (ret) return ret; - nent = (fuc.size / sizeof(struct gk20a_fw_aiv)); + nent = (blob.size / sizeof(struct gk20a_fw_aiv)); pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1))); if (!pack) { ret = -ENOMEM; @@ -108,7 +111,7 @@ gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name, for (i = 0; i < nent; i++) { struct gf100_gr_init *ent = &init[i]; - struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc.data)[i]; + struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)blob.data)[i]; ent->addr = av->addr; ent->data = av->data; @@ -119,15 +122,16 @@ gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name, *ppack = pack; end: - gf100_gr_dtor_fw(&fuc); + nvkm_blob_dtor(&blob); return ret; } int -gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name, - struct gf100_gr_pack **ppack) +gk20a_gr_av_to_method(struct gf100_gr *gr, const char *path, const char *name, + int ver, struct gf100_gr_pack **ppack) { - struct gf100_gr_fuc fuc; + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_blob blob; struct gf100_gr_init *init; struct gf100_gr_pack *pack; /* We don't suppose we will initialize more than 16 classes here... 
*/ @@ -137,29 +141,30 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name, int ret; int i; - ret = gf100_gr_ctor_fw(gr, fw_name, &fuc); + ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob); if (ret) return ret; - nent = (fuc.size / sizeof(struct gk20a_fw_av)); + nent = (blob.size / sizeof(struct gk20a_fw_av)); - pack = vzalloc((sizeof(*pack) * max_classes) + - (sizeof(*init) * (nent + 1))); + pack = vzalloc((sizeof(*pack) * (max_classes + 1)) + + (sizeof(*init) * (nent + max_classes + 1))); if (!pack) { ret = -ENOMEM; goto end; } - init = (void *)(pack + max_classes); + init = (void *)(pack + max_classes + 1); - for (i = 0; i < nent; i++) { - struct gf100_gr_init *ent = &init[i]; - struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i]; + for (i = 0; i < nent; i++, init++) { + struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i]; u32 class = av->addr & 0xffff; u32 addr = (av->addr & 0xffff0000) >> 14; if (prevclass != class) { - pack[classidx].init = ent; + if (prevclass) /* Add terminator to the method list. */ + init++; + pack[classidx].init = init; pack[classidx].type = class; prevclass = class; if (++classidx >= max_classes) { @@ -169,16 +174,16 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name, } } - ent->addr = addr; - ent->data = av->data; - ent->count = 1; - ent->pitch = 1; + init->addr = addr; + init->data = av->data; + init->count = 1; + init->pitch = 1; } *ppack = pack; end: - gf100_gr_dtor_fw(&fuc); + nvkm_blob_dtor(&blob); return ret; } @@ -224,7 +229,7 @@ gk20a_gr_init(struct gf100_gr *gr) /* Clear SCC RAM */ nvkm_wr32(device, 0x40802c, 0x1); - gf100_gr_mmio(gr, gr->fuc_sw_nonctx); + gf100_gr_mmio(gr, gr->sw_nonctx); ret = gk20a_gr_wait_mem_scrubbing(gr); if (ret) @@ -303,40 +308,45 @@ gk20a_gr = { }; int -gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver) { - struct gf100_gr *gr; - int ret; + if (gk20a_gr_av_to_init(gr, path, "sw_nonctx", ver, &gr->sw_nonctx) || + gk20a_gr_aiv_to_init(gr, path, "sw_ctx", ver, &gr->sw_ctx) || + gk20a_gr_av_to_init(gr, path, "sw_bundle_init", ver, &gr->bundle) || + gk20a_gr_av_to_method(gr, path, "sw_method_init", ver, &gr->method)) + return -ENOENT; - if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) - return -ENOMEM; - *pgr = &gr->base; - - ret = gf100_gr_ctor(&gk20a_gr, device, index, gr); - if (ret) - return ret; + return 0; +} - if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) || - gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) || - gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) || - gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad)) - return -ENODEV; +static int +gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; - ret = gk20a_gr_av_to_init(gr, "sw_nonctx", &gr->fuc_sw_nonctx); - if (ret) - return ret; + if (nvkm_firmware_load_blob(subdev, "", "fecs_inst", ver, + &gr->fecs.inst) || + nvkm_firmware_load_blob(subdev, "", "fecs_data", ver, + &gr->fecs.data) || + nvkm_firmware_load_blob(subdev, "", "gpccs_inst", ver, + &gr->gpccs.inst) || + nvkm_firmware_load_blob(subdev, "", "gpccs_data", ver, + &gr->gpccs.data)) + return -ENOENT; - ret = gk20a_gr_aiv_to_init(gr, "sw_ctx", &gr->fuc_sw_ctx); - if (ret) - return ret; + gr->firmware = true; - ret = gk20a_gr_av_to_init(gr, "sw_bundle_init", &gr->fuc_bundle); - if (ret) - return ret; + return gk20a_gr_load_sw(gr, "", ver); +} - ret = gk20a_gr_av_to_method(gr, 
"sw_method_init", &gr->fuc_method); - if (ret) - return ret; +static const struct gf100_gr_fwif +gk20a_gr_fwif[] = { + { -1, gk20a_gr_load, &gk20a_gr }, + {} +}; - return 0; +int +gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +{ + return gf100_gr_new_(gk20a_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c index 92e31d397207..09bb78ba9d00 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c @@ -429,8 +429,15 @@ gm107_gr = { } }; +static const struct gf100_gr_fwif +gm107_gr_fwif[] = { + { -1, gf100_gr_load, &gm107_gr }, + { -1, gf100_gr_nofw, &gm107_gr }, + {} +}; + int gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gm107_gr, device, index, pgr); + return gf100_gr_new_(gm107_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c index eff30662b984..3d67cfb08395 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c @@ -24,14 +24,64 @@ #include "gf100.h" #include "ctxgf100.h" +#include <core/firmware.h> +#include <subdev/acr.h> #include <subdev/secboot.h> +#include <nvfw/flcn.h> + #include <nvif/class.h> /******************************************************************************* * PGRAPH engine/subdev functions ******************************************************************************/ +static void +gm200_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct flcn_bl_dmem_desc_v1 hdr; + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + hdr.code_dma_base = hdr.code_dma_base + adjust; + hdr.data_dma_base = hdr.data_dma_base + adjust; + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hdr); +} + +static void +gm200_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const u64 base = lsfw->offset.img + lsfw->app_start_offset; + const u64 code = base + lsfw->app_resident_code_offset; + const u64 data = base + lsfw->app_resident_data_offset; + const struct flcn_bl_dmem_desc_v1 hdr = { + .ctx_dma = FALCON_DMAIDX_UCODE, + .code_dma_base = code, + .non_sec_code_off = lsfw->app_resident_code_offset, + .non_sec_code_size = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = data, + .data_size = lsfw->app_resident_data_size, + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +const struct nvkm_acr_lsf_func +gm200_gr_gpccs_acr = { + .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD, + .bld_size = sizeof(struct flcn_bl_dmem_desc_v1), + .bld_write = gm200_gr_acr_bld_write, + .bld_patch = gm200_gr_acr_bld_patch, +}; + +const struct nvkm_acr_lsf_func +gm200_gr_fecs_acr = { + .bld_size = sizeof(struct flcn_bl_dmem_desc_v1), + .bld_write = gm200_gr_acr_bld_write, + .bld_patch = gm200_gr_acr_bld_patch, +}; + int gm200_gr_rops(struct gf100_gr *gr) { @@ -124,44 +174,6 @@ gm200_gr_oneinit_tiles(struct gf100_gr *gr) } } -int -gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device, - int index, struct nvkm_gr **pgr) -{ - struct gf100_gr *gr; - int ret; - - if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) - return -ENOMEM; - *pgr = &gr->base; - - ret = gf100_gr_ctor(func, device, index, gr); - if (ret) - return ret; - - /* Load firmwares for non-secure falcons */ - if 
(!nvkm_secboot_is_managed(device->secboot, - NVKM_SECBOOT_FALCON_FECS)) { - if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) || - (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d))) - return ret; - } - if (!nvkm_secboot_is_managed(device->secboot, - NVKM_SECBOOT_FALCON_GPCCS)) { - if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) || - (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad))) - return ret; - } - - if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) || - (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) || - (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) || - (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method))) - return ret; - - return 0; -} - static const struct gf100_gr_func gm200_gr = { .oneinit_tiles = gm200_gr_oneinit_tiles, @@ -198,7 +210,77 @@ gm200_gr = { }; int +gm200_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) +{ + int ret; + + ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev, + &gr->fecs.falcon, + NVKM_ACR_LSF_FECS, + "gr/fecs_", ver, fwif->fecs); + if (ret) + return ret; + + ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev, + &gr->gpccs.falcon, + NVKM_ACR_LSF_GPCCS, + "gr/gpccs_", ver, + fwif->gpccs); + if (ret) + return ret; + + gr->firmware = true; + + return gk20a_gr_load_sw(gr, "gr/", ver); +} + +MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gm200_gr_fwif[] = { + { 0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, + {} +}; + +int gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gm200_gr, device, index, 
pgr); + return gf100_gr_new_(gm200_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c index a667770ce3cb..09d8c5d5b000 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c @@ -22,10 +22,61 @@ #include "gf100.h" #include "ctxgf100.h" +#include <core/firmware.h> +#include <subdev/acr.h> #include <subdev/timer.h> +#include <nvfw/flcn.h> + #include <nvif/class.h> +void +gm20b_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct flcn_bl_dmem_desc hdr; + u64 addr; + + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8); + hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8); + hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8); + addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8); + hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8); + hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8); + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + + flcn_bl_dmem_desc_dump(&acr->subdev, &hdr); +} + +void +gm20b_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const u64 base = lsfw->offset.img + lsfw->app_start_offset; + const u64 code = (base + lsfw->app_resident_code_offset) >> 8; + const u64 data = (base + lsfw->app_resident_data_offset) >> 8; + const struct flcn_bl_dmem_desc hdr = { + .ctx_dma = FALCON_DMAIDX_UCODE, + .code_dma_base = lower_32_bits(code), + .non_sec_code_off = lsfw->app_resident_code_offset, + .non_sec_code_size = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = lower_32_bits(data), + .data_size = lsfw->app_resident_data_size, + .code_dma_base1 = upper_32_bits(code), + .data_dma_base1 = upper_32_bits(data), + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +const struct nvkm_acr_lsf_func +gm20b_gr_fecs_acr = { + .bld_size = sizeof(struct flcn_bl_dmem_desc), + .bld_write = gm20b_gr_acr_bld_write, + .bld_patch = gm20b_gr_acr_bld_patch, +}; + static void gm20b_gr_init_gpc_mmu(struct gf100_gr *gr) { @@ -33,7 +84,7 @@ gm20b_gr_init_gpc_mmu(struct gf100_gr *gr) u32 val; /* Bypass MMU check for non-secure boot */ - if (!device->secboot) { + if (!device->acr) { nvkm_wr32(device, 0x100ce4, 0xffffffff); if (nvkm_rd32(device, 0x100ce4) != 0xffffffff) @@ -85,8 +136,51 @@ gm20b_gr = { } }; +static int +gm20b_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + int ret; + + ret = nvkm_acr_lsfw_load_bl_inst_data_sig(subdev, &gr->fecs.falcon, + NVKM_ACR_LSF_FECS, + "gr/fecs_", ver, fwif->fecs); + if (ret) + return ret; + + + if (nvkm_firmware_load_blob(subdev, "gr/", "gpccs_inst", ver, + &gr->gpccs.inst) || + nvkm_firmware_load_blob(subdev, "gr/", "gpccs_data", ver, + &gr->gpccs.data)) + return -ENOENT; + + gr->firmware = true; + + return gk20a_gr_load_sw(gr, "gr/", ver); +} + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin"); 
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin"); +#endif + +static const struct gf100_gr_fwif +gm20b_gr_fwif[] = { + { 0, gm20b_gr_load, &gm20b_gr, &gm20b_gr_fecs_acr }, + {} +}; + int gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gm20b_gr, device, index, pgr); + return gf100_gr_new_(gm20b_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c index 9d0521ce309a..bd5d8cc66987 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c @@ -135,8 +135,27 @@ gp100_gr = { } }; +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gp100_gr_fwif[] = { + { 0, gm200_gr_load, &gp100_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, + {} +}; + int gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gp100_gr, device, index, pgr); + return gf100_gr_new_(gp100_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c index 37f7d739bf80..7baf67f743f4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c @@ -131,8 +131,27 @@ gp102_gr = { } }; +MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gp102_gr_fwif[] = { + { 0, gm200_gr_load, &gp102_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, + {} +}; + int gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gp102_gr, device, index, pgr); + return gf100_gr_new_(gp102_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c index 4573c914c021..d9b8ef875f8d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c @@ -59,8 +59,40 @@ gp104_gr = { } }; +MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin"); 
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gp104_gr_fwif[] = { + { 0, gm200_gr_load, &gp104_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, + {} +}; + int gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gp104_gr, device, index, pgr); + return gf100_gr_new_(gp104_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c index 812aba91653f..2b1ad5522184 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c @@ -26,7 +26,7 @@ #include <nvif/class.h> -static const struct gf100_gr_func +const struct gf100_gr_func gp107_gr = { .oneinit_tiles = gm200_gr_oneinit_tiles, .oneinit_sm_id = gm200_gr_oneinit_sm_id, @@ -61,8 +61,27 @@ gp107_gr = { } }; +MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gp107_gr_fwif[] = { + { 0, gm200_gr_load, &gp107_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, + {} +}; + int gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gp107_gr, device, index, pgr); + return gf100_gr_new_(gp107_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c new file mode 100644 index 000000000000..113e4c1ba9e8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c @@ -0,0 +1,97 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "gf100.h" + +#include <subdev/acr.h> + +#include <nvfw/flcn.h> + +static void +gp108_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct flcn_bl_dmem_desc_v2 hdr; + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + hdr.code_dma_base = hdr.code_dma_base + adjust; + hdr.data_dma_base = hdr.data_dma_base + adjust; + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr); +} + +static void +gp108_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const u64 base = lsfw->offset.img + lsfw->app_start_offset; + const u64 code = base + lsfw->app_resident_code_offset; + const u64 data = base + lsfw->app_resident_data_offset; + const struct flcn_bl_dmem_desc_v2 hdr = { + .ctx_dma = FALCON_DMAIDX_UCODE, + .code_dma_base = code, + .non_sec_code_off = lsfw->app_resident_code_offset, + .non_sec_code_size = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = data, + .data_size = lsfw->app_resident_data_size, + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +const struct nvkm_acr_lsf_func +gp108_gr_gpccs_acr = { + .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD, + .bld_size = sizeof(struct flcn_bl_dmem_desc_v2), + .bld_write = gp108_gr_acr_bld_write, + .bld_patch = gp108_gr_acr_bld_patch, +}; + +const struct nvkm_acr_lsf_func +gp108_gr_fecs_acr = { + .bld_size = sizeof(struct flcn_bl_dmem_desc_v2), + .bld_write = gp108_gr_acr_bld_write, + .bld_patch = gp108_gr_acr_bld_patch, +}; + +MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gp108_gr_fwif[] = { + { 0, gm200_gr_load, &gp107_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr }, + {} +}; + +int +gp108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +{ + return gf100_gr_new_(gp108_gr_fwif, device, index, pgr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c index 303dceddd4a8..a3db2a95ff9a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c @@ -23,8 +23,20 @@ #include "gf100.h" #include "ctxgf100.h" +#include <subdev/acr.h> + #include <nvif/class.h> +#include <nvfw/flcn.h> + +static const struct nvkm_acr_lsf_func +gp10b_gr_gpccs_acr = { + .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD, + .bld_size = sizeof(struct flcn_bl_dmem_desc), + .bld_write = gm20b_gr_acr_bld_write, + .bld_patch = 
gm20b_gr_acr_bld_patch, +}; + static const struct gf100_gr_func gp10b_gr = { .oneinit_tiles = gm200_gr_oneinit_tiles, @@ -59,8 +71,29 @@ gp10b_gr = { } }; +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) +MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin"); +#endif + +static const struct gf100_gr_fwif +gp10b_gr_fwif[] = { + { 0, gm200_gr_load, &gp10b_gr, &gm20b_gr_fecs_acr, &gp10b_gr_gpccs_acr }, + {} +}; + int gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gp10b_gr, device, index, pgr); + return gf100_gr_new_(gp10b_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c index 3b3327789ae7..70639d88b8e6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c @@ -45,7 +45,7 @@ gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm) nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr); } -static void +void gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc) { gv100_gr_trap_sm(gr, gpc, tpc, 0); @@ -59,7 +59,7 @@ gv100_gr_init_4188a4(struct gf100_gr *gr) nvkm_mask(device, 0x4188a4, 0x03000000, 0x03000000); } -static void +void gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc) { struct nvkm_device *device = gr->base.engine.subdev.device; @@ -71,14 +71,14 @@ gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc) } } -static void +void gv100_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc) { struct nvkm_device *device = gr->base.engine.subdev.device; nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0x403f0000); } -static void +void gv100_gr_init_419bd8(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; @@ -120,8 +120,27 @@ gv100_gr = { } }; +MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gv100_gr_fwif[] = { + { 0, gm200_gr_load, &gv100_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr }, + {} +}; + int gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gv100_gr, device, index, pgr); + return gf100_gr_new_(gv100_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c new file mode 100644 index 000000000000..454668b1cf54 --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c @@ -0,0 +1,177 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "gf100.h" +#include "ctxgf100.h" + +#include <nvif/class.h> + +static void +tu102_gr_init_fecs_exceptions(struct gf100_gr *gr) +{ + nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, 0x006f0002); +} + +static void +tu102_gr_init_fs(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + int sm; + + gp100_grctx_generate_smid_config(gr); + gk104_grctx_generate_gpc_tpc_nr(gr); + + for (sm = 0; sm < gr->sm_nr; sm++) { + nvkm_wr32(device, GPC_UNIT(gr->sm[sm].gpc, 0x0c10 + + gr->sm[sm].tpc * 4), sm); + } + + gm200_grctx_generate_dist_skip_table(gr); + gf100_gr_init_num_tpc_per_gpc(gr, true, true); +} + +static void +tu102_gr_init_zcull(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total); + const u8 tile_nr = ALIGN(gr->tpc_total, 64); + u8 bank[GPC_MAX] = {}, gpc, i, j; + u32 data; + + for (i = 0; i < tile_nr; i += 8) { + for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) { + data |= bank[gr->tile[i + j]] << (j * 4); + bank[gr->tile[i + j]]++; + } + nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data); + } + + for (gpc = 0; gpc < gr->gpc_nr; gpc++) { + nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), + gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]); + nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | + gr->tpc_total); + nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); + } + + nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); +} + +static void +tu102_gr_init_gpc_mmu(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + + nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf8001fff); + nvkm_wr32(device, 0x418890, 0x00000000); + nvkm_wr32(device, 0x418894, 0x00000000); + + nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8)); + nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc)); + nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4)); +} + +static const struct gf100_gr_func +tu102_gr = { + .oneinit_tiles = gm200_gr_oneinit_tiles, + .oneinit_sm_id = gm200_gr_oneinit_sm_id, + .init = gf100_gr_init, + .init_419bd8 = gv100_gr_init_419bd8, + .init_gpc_mmu = tu102_gr_init_gpc_mmu, + .init_vsc_stream_master = gk104_gr_init_vsc_stream_master, + .init_zcull = tu102_gr_init_zcull, + .init_num_active_ltcs = 
gf100_gr_init_num_active_ltcs, + .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, + .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask, + .init_fs = tu102_gr_init_fs, + .init_fecs_exceptions = tu102_gr_init_fecs_exceptions, + .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2, + .init_sked_hww_esr = gk104_gr_init_sked_hww_esr, + .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, + .init_504430 = gv100_gr_init_504430, + .init_shader_exceptions = gv100_gr_init_shader_exceptions, + .trap_mp = gv100_gr_trap_mp, + .rops = gm200_gr_rops, + .gpc_nr = 6, + .tpc_nr = 5, + .ppc_nr = 3, + .grctx = &tu102_grctx, + .zbc = &gp102_gr_zbc, + .sclass = { + { -1, -1, FERMI_TWOD_A }, + { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, + { -1, -1, TURING_A, &gf100_fermi }, + { -1, -1, TURING_COMPUTE_A }, + {} + } +}; + +MODULE_FIRMWARE("nvidia/tu102/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/tu104/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/tu106/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +tu102_gr_fwif[] = { + { 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr }, + {} +}; + +int +tu102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +{ + return gf100_gr_new_(tu102_gr_fwif, device, index, pgr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild index cdf631822282..9a0fd9812750 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild @@ -1,3 +1,3 @@ # SPDX-License-Identifier: MIT nvkm-y += nvkm/engine/nvdec/base.o -nvkm-y += nvkm/engine/nvdec/gp102.o +nvkm-y += nvkm/engine/nvdec/gm107.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c index 4a63581bdd5e..9b23c1b70ebf 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c @@ -20,48 +20,42 @@ * DEALINGS IN THE SOFTWARE. */ #include "priv.h" - -#include <subdev/top.h> -#include <engine/falcon.h> - -static int -nvkm_nvdec_oneinit(struct nvkm_engine *engine) -{ - struct nvkm_nvdec *nvdec = nvkm_nvdec(engine); - struct nvkm_subdev *subdev = &nvdec->engine.subdev; - - nvdec->addr = nvkm_top_addr(subdev->device, subdev->index); - if (!nvdec->addr) - return -EINVAL; - - /*XXX: fix naming of this when adding support for multiple-NVDEC */ - return nvkm_falcon_v1_new(subdev, "NVDEC", nvdec->addr, - &nvdec->falcon); -} +#include <core/firmware.h> static void * nvkm_nvdec_dtor(struct nvkm_engine *engine) { struct nvkm_nvdec *nvdec = nvkm_nvdec(engine); - nvkm_falcon_del(&nvdec->falcon); + nvkm_falcon_dtor(&nvdec->falcon); return nvdec; } static const struct nvkm_engine_func nvkm_nvdec = { .dtor = nvkm_nvdec_dtor, - .oneinit = nvkm_nvdec_oneinit, }; int -nvkm_nvdec_new_(struct nvkm_device *device, int index, - struct nvkm_nvdec **pnvdec) +nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device, + int index, struct nvkm_nvdec **pnvdec) { struct nvkm_nvdec *nvdec; + int ret; if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL))) return -ENOMEM; - return nvkm_engine_ctor(&nvkm_nvdec, device, index, true, - &nvdec->engine); + ret = nvkm_engine_ctor(&nvkm_nvdec, device, index, true, + &nvdec->engine); + if (ret) + return ret; + + fwif = nvkm_firmware_load(&nvdec->engine.subdev, fwif, "Nvdec", nvdec); + if (IS_ERR(fwif)) + return -ENODEV; + + nvdec->func = fwif->func; + + return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev, + nvkm_subdev_name[index], 0, &nvdec->falcon); }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c index fde6328c6d71..0ab27ab4d8ee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c @@ -19,12 +19,45 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ - #include "priv.h" +static const struct nvkm_falcon_func +gm107_nvdec_flcn = { + .debug = 0xd00, + .fbif = 0x600, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, +}; + +static const struct nvkm_nvdec_func +gm107_nvdec = { + .flcn = &gm107_nvdec_flcn, +}; + +static int +gm107_nvdec_nofw(struct nvkm_nvdec *nvdec, int ver, + const struct nvkm_nvdec_fwif *fwif) +{ + return 0; +} + +static const struct nvkm_nvdec_fwif +gm107_nvdec_fwif[] = { + { -1, gm107_nvdec_nofw, &gm107_nvdec }, + {} +}; + int -gp102_nvdec_new(struct nvkm_device *device, int index, +gm107_nvdec_new(struct nvkm_device *device, int index, struct nvkm_nvdec **pnvdec) { - return nvkm_nvdec_new_(device, index, pnvdec); + return nvkm_nvdec_new_(gm107_nvdec_fwif, device, index, pnvdec); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h index 57bfa3aa1835..e14da8b000d0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h @@ -3,5 +3,17 @@ #define __NVKM_NVDEC_PRIV_H__ #include <engine/nvdec.h> -int nvkm_nvdec_new_(struct nvkm_device *, int, struct nvkm_nvdec **); +struct nvkm_nvdec_func { + const struct nvkm_falcon_func *flcn; +}; + +struct nvkm_nvdec_fwif { + int version; + int (*load)(struct nvkm_nvdec *, int ver, + const struct nvkm_nvdec_fwif *); + const struct nvkm_nvdec_func *func; +}; + +int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, + struct nvkm_device *, int, struct nvkm_nvdec **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild index f316de8d45a8..75bf4436bf3f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild @@ -1,2 +1,3 @@ # SPDX-License-Identifier: MIT -#nvkm-y += nvkm/engine/nvenc/base.o +nvkm-y += nvkm/engine/nvenc/base.o +nvkm-y += nvkm/engine/nvenc/gm107.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c new file mode 100644 index 000000000000..484100e15668 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c @@ -0,0 +1,63 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include "priv.h" +#include <core/firmware.h> + +static void * +nvkm_nvenc_dtor(struct nvkm_engine *engine) +{ + struct nvkm_nvenc *nvenc = nvkm_nvenc(engine); + nvkm_falcon_dtor(&nvenc->falcon); + return nvenc; +} + +static const struct nvkm_engine_func +nvkm_nvenc = { + .dtor = nvkm_nvenc_dtor, +}; + +int +nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device, + int index, struct nvkm_nvenc **pnvenc) +{ + struct nvkm_nvenc *nvenc; + int ret; + + if (!(nvenc = *pnvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL))) + return -ENOMEM; + + ret = nvkm_engine_ctor(&nvkm_nvenc, device, index, true, + &nvenc->engine); + if (ret) + return ret; + + fwif = nvkm_firmware_load(&nvenc->engine.subdev, fwif, "Nvenc", nvenc); + if (IS_ERR(fwif)) + return -ENODEV; + + nvenc->func = fwif->func; + + return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev, + nvkm_subdev_name[index], 0, &nvenc->falcon); +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c new file mode 100644 index 000000000000..d249c8ffb2d5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c @@ -0,0 +1,63 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include "priv.h" + +static const struct nvkm_falcon_func +gm107_nvenc_flcn = { + .fbif = 0x800, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, +}; + +static const struct nvkm_nvenc_func +gm107_nvenc = { + .flcn = &gm107_nvenc_flcn, +}; + +static int +gm107_nvenc_nofw(struct nvkm_nvenc *nvenc, int ver, + const struct nvkm_nvenc_fwif *fwif) +{ + return 0; +} + +static const struct nvkm_nvenc_fwif +gm107_nvenc_fwif[] = { + { -1, gm107_nvenc_nofw, &gm107_nvenc }, + {} +}; + +int +gm107_nvenc_new(struct nvkm_device *device, int index, + struct nvkm_nvenc **pnvenc) +{ + return nvkm_nvenc_new_(gm107_nvenc_fwif, device, index, pnvenc); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h new file mode 100644 index 000000000000..100fa5ebbeef --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_NVENC_PRIV_H__ +#define __NVKM_NVENC_PRIV_H__ +#include <engine/nvenc.h> + +struct nvkm_nvenc_func { + const struct nvkm_falcon_func *flcn; +}; + +struct nvkm_nvenc_fwif { + int version; + int (*load)(struct nvkm_nvenc *, int ver, + const struct nvkm_nvenc_fwif *); + const struct nvkm_nvenc_func *func; +}; + +int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, + int, struct nvkm_nvenc **pnvenc); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild index 97c4696171f0..63cd2be3de08 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild @@ -1,4 +1,5 @@ # SPDX-License-Identifier: MIT nvkm-y += nvkm/engine/sec2/base.o nvkm-y += nvkm/engine/sec2/gp102.o +nvkm-y += nvkm/engine/sec2/gp108.o nvkm-y += nvkm/engine/sec2/tu102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c index 1b49e5b6717f..41318aa0d481 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c @@ -21,97 +21,98 @@ */ #include "priv.h" -#include <core/msgqueue.h> +#include <core/firmware.h> #include <subdev/top.h> -#include <engine/falcon.h> - -static void * -nvkm_sec2_dtor(struct nvkm_engine *engine) -{ - struct nvkm_sec2 *sec2 = nvkm_sec2(engine); - nvkm_msgqueue_del(&sec2->queue); - nvkm_falcon_del(&sec2->falcon); - return sec2; -} static void -nvkm_sec2_intr(struct nvkm_engine *engine) +nvkm_sec2_recv(struct work_struct *work) { - struct nvkm_sec2 *sec2 = nvkm_sec2(engine); - struct nvkm_subdev *subdev = &engine->subdev; - struct nvkm_device *device = subdev->device; - u32 disp = nvkm_rd32(device, sec2->addr + 0x01c); - u32 intr = nvkm_rd32(device, sec2->addr + 0x008) & disp & ~(disp >> 16); - - if (intr & 0x00000040) { - schedule_work(&sec2->work); - nvkm_wr32(device, sec2->addr + 0x004, 0x00000040); - intr &= ~0x00000040; - } + struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work); - if (intr) { - nvkm_error(subdev, "unhandled intr %08x\n", intr); - nvkm_wr32(device, sec2->addr + 0x004, intr); + if (!sec2->initmsg_received) { + int ret = 
sec2->func->initmsg(sec2); + if (ret) { + nvkm_error(&sec2->engine.subdev, + "error parsing init message: %d\n", ret); + return; + } + sec2->initmsg_received = true; } + + nvkm_falcon_msgq_recv(sec2->msgq); } static void -nvkm_sec2_recv(struct work_struct *work) +nvkm_sec2_intr(struct nvkm_engine *engine) { - struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work); - - if (!sec2->queue) { - nvkm_warn(&sec2->engine.subdev, - "recv function called while no firmware set!\n"); - return; - } - - nvkm_msgqueue_recv(sec2->queue); + struct nvkm_sec2 *sec2 = nvkm_sec2(engine); + sec2->func->intr(sec2); } - static int -nvkm_sec2_oneinit(struct nvkm_engine *engine) +nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend) { struct nvkm_sec2 *sec2 = nvkm_sec2(engine); - struct nvkm_subdev *subdev = &sec2->engine.subdev; - if (!sec2->addr) { - sec2->addr = nvkm_top_addr(subdev->device, subdev->index); - if (WARN_ON(!sec2->addr)) - return -EINVAL; + flush_work(&sec2->work); + + if (suspend) { + nvkm_falcon_cmdq_fini(sec2->cmdq); + sec2->initmsg_received = false; } - return nvkm_falcon_v1_new(subdev, "SEC2", sec2->addr, &sec2->falcon); + return 0; } -static int -nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend) +static void * +nvkm_sec2_dtor(struct nvkm_engine *engine) { struct nvkm_sec2 *sec2 = nvkm_sec2(engine); - flush_work(&sec2->work); - return 0; + nvkm_falcon_msgq_del(&sec2->msgq); + nvkm_falcon_cmdq_del(&sec2->cmdq); + nvkm_falcon_qmgr_del(&sec2->qmgr); + nvkm_falcon_dtor(&sec2->falcon); + return sec2; } static const struct nvkm_engine_func nvkm_sec2 = { .dtor = nvkm_sec2_dtor, - .oneinit = nvkm_sec2_oneinit, .fini = nvkm_sec2_fini, .intr = nvkm_sec2_intr, }; int -nvkm_sec2_new_(struct nvkm_device *device, int index, u32 addr, - struct nvkm_sec2 **psec2) +nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device, + int index, u32 addr, struct nvkm_sec2 **psec2) { struct nvkm_sec2 *sec2; + int ret; if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL))) return -ENOMEM; - sec2->addr = addr; - INIT_WORK(&sec2->work, nvkm_sec2_recv); - return nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine); + ret = nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine); + if (ret) + return ret; + + fwif = nvkm_firmware_load(&sec2->engine.subdev, fwif, "Sec2", sec2); + if (IS_ERR(fwif)) + return PTR_ERR(fwif); + + sec2->func = fwif->func; + + ret = nvkm_falcon_ctor(sec2->func->flcn, &sec2->engine.subdev, + nvkm_subdev_name[index], addr, &sec2->falcon); + if (ret) + return ret; + + if ((ret = nvkm_falcon_qmgr_new(&sec2->falcon, &sec2->qmgr)) || + (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) || + (ret = nvkm_falcon_msgq_new(sec2->qmgr, "msgq", &sec2->msgq))) + return ret; + + INIT_WORK(&sec2->work, nvkm_sec2_recv); + return 0; }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c index 858cf27fa010..368f2a0042ff 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c @@ -19,12 +19,316 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ - #include "priv.h" +#include <core/memory.h> +#include <subdev/acr.h> +#include <subdev/timer.h> + +#include <nvfw/flcn.h> +#include <nvfw/sec2.h> + +static int +gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nv_falcon_msg *hdr) +{ + struct nv_sec2_acr_bootstrap_falcon_msg *msg = + container_of(hdr, typeof(*msg), msg.hdr); + struct nvkm_subdev *subdev = priv; + const char *name = nvkm_acr_lsf_id(msg->falcon_id); + + if (msg->error_code) { + nvkm_error(subdev, "ACR_BOOTSTRAP_FALCON failed for " + "falcon %d [%s]: %08x\n", + msg->falcon_id, name, msg->error_code); + return -EINVAL; + } + + nvkm_debug(subdev, "%s booted\n", name); + return 0; +} + +static int +gp102_sec2_acr_bootstrap_falcon(struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id) +{ + struct nvkm_sec2 *sec2 = container_of(falcon, typeof(*sec2), falcon); + struct nv_sec2_acr_bootstrap_falcon_cmd cmd = { + .cmd.hdr.unit_id = sec2->func->unit_acr, + .cmd.hdr.size = sizeof(cmd), + .cmd.cmd_type = NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON, + .flags = NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES, + .falcon_id = id, + }; + + return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr, + gp102_sec2_acr_bootstrap_falcon_callback, + &sec2->engine.subdev, + msecs_to_jiffies(1000)); +} + +static int +gp102_sec2_acr_boot(struct nvkm_falcon *falcon) +{ + struct nv_sec2_args args = {}; + nvkm_falcon_load_dmem(falcon, &args, + falcon->func->emem_addr, sizeof(args), 0); + nvkm_falcon_start(falcon); + return 0; +} + +static void +gp102_sec2_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct loader_config_v1 hdr; + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + hdr.code_dma_base = hdr.code_dma_base + adjust; + hdr.data_dma_base = hdr.data_dma_base + adjust; + hdr.overlay_dma_base = hdr.overlay_dma_base + adjust; + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + loader_config_v1_dump(&acr->subdev, &hdr); +} + +static void +gp102_sec2_acr_bld_write(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const struct loader_config_v1 hdr = { + .dma_idx = FALCON_SEC2_DMAIDX_UCODE, + .code_dma_base = lsfw->offset.img + lsfw->app_start_offset, + .code_size_total = lsfw->app_size, + .code_size_to_load = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = lsfw->offset.img + lsfw->app_start_offset + + lsfw->app_resident_data_offset, + .data_size = lsfw->app_resident_data_size, + .overlay_dma_base = lsfw->offset.img + lsfw->app_start_offset, + .argc = 1, + .argv = lsfw->falcon->func->emem_addr, + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +static const struct nvkm_acr_lsf_func +gp102_sec2_acr_0 = { + .bld_size = sizeof(struct loader_config_v1), + .bld_write = gp102_sec2_acr_bld_write, + .bld_patch = gp102_sec2_acr_bld_patch, + .boot = gp102_sec2_acr_boot, + .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon, +}; + +int +gp102_sec2_initmsg(struct nvkm_sec2 *sec2) +{ + struct nv_sec2_init_msg msg; + int ret, i; + + ret = nvkm_falcon_msgq_recv_initmsg(sec2->msgq, &msg, sizeof(msg)); + if (ret) + return ret; + + if (msg.hdr.unit_id != NV_SEC2_UNIT_INIT || + msg.msg_type != NV_SEC2_INIT_MSG_INIT) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(msg.queue_info); i++) { + if (msg.queue_info[i].id == NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ) { + nvkm_falcon_msgq_init(sec2->msgq, + msg.queue_info[i].index, + msg.queue_info[i].offset, + msg.queue_info[i].size); + } else { + nvkm_falcon_cmdq_init(sec2->cmdq, + msg.queue_info[i].index, + msg.queue_info[i].offset, + msg.queue_info[i].size); + } 
+ } + + return 0; +} + +void +gp102_sec2_intr(struct nvkm_sec2 *sec2) +{ + struct nvkm_subdev *subdev = &sec2->engine.subdev; + struct nvkm_falcon *falcon = &sec2->falcon; + u32 disp = nvkm_falcon_rd32(falcon, 0x01c); + u32 intr = nvkm_falcon_rd32(falcon, 0x008) & disp & ~(disp >> 16); + + if (intr & 0x00000040) { + schedule_work(&sec2->work); + nvkm_falcon_wr32(falcon, 0x004, 0x00000040); + intr &= ~0x00000040; + } + + if (intr) { + nvkm_error(subdev, "unhandled intr %08x\n", intr); + nvkm_falcon_wr32(falcon, 0x004, intr); + } +} + +int +gp102_sec2_flcn_enable(struct nvkm_falcon *falcon) +{ + nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001); + udelay(10); + nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000); + return nvkm_falcon_v1_enable(falcon); +} + +void +gp102_sec2_flcn_bind_context(struct nvkm_falcon *falcon, + struct nvkm_memory *ctx) +{ + struct nvkm_device *device = falcon->owner->device; + + nvkm_falcon_v1_bind_context(falcon, ctx); + if (!ctx) + return; + + /* Not sure if this is a WAR for a HW issue, or some additional + * programming sequence that's needed to properly complete the + * context switch we trigger above. + * + * Fixes unreliability of booting the SEC2 RTOS on Quadro P620, + * particularly when resuming from suspend. + * + * Also removes the need for an odd workaround where we needed + * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before + * the SEC2 RTOS would begin executing. + */ + nvkm_msec(device, 10, + u32 irqstat = nvkm_falcon_rd32(falcon, 0x008); + u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc); + if ((irqstat & 0x00000008) && + (flcn0dc & 0x00007000) == 0x00005000) + break; + ); + + nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008); + nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002); + + nvkm_msec(device, 10, + u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc); + if ((flcn0dc & 0x00007000) == 0x00000000) + break; + ); +} + +static const struct nvkm_falcon_func +gp102_sec2_flcn = { + .debug = 0x408, + .fbif = 0x600, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .emem_addr = 0x01000000, + .bind_context = gp102_sec2_flcn_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = gp102_sec2_flcn_enable, + .disable = nvkm_falcon_v1_disable, + .cmdq = { 0xa00, 0xa04, 8 }, + .msgq = { 0xa30, 0xa34, 8 }, +}; + +const struct nvkm_sec2_func +gp102_sec2 = { + .flcn = &gp102_sec2_flcn, + .unit_acr = NV_SEC2_UNIT_ACR, + .intr = gp102_sec2_intr, + .initmsg = gp102_sec2_initmsg, +}; + +MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin"); + +static void +gp102_sec2_acr_bld_patch_1(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct flcn_bl_dmem_desc_v2 hdr; + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + hdr.code_dma_base = hdr.code_dma_base + adjust; + hdr.data_dma_base = hdr.data_dma_base + 
adjust; + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr); +} + +static void +gp102_sec2_acr_bld_write_1(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const struct flcn_bl_dmem_desc_v2 hdr = { + .ctx_dma = FALCON_SEC2_DMAIDX_UCODE, + .code_dma_base = lsfw->offset.img + lsfw->app_start_offset, + .non_sec_code_off = lsfw->app_resident_code_offset, + .non_sec_code_size = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = lsfw->offset.img + lsfw->app_start_offset + + lsfw->app_resident_data_offset, + .data_size = lsfw->app_resident_data_size, + .argc = 1, + .argv = lsfw->falcon->func->emem_addr, + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +const struct nvkm_acr_lsf_func +gp102_sec2_acr_1 = { + .bld_size = sizeof(struct flcn_bl_dmem_desc_v2), + .bld_write = gp102_sec2_acr_bld_write_1, + .bld_patch = gp102_sec2_acr_bld_patch_1, + .boot = gp102_sec2_acr_boot, + .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon, +}; + +int +gp102_sec2_load(struct nvkm_sec2 *sec2, int ver, + const struct nvkm_sec2_fwif *fwif) +{ + return nvkm_acr_lsfw_load_sig_image_desc_v1(&sec2->engine.subdev, + &sec2->falcon, + NVKM_ACR_LSF_SEC2, "sec2/", + ver, fwif->acr); +} + +MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin"); + +static const struct nvkm_sec2_fwif +gp102_sec2_fwif[] = { + { 1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 }, + { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 }, + {} +}; + int -gp102_sec2_new(struct nvkm_device *device, int index, - struct nvkm_sec2 **psec2) +gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2) { - return nvkm_sec2_new_(device, index, 0, psec2); + return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c index 8bdfb3e5cd1c..232a9d7c51e5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Copyright 2019 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -14,23 +14,26 @@ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. */ +#include "priv.h" +#include <subdev/acr.h> -#ifndef __NVKM_SECBOOT_ACR_R367_H__ -#define __NVKM_SECBOOT_ACR_R367_H__ +MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin"); -#include "acr_r352.h" +static const struct nvkm_sec2_fwif +gp108_sec2_fwif[] = { + { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 }, + {} +}; -void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *); - -struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *, - const struct nvkm_secboot *, - enum nvkm_secboot_falcon); -int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *); -int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *, - struct nvkm_gpuobj *, u64); -#endif +int +gp108_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2) +{ + return nvkm_sec2_new_(gp108_sec2_fwif, device, index, 0, psec2); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h index b331b00517e6..bb88117e018a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h @@ -3,7 +3,27 @@ #define __NVKM_SEC2_PRIV_H__ #include <engine/sec2.h> -#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine) +struct nvkm_sec2_func { + const struct nvkm_falcon_func *flcn; + u8 unit_acr; + void (*intr)(struct nvkm_sec2 *); + int (*initmsg)(struct nvkm_sec2 *); +}; -int nvkm_sec2_new_(struct nvkm_device *, int, u32 addr, struct nvkm_sec2 **); +void gp102_sec2_intr(struct nvkm_sec2 *); +int gp102_sec2_initmsg(struct nvkm_sec2 *); + +struct nvkm_sec2_fwif { + int version; + int (*load)(struct nvkm_sec2 *, int ver, const struct nvkm_sec2_fwif *); + const struct nvkm_sec2_func *func; + const struct nvkm_acr_lsf_func *acr; +}; + +int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *); +extern const struct nvkm_sec2_func gp102_sec2; +extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1; + +int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *, + int, u32 addr, struct nvkm_sec2 **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c index d655576164b1..b6ebd95c9ba1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c @@ -19,15 +19,54 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ - #include "priv.h" +#include <subdev/acr.h> + +static const struct nvkm_falcon_func +tu102_sec2_flcn = { + .debug = 0x408, + .fbif = 0x600, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .emem_addr = 0x01000000, + .bind_context = gp102_sec2_flcn_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, + .cmdq = { 0xc00, 0xc04, 8 }, + .msgq = { 0xc80, 0xc84, 8 }, +}; + +static const struct nvkm_sec2_func +tu102_sec2 = { + .flcn = &tu102_sec2_flcn, + .unit_acr = 0x07, + .intr = gp102_sec2_intr, + .initmsg = gp102_sec2_initmsg, +}; + +static int +tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver, + const struct nvkm_sec2_fwif *fwif) +{ + return 0; +} + +static const struct nvkm_sec2_fwif +tu102_sec2_fwif[] = { + { 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 }, + { -1, tu102_sec2_nofw, &tu102_sec2 } +}; int -tu102_sec2_new(struct nvkm_device *device, int index, - struct nvkm_sec2 **psec2) +tu102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2) { /* TOP info wasn't updated on Turing to reflect the PRI * address change for some reason. We override it here. */ - return nvkm_sec2_new_(device, index, 0x840000, psec2); + return nvkm_sec2_new_(tu102_sec2_fwif, device, index, 0x840000, psec2); } diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild index b5665ada850a..d79d783904ee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild @@ -1,6 +1,6 @@ # SPDX-License-Identifier: MIT nvkm-y += nvkm/falcon/base.o +nvkm-y += nvkm/falcon/cmdq.o +nvkm-y += nvkm/falcon/msgq.o +nvkm-y += nvkm/falcon/qmgr.o nvkm-y += nvkm/falcon/v1.o -nvkm-y += nvkm/falcon/msgqueue.o -nvkm-y += nvkm/falcon/msgqueue_0137c63d.o -nvkm-y += nvkm/falcon/msgqueue_0148cdec.o diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c index 366c87de6e72..c6a3448180d6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c @@ -22,6 +22,7 @@ #include "priv.h" #include <subdev/mc.h> +#include <subdev/top.h> void nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, @@ -134,6 +135,37 @@ nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask) return falcon->func->clear_interrupt(falcon, mask); } +static int +nvkm_falcon_oneinit(struct nvkm_falcon *falcon) +{ + const struct nvkm_falcon_func *func = falcon->func; + const struct nvkm_subdev *subdev = falcon->owner; + u32 reg; + + if (!falcon->addr) { + falcon->addr = nvkm_top_addr(subdev->device, subdev->index); + if (WARN_ON(!falcon->addr)) + return -ENODEV; + } + + reg = nvkm_falcon_rd32(falcon, 0x12c); + falcon->version = reg & 0xf; + falcon->secret = (reg >> 4) & 0x3; + falcon->code.ports = (reg >> 8) & 0xf; + falcon->data.ports = (reg >> 12) & 0xf; + + reg = nvkm_falcon_rd32(falcon, 0x108); + falcon->code.limit = (reg & 0x1ff) << 8; + falcon->data.limit = (reg & 0x3fe00) >> 1; + + if (func->debug) { + u32 val = nvkm_falcon_rd32(falcon, func->debug); + falcon->debug = (val >> 20) & 0x1; + } + + return 0; +} + void nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) { @@ -151,6 +183,8 @@ nvkm_falcon_put(struct nvkm_falcon *falcon, 
const struct nvkm_subdev *user) int nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) { + int ret = 0; + mutex_lock(&falcon->mutex); if (falcon->user) { nvkm_error(user, "%s falcon already acquired by %s!\n", @@ -160,70 +194,37 @@ nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) } nvkm_debug(user, "acquired %s falcon\n", falcon->name); + if (!falcon->oneinit) + ret = nvkm_falcon_oneinit(falcon); falcon->user = user; mutex_unlock(&falcon->mutex); - return 0; + return ret; } void +nvkm_falcon_dtor(struct nvkm_falcon *falcon) +{ +} + +int nvkm_falcon_ctor(const struct nvkm_falcon_func *func, struct nvkm_subdev *subdev, const char *name, u32 addr, struct nvkm_falcon *falcon) { - u32 debug_reg; - u32 reg; - falcon->func = func; falcon->owner = subdev; falcon->name = name; falcon->addr = addr; mutex_init(&falcon->mutex); mutex_init(&falcon->dmem_mutex); - - reg = nvkm_falcon_rd32(falcon, 0x12c); - falcon->version = reg & 0xf; - falcon->secret = (reg >> 4) & 0x3; - falcon->code.ports = (reg >> 8) & 0xf; - falcon->data.ports = (reg >> 12) & 0xf; - - reg = nvkm_falcon_rd32(falcon, 0x108); - falcon->code.limit = (reg & 0x1ff) << 8; - falcon->data.limit = (reg & 0x3fe00) >> 1; - - switch (subdev->index) { - case NVKM_ENGINE_GR: - debug_reg = 0x0; - break; - case NVKM_SUBDEV_PMU: - debug_reg = 0xc08; - break; - case NVKM_ENGINE_NVDEC0: - debug_reg = 0xd00; - break; - case NVKM_ENGINE_SEC2: - debug_reg = 0x408; - falcon->has_emem = true; - break; - case NVKM_SUBDEV_GSP: - debug_reg = 0x0; /*XXX*/ - break; - default: - nvkm_warn(subdev, "unsupported falcon %s!\n", - nvkm_subdev_name[subdev->index]); - debug_reg = 0; - break; - } - - if (debug_reg) { - u32 val = nvkm_falcon_rd32(falcon, debug_reg); - falcon->debug = (val >> 20) & 0x1; - } + return 0; } void nvkm_falcon_del(struct nvkm_falcon **pfalcon) { if (*pfalcon) { + nvkm_falcon_dtor(*pfalcon); kfree(*pfalcon); *pfalcon = NULL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c new file mode 100644 index 000000000000..40e3f3fc83ef --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "qmgr.h" + +static bool +nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind) +{ + u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg); + u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg); + u32 free; + + size = ALIGN(size, QUEUE_ALIGNMENT); + + if (head >= tail) { + free = cmdq->offset + cmdq->size - head; + free -= HDR_SIZE; + + if (size > free) { + *rewind = true; + head = cmdq->offset; + } + } + + if (head < tail) + free = tail - head - 1; + + return size <= free; +} + +static void +nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size) +{ + struct nvkm_falcon *falcon = cmdq->qmgr->falcon; + nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0); + cmdq->position += ALIGN(size, QUEUE_ALIGNMENT); +} + +static void +nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq) +{ + struct nv_falcon_cmd cmd; + + cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND; + cmd.size = sizeof(cmd); + nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size); + + cmdq->position = cmdq->offset; +} + +static int +nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size) +{ + struct nvkm_falcon *falcon = cmdq->qmgr->falcon; + bool rewind = false; + + mutex_lock(&cmdq->mutex); + + if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) { + FLCNQ_DBG(cmdq, "queue full"); + mutex_unlock(&cmdq->mutex); + return -EAGAIN; + } + + cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg); + + if (rewind) + nvkm_falcon_cmdq_rewind(cmdq); + + return 0; +} + +static void +nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq) +{ + nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position); + mutex_unlock(&cmdq->mutex); +} + +static int +nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd) +{ + static unsigned timeout = 2000; + unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout); + int ret = -EAGAIN; + + while (ret == -EAGAIN && time_before(jiffies, end_jiffies)) + ret = nvkm_falcon_cmdq_open(cmdq, cmd->size); + if (ret) { + FLCNQ_ERR(cmdq, "timeout waiting for queue space"); + return ret; + } + + nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size); + nvkm_falcon_cmdq_close(cmdq); + return ret; +} + +/* specifies that we want to know the command status in the answer message */ +#define CMD_FLAGS_STATUS BIT(0) +/* specifies that we want an interrupt when the answer message is queued */ +#define CMD_FLAGS_INTR BIT(1) + +int +nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd, + nvkm_falcon_qmgr_callback cb, void *priv, + unsigned long timeout) +{ + struct nvkm_falcon_qmgr_seq *seq; + int ret; + + if (!wait_for_completion_timeout(&cmdq->ready, + msecs_to_jiffies(1000))) { + FLCNQ_ERR(cmdq, "timeout waiting for queue ready"); + return -ETIMEDOUT; + } + + seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr); + if (IS_ERR(seq)) + return PTR_ERR(seq); + + cmd->seq_id = seq->id; + cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR; + + seq->state = SEQ_STATE_USED; + seq->async = !timeout; + seq->callback = cb; + seq->priv = priv; + + ret = nvkm_falcon_cmdq_write(cmdq, cmd); + if (ret) { + seq->state = SEQ_STATE_PENDING; + nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq); + return ret; + } + + if (!seq->async) { + if (!wait_for_completion_timeout(&seq->done, timeout)) { + FLCNQ_ERR(cmdq, "timeout waiting for reply"); + return -ETIMEDOUT; + } + ret = seq->result; + nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq); + } + + return ret; +} + +void +nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq 
*cmdq) +{ + reinit_completion(&cmdq->ready); +} + +void +nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq, + u32 index, u32 offset, u32 size) +{ + const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func; + + cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride; + cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride; + cmdq->offset = offset; + cmdq->size = size; + complete_all(&cmdq->ready); + + FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x", + index, cmdq->offset, cmdq->size); +} + +void +nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq) +{ + struct nvkm_falcon_cmdq *cmdq = *pcmdq; + if (cmdq) { + kfree(*pcmdq); + *pcmdq = NULL; + } +} + +int +nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name, + struct nvkm_falcon_cmdq **pcmdq) +{ + struct nvkm_falcon_cmdq *cmdq = *pcmdq; + + if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL))) + return -ENOMEM; + + cmdq->qmgr = qmgr; + cmdq->name = name; + mutex_init(&cmdq->mutex); + init_completion(&cmdq->ready); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c new file mode 100644 index 000000000000..cbfe09a561a1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "qmgr.h" + +static void +nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq) +{ + mutex_lock(&msgq->mutex); + msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg); +} + +static void +nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit) +{ + struct nvkm_falcon *falcon = msgq->qmgr->falcon; + + if (commit) + nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position); + + mutex_unlock(&msgq->mutex); +} + +static bool +nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq) +{ + u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg); + u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg); + return head == tail; +} + +static int +nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size) +{ + struct nvkm_falcon *falcon = msgq->qmgr->falcon; + u32 head, tail, available; + + head = nvkm_falcon_rd32(falcon, msgq->head_reg); + /* has the buffer looped? 
*/ + if (head < msgq->position) + msgq->position = msgq->offset; + + tail = msgq->position; + + available = head - tail; + if (size > available) { + FLCNQ_ERR(msgq, "requested %d bytes, but only %d available", + size, available); + return -EINVAL; + } + + nvkm_falcon_read_dmem(falcon, tail, size, 0, data); + msgq->position += ALIGN(size, QUEUE_ALIGNMENT); + return 0; +} + +static int +nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr) +{ + int ret = 0; + + nvkm_falcon_msgq_open(msgq); + + if (nvkm_falcon_msgq_empty(msgq)) + goto close; + + ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE); + if (ret) { + FLCNQ_ERR(msgq, "failed to read message header"); + goto close; + } + + if (hdr->size > MSG_BUF_SIZE) { + FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size); + ret = -ENOSPC; + goto close; + } + + if (hdr->size > HDR_SIZE) { + u32 read_size = hdr->size - HDR_SIZE; + + ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size); + if (ret) { + FLCNQ_ERR(msgq, "failed to read message data"); + goto close; + } + } + + ret = 1; +close: + nvkm_falcon_msgq_close(msgq, (ret >= 0)); + return ret; +} + +static int +nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr) +{ + struct nvkm_falcon_qmgr_seq *seq; + + seq = &msgq->qmgr->seq.id[hdr->seq_id]; + if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) { + FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id); + return -EINVAL; + } + + if (seq->state == SEQ_STATE_USED) { + if (seq->callback) + seq->result = seq->callback(seq->priv, hdr); + } + + if (seq->async) { + nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq); + return 0; + } + + complete_all(&seq->done); + return 0; +} + +void +nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq) +{ + /* + * We are invoked from a worker thread, so normally we have plenty of + * stack space to work with. 
+ */ + u8 msg_buffer[MSG_BUF_SIZE]; + struct nv_falcon_msg *hdr = (void *)msg_buffer; + + while (nvkm_falcon_msgq_read(msgq, hdr) > 0) + nvkm_falcon_msgq_exec(msgq, hdr); +} + +int +nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq, + void *data, u32 size) +{ + struct nvkm_falcon *falcon = msgq->qmgr->falcon; + struct nv_falcon_msg *hdr = data; + int ret; + + msgq->head_reg = falcon->func->msgq.head; + msgq->tail_reg = falcon->func->msgq.tail; + msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail); + + nvkm_falcon_msgq_open(msgq); + ret = nvkm_falcon_msgq_pop(msgq, data, size); + if (ret == 0 && hdr->size != size) { + FLCN_ERR(falcon, "unexpected init message size %d vs %d", + hdr->size, size); + ret = -EINVAL; + } + nvkm_falcon_msgq_close(msgq, ret == 0); + return ret; +} + +void +nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq, + u32 index, u32 offset, u32 size) +{ + const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func; + + msgq->head_reg = func->msgq.head + index * func->msgq.stride; + msgq->tail_reg = func->msgq.tail + index * func->msgq.stride; + msgq->offset = offset; + + FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x", + index, msgq->offset, size); +} + +void +nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq) +{ + struct nvkm_falcon_msgq *msgq = *pmsgq; + if (msgq) { + kfree(*pmsgq); + *pmsgq = NULL; + } +} + +int +nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name, + struct nvkm_falcon_msgq **pmsgq) +{ + struct nvkm_falcon_msgq *msgq = *pmsgq; + + if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL))) + return -ENOMEM; + + msgq->qmgr = qmgr; + msgq->name = name; + mutex_init(&msgq->mutex); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c deleted file mode 100644 index a8bee1e046aa..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c +++ /dev/null @@ -1,577 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include "msgqueue.h" -#include <engine/falcon.h> - -#include <subdev/secboot.h> - - -#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr) -#define QUEUE_ALIGNMENT 4 -/* max size of the messages we can receive */ -#define MSG_BUF_SIZE 128 - -static int -msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue) -{ - struct nvkm_falcon *falcon = priv->falcon; - - mutex_lock(&queue->mutex); - - queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg); - - return 0; -} - -static void -msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - bool commit) -{ - struct nvkm_falcon *falcon = priv->falcon; - - if (commit) - nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position); - - mutex_unlock(&queue->mutex); -} - -static bool -msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue) -{ - struct nvkm_falcon *falcon = priv->falcon; - u32 head, tail; - - head = nvkm_falcon_rd32(falcon, queue->head_reg); - tail = nvkm_falcon_rd32(falcon, queue->tail_reg); - - return head == tail; -} - -static int -msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - void *data, u32 size) -{ - struct nvkm_falcon *falcon = priv->falcon; - const struct nvkm_subdev *subdev = priv->falcon->owner; - u32 head, tail, available; - - head = nvkm_falcon_rd32(falcon, queue->head_reg); - /* has the buffer looped? */ - if (head < queue->position) - queue->position = queue->offset; - - tail = queue->position; - - available = head - tail; - - if (available == 0) { - nvkm_warn(subdev, "no message data available\n"); - return 0; - } - - if (size > available) { - nvkm_warn(subdev, "message data smaller than read request\n"); - size = available; - } - - nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data); - queue->position += ALIGN(size, QUEUE_ALIGNMENT); - - return size; -} - -static int -msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - struct nvkm_msgqueue_hdr *hdr) -{ - const struct nvkm_subdev *subdev = priv->falcon->owner; - int err; - - err = msg_queue_open(priv, queue); - if (err) { - nvkm_error(subdev, "fail to open queue %d\n", queue->index); - return err; - } - - if (msg_queue_empty(priv, queue)) { - err = 0; - goto close; - } - - err = msg_queue_pop(priv, queue, hdr, HDR_SIZE); - if (err >= 0 && err != HDR_SIZE) - err = -EINVAL; - if (err < 0) { - nvkm_error(subdev, "failed to read message header: %d\n", err); - goto close; - } - - if (hdr->size > MSG_BUF_SIZE) { - nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size); - err = -ENOSPC; - goto close; - } - - if (hdr->size > HDR_SIZE) { - u32 read_size = hdr->size - HDR_SIZE; - - err = msg_queue_pop(priv, queue, (hdr + 1), read_size); - if (err >= 0 && err != read_size) - err = -EINVAL; - if (err < 0) { - nvkm_error(subdev, "failed to read message: %d\n", err); - goto close; - } - } - -close: - msg_queue_close(priv, queue, (err >= 0)); - - return err; -} - -static bool -cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - u32 size, bool *rewind) -{ - struct nvkm_falcon *falcon = priv->falcon; - u32 head, tail, free; - - size = ALIGN(size, QUEUE_ALIGNMENT); - - head = nvkm_falcon_rd32(falcon, queue->head_reg); - tail = nvkm_falcon_rd32(falcon, queue->tail_reg); - - if (head >= tail) { - free = queue->offset + queue->size - head; - free -= HDR_SIZE; - - if (size > free) { - *rewind = true; - head = queue->offset; - } - } - - if (head < tail) - free = tail - head - 1; - - return size <= free; -} - 
-static int -cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - void *data, u32 size) -{ - nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0); - queue->position += ALIGN(size, QUEUE_ALIGNMENT); - - return 0; -} - -/* REWIND unit is always 0x00 */ -#define MSGQUEUE_UNIT_REWIND 0x00 - -static void -cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue) -{ - const struct nvkm_subdev *subdev = priv->falcon->owner; - struct nvkm_msgqueue_hdr cmd; - int err; - - cmd.unit_id = MSGQUEUE_UNIT_REWIND; - cmd.size = sizeof(cmd); - err = cmd_queue_push(priv, queue, &cmd, cmd.size); - if (err) - nvkm_error(subdev, "queue %d rewind failed\n", queue->index); - else - nvkm_error(subdev, "queue %d rewinded\n", queue->index); - - queue->position = queue->offset; -} - -static int -cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - u32 size) -{ - struct nvkm_falcon *falcon = priv->falcon; - const struct nvkm_subdev *subdev = priv->falcon->owner; - bool rewind = false; - - mutex_lock(&queue->mutex); - - if (!cmd_queue_has_room(priv, queue, size, &rewind)) { - nvkm_error(subdev, "queue full\n"); - mutex_unlock(&queue->mutex); - return -EAGAIN; - } - - queue->position = nvkm_falcon_rd32(falcon, queue->head_reg); - - if (rewind) - cmd_queue_rewind(priv, queue); - - return 0; -} - -static void -cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - bool commit) -{ - struct nvkm_falcon *falcon = priv->falcon; - - if (commit) - nvkm_falcon_wr32(falcon, queue->head_reg, queue->position); - - mutex_unlock(&queue->mutex); -} - -static int -cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd, - struct nvkm_msgqueue_queue *queue) -{ - const struct nvkm_subdev *subdev = priv->falcon->owner; - static unsigned timeout = 2000; - unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout); - int ret = -EAGAIN; - bool commit = true; - - while (ret == -EAGAIN && time_before(jiffies, end_jiffies)) - ret = cmd_queue_open(priv, queue, cmd->size); - if (ret) { - nvkm_error(subdev, "pmu_queue_open_write failed\n"); - return ret; - } - - ret = cmd_queue_push(priv, queue, cmd, cmd->size); - if (ret) { - nvkm_error(subdev, "pmu_queue_push failed\n"); - commit = false; - } - - cmd_queue_close(priv, queue, commit); - - return ret; -} - -static struct nvkm_msgqueue_seq * -msgqueue_seq_acquire(struct nvkm_msgqueue *priv) -{ - const struct nvkm_subdev *subdev = priv->falcon->owner; - struct nvkm_msgqueue_seq *seq; - u32 index; - - mutex_lock(&priv->seq_lock); - - index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES); - - if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) { - nvkm_error(subdev, "no free sequence available\n"); - mutex_unlock(&priv->seq_lock); - return ERR_PTR(-EAGAIN); - } - - set_bit(index, priv->seq_tbl); - - mutex_unlock(&priv->seq_lock); - - seq = &priv->seq[index]; - seq->state = SEQ_STATE_PENDING; - - return seq; -} - -static void -msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq) -{ - /* no need to acquire seq_lock since clear_bit is atomic */ - seq->state = SEQ_STATE_FREE; - seq->callback = NULL; - seq->completion = NULL; - clear_bit(seq->id, priv->seq_tbl); -} - -/* specifies that we want to know the command status in the answer message */ -#define CMD_FLAGS_STATUS BIT(0) -/* specifies that we want an interrupt when the answer message is queued */ -#define CMD_FLAGS_INTR BIT(1) - -int -nvkm_msgqueue_post(struct nvkm_msgqueue *priv, 
enum msgqueue_msg_priority prio, - struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb, - struct completion *completion, bool wait_init) -{ - struct nvkm_msgqueue_seq *seq; - struct nvkm_msgqueue_queue *queue; - int ret; - - if (wait_init && !wait_for_completion_timeout(&priv->init_done, - msecs_to_jiffies(1000))) - return -ETIMEDOUT; - - queue = priv->func->cmd_queue(priv, prio); - if (IS_ERR(queue)) - return PTR_ERR(queue); - - seq = msgqueue_seq_acquire(priv); - if (IS_ERR(seq)) - return PTR_ERR(seq); - - cmd->seq_id = seq->id; - cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR; - - seq->callback = cb; - seq->state = SEQ_STATE_USED; - seq->completion = completion; - - ret = cmd_write(priv, cmd, queue); - if (ret) { - seq->state = SEQ_STATE_PENDING; - msgqueue_seq_release(priv, seq); - } - - return ret; -} - -static int -msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr) -{ - const struct nvkm_subdev *subdev = priv->falcon->owner; - struct nvkm_msgqueue_seq *seq; - - seq = &priv->seq[hdr->seq_id]; - if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) { - nvkm_error(subdev, "msg for unknown sequence %d", seq->id); - return -EINVAL; - } - - if (seq->state == SEQ_STATE_USED) { - if (seq->callback) - seq->callback(priv, hdr); - } - - if (seq->completion) - complete(seq->completion); - - msgqueue_seq_release(priv, seq); - - return 0; -} - -static int -msgqueue_handle_init_msg(struct nvkm_msgqueue *priv, - struct nvkm_msgqueue_hdr *hdr) -{ - struct nvkm_falcon *falcon = priv->falcon; - const struct nvkm_subdev *subdev = falcon->owner; - u32 tail; - u32 tail_reg; - int ret; - - /* - * Of course the message queue registers vary depending on the falcon - * used... - */ - switch (falcon->owner->index) { - case NVKM_SUBDEV_PMU: - tail_reg = 0x4cc; - break; - case NVKM_ENGINE_SEC2: - tail_reg = 0xa34; - break; - default: - nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n", - nvkm_subdev_name[falcon->owner->index]); - return -EINVAL; - } - - /* - * Read the message - queues are not initialized yet so we cannot rely - * on msg_queue_read() - */ - tail = nvkm_falcon_rd32(falcon, tail_reg); - nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr); - - if (hdr->size > MSG_BUF_SIZE) { - nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size); - return -ENOSPC; - } - - nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0, - (hdr + 1)); - - tail += ALIGN(hdr->size, QUEUE_ALIGNMENT); - nvkm_falcon_wr32(falcon, tail_reg, tail); - - ret = priv->func->init_func->init_callback(priv, hdr); - if (ret) - return ret; - - return 0; -} - -void -nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv, - struct nvkm_msgqueue_queue *queue) -{ - /* - * We are invoked from a worker thread, so normally we have plenty of - * stack space to work with. 
- */ - u8 msg_buffer[MSG_BUF_SIZE]; - struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer; - int ret; - - /* the first message we receive must be the init message */ - if ((!priv->init_msg_received)) { - ret = msgqueue_handle_init_msg(priv, hdr); - if (!ret) - priv->init_msg_received = true; - } else { - while (msg_queue_read(priv, queue, hdr) > 0) - msgqueue_msg_handle(priv, hdr); - } -} - -void -nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf) -{ - if (!queue || !queue->func || !queue->func->init_func) - return; - - queue->func->init_func->gen_cmdline(queue, buf); -} - -int -nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue, - unsigned long falcon_mask) -{ - unsigned long falcon; - - if (!queue || !queue->func->acr_func) - return -ENODEV; - - /* Does the firmware support booting multiple falcons? */ - if (queue->func->acr_func->boot_multiple_falcons) - return queue->func->acr_func->boot_multiple_falcons(queue, - falcon_mask); - - /* Else boot all requested falcons individually */ - if (!queue->func->acr_func->boot_falcon) - return -ENODEV; - - for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) { - int ret = queue->func->acr_func->boot_falcon(queue, falcon); - - if (ret) - return ret; - } - - return 0; -} - -int -nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon, - const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue) -{ - const struct nvkm_subdev *subdev = falcon->owner; - int ret = -EINVAL; - - switch (version) { - case 0x0137c63d: - ret = msgqueue_0137c63d_new(falcon, sb, queue); - break; - case 0x0137bca5: - ret = msgqueue_0137bca5_new(falcon, sb, queue); - break; - case 0x0148cdec: - case 0x015ccf3e: - case 0x0167d263: - ret = msgqueue_0148cdec_new(falcon, sb, queue); - break; - default: - nvkm_error(subdev, "unhandled firmware version 0x%08x\n", - version); - break; - } - - if (ret == 0) { - nvkm_debug(subdev, "firmware version: 0x%08x\n", version); - (*queue)->fw_version = version; - } - - return ret; -} - -void -nvkm_msgqueue_del(struct nvkm_msgqueue **queue) -{ - if (*queue) { - (*queue)->func->dtor(*queue); - *queue = NULL; - } -} - -void -nvkm_msgqueue_recv(struct nvkm_msgqueue *queue) -{ - if (!queue->func || !queue->func->recv) { - const struct nvkm_subdev *subdev = queue->falcon->owner; - - nvkm_warn(subdev, "missing msgqueue recv function\n"); - return; - } - - queue->func->recv(queue); -} - -int -nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue) -{ - /* firmware not set yet... */ - if (!queue) - return 0; - - queue->init_msg_received = false; - reinit_completion(&queue->init_done); - - return 0; -} - -void -nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func, - struct nvkm_falcon *falcon, - struct nvkm_msgqueue *queue) -{ - int i; - - queue->func = func; - queue->falcon = falcon; - mutex_init(&queue->seq_lock); - for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++) - queue->seq[i].id = i; - - init_completion(&queue->init_done); - - -} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h deleted file mode 100644 index 13b54f8d8e04..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef __NVKM_CORE_FALCON_MSGQUEUE_H -#define __NVKM_CORE_FALCON_MSGQUEUE_H - -#include <core/msgqueue.h> - -/* - * The struct nvkm_msgqueue (named so for lack of better candidate) manages - * a firmware (typically, NVIDIA signed firmware) running under a given falcon. - * - * Such firmwares expect to receive commands (through one or several command - * queues) and will reply to such command by sending messages (using one - * message queue). - * - * Each firmware can support one or several units - ACR for managing secure - * falcons, PMU for power management, etc. A unit can be seen as a class to - * which command can be sent. - * - * One usage example would be to send a command to the SEC falcon to ask it to - * reset a secure falcon. The SEC falcon will receive the command, process it, - * and send a message to signal success or failure. Only when the corresponding - * message is received can the requester assume the request has been processed. - * - * Since we expect many variations between the firmwares NVIDIA will release - * across GPU generations, this library is built in a very modular way. Message - * formats and queues details (such as number of usage) are left to - * specializations of struct nvkm_msgqueue, while the functions in msgqueue.c - * take care of posting commands and processing messages in a fashion that is - * universal. - * - */ - -enum msgqueue_msg_priority { - MSGQUEUE_MSG_PRIORITY_HIGH, - MSGQUEUE_MSG_PRIORITY_LOW, -}; - -/** - * struct nvkm_msgqueue_hdr - header for all commands/messages - * @unit_id: id of firmware using receiving the command/sending the message - * @size: total size of command/message - * @ctrl_flags: type of command/message - * @seq_id: used to match a message from its corresponding command - */ -struct nvkm_msgqueue_hdr { - u8 unit_id; - u8 size; - u8 ctrl_flags; - u8 seq_id; -}; - -/** - * struct nvkm_msgqueue_msg - base message. - * - * This is just a header and a message (or command) type. Useful when - * building command-specific structures. 
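As an illustration of the command-specific structures mentioned above, the ACR bootstrap command used later in this patch embeds the common header like so (the struct name is added here for clarity; in the driver it is an anonymous on-stack struct):

        struct acr_cmd_bootstrap_falcon {
                struct nvkm_msgqueue_hdr hdr;   /* unit_id, size, ctrl_flags, seq_id */
                u8  cmd_type;                   /* e.g. ACR_CMD_BOOTSTRAP_FALCON */
                u32 flags;                      /* ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_* */
                u32 falcon_id;                  /* falcon to bootstrap */
        };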
- */ -struct nvkm_msgqueue_msg { - struct nvkm_msgqueue_hdr hdr; - u8 msg_type; -}; - -struct nvkm_msgqueue; -typedef void -(*nvkm_msgqueue_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *); - -/** - * struct nvkm_msgqueue_init_func - msgqueue functions related to initialization - * - * @gen_cmdline: build the commandline into a pre-allocated buffer - * @init_callback: called to process the init message - */ -struct nvkm_msgqueue_init_func { - void (*gen_cmdline)(struct nvkm_msgqueue *, void *); - int (*init_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *); -}; - -/** - * struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR - * - * @boot_falcon: build and send the command to reset a given falcon - * @boot_multiple_falcons: build and send the command to reset several falcons - */ -struct nvkm_msgqueue_acr_func { - int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon); - int (*boot_multiple_falcons)(struct nvkm_msgqueue *, unsigned long); -}; - -struct nvkm_msgqueue_func { - const struct nvkm_msgqueue_init_func *init_func; - const struct nvkm_msgqueue_acr_func *acr_func; - void (*dtor)(struct nvkm_msgqueue *); - struct nvkm_msgqueue_queue *(*cmd_queue)(struct nvkm_msgqueue *, - enum msgqueue_msg_priority); - void (*recv)(struct nvkm_msgqueue *queue); -}; - -/** - * struct nvkm_msgqueue_queue - information about a command or message queue - * - * The number of queues is firmware-dependent. All queues must have their - * information filled by the init message handler. - * - * @mutex_lock: to be acquired when the queue is being used - * @index: physical queue index - * @offset: DMEM offset where this queue begins - * @size: size allocated to this queue in DMEM (in bytes) - * @position: current write position - * @head_reg: address of the HEAD register for this queue - * @tail_reg: address of the TAIL register for this queue - */ -struct nvkm_msgqueue_queue { - struct mutex mutex; - u32 index; - u32 offset; - u32 size; - u32 position; - - u32 head_reg; - u32 tail_reg; -}; - -/** - * struct nvkm_msgqueue_seq - keep track of ongoing commands - * - * Every time a command is sent, a sequence is assigned to it so the - * corresponding message can be matched. Upon receiving the message, a callback - * can be called and/or a completion signaled. - * - * @id: sequence ID - * @state: current state - * @callback: callback to call upon receiving matching message - * @completion: completion to signal after callback is called - */ -struct nvkm_msgqueue_seq { - u16 id; - enum { - SEQ_STATE_FREE = 0, - SEQ_STATE_PENDING, - SEQ_STATE_USED, - SEQ_STATE_CANCELLED - } state; - nvkm_msgqueue_callback callback; - struct completion *completion; -}; - -/* - * We can have an arbitrary number of sequences, but realistically we will - * probably not use that much simultaneously. 
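A condensed sketch of the sequence lifecycle described above, based on nvkm_msgqueue_post() and msgqueue_msg_handle() removed earlier in this patch (locking and error handling omitted):

        /* sending side: tag the command with a free sequence */
        seq = msgqueue_seq_acquire(priv);
        cmd->seq_id = seq->id;
        cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
        seq->callback = cb;
        seq->state = SEQ_STATE_USED;
        seq->completion = completion;
        cmd_write(priv, cmd, queue);

        /* receiving side: match the reply to its command by seq_id */
        seq = &priv->seq[hdr->seq_id];
        if (seq->state == SEQ_STATE_USED && seq->callback)
                seq->callback(priv, hdr);
        if (seq->completion)
                complete(seq->completion);
        msgqueue_seq_release(priv, seq);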
- */ -#define NVKM_MSGQUEUE_NUM_SEQUENCES 16 - -/** - * struct nvkm_msgqueue - manage a command/message based FW on a falcon - * - * @falcon: falcon to be managed - * @func: implementation of the firmware to use - * @init_msg_received: whether the init message has already been received - * @init_done: whether all init is complete and commands can be processed - * @seq_lock: protects seq and seq_tbl - * @seq: sequences to match commands and messages - * @seq_tbl: bitmap of sequences currently in use - */ -struct nvkm_msgqueue { - struct nvkm_falcon *falcon; - const struct nvkm_msgqueue_func *func; - u32 fw_version; - bool init_msg_received; - struct completion init_done; - - struct mutex seq_lock; - struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES]; - unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)]; -}; - -void nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *, struct nvkm_falcon *, - struct nvkm_msgqueue *); -int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority, - struct nvkm_msgqueue_hdr *, nvkm_msgqueue_callback, - struct completion *, bool); -void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *, - struct nvkm_msgqueue_queue *); - -int msgqueue_0137c63d_new(struct nvkm_falcon *, const struct nvkm_secboot *, - struct nvkm_msgqueue **); -int msgqueue_0137bca5_new(struct nvkm_falcon *, const struct nvkm_secboot *, - struct nvkm_msgqueue **); -int msgqueue_0148cdec_new(struct nvkm_falcon *, const struct nvkm_secboot *, - struct nvkm_msgqueue **); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c deleted file mode 100644 index fec0273158f6..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c +++ /dev/null @@ -1,436 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
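A typical caller of nvkm_msgqueue_post() pairs it with an on-stack completion and a timeout, as the ACR bootstrap path further down in this patch does; roughly:

        DECLARE_COMPLETION_ONSTACK(completed);

        /* cmd.hdr.unit_id, cmd.hdr.size and the command payload filled in first */
        nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
                           acr_boot_falcon_callback, &completed, true);

        if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
                return -ETIMEDOUT;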
- * - */ -#include "msgqueue.h" -#include <engine/falcon.h> -#include <subdev/secboot.h> - -/* Queues identifiers */ -enum { - /* High Priority Command Queue for Host -> PMU communication */ - MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0, - /* Low Priority Command Queue for Host -> PMU communication */ - MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1, - /* Message queue for PMU -> Host communication */ - MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4, - MSGQUEUE_0137C63D_NUM_QUEUES = 5, -}; - -struct msgqueue_0137c63d { - struct nvkm_msgqueue base; - - struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES]; -}; -#define msgqueue_0137c63d(q) \ - container_of(q, struct msgqueue_0137c63d, base) - -struct msgqueue_0137bca5 { - struct msgqueue_0137c63d base; - - u64 wpr_addr; -}; -#define msgqueue_0137bca5(q) \ - container_of(container_of(q, struct msgqueue_0137c63d, base), \ - struct msgqueue_0137bca5, base); - -static struct nvkm_msgqueue_queue * -msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue, - enum msgqueue_msg_priority priority) -{ - struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue); - const struct nvkm_subdev *subdev = priv->base.falcon->owner; - - switch (priority) { - case MSGQUEUE_MSG_PRIORITY_HIGH: - return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ]; - case MSGQUEUE_MSG_PRIORITY_LOW: - return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ]; - default: - nvkm_error(subdev, "invalid command queue!\n"); - return ERR_PTR(-EINVAL); - } -} - -static void -msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue) -{ - struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue); - struct nvkm_msgqueue_queue *q_queue = - &priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE]; - - nvkm_msgqueue_process_msgs(&priv->base, q_queue); -} - -/* Init unit */ -#define MSGQUEUE_0137C63D_UNIT_INIT 0x07 - -enum { - INIT_MSG_INIT = 0x0, -}; - -static void -init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf) -{ - struct { - u32 reserved; - u32 freq_hz; - u32 trace_size; - u32 trace_dma_base; - u16 trace_dma_base1; - u8 trace_dma_offset; - u32 trace_dma_idx; - bool secure_mode; - bool raise_priv_sec; - struct { - u32 dma_base; - u16 dma_base1; - u8 dma_offset; - u16 fb_size; - u8 dma_idx; - } gc6_ctx; - u8 pad; - } *args = buf; - - args->secure_mode = 1; -} - -/* forward declaration */ -static int acr_init_wpr(struct nvkm_msgqueue *queue); - -static int -init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr) -{ - struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue); - struct { - struct nvkm_msgqueue_msg base; - - u8 pad; - u16 os_debug_entry_point; - - struct { - u16 size; - u16 offset; - u8 index; - u8 pad; - } queue_info[MSGQUEUE_0137C63D_NUM_QUEUES]; - - u16 sw_managed_area_offset; - u16 sw_managed_area_size; - } *init = (void *)hdr; - const struct nvkm_subdev *subdev = _queue->falcon->owner; - int i; - - if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) { - nvkm_error(subdev, "expected message from init unit\n"); - return -EINVAL; - } - - if (init->base.msg_type != INIT_MSG_INIT) { - nvkm_error(subdev, "expected PMU init msg\n"); - return -EINVAL; - } - - for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) { - struct nvkm_msgqueue_queue *queue = &priv->queue[i]; - - mutex_init(&queue->mutex); - - queue->index = init->queue_info[i].index; - queue->offset = init->queue_info[i].offset; - queue->size = init->queue_info[i].size; - - if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) { - queue->head_reg = 0x4a0 + (queue->index * 4); - queue->tail_reg = 0x4b0 + (queue->index * 
4); - } else { - queue->head_reg = 0x4c8; - queue->tail_reg = 0x4cc; - } - - nvkm_debug(subdev, - "queue %d: index %d, offset 0x%08x, size 0x%08x\n", - i, queue->index, queue->offset, queue->size); - } - - /* Complete initialization by initializing WPR region */ - return acr_init_wpr(&priv->base); -} - -static const struct nvkm_msgqueue_init_func -msgqueue_0137c63d_init_func = { - .gen_cmdline = init_gen_cmdline, - .init_callback = init_callback, -}; - - - -/* ACR unit */ -#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a - -enum { - ACR_CMD_INIT_WPR_REGION = 0x00, - ACR_CMD_BOOTSTRAP_FALCON = 0x01, - ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03, -}; - -static void -acr_init_wpr_callback(struct nvkm_msgqueue *queue, - struct nvkm_msgqueue_hdr *hdr) -{ - struct { - struct nvkm_msgqueue_msg base; - u32 error_code; - } *msg = (void *)hdr; - const struct nvkm_subdev *subdev = queue->falcon->owner; - - if (msg->error_code) { - nvkm_error(subdev, "ACR WPR init failure: %d\n", - msg->error_code); - return; - } - - nvkm_debug(subdev, "ACR WPR init complete\n"); - complete_all(&queue->init_done); -} - -static int -acr_init_wpr(struct nvkm_msgqueue *queue) -{ - /* - * region_id: region ID in WPR region - * wpr_offset: offset in WPR region - */ - struct { - struct nvkm_msgqueue_hdr hdr; - u8 cmd_type; - u32 region_id; - u32 wpr_offset; - } cmd; - memset(&cmd, 0, sizeof(cmd)); - - cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR; - cmd.hdr.size = sizeof(cmd); - cmd.cmd_type = ACR_CMD_INIT_WPR_REGION; - cmd.region_id = 0x01; - cmd.wpr_offset = 0x00; - - nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr, - acr_init_wpr_callback, NULL, false); - - return 0; -} - - -static void -acr_boot_falcon_callback(struct nvkm_msgqueue *priv, - struct nvkm_msgqueue_hdr *hdr) -{ - struct acr_bootstrap_falcon_msg { - struct nvkm_msgqueue_msg base; - - u32 falcon_id; - } *msg = (void *)hdr; - const struct nvkm_subdev *subdev = priv->falcon->owner; - u32 falcon_id = msg->falcon_id; - - if (falcon_id >= NVKM_SECBOOT_FALCON_END) { - nvkm_error(subdev, "in bootstrap falcon callback:\n"); - nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id); - return; - } - nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]); -} - -enum { - ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0, - ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1, -}; - -static int -acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon) -{ - DECLARE_COMPLETION_ONSTACK(completed); - /* - * flags - Flag specifying RESET or no RESET. - * falcon id - Falcon id specifying falcon to bootstrap. 
- */ - struct { - struct nvkm_msgqueue_hdr hdr; - u8 cmd_type; - u32 flags; - u32 falcon_id; - } cmd; - - memset(&cmd, 0, sizeof(cmd)); - - cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR; - cmd.hdr.size = sizeof(cmd); - cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON; - cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES; - cmd.falcon_id = falcon; - nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr, - acr_boot_falcon_callback, &completed, true); - - if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000))) - return -ETIMEDOUT; - - return 0; -} - -static void -acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv, - struct nvkm_msgqueue_hdr *hdr) -{ - struct acr_bootstrap_falcon_msg { - struct nvkm_msgqueue_msg base; - - u32 falcon_mask; - } *msg = (void *)hdr; - const struct nvkm_subdev *subdev = priv->falcon->owner; - unsigned long falcon_mask = msg->falcon_mask; - u32 falcon_id, falcon_treated = 0; - - for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) { - nvkm_debug(subdev, "%s booted\n", - nvkm_secboot_falcon_name[falcon_id]); - falcon_treated |= BIT(falcon_id); - } - - if (falcon_treated != msg->falcon_mask) { - nvkm_error(subdev, "in bootstrap falcon callback:\n"); - nvkm_error(subdev, "invalid falcon mask 0x%x\n", - msg->falcon_mask); - return; - } -} - -static int -acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask) -{ - DECLARE_COMPLETION_ONSTACK(completed); - /* - * flags - Flag specifying RESET or no RESET. - * falcon id - Falcon id specifying falcon to bootstrap. - */ - struct { - struct nvkm_msgqueue_hdr hdr; - u8 cmd_type; - u32 flags; - u32 falcon_mask; - u32 use_va_mask; - u32 wpr_lo; - u32 wpr_hi; - } cmd; - struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv); - - memset(&cmd, 0, sizeof(cmd)); - - cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR; - cmd.hdr.size = sizeof(cmd); - cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS; - cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES; - cmd.falcon_mask = falcon_mask; - cmd.wpr_lo = lower_32_bits(queue->wpr_addr); - cmd.wpr_hi = upper_32_bits(queue->wpr_addr); - nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr, - acr_boot_multiple_falcons_callback, &completed, true); - - if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000))) - return -ETIMEDOUT; - - return 0; -} - -static const struct nvkm_msgqueue_acr_func -msgqueue_0137c63d_acr_func = { - .boot_falcon = acr_boot_falcon, -}; - -static const struct nvkm_msgqueue_acr_func -msgqueue_0137bca5_acr_func = { - .boot_falcon = acr_boot_falcon, - .boot_multiple_falcons = acr_boot_multiple_falcons, -}; - -static void -msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue) -{ - kfree(msgqueue_0137c63d(queue)); -} - -static const struct nvkm_msgqueue_func -msgqueue_0137c63d_func = { - .init_func = &msgqueue_0137c63d_init_func, - .acr_func = &msgqueue_0137c63d_acr_func, - .cmd_queue = msgqueue_0137c63d_cmd_queue, - .recv = msgqueue_0137c63d_process_msgs, - .dtor = msgqueue_0137c63d_dtor, -}; - -int -msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb, - struct nvkm_msgqueue **queue) -{ - struct msgqueue_0137c63d *ret; - - ret = kzalloc(sizeof(*ret), GFP_KERNEL); - if (!ret) - return -ENOMEM; - - *queue = &ret->base; - - nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base); - - return 0; -} - -static const struct nvkm_msgqueue_func -msgqueue_0137bca5_func = { - .init_func = &msgqueue_0137c63d_init_func, - .acr_func = &msgqueue_0137bca5_acr_func, - .cmd_queue 
= msgqueue_0137c63d_cmd_queue, - .recv = msgqueue_0137c63d_process_msgs, - .dtor = msgqueue_0137c63d_dtor, -}; - -int -msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb, - struct nvkm_msgqueue **queue) -{ - struct msgqueue_0137bca5 *ret; - - ret = kzalloc(sizeof(*ret), GFP_KERNEL); - if (!ret) - return -ENOMEM; - - *queue = &ret->base.base; - - /* - * FIXME this must be set to the address of a *GPU* mapping within the - * ACR address space! - */ - /* ret->wpr_addr = sb->wpr_addr; */ - - nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base); - - return 0; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c deleted file mode 100644 index 9424803b9ef4..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "msgqueue.h" -#include <engine/falcon.h> -#include <subdev/secboot.h> - -/* - * This firmware runs on the SEC falcon. It only has one command and one - * message queue, and uses a different command line and init message. 
- */ - -enum { - MSGQUEUE_0148CDEC_COMMAND_QUEUE = 0, - MSGQUEUE_0148CDEC_MESSAGE_QUEUE = 1, - MSGQUEUE_0148CDEC_NUM_QUEUES, -}; - -struct msgqueue_0148cdec { - struct nvkm_msgqueue base; - - struct nvkm_msgqueue_queue queue[MSGQUEUE_0148CDEC_NUM_QUEUES]; -}; -#define msgqueue_0148cdec(q) \ - container_of(q, struct msgqueue_0148cdec, base) - -static struct nvkm_msgqueue_queue * -msgqueue_0148cdec_cmd_queue(struct nvkm_msgqueue *queue, - enum msgqueue_msg_priority priority) -{ - struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue); - - return &priv->queue[MSGQUEUE_0148CDEC_COMMAND_QUEUE]; -} - -static void -msgqueue_0148cdec_process_msgs(struct nvkm_msgqueue *queue) -{ - struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue); - struct nvkm_msgqueue_queue *q_queue = - &priv->queue[MSGQUEUE_0148CDEC_MESSAGE_QUEUE]; - - nvkm_msgqueue_process_msgs(&priv->base, q_queue); -} - - -/* Init unit */ -#define MSGQUEUE_0148CDEC_UNIT_INIT 0x01 - -enum { - INIT_MSG_INIT = 0x0, -}; - -static void -init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf) -{ - struct { - u32 freq_hz; - u32 falc_trace_size; - u32 falc_trace_dma_base; - u32 falc_trace_dma_idx; - bool secure_mode; - } *args = buf; - - args->secure_mode = false; -} - -static int -init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr) -{ - struct msgqueue_0148cdec *priv = msgqueue_0148cdec(_queue); - struct { - struct nvkm_msgqueue_msg base; - - u8 num_queues; - u16 os_debug_entry_point; - - struct { - u32 offset; - u16 size; - u8 index; - u8 id; - } queue_info[MSGQUEUE_0148CDEC_NUM_QUEUES]; - - u16 sw_managed_area_offset; - u16 sw_managed_area_size; - } *init = (void *)hdr; - const struct nvkm_subdev *subdev = _queue->falcon->owner; - int i; - - if (init->base.hdr.unit_id != MSGQUEUE_0148CDEC_UNIT_INIT) { - nvkm_error(subdev, "expected message from init unit\n"); - return -EINVAL; - } - - if (init->base.msg_type != INIT_MSG_INIT) { - nvkm_error(subdev, "expected SEC init msg\n"); - return -EINVAL; - } - - for (i = 0; i < MSGQUEUE_0148CDEC_NUM_QUEUES; i++) { - u8 id = init->queue_info[i].id; - struct nvkm_msgqueue_queue *queue = &priv->queue[id]; - - mutex_init(&queue->mutex); - - queue->index = init->queue_info[i].index; - queue->offset = init->queue_info[i].offset; - queue->size = init->queue_info[i].size; - - if (id == MSGQUEUE_0148CDEC_MESSAGE_QUEUE) { - queue->head_reg = 0xa30 + (queue->index * 8); - queue->tail_reg = 0xa34 + (queue->index * 8); - } else { - queue->head_reg = 0xa00 + (queue->index * 8); - queue->tail_reg = 0xa04 + (queue->index * 8); - } - - nvkm_debug(subdev, - "queue %d: index %d, offset 0x%08x, size 0x%08x\n", - id, queue->index, queue->offset, queue->size); - } - - complete_all(&_queue->init_done); - - return 0; -} - -static const struct nvkm_msgqueue_init_func -msgqueue_0148cdec_init_func = { - .gen_cmdline = init_gen_cmdline, - .init_callback = init_callback, -}; - - - -/* ACR unit */ -#define MSGQUEUE_0148CDEC_UNIT_ACR 0x08 - -enum { - ACR_CMD_BOOTSTRAP_FALCON = 0x00, -}; - -static void -acr_boot_falcon_callback(struct nvkm_msgqueue *priv, - struct nvkm_msgqueue_hdr *hdr) -{ - struct acr_bootstrap_falcon_msg { - struct nvkm_msgqueue_msg base; - - u32 error_code; - u32 falcon_id; - } *msg = (void *)hdr; - const struct nvkm_subdev *subdev = priv->falcon->owner; - u32 falcon_id = msg->falcon_id; - - if (msg->error_code) { - nvkm_error(subdev, "in bootstrap falcon callback:\n"); - nvkm_error(subdev, "expected error code 0x%x\n", - msg->error_code); - return; - } - - if (falcon_id >= 
NVKM_SECBOOT_FALCON_END) { - nvkm_error(subdev, "in bootstrap falcon callback:\n"); - nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id); - return; - } - - nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]); -} - -enum { - ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0, - ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1, -}; - -static int -acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon) -{ - DECLARE_COMPLETION_ONSTACK(completed); - /* - * flags - Flag specifying RESET or no RESET. - * falcon id - Falcon id specifying falcon to bootstrap. - */ - struct { - struct nvkm_msgqueue_hdr hdr; - u8 cmd_type; - u32 flags; - u32 falcon_id; - } cmd; - - memset(&cmd, 0, sizeof(cmd)); - - cmd.hdr.unit_id = MSGQUEUE_0148CDEC_UNIT_ACR; - cmd.hdr.size = sizeof(cmd); - cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON; - cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES; - cmd.falcon_id = falcon; - nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr, - acr_boot_falcon_callback, &completed, true); - - if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000))) - return -ETIMEDOUT; - - return 0; -} - -const struct nvkm_msgqueue_acr_func -msgqueue_0148cdec_acr_func = { - .boot_falcon = acr_boot_falcon, -}; - -static void -msgqueue_0148cdec_dtor(struct nvkm_msgqueue *queue) -{ - kfree(msgqueue_0148cdec(queue)); -} - -const struct nvkm_msgqueue_func -msgqueue_0148cdec_func = { - .init_func = &msgqueue_0148cdec_init_func, - .acr_func = &msgqueue_0148cdec_acr_func, - .cmd_queue = msgqueue_0148cdec_cmd_queue, - .recv = msgqueue_0148cdec_process_msgs, - .dtor = msgqueue_0148cdec_dtor, -}; - -int -msgqueue_0148cdec_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb, - struct nvkm_msgqueue **queue) -{ - struct msgqueue_0148cdec *ret; - - ret = kzalloc(sizeof(*ret), GFP_KERNEL); - if (!ret) - return -ENOMEM; - - *queue = &ret->base; - - nvkm_msgqueue_ctor(&msgqueue_0148cdec_func, falcon, &ret->base); - - return 0; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h index 900fe1d37b4d..466188752eb0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h @@ -1,9 +1,5 @@ /* SPDX-License-Identifier: MIT */ #ifndef __NVKM_FALCON_PRIV_H__ #define __NVKM_FALCON_PRIV_H__ -#include <engine/falcon.h> - -void -nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *, - const char *, u32, struct nvkm_falcon *); +#include <core/falcon.h> #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c new file mode 100644 index 000000000000..a453de341a75 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "qmgr.h" + +struct nvkm_falcon_qmgr_seq * +nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr) +{ + const struct nvkm_subdev *subdev = qmgr->falcon->owner; + struct nvkm_falcon_qmgr_seq *seq; + u32 index; + + mutex_lock(&qmgr->seq.mutex); + index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM); + if (index >= NVKM_FALCON_QMGR_SEQ_NUM) { + nvkm_error(subdev, "no free sequence available\n"); + mutex_unlock(&qmgr->seq.mutex); + return ERR_PTR(-EAGAIN); + } + + set_bit(index, qmgr->seq.tbl); + mutex_unlock(&qmgr->seq.mutex); + + seq = &qmgr->seq.id[index]; + seq->state = SEQ_STATE_PENDING; + return seq; +} + +void +nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr, + struct nvkm_falcon_qmgr_seq *seq) +{ + /* no need to acquire seq.mutex since clear_bit is atomic */ + seq->state = SEQ_STATE_FREE; + seq->callback = NULL; + reinit_completion(&seq->done); + clear_bit(seq->id, qmgr->seq.tbl); +} + +void +nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr) +{ + struct nvkm_falcon_qmgr *qmgr = *pqmgr; + if (qmgr) { + kfree(*pqmgr); + *pqmgr = NULL; + } +} + +int +nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon, + struct nvkm_falcon_qmgr **pqmgr) +{ + struct nvkm_falcon_qmgr *qmgr; + int i; + + if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL))) + return -ENOMEM; + + qmgr->falcon = falcon; + mutex_init(&qmgr->seq.mutex); + for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) { + qmgr->seq.id[i].id = i; + init_completion(&qmgr->seq.id[i].done); + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h new file mode 100644 index 000000000000..a45cd705e4f7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_FALCON_QMGR_H__ +#define __NVKM_FALCON_QMGR_H__ +#include <core/falcon.h> + +#define HDR_SIZE sizeof(struct nv_falcon_msg) +#define QUEUE_ALIGNMENT 4 +/* max size of the messages we can receive */ +#define MSG_BUF_SIZE 128 + +/** + * struct nvkm_falcon_qmgr_seq - keep track of ongoing commands + * + * Every time a command is sent, a sequence is assigned to it so the + * corresponding message can be matched. Upon receiving the message, a callback + * can be called and/or a completion signaled. + * + * @id: sequence ID + * @state: current state + * @callback: callback to call upon receiving matching message + * @completion: completion to signal after callback is called + */ +struct nvkm_falcon_qmgr_seq { + u16 id; + enum { + SEQ_STATE_FREE = 0, + SEQ_STATE_PENDING, + SEQ_STATE_USED, + SEQ_STATE_CANCELLED + } state; + bool async; + nvkm_falcon_qmgr_callback callback; + void *priv; + struct completion done; + int result; +}; + +/* + * We can have an arbitrary number of sequences, but realistically we will + * probably not use that much simultaneously. 
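The replacement qmgr code above keeps the same allocation scheme: a caller grabs a free sequence from the bitmap, uses it to tag a command, and returns it once the reply has been handled. A minimal sketch of that usage, assuming a qmgr pointer is already at hand:

        struct nvkm_falcon_qmgr_seq *seq;

        seq = nvkm_falcon_qmgr_seq_acquire(qmgr);
        if (IS_ERR(seq))
                return PTR_ERR(seq);

        /* ... post a command carrying seq->id, wait on seq->done ... */

        nvkm_falcon_qmgr_seq_release(qmgr, seq);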
+ */ +#define NVKM_FALCON_QMGR_SEQ_NUM 16 + +struct nvkm_falcon_qmgr { + struct nvkm_falcon *falcon; + + struct { + struct mutex mutex; + struct nvkm_falcon_qmgr_seq id[NVKM_FALCON_QMGR_SEQ_NUM]; + unsigned long tbl[BITS_TO_LONGS(NVKM_FALCON_QMGR_SEQ_NUM)]; + } seq; +}; + +struct nvkm_falcon_qmgr_seq * +nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *); +void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *, + struct nvkm_falcon_qmgr_seq *); + +struct nvkm_falcon_cmdq { + struct nvkm_falcon_qmgr *qmgr; + const char *name; + struct mutex mutex; + struct completion ready; + + u32 head_reg; + u32 tail_reg; + u32 offset; + u32 size; + + u32 position; +}; + +struct nvkm_falcon_msgq { + struct nvkm_falcon_qmgr *qmgr; + const char *name; + struct mutex mutex; + + u32 head_reg; + u32 tail_reg; + u32 offset; + + u32 position; +}; + +#define FLCNQ_PRINTK(t,q,f,a...) \ + FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a) +#define FLCNQ_DBG(q,f,a...) FLCNQ_PRINTK(debug, (q), f, ##a) +#define FLCNQ_ERR(q,f,a...) FLCNQ_PRINTK(error, (q), f, ##a) +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c index 6d978feebbd7..1ff9b9c2e651 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c @@ -25,7 +25,7 @@ #include <core/memory.h> #include <subdev/timer.h> -static void +void nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, u32 size, u16 tag, u8 port, bool secure) { @@ -89,18 +89,17 @@ nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start, } } -static const u32 EMEM_START_ADDR = 0x1000000; - -static void +void nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, - u32 size, u8 port) + u32 size, u8 port) { + const struct nvkm_falcon_func *func = falcon->func; u8 rem = size % 4; int i; - if (start >= EMEM_START_ADDR && falcon->has_emem) + if (func->emem_addr && start >= func->emem_addr) return nvkm_falcon_v1_load_emem(falcon, data, - start - EMEM_START_ADDR, size, + start - func->emem_addr, size, port); size -= rem; @@ -148,15 +147,16 @@ nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size, } } -static void +void nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port, void *data) { + const struct nvkm_falcon_func *func = falcon->func; u8 rem = size % 4; int i; - if (start >= EMEM_START_ADDR && falcon->has_emem) - return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR, + if (func->emem_addr && start >= func->emem_addr) + return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr, size, port, data); size -= rem; @@ -179,12 +179,11 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, } } -static void +void nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx) { - struct nvkm_device *device = falcon->owner->device; + const u32 fbif = falcon->func->fbif; u32 inst_loc; - u32 fbif; /* disable instance block binding */ if (ctx == NULL) { @@ -192,20 +191,6 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx) return; } - switch (falcon->owner->index) { - case NVKM_ENGINE_NVENC0: - case NVKM_ENGINE_NVENC1: - case NVKM_ENGINE_NVENC2: - fbif = 0x800; - break; - case NVKM_SUBDEV_PMU: - fbif = 0xe00; - break; - default: - fbif = 0x600; - break; - } - nvkm_falcon_wr32(falcon, 0x10c, 0x1); /* setup apertures - virtual */ @@ -234,50 +219,15 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, 
struct nvkm_memory *ctx) nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000); nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8); - - /* Not sure if this is a WAR for a HW issue, or some additional - * programming sequence that's needed to properly complete the - * context switch we trigger above. - * - * Fixes unreliability of booting the SEC2 RTOS on Quadro P620, - * particularly when resuming from suspend. - * - * Also removes the need for an odd workaround where we needed - * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before - * the SEC2 RTOS would begin executing. - */ - switch (falcon->owner->index) { - case NVKM_SUBDEV_GSP: - case NVKM_ENGINE_SEC2: - nvkm_msec(device, 10, - u32 irqstat = nvkm_falcon_rd32(falcon, 0x008); - u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc); - if ((irqstat & 0x00000008) && - (flcn0dc & 0x00007000) == 0x00005000) - break; - ); - - nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008); - nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002); - - nvkm_msec(device, 10, - u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc); - if ((flcn0dc & 0x00007000) == 0x00000000) - break; - ); - break; - default: - break; - } } -static void +void nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr) { nvkm_falcon_wr32(falcon, 0x104, start_addr); } -static void +void nvkm_falcon_v1_start(struct nvkm_falcon *falcon) { u32 reg = nvkm_falcon_rd32(falcon, 0x100); @@ -288,7 +238,7 @@ nvkm_falcon_v1_start(struct nvkm_falcon *falcon) nvkm_falcon_wr32(falcon, 0x100, 0x2); } -static int +int nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms) { struct nvkm_device *device = falcon->owner->device; @@ -301,7 +251,7 @@ nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms) return 0; } -static int +int nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask) { struct nvkm_device *device = falcon->owner->device; @@ -330,7 +280,7 @@ falcon_v1_wait_idle(struct nvkm_falcon *falcon) return 0; } -static int +int nvkm_falcon_v1_enable(struct nvkm_falcon *falcon) { struct nvkm_device *device = falcon->owner->device; @@ -352,7 +302,7 @@ nvkm_falcon_v1_enable(struct nvkm_falcon *falcon) return 0; } -static void +void nvkm_falcon_v1_disable(struct nvkm_falcon *falcon) { /* disable IRQs and wait for any previous code to complete */ diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild new file mode 100644 index 000000000000..41d75f98e603 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: MIT +nvkm-y += nvkm/nvfw/fw.o +nvkm-y += nvkm/nvfw/hs.o +nvkm-y += nvkm/nvfw/ls.o + +nvkm-y += nvkm/nvfw/acr.o +nvkm-y += nvkm/nvfw/flcn.o diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c new file mode 100644 index 000000000000..0d063b8317f7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c @@ -0,0 +1,165 @@ +/* + * Copyright 2019 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include <core/subdev.h> +#include <nvfw/acr.h> + +void +wpr_header_dump(struct nvkm_subdev *subdev, const struct wpr_header *hdr) +{ + nvkm_debug(subdev, "wprHeader\n"); + nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id); + nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset); + nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner); + nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap); + nvkm_debug(subdev, "\tstatus : %d\n", hdr->status); +} + +void +wpr_header_v1_dump(struct nvkm_subdev *subdev, const struct wpr_header_v1 *hdr) +{ + nvkm_debug(subdev, "wprHeader\n"); + nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id); + nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset); + nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner); + nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap); + nvkm_debug(subdev, "\tbinVersion : %d\n", hdr->bin_version); + nvkm_debug(subdev, "\tstatus : %d\n", hdr->status); +} + +void +lsb_header_tail_dump(struct nvkm_subdev *subdev, + struct lsb_header_tail *hdr) +{ + nvkm_debug(subdev, "lsbHeader\n"); + nvkm_debug(subdev, "\tucodeOff : 0x%x\n", hdr->ucode_off); + nvkm_debug(subdev, "\tucodeSize : 0x%x\n", hdr->ucode_size); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); + nvkm_debug(subdev, "\tblCodeSize : 0x%x\n", hdr->bl_code_size); + nvkm_debug(subdev, "\tblImemOff : 0x%x\n", hdr->bl_imem_off); + nvkm_debug(subdev, "\tblDataOff : 0x%x\n", hdr->bl_data_off); + nvkm_debug(subdev, "\tblDataSize : 0x%x\n", hdr->bl_data_size); + nvkm_debug(subdev, "\tappCodeOff : 0x%x\n", hdr->app_code_off); + nvkm_debug(subdev, "\tappCodeSize : 0x%x\n", hdr->app_code_size); + nvkm_debug(subdev, "\tappDataOff : 0x%x\n", hdr->app_data_off); + nvkm_debug(subdev, "\tappDataSize : 0x%x\n", hdr->app_data_size); + nvkm_debug(subdev, "\tflags : 0x%x\n", hdr->flags); +} + +void +lsb_header_dump(struct nvkm_subdev *subdev, struct lsb_header *hdr) +{ + lsb_header_tail_dump(subdev, &hdr->tail); +} + +void +lsb_header_v1_dump(struct nvkm_subdev *subdev, struct lsb_header_v1 *hdr) +{ + lsb_header_tail_dump(subdev, &hdr->tail); +} + +void +flcn_acr_desc_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc *hdr) +{ + int i; + + nvkm_debug(subdev, "acrDesc\n"); + nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id); + nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset); + nvkm_debug(subdev, 
"\tmmuMemRange : 0x%x\n", + hdr->mmu_mem_range); + nvkm_debug(subdev, "\tnoRegions : %d\n", + hdr->regions.no_regions); + + for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { + nvkm_debug(subdev, "\tregion[%d] :\n", i); + nvkm_debug(subdev, "\t startAddr : 0x%x\n", + hdr->regions.region_props[i].start_addr); + nvkm_debug(subdev, "\t endAddr : 0x%x\n", + hdr->regions.region_props[i].end_addr); + nvkm_debug(subdev, "\t regionId : %d\n", + hdr->regions.region_props[i].region_id); + nvkm_debug(subdev, "\t readMask : 0x%x\n", + hdr->regions.region_props[i].read_mask); + nvkm_debug(subdev, "\t writeMask : 0x%x\n", + hdr->regions.region_props[i].write_mask); + nvkm_debug(subdev, "\t clientMask : 0x%x\n", + hdr->regions.region_props[i].client_mask); + } + + nvkm_debug(subdev, "\tucodeBlobSize: %d\n", + hdr->ucode_blob_size); + nvkm_debug(subdev, "\tucodeBlobBase: 0x%llx\n", + hdr->ucode_blob_base); + nvkm_debug(subdev, "\tvprEnabled : %d\n", + hdr->vpr_desc.vpr_enabled); + nvkm_debug(subdev, "\tvprStart : 0x%x\n", + hdr->vpr_desc.vpr_start); + nvkm_debug(subdev, "\tvprEnd : 0x%x\n", + hdr->vpr_desc.vpr_end); + nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n", + hdr->vpr_desc.hdcp_policies); +} + +void +flcn_acr_desc_v1_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc_v1 *hdr) +{ + int i; + + nvkm_debug(subdev, "acrDesc\n"); + nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id); + nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset); + nvkm_debug(subdev, "\tmmuMemoryRange : 0x%x\n", + hdr->mmu_memory_range); + nvkm_debug(subdev, "\tnoRegions : %d\n", + hdr->regions.no_regions); + + for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { + nvkm_debug(subdev, "\tregion[%d] :\n", i); + nvkm_debug(subdev, "\t startAddr : 0x%x\n", + hdr->regions.region_props[i].start_addr); + nvkm_debug(subdev, "\t endAddr : 0x%x\n", + hdr->regions.region_props[i].end_addr); + nvkm_debug(subdev, "\t regionId : %d\n", + hdr->regions.region_props[i].region_id); + nvkm_debug(subdev, "\t readMask : 0x%x\n", + hdr->regions.region_props[i].read_mask); + nvkm_debug(subdev, "\t writeMask : 0x%x\n", + hdr->regions.region_props[i].write_mask); + nvkm_debug(subdev, "\t clientMask : 0x%x\n", + hdr->regions.region_props[i].client_mask); + nvkm_debug(subdev, "\t shadowMemStartAddr: 0x%x\n", + hdr->regions.region_props[i].shadow_mem_start_addr); + } + + nvkm_debug(subdev, "\tucodeBlobSize : %d\n", + hdr->ucode_blob_size); + nvkm_debug(subdev, "\tucodeBlobBase : 0x%llx\n", + hdr->ucode_blob_base); + nvkm_debug(subdev, "\tvprEnabled : %d\n", + hdr->vpr_desc.vpr_enabled); + nvkm_debug(subdev, "\tvprStart : 0x%x\n", + hdr->vpr_desc.vpr_start); + nvkm_debug(subdev, "\tvprEnd : 0x%x\n", + hdr->vpr_desc.vpr_end); + nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n", + hdr->vpr_desc.hdcp_policies); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c new file mode 100644 index 000000000000..00ec764e1aab --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c @@ -0,0 +1,115 @@ +/* + * Copyright 2019 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include <core/subdev.h> +#include <nvfw/flcn.h> + +void +loader_config_dump(struct nvkm_subdev *subdev, const struct loader_config *hdr) +{ + nvkm_debug(subdev, "loaderConfig\n"); + nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx); + nvkm_debug(subdev, "\tcodeDmaBase : 0x%xx\n", hdr->code_dma_base); + nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total); + nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load); + nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point); + nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); + nvkm_debug(subdev, "\toverlayDmaBase: 0x%x\n", hdr->overlay_dma_base); + nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc); + nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv); + nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1); + nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1); + nvkm_debug(subdev, "\tovlyDmaBase1 : 0x%x\n", hdr->overlay_dma_base1); +} + +void +loader_config_v1_dump(struct nvkm_subdev *subdev, + const struct loader_config_v1 *hdr) +{ + nvkm_debug(subdev, "loaderConfig\n"); + nvkm_debug(subdev, "\treserved : 0x%08x\n", hdr->reserved); + nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx); + nvkm_debug(subdev, "\tcodeDmaBase : 0x%llxx\n", hdr->code_dma_base); + nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total); + nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load); + nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point); + nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); + nvkm_debug(subdev, "\toverlayDmaBase: 0x%llx\n", hdr->overlay_dma_base); + nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc); + nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv); +} + +void +flcn_bl_dmem_desc_dump(struct nvkm_subdev *subdev, + const struct flcn_bl_dmem_desc *hdr) +{ + nvkm_debug(subdev, "flcnBlDmemDesc\n"); + nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n", + hdr->reserved[0], hdr->reserved[1], hdr->reserved[2], + hdr->reserved[3]); + nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n", + hdr->signature[0], hdr->signature[1], hdr->signature[2], + hdr->signature[3]); + nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma); + nvkm_debug(subdev, "\tcodeDmaBase : 
0x%x\n", hdr->code_dma_base); + nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off); + nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size); + nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off); + nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size); + nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point); + nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); + nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1); + nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1); +} + +void +flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *subdev, + const struct flcn_bl_dmem_desc_v1 *hdr) +{ + nvkm_debug(subdev, "flcnBlDmemDesc\n"); + nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n", + hdr->reserved[0], hdr->reserved[1], hdr->reserved[2], + hdr->reserved[3]); + nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n", + hdr->signature[0], hdr->signature[1], hdr->signature[2], + hdr->signature[3]); + nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma); + nvkm_debug(subdev, "\tcodeDmaBase : 0x%llx\n", hdr->code_dma_base); + nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off); + nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size); + nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off); + nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size); + nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point); + nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); +} + +void +flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *subdev, + const struct flcn_bl_dmem_desc_v2 *hdr) +{ + flcn_bl_dmem_desc_v1_dump(subdev, (void *)hdr); + nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc); + nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c new file mode 100644 index 000000000000..746803bd5318 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c @@ -0,0 +1,51 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include <core/subdev.h> +#include <nvfw/fw.h> + +const struct nvfw_bin_hdr * +nvfw_bin_hdr(struct nvkm_subdev *subdev, const void *data) +{ + const struct nvfw_bin_hdr *hdr = data; + nvkm_debug(subdev, "binHdr:\n"); + nvkm_debug(subdev, "\tbinMagic : 0x%08x\n", hdr->bin_magic); + nvkm_debug(subdev, "\tbinVer : %d\n", hdr->bin_ver); + nvkm_debug(subdev, "\tbinSize : %d\n", hdr->bin_size); + nvkm_debug(subdev, "\theaderOffset : 0x%x\n", hdr->header_offset); + nvkm_debug(subdev, "\tdataOffset : 0x%x\n", hdr->data_offset); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); + return hdr; +} + +const struct nvfw_bl_desc * +nvfw_bl_desc(struct nvkm_subdev *subdev, const void *data) +{ + const struct nvfw_bl_desc *hdr = data; + nvkm_debug(subdev, "blDesc\n"); + nvkm_debug(subdev, "\tstartTag : 0x%x\n", hdr->start_tag); + nvkm_debug(subdev, "\tdmemLoadOff : 0x%x\n", hdr->dmem_load_off); + nvkm_debug(subdev, "\tcodeOff : 0x%x\n", hdr->code_off); + nvkm_debug(subdev, "\tcodeSize : 0x%x\n", hdr->code_size); + nvkm_debug(subdev, "\tdataOff : 0x%x\n", hdr->data_off); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); + return hdr; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c new file mode 100644 index 000000000000..04ed77cb2eba --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c @@ -0,0 +1,62 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include <core/subdev.h> +#include <nvfw/hs.h> + +const struct nvfw_hs_header * +nvfw_hs_header(struct nvkm_subdev *subdev, const void *data) +{ + const struct nvfw_hs_header *hdr = data; + nvkm_debug(subdev, "hsHeader:\n"); + nvkm_debug(subdev, "\tsigDbgOffset : 0x%x\n", hdr->sig_dbg_offset); + nvkm_debug(subdev, "\tsigDbgSize : 0x%x\n", hdr->sig_dbg_size); + nvkm_debug(subdev, "\tsigProdOffset : 0x%x\n", hdr->sig_prod_offset); + nvkm_debug(subdev, "\tsigProdSize : 0x%x\n", hdr->sig_prod_size); + nvkm_debug(subdev, "\tpatchLoc : 0x%x\n", hdr->patch_loc); + nvkm_debug(subdev, "\tpatchSig : 0x%x\n", hdr->patch_sig); + nvkm_debug(subdev, "\thdrOffset : 0x%x\n", hdr->hdr_offset); + nvkm_debug(subdev, "\thdrSize : 0x%x\n", hdr->hdr_size); + return hdr; +} + +const struct nvfw_hs_load_header * +nvfw_hs_load_header(struct nvkm_subdev *subdev, const void *data) +{ + const struct nvfw_hs_load_header *hdr = data; + int i; + + nvkm_debug(subdev, "hsLoadHeader:\n"); + nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", + hdr->non_sec_code_off); + nvkm_debug(subdev, "\tnonSecCodeSize : 0x%x\n", + hdr->non_sec_code_size); + nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); + nvkm_debug(subdev, "\tnumApps : 0x%x\n", hdr->num_apps); + for (i = 0; i < hdr->num_apps; i++) { + nvkm_debug(subdev, + "\tApp[%d] : offset 0x%x size 0x%x\n", i, + hdr->apps[(i * 2) + 0], hdr->apps[(i * 2) + 1]); + } + + return hdr; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c new file mode 100644 index 000000000000..b847f281ce97 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c @@ -0,0 +1,108 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include <core/subdev.h> +#include <nvfw/ls.h> + +static void +nvfw_ls_desc_head(struct nvkm_subdev *subdev, + const struct nvfw_ls_desc_head *hdr) +{ + char *date; + + nvkm_debug(subdev, "lsUcodeImgDesc:\n"); + nvkm_debug(subdev, "\tdescriptorSize : %d\n", + hdr->descriptor_size); + nvkm_debug(subdev, "\timageSize : %d\n", hdr->image_size); + nvkm_debug(subdev, "\ttoolsVersion : 0x%x\n", + hdr->tools_version); + nvkm_debug(subdev, "\tappVersion : 0x%x\n", hdr->app_version); + + date = kstrndup(hdr->date, sizeof(hdr->date), GFP_KERNEL); + nvkm_debug(subdev, "\tdate : %s\n", date); + kfree(date); + + nvkm_debug(subdev, "\tbootloaderStartOffset: 0x%x\n", + hdr->bootloader_start_offset); + nvkm_debug(subdev, "\tbootloaderSize : 0x%x\n", + hdr->bootloader_size); + nvkm_debug(subdev, "\tbootloaderImemOffset : 0x%x\n", + hdr->bootloader_imem_offset); + nvkm_debug(subdev, "\tbootloaderEntryPoint : 0x%x\n", + hdr->bootloader_entry_point); + + nvkm_debug(subdev, "\tappStartOffset : 0x%x\n", + hdr->app_start_offset); + nvkm_debug(subdev, "\tappSize : 0x%x\n", hdr->app_size); + nvkm_debug(subdev, "\tappImemOffset : 0x%x\n", + hdr->app_imem_offset); + nvkm_debug(subdev, "\tappImemEntry : 0x%x\n", + hdr->app_imem_entry); + nvkm_debug(subdev, "\tappDmemOffset : 0x%x\n", + hdr->app_dmem_offset); + nvkm_debug(subdev, "\tappResidentCodeOffset: 0x%x\n", + hdr->app_resident_code_offset); + nvkm_debug(subdev, "\tappResidentCodeSize : 0x%x\n", + hdr->app_resident_code_size); + nvkm_debug(subdev, "\tappResidentDataOffset: 0x%x\n", + hdr->app_resident_data_offset); + nvkm_debug(subdev, "\tappResidentDataSize : 0x%x\n", + hdr->app_resident_data_size); +} + +const struct nvfw_ls_desc * +nvfw_ls_desc(struct nvkm_subdev *subdev, const void *data) +{ + const struct nvfw_ls_desc *hdr = data; + int i; + + nvfw_ls_desc_head(subdev, &hdr->head); + + nvkm_debug(subdev, "\tnbOverlays : %d\n", hdr->nb_overlays); + for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) { + nvkm_debug(subdev, "\tloadOvl[%d] : 0x%x %d\n", i, + hdr->load_ovl[i].start, hdr->load_ovl[i].size); + } + nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed); + + return hdr; +} + +const struct nvfw_ls_desc_v1 * +nvfw_ls_desc_v1(struct nvkm_subdev *subdev, const void *data) +{ + const struct nvfw_ls_desc_v1 *hdr = data; + int i; + + nvfw_ls_desc_head(subdev, &hdr->head); + + nvkm_debug(subdev, "\tnbImemOverlays : %d\n", + hdr->nb_imem_overlays); + nvkm_debug(subdev, "\tnbDmemOverlays : %d\n", + hdr->nb_imem_overlays); + for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) { + nvkm_debug(subdev, "\tloadOvl[%2d] : 0x%x %d\n", i, + hdr->load_ovl[i].start, hdr->load_ovl[i].size); + } + nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed); + + return hdr; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild index 4e136f3d7c28..fb4fff1222af 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild @@ -1,4 +1,5 @@ # SPDX-License-Identifier: MIT +include $(src)/nvkm/subdev/acr/Kbuild include $(src)/nvkm/subdev/bar/Kbuild include $(src)/nvkm/subdev/bios/Kbuild include $(src)/nvkm/subdev/bus/Kbuild @@ -19,7 +20,6 @@ include $(src)/nvkm/subdev/mmu/Kbuild include $(src)/nvkm/subdev/mxm/Kbuild include $(src)/nvkm/subdev/pci/Kbuild include $(src)/nvkm/subdev/pmu/Kbuild -include $(src)/nvkm/subdev/secboot/Kbuild include $(src)/nvkm/subdev/therm/Kbuild include $(src)/nvkm/subdev/timer/Kbuild include $(src)/nvkm/subdev/top/Kbuild diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild new file mode 100644 index 000000000000..5b9f64a8957f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: MIT +nvkm-y += nvkm/subdev/acr/base.o +nvkm-y += nvkm/subdev/acr/hsfw.o +nvkm-y += nvkm/subdev/acr/lsfw.o +nvkm-y += nvkm/subdev/acr/gm200.o +nvkm-y += nvkm/subdev/acr/gm20b.o +nvkm-y += nvkm/subdev/acr/gp102.o +nvkm-y += nvkm/subdev/acr/gp108.o +nvkm-y += nvkm/subdev/acr/gp10b.o +nvkm-y += nvkm/subdev/acr/tu102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c new file mode 100644 index 000000000000..8eb2a930a9b5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c @@ -0,0 +1,411 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include <core/firmware.h> +#include <core/memory.h> +#include <subdev/mmu.h> + +static struct nvkm_acr_hsf * +nvkm_acr_hsf_find(struct nvkm_acr *acr, const char *name) +{ + struct nvkm_acr_hsf *hsf; + list_for_each_entry(hsf, &acr->hsf, head) { + if (!strcmp(hsf->name, name)) + return hsf; + } + return NULL; +} + +int +nvkm_acr_hsf_boot(struct nvkm_acr *acr, const char *name) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_acr_hsf *hsf; + int ret; + + hsf = nvkm_acr_hsf_find(acr, name); + if (!hsf) + return -EINVAL; + + nvkm_debug(subdev, "executing %s binary\n", hsf->name); + ret = nvkm_falcon_get(hsf->falcon, subdev); + if (ret) + return ret; + + ret = hsf->func->boot(acr, hsf); + nvkm_falcon_put(hsf->falcon, subdev); + if (ret) { + nvkm_error(subdev, "%s binary failed\n", hsf->name); + return ret; + } + + nvkm_debug(subdev, "%s binary completed successfully\n", hsf->name); + return 0; +} + +static void +nvkm_acr_unload(struct nvkm_acr *acr) +{ + if (acr->done) { + nvkm_acr_hsf_boot(acr, "unload"); + acr->done = false; + } +} + +static int +nvkm_acr_load(struct nvkm_acr *acr) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_acr_lsf *lsf; + u64 start, limit; + int ret; + + if (list_empty(&acr->lsf)) { + nvkm_debug(subdev, "No LSF(s) present.\n"); + return 0; + } + + ret = acr->func->init(acr); + if (ret) + return ret; + + acr->func->wpr_check(acr, &start, &limit); + + if (start != acr->wpr_start || limit != acr->wpr_end) { + nvkm_error(subdev, "WPR not configured as expected: " + "%016llx-%016llx vs %016llx-%016llx\n", + acr->wpr_start, acr->wpr_end, start, limit); + return -EIO; + } + + acr->done = true; + + list_for_each_entry(lsf, &acr->lsf, head) { + if (lsf->func->boot) { + ret = lsf->func->boot(lsf->falcon); + if (ret) + break; + } + } + + return ret; +} + +static int +nvkm_acr_reload(struct nvkm_acr *acr) +{ + nvkm_acr_unload(acr); + return nvkm_acr_load(acr); +} + +static struct nvkm_acr_lsf * +nvkm_acr_falcon(struct nvkm_device *device) +{ + struct nvkm_acr *acr = device->acr; + struct nvkm_acr_lsf *lsf; + + if (acr) { + list_for_each_entry(lsf, &acr->lsf, head) { + if (lsf->func->bootstrap_falcon) + return lsf; + } + } + + return NULL; +} + +int +nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask) +{ + struct nvkm_acr_lsf *acrflcn = nvkm_acr_falcon(device); + struct nvkm_acr *acr = device->acr; + unsigned long id; + + if (!acrflcn) { + int ret = nvkm_acr_reload(acr); + if (ret) + return ret; + + return acr->done ? 
0 : -EINVAL; + } + + if (acrflcn->func->bootstrap_multiple_falcons) { + return acrflcn->func-> + bootstrap_multiple_falcons(acrflcn->falcon, mask); + } + + for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) { + int ret = acrflcn->func->bootstrap_falcon(acrflcn->falcon, id); + if (ret) + return ret; + } + + return 0; +} + +bool +nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id) +{ + struct nvkm_acr *acr = device->acr; + struct nvkm_acr_lsf *lsf; + + if (acr) { + list_for_each_entry(lsf, &acr->lsf, head) { + if (lsf->id == id) + return true; + } + } + + return false; +} + +static int +nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend) +{ + nvkm_acr_unload(nvkm_acr(subdev)); + return 0; +} + +static int +nvkm_acr_init(struct nvkm_subdev *subdev) +{ + if (!nvkm_acr_falcon(subdev->device)) + return 0; + + return nvkm_acr_load(nvkm_acr(subdev)); +} + +static void +nvkm_acr_cleanup(struct nvkm_acr *acr) +{ + nvkm_acr_lsfw_del_all(acr); + nvkm_acr_hsfw_del_all(acr); + nvkm_firmware_put(acr->wpr_fw); + acr->wpr_fw = NULL; +} + +static int +nvkm_acr_oneinit(struct nvkm_subdev *subdev) +{ + struct nvkm_device *device = subdev->device; + struct nvkm_acr *acr = nvkm_acr(subdev); + struct nvkm_acr_hsfw *hsfw; + struct nvkm_acr_lsfw *lsfw, *lsft; + struct nvkm_acr_lsf *lsf; + u32 wpr_size = 0; + int ret, i; + + if (list_empty(&acr->hsfw)) { + nvkm_debug(subdev, "No HSFW(s)\n"); + nvkm_acr_cleanup(acr); + return 0; + } + + /* Determine layout/size of WPR image up-front, as we need to know + * it to allocate memory before we begin constructing it. + */ + list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) { + /* Cull unknown falcons that are present in WPR image. */ + if (acr->wpr_fw) { + if (!lsfw->func) { + nvkm_acr_lsfw_del(lsfw); + continue; + } + + wpr_size = acr->wpr_fw->size; + } + + /* Ensure we've fetched falcon configuration. */ + ret = nvkm_falcon_get(lsfw->falcon, subdev); + if (ret) + return ret; + + nvkm_falcon_put(lsfw->falcon, subdev); + + if (!(lsf = kmalloc(sizeof(*lsf), GFP_KERNEL))) + return -ENOMEM; + lsf->func = lsfw->func; + lsf->falcon = lsfw->falcon; + lsf->id = lsfw->id; + list_add_tail(&lsf->head, &acr->lsf); + } + + if (!acr->wpr_fw || acr->wpr_comp) + wpr_size = acr->func->wpr_layout(acr); + + /* Allocate/Locate WPR + fill ucode blob pointer. + * + * dGPU: allocate WPR + shadow blob + * Tegra: locate WPR with regs, ensure size is sufficient, + * allocate ucode blob. + */ + ret = acr->func->wpr_alloc(acr, wpr_size); + if (ret) + return ret; + + nvkm_debug(subdev, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n", + acr->wpr_start, acr->wpr_end, acr->shadow_start); + + /* Write WPR to ucode blob. */ + nvkm_kmap(acr->wpr); + if (acr->wpr_fw && !acr->wpr_comp) + nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size); + + if (!acr->wpr_fw || acr->wpr_comp) + acr->func->wpr_build(acr, nvkm_acr_falcon(device)); + acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev); + + if (acr->wpr_fw && acr->wpr_comp) { + nvkm_kmap(acr->wpr); + for (i = 0; i < acr->wpr_fw->size; i += 4) { + u32 us = nvkm_ro32(acr->wpr, i); + u32 fw = ((u32 *)acr->wpr_fw->data)[i/4]; + if (fw != us) { + nvkm_warn(subdev, "%08x: %08x %08x\n", + i, us, fw); + } + } + return -EINVAL; + } + nvkm_done(acr->wpr); + + /* Allocate instance block for ACR-related stuff. 
*/ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true, + &acr->inst); + if (ret) + return ret; + + ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "acr", &acr->vmm); + if (ret) + return ret; + + acr->vmm->debug = acr->subdev.debug; + + ret = nvkm_vmm_join(acr->vmm, acr->inst); + if (ret) + return ret; + + /* Load HS firmware blobs into ACR VMM. */ + list_for_each_entry(hsfw, &acr->hsfw, head) { + nvkm_debug(subdev, "loading %s fw\n", hsfw->name); + ret = hsfw->func->load(acr, hsfw); + if (ret) + return ret; + } + + /* Kill temporary data. */ + nvkm_acr_cleanup(acr); + return 0; +} + +static void * +nvkm_acr_dtor(struct nvkm_subdev *subdev) +{ + struct nvkm_acr *acr = nvkm_acr(subdev); + struct nvkm_acr_hsf *hsf, *hst; + struct nvkm_acr_lsf *lsf, *lst; + + list_for_each_entry_safe(hsf, hst, &acr->hsf, head) { + nvkm_vmm_put(acr->vmm, &hsf->vma); + nvkm_memory_unref(&hsf->ucode); + kfree(hsf->imem); + list_del(&hsf->head); + kfree(hsf); + } + + nvkm_vmm_part(acr->vmm, acr->inst); + nvkm_vmm_unref(&acr->vmm); + nvkm_memory_unref(&acr->inst); + + nvkm_memory_unref(&acr->wpr); + + list_for_each_entry_safe(lsf, lst, &acr->lsf, head) { + list_del(&lsf->head); + kfree(lsf); + } + + nvkm_acr_cleanup(acr); + return acr; +} + +static const struct nvkm_subdev_func +nvkm_acr = { + .dtor = nvkm_acr_dtor, + .oneinit = nvkm_acr_oneinit, + .init = nvkm_acr_init, + .fini = nvkm_acr_fini, +}; + +static int +nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_device *device = subdev->device; + int ret; + + ret = nvkm_firmware_get(subdev, "acr/wpr", ver, &acr->wpr_fw); + if (ret < 0) + return ret; + + /* Pre-add LSFs in the order they appear in the FW WPR image so that + * we're able to do a binary comparison with our own generator. + */ + ret = acr->func->wpr_parse(acr); + if (ret) + return ret; + + acr->wpr_comp = nvkm_boolopt(device->cfgopt, "NvAcrWprCompare", false); + acr->wpr_prev = nvkm_longopt(device->cfgopt, "NvAcrWprPrevAddr", 0); + return 0; +} + +int +nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device, + int index, struct nvkm_acr **pacr) +{ + struct nvkm_acr *acr; + long wprfw; + + if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL))) + return -ENOMEM; + nvkm_subdev_ctor(&nvkm_acr, device, index, &acr->subdev); + INIT_LIST_HEAD(&acr->hsfw); + INIT_LIST_HEAD(&acr->lsfw); + INIT_LIST_HEAD(&acr->hsf); + INIT_LIST_HEAD(&acr->lsf); + + fwif = nvkm_firmware_load(&acr->subdev, fwif, "Acr", acr); + if (IS_ERR(fwif)) + return PTR_ERR(fwif); + + acr->func = fwif->func; + + wprfw = nvkm_longopt(device->cfgopt, "NvAcrWpr", -1); + if (wprfw >= 0) { + int ret = nvkm_acr_ctor_wpr(acr, wprfw); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c new file mode 100644 index 000000000000..9a6394085cf0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c @@ -0,0 +1,470 @@ +/* + * Copyright 2019 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <core/falcon.h> +#include <core/firmware.h> +#include <core/memory.h> +#include <subdev/mc.h> +#include <subdev/mmu.h> +#include <subdev/pmu.h> +#include <subdev/timer.h> + +#include <nvfw/acr.h> +#include <nvfw/flcn.h> + +int +gm200_acr_init(struct nvkm_acr *acr) +{ + return nvkm_acr_hsf_boot(acr, "load"); +} + +void +gm200_acr_wpr_check(struct nvkm_acr *acr, u64 *start, u64 *limit) +{ + struct nvkm_device *device = acr->subdev.device; + + nvkm_wr32(device, 0x100cd4, 2); + *start = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8; + nvkm_wr32(device, 0x100cd4, 3); + *limit = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8; + *limit = *limit + 0x20000; +} + +void +gm200_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct wpr_header hdr; + struct lsb_header lsb; + struct nvkm_acr_lsf *lsfw; + u32 offset = 0; + + do { + nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr)); + wpr_header_dump(subdev, &hdr); + + list_for_each_entry(lsfw, &acr->lsfw, head) { + if (lsfw->id != hdr.falcon_id) + continue; + + nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb)); + lsb_header_dump(subdev, &lsb); + + lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust); + break; + } + offset += sizeof(hdr); + } while (hdr.falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID); +} + +void +gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *lsfw, + struct lsb_header_tail *hdr) +{ + hdr->ucode_off = lsfw->offset.img; + hdr->ucode_size = lsfw->ucode_size; + hdr->data_size = lsfw->data_size; + hdr->bl_code_size = lsfw->bootloader_size; + hdr->bl_imem_off = lsfw->bootloader_imem_offset; + hdr->bl_data_off = lsfw->offset.bld; + hdr->bl_data_size = lsfw->bl_data_size; + hdr->app_code_off = lsfw->app_start_offset + + lsfw->app_resident_code_offset; + hdr->app_code_size = lsfw->app_resident_code_size; + hdr->app_data_off = lsfw->app_start_offset + + lsfw->app_resident_data_offset; + hdr->app_data_size = lsfw->app_resident_data_size; + hdr->flags = lsfw->func->flags; +} + +static int +gm200_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw) +{ + struct lsb_header hdr; + + if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature))) + return -EINVAL; + + memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size); + gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail); + + nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr)); + return 0; +} + +int 
+gm200_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos) +{ + struct nvkm_acr_lsfw *lsfw; + u32 offset = 0; + int ret; + + /* Fill per-LSF structures. */ + list_for_each_entry(lsfw, &acr->lsfw, head) { + struct wpr_header hdr = { + .falcon_id = lsfw->id, + .lsb_offset = lsfw->offset.lsb, + .bootstrap_owner = NVKM_ACR_LSF_PMU, + .lazy_bootstrap = rtos && lsfw->id != rtos->id, + .status = WPR_HEADER_V0_STATUS_COPY, + }; + + /* Write WPR header. */ + nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr)); + offset += sizeof(hdr); + + /* Write LSB header. */ + ret = gm200_acr_wpr_build_lsb(acr, lsfw); + if (ret) + return ret; + + /* Write ucode image. */ + nvkm_wobj(acr->wpr, lsfw->offset.img, + lsfw->img.data, + lsfw->img.size); + + /* Write bootloader data. */ + lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw); + } + + /* Finalise WPR. */ + nvkm_wo32(acr->wpr, offset, WPR_HEADER_V0_FALCON_ID_INVALID); + return 0; +} + +static int +gm200_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size) +{ + int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST, + ALIGN(wpr_size, 0x40000), 0x40000, true, + &acr->wpr); + if (ret) + return ret; + + acr->wpr_start = nvkm_memory_addr(acr->wpr); + acr->wpr_end = acr->wpr_start + nvkm_memory_size(acr->wpr); + return 0; +} + +u32 +gm200_acr_wpr_layout(struct nvkm_acr *acr) +{ + struct nvkm_acr_lsfw *lsfw; + u32 wpr = 0; + + wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header); + + list_for_each_entry(lsfw, &acr->lsfw, head) { + wpr = ALIGN(wpr, 256); + lsfw->offset.lsb = wpr; + wpr += sizeof(struct lsb_header); + + wpr = ALIGN(wpr, 4096); + lsfw->offset.img = wpr; + wpr += lsfw->img.size; + + wpr = ALIGN(wpr, 256); + lsfw->offset.bld = wpr; + lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256); + wpr += lsfw->bl_data_size; + } + + return wpr; +} + +int +gm200_acr_wpr_parse(struct nvkm_acr *acr) +{ + const struct wpr_header *hdr = (void *)acr->wpr_fw->data; + + while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) { + wpr_header_dump(&acr->subdev, hdr); + if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) + return -ENOMEM; + } + + return 0; +} + +void +gm200_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + struct flcn_bl_dmem_desc_v1 hsdesc = { + .ctx_dma = FALCON_DMAIDX_VIRT, + .code_dma_base = hsf->vma->addr, + .non_sec_code_off = hsf->non_sec_addr, + .non_sec_code_size = hsf->non_sec_size, + .sec_code_off = hsf->sec_addr, + .sec_code_size = hsf->sec_size, + .code_entry_point = 0, + .data_dma_base = hsf->vma->addr + hsf->data_addr, + .data_size = hsf->data_size, + }; + + flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hsdesc); + + nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0); +} + +int +gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf, + u32 intr_clear, u32 mbox0_ok) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_falcon *falcon = hsf->falcon; + u32 mbox0, mbox1; + int ret; + + /* Reset falcon. */ + nvkm_falcon_reset(falcon); + nvkm_falcon_bind_context(falcon, acr->inst); + + /* Load bootloader into IMEM. */ + nvkm_falcon_load_imem(falcon, hsf->imem, + falcon->code.limit - hsf->imem_size, + hsf->imem_size, + hsf->imem_tag, + 0, false); + + /* Load bootloader data into DMEM. */ + hsf->func->bld(acr, hsf); + + /* Boot the falcon. 
*/ + nvkm_mc_intr_mask(device, falcon->owner->index, false); + + nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5); + nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8); + nvkm_falcon_start(falcon); + ret = nvkm_falcon_wait_for_halt(falcon, 100); + if (ret) + return ret; + + /* Check for successful completion. */ + mbox0 = nvkm_falcon_rd32(falcon, 0x040); + mbox1 = nvkm_falcon_rd32(falcon, 0x044); + nvkm_debug(subdev, "mailbox %08x %08x\n", mbox0, mbox1); + if (mbox0 && mbox0 != mbox0_ok) + return -EIO; + + nvkm_falcon_clear_interrupt(falcon, intr_clear); + nvkm_mc_intr_mask(device, falcon->owner->index, true); + return ret; +} + +int +gm200_acr_hsfw_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw, + struct nvkm_falcon *falcon) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_acr_hsf *hsf; + int ret; + + /* Patch the appropriate signature (production/debug) into the FW + * image, as determined by the mode the falcon is in. + */ + ret = nvkm_falcon_get(falcon, subdev); + if (ret) + return ret; + + if (hsfw->sig.patch_loc) { + if (!falcon->debug) { + nvkm_debug(subdev, "patching production signature\n"); + memcpy(hsfw->image + hsfw->sig.patch_loc, + hsfw->sig.prod.data, + hsfw->sig.prod.size); + } else { + nvkm_debug(subdev, "patching debug signature\n"); + memcpy(hsfw->image + hsfw->sig.patch_loc, + hsfw->sig.dbg.data, + hsfw->sig.dbg.size); + } + } + + nvkm_falcon_put(falcon, subdev); + + if (!(hsf = kzalloc(sizeof(*hsf), GFP_KERNEL))) + return -ENOMEM; + hsf->func = hsfw->func; + hsf->name = hsfw->name; + list_add_tail(&hsf->head, &acr->hsf); + + hsf->imem_size = hsfw->imem_size; + hsf->imem_tag = hsfw->imem_tag; + hsf->imem = kmemdup(hsfw->imem, hsfw->imem_size, GFP_KERNEL); + if (!hsf->imem) + return -ENOMEM; + + hsf->non_sec_addr = hsfw->non_sec_addr; + hsf->non_sec_size = hsfw->non_sec_size; + hsf->sec_addr = hsfw->sec_addr; + hsf->sec_size = hsfw->sec_size; + hsf->data_addr = hsfw->data_addr; + hsf->data_size = hsfw->data_size; + + /* Make the FW image accessible to the HS bootloader. 
*/ + ret = nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST, + hsfw->image_size, 0x1000, false, &hsf->ucode); + if (ret) + return ret; + + nvkm_kmap(hsf->ucode); + nvkm_wobj(hsf->ucode, 0, hsfw->image, hsfw->image_size); + nvkm_done(hsf->ucode); + + ret = nvkm_vmm_get(acr->vmm, 12, nvkm_memory_size(hsf->ucode), + &hsf->vma); + if (ret) + return ret; + + ret = nvkm_memory_map(hsf->ucode, 0, acr->vmm, hsf->vma, NULL, 0); + if (ret) + return ret; + + hsf->falcon = falcon; + return 0; +} + +int +gm200_acr_unload_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + return gm200_acr_hsfw_boot(acr, hsf, 0, 0x1d); +} + +int +gm200_acr_unload_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon); +} + +const struct nvkm_acr_hsf_func +gm200_acr_unload_0 = { + .load = gm200_acr_unload_load, + .boot = gm200_acr_unload_boot, + .bld = gm200_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin"); +MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin"); +MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin"); +MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin"); + +static const struct nvkm_acr_hsf_fwif +gm200_acr_unload_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 }, + {} +}; + +int +gm200_acr_load_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + return gm200_acr_hsfw_boot(acr, hsf, 0x10, 0); +} + +static int +gm200_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr]; + + desc->wpr_region_id = 1; + desc->regions.no_regions = 2; + desc->regions.region_props[0].start_addr = acr->wpr_start >> 8; + desc->regions.region_props[0].end_addr = acr->wpr_end >> 8; + desc->regions.region_props[0].region_id = 1; + desc->regions.region_props[0].read_mask = 0xf; + desc->regions.region_props[0].write_mask = 0xc; + desc->regions.region_props[0].client_mask = 0x2; + flcn_acr_desc_dump(&acr->subdev, desc); + + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon); +} + +static const struct nvkm_acr_hsf_func +gm200_acr_load_0 = { + .load = gm200_acr_load_load, + .boot = gm200_acr_load_boot, + .bld = gm200_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin"); + +static const struct nvkm_acr_hsf_fwif +gm200_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm200_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gm200_acr = { + .load = gm200_acr_load_fwif, + .unload = gm200_acr_unload_fwif, + .wpr_parse = gm200_acr_wpr_parse, + .wpr_layout = gm200_acr_wpr_layout, + .wpr_alloc = gm200_acr_wpr_alloc, + .wpr_build = gm200_acr_wpr_build, + .wpr_patch = gm200_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +static int +gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvkm_acr_hsf_fwif *hsfwif; + + hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad", + acr, "acr/bl", "acr/ucode_load", "load"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, 
"AcrUnload", + acr, "acr/bl", "acr/ucode_unload", + "unload"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + return 0; +} + +static const struct nvkm_acr_fwif +gm200_acr_fwif[] = { + { 0, gm200_acr_load, &gm200_acr }, + {} +}; + +int +gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c new file mode 100644 index 000000000000..034a6ede70c7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c @@ -0,0 +1,134 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <core/firmware.h> +#include <core/memory.h> +#include <subdev/mmu.h> +#include <subdev/pmu.h> + +#include <nvfw/acr.h> +#include <nvfw/flcn.h> + +int +gm20b_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size) +{ + struct nvkm_subdev *subdev = &acr->subdev; + + acr->func->wpr_check(acr, &acr->wpr_start, &acr->wpr_end); + + if ((acr->wpr_end - acr->wpr_start) < wpr_size) { + nvkm_error(subdev, "WPR image too big for WPR!\n"); + return -ENOSPC; + } + + return nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST, + wpr_size, 0, true, &acr->wpr); +} + +static void +gm20b_acr_load_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + struct flcn_bl_dmem_desc hsdesc = { + .ctx_dma = FALCON_DMAIDX_VIRT, + .code_dma_base = hsf->vma->addr >> 8, + .non_sec_code_off = hsf->non_sec_addr, + .non_sec_code_size = hsf->non_sec_size, + .sec_code_off = hsf->sec_addr, + .sec_code_size = hsf->sec_size, + .code_entry_point = 0, + .data_dma_base = (hsf->vma->addr + hsf->data_addr) >> 8, + .data_size = hsf->data_size, + }; + + flcn_bl_dmem_desc_dump(&acr->subdev, &hsdesc); + + nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0); +} + +static int +gm20b_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr]; + + desc->ucode_blob_base = nvkm_memory_addr(acr->wpr); + desc->ucode_blob_size = nvkm_memory_size(acr->wpr); + flcn_acr_desc_dump(&acr->subdev, desc); + + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon); +} + +const struct nvkm_acr_hsf_func +gm20b_acr_load_0 = { + .load = gm20b_acr_load_load, + .boot = gm200_acr_load_boot, + .bld = gm20b_acr_load_bld, +}; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) 
+MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin"); +#endif + +static const struct nvkm_acr_hsf_fwif +gm20b_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gm20b_acr = { + .load = gm20b_acr_load_fwif, + .wpr_parse = gm200_acr_wpr_parse, + .wpr_layout = gm200_acr_wpr_layout, + .wpr_alloc = gm20b_acr_wpr_alloc, + .wpr_build = gm200_acr_wpr_build, + .wpr_patch = gm200_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +int +gm20b_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvkm_acr_hsf_fwif *hsfwif; + + hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad", + acr, "acr/bl", "acr/ucode_load", "load"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + return 0; +} + +static const struct nvkm_acr_fwif +gm20b_acr_fwif[] = { + { 0, gm20b_acr_load, &gm20b_acr }, + {} +}; + +int +gm20b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gm20b_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c new file mode 100644 index 000000000000..49e11c46d525 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c @@ -0,0 +1,281 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include <core/firmware.h> +#include <core/memory.h> +#include <subdev/mmu.h> +#include <engine/sec2.h> + +#include <nvfw/acr.h> +#include <nvfw/flcn.h> + +void +gp102_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust) +{ + struct wpr_header_v1 hdr; + struct lsb_header_v1 lsb; + struct nvkm_acr_lsfw *lsfw; + u32 offset = 0; + + do { + nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr)); + wpr_header_v1_dump(&acr->subdev, &hdr); + + list_for_each_entry(lsfw, &acr->lsfw, head) { + if (lsfw->id != hdr.falcon_id) + continue; + + nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb)); + lsb_header_v1_dump(&acr->subdev, &lsb); + + lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust); + break; + } + + offset += sizeof(hdr); + } while (hdr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID); +} + +int +gp102_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw) +{ + struct lsb_header_v1 hdr; + + if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature))) + return -EINVAL; + + memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size); + gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail); + + nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr)); + return 0; +} + +int +gp102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos) +{ + struct nvkm_acr_lsfw *lsfw; + u32 offset = 0; + int ret; + + /* Fill per-LSF structures. */ + list_for_each_entry(lsfw, &acr->lsfw, head) { + struct lsf_signature_v1 *sig = (void *)lsfw->sig->data; + struct wpr_header_v1 hdr = { + .falcon_id = lsfw->id, + .lsb_offset = lsfw->offset.lsb, + .bootstrap_owner = NVKM_ACR_LSF_SEC2, + .lazy_bootstrap = rtos && lsfw->id != rtos->id, + .bin_version = sig->version, + .status = WPR_HEADER_V1_STATUS_COPY, + }; + + /* Write WPR header. */ + nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr)); + offset += sizeof(hdr); + + /* Write LSB header. */ + ret = gp102_acr_wpr_build_lsb(acr, lsfw); + if (ret) + return ret; + + /* Write ucode image. */ + nvkm_wobj(acr->wpr, lsfw->offset.img, + lsfw->img.data, + lsfw->img.size); + + /* Write bootloader data. */ + lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw); + } + + /* Finalise WPR. */ + nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID); + return 0; +} + +int +gp102_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size) +{ + int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST, + ALIGN(wpr_size, 0x40000) << 1, 0x40000, true, + &acr->wpr); + if (ret) + return ret; + + acr->shadow_start = nvkm_memory_addr(acr->wpr); + acr->wpr_start = acr->shadow_start + (nvkm_memory_size(acr->wpr) >> 1); + acr->wpr_end = acr->wpr_start + (nvkm_memory_size(acr->wpr) >> 1); + return 0; +} + +u32 +gp102_acr_wpr_layout(struct nvkm_acr *acr) +{ + struct nvkm_acr_lsfw *lsfw; + u32 wpr = 0; + + wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header_v1); + wpr = ALIGN(wpr, 256); + + wpr += 0x100; /* Shared sub-WPR headers. 
*/ + + list_for_each_entry(lsfw, &acr->lsfw, head) { + wpr = ALIGN(wpr, 256); + lsfw->offset.lsb = wpr; + wpr += sizeof(struct lsb_header_v1); + + wpr = ALIGN(wpr, 4096); + lsfw->offset.img = wpr; + wpr += lsfw->img.size; + + wpr = ALIGN(wpr, 256); + lsfw->offset.bld = wpr; + lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256); + wpr += lsfw->bl_data_size; + } + + return wpr; +} + +int +gp102_acr_wpr_parse(struct nvkm_acr *acr) +{ + const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data; + + while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) { + wpr_header_v1_dump(&acr->subdev, hdr); + if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) + return -ENOMEM; + } + + return 0; +} + +MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin"); + +static const struct nvkm_acr_hsf_fwif +gp102_acr_unload_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 }, + {} +}; + +int +gp102_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + struct flcn_acr_desc_v1 *desc = (void *)&hsfw->image[hsfw->data_addr]; + + desc->wpr_region_id = 1; + desc->regions.no_regions = 2; + desc->regions.region_props[0].start_addr = acr->wpr_start >> 8; + desc->regions.region_props[0].end_addr = acr->wpr_end >> 8; + desc->regions.region_props[0].region_id = 1; + desc->regions.region_props[0].read_mask = 0xf; + desc->regions.region_props[0].write_mask = 0xc; + desc->regions.region_props[0].client_mask = 0x2; + desc->regions.region_props[0].shadow_mem_start_addr = + acr->shadow_start >> 8; + flcn_acr_desc_v1_dump(&acr->subdev, desc); + + return gm200_acr_hsfw_load(acr, hsfw, + &acr->subdev.device->sec2->falcon); +} + +static const struct nvkm_acr_hsf_func +gp102_acr_load_0 = { + .load = gp102_acr_load_load, + .boot = gm200_acr_load_boot, + .bld = gm200_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin"); + +static const struct nvkm_acr_hsf_fwif +gp102_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gp102_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gp102_acr = { + .load = gp102_acr_load_fwif, + .unload = gp102_acr_unload_fwif, + .wpr_parse = gp102_acr_wpr_parse, + .wpr_layout = gp102_acr_wpr_layout, + .wpr_alloc = gp102_acr_wpr_alloc, + .wpr_build = gp102_acr_wpr_build, + .wpr_patch = gp102_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +int +gp102_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvkm_acr_hsf_fwif *hsfwif; + + hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad", + acr, "acr/bl", "acr/ucode_load", "load"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload", + acr, "acr/unload_bl", "acr/ucode_unload", + "unload"); + if 
(IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + return 0; +} + +static const struct nvkm_acr_fwif +gp102_acr_fwif[] = { + { 0, gp102_acr_load, &gp102_acr }, + {} +}; + +int +gp102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gp102_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c new file mode 100644 index 000000000000..f10dc9112678 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c @@ -0,0 +1,111 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/mmu.h> + +#include <nvfw/flcn.h> + +void +gp108_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + struct flcn_bl_dmem_desc_v2 hsdesc = { + .ctx_dma = FALCON_DMAIDX_VIRT, + .code_dma_base = hsf->vma->addr, + .non_sec_code_off = hsf->non_sec_addr, + .non_sec_code_size = hsf->non_sec_size, + .sec_code_off = hsf->sec_addr, + .sec_code_size = hsf->sec_size, + .code_entry_point = 0, + .data_dma_base = hsf->vma->addr + hsf->data_addr, + .data_size = hsf->data_size, + .argc = 0, + .argv = 0, + }; + + flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hsdesc); + + nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0); +} + +const struct nvkm_acr_hsf_func +gp108_acr_unload_0 = { + .load = gm200_acr_unload_load, + .boot = gm200_acr_unload_boot, + .bld = gp108_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin"); + +static const struct nvkm_acr_hsf_fwif +gp108_acr_unload_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 }, + {} +}; + +static const struct nvkm_acr_hsf_func +gp108_acr_load_0 = { + .load = gp102_acr_load_load, + .boot = gm200_acr_load_boot, + .bld = gp108_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin"); + +static const struct nvkm_acr_hsf_fwif +gp108_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gp108_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gp108_acr = { + .load = gp108_acr_load_fwif, + .unload = gp108_acr_unload_fwif, + .wpr_parse = gp102_acr_wpr_parse, + .wpr_layout = 
gp102_acr_wpr_layout, + .wpr_alloc = gp102_acr_wpr_alloc, + .wpr_build = gp102_acr_wpr_build, + .wpr_patch = gp102_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +static const struct nvkm_acr_fwif +gp108_acr_fwif[] = { + { 0, gp102_acr_load, &gp108_acr }, + {} +}; + +int +gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c new file mode 100644 index 000000000000..39de64292a41 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) +MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin"); +#endif + +static const struct nvkm_acr_hsf_fwif +gp10b_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gp10b_acr = { + .load = gp10b_acr_load_fwif, + .wpr_parse = gm200_acr_wpr_parse, + .wpr_layout = gm200_acr_wpr_layout, + .wpr_alloc = gm20b_acr_wpr_alloc, + .wpr_build = gm200_acr_wpr_build, + .wpr_patch = gm200_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +static const struct nvkm_acr_fwif +gp10b_acr_fwif[] = { + { 0, gm20b_acr_load, &gp10b_acr }, + {} +}; + +int +gp10b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gp10b_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c new file mode 100644 index 000000000000..aecce2dac558 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c @@ -0,0 +1,180 @@ +/* + * Copyright 2019 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <core/firmware.h> + +#include <nvfw/fw.h> +#include <nvfw/hs.h> + +static void +nvkm_acr_hsfw_del(struct nvkm_acr_hsfw *hsfw) +{ + list_del(&hsfw->head); + kfree(hsfw->imem); + kfree(hsfw->image); + kfree(hsfw->sig.prod.data); + kfree(hsfw->sig.dbg.data); + kfree(hsfw); +} + +void +nvkm_acr_hsfw_del_all(struct nvkm_acr *acr) +{ + struct nvkm_acr_hsfw *hsfw, *hsft; + list_for_each_entry_safe(hsfw, hsft, &acr->hsfw, head) { + nvkm_acr_hsfw_del(hsfw); + } +} + +static int +nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver, + struct nvkm_acr_hsfw *hsfw) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct firmware *fw; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_hs_header *fwhdr; + const struct nvfw_hs_load_header *lhdr; + u32 loc, sig; + int ret; + + ret = nvkm_firmware_get(subdev, name, ver, &fw); + if (ret < 0) + return ret; + + hdr = nvfw_bin_hdr(subdev, fw->data); + fwhdr = nvfw_hs_header(subdev, fw->data + hdr->header_offset); + + /* Earlier FW releases by NVIDIA for Nouveau's use aren't in NVIDIA's + * standard format, and don't have the indirection seen in the 0x10de + * case. 
+ */ + switch (hdr->bin_magic) { + case 0x000010de: + loc = *(u32 *)(fw->data + fwhdr->patch_loc); + sig = *(u32 *)(fw->data + fwhdr->patch_sig); + break; + case 0x3b1d14f0: + loc = fwhdr->patch_loc; + sig = fwhdr->patch_sig; + break; + default: + ret = -EINVAL; + goto done; + } + + lhdr = nvfw_hs_load_header(subdev, fw->data + fwhdr->hdr_offset); + + if (!(hsfw->image = kmalloc(hdr->data_size, GFP_KERNEL))) { + ret = -ENOMEM; + goto done; + } + + memcpy(hsfw->image, fw->data + hdr->data_offset, hdr->data_size); + hsfw->image_size = hdr->data_size; + hsfw->non_sec_addr = lhdr->non_sec_code_off; + hsfw->non_sec_size = lhdr->non_sec_code_size; + hsfw->sec_addr = lhdr->apps[0]; + hsfw->sec_size = lhdr->apps[lhdr->num_apps]; + hsfw->data_addr = lhdr->data_dma_base; + hsfw->data_size = lhdr->data_size; + + hsfw->sig.prod.size = fwhdr->sig_prod_size; + hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL); + if (!hsfw->sig.prod.data) { + ret = -ENOMEM; + goto done; + } + + memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig, + hsfw->sig.prod.size); + + hsfw->sig.dbg.size = fwhdr->sig_dbg_size; + hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL); + if (!hsfw->sig.dbg.data) { + ret = -ENOMEM; + goto done; + } + + memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig, + hsfw->sig.dbg.size); + + hsfw->sig.patch_loc = loc; +done: + nvkm_firmware_put(fw); + return ret; +} + +static int +nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver, + struct nvkm_acr_hsfw *hsfw) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_bl_desc *desc; + const struct firmware *fw; + u8 *data; + int ret; + + ret = nvkm_firmware_get(subdev, name, ver, &fw); + if (ret) + return ret; + + hdr = nvfw_bin_hdr(subdev, fw->data); + desc = nvfw_bl_desc(subdev, fw->data + hdr->header_offset); + data = (void *)fw->data + hdr->data_offset; + + hsfw->imem_size = desc->code_size; + hsfw->imem_tag = desc->start_tag; + hsfw->imem = kmemdup(data + desc->code_off, desc->code_size, GFP_KERNEL); + nvkm_firmware_put(fw); + if (!hsfw->imem) + return -ENOMEM; + return 0; +} + +int +nvkm_acr_hsfw_load(struct nvkm_acr *acr, const char *bl, const char *fw, + const char *name, int version, + const struct nvkm_acr_hsf_fwif *fwif) +{ + struct nvkm_acr_hsfw *hsfw; + int ret; + + if (!(hsfw = kzalloc(sizeof(*hsfw), GFP_KERNEL))) + return -ENOMEM; + + hsfw->func = fwif->func; + hsfw->name = name; + list_add_tail(&hsfw->head, &acr->hsfw); + + ret = nvkm_acr_hsfw_load_bl(acr, bl, version, hsfw); + if (ret) + goto done; + + ret = nvkm_acr_hsfw_load_image(acr, fw, version, hsfw); +done: + if (ret) + nvkm_acr_hsfw_del(hsfw); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c new file mode 100644 index 000000000000..9896462960ea --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c @@ -0,0 +1,249 @@ +/* + * Copyright 2019 Red Hat Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" +#include <core/falcon.h> +#include <core/firmware.h> +#include <nvfw/fw.h> +#include <nvfw/ls.h> + +void +nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *lsfw) +{ + nvkm_blob_dtor(&lsfw->img); + nvkm_firmware_put(lsfw->sig); + list_del(&lsfw->head); + kfree(lsfw); +} + +void +nvkm_acr_lsfw_del_all(struct nvkm_acr *acr) +{ + struct nvkm_acr_lsfw *lsfw, *lsft; + list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) { + nvkm_acr_lsfw_del(lsfw); + } +} + +static struct nvkm_acr_lsfw * +nvkm_acr_lsfw_get(struct nvkm_acr *acr, enum nvkm_acr_lsf_id id) +{ + struct nvkm_acr_lsfw *lsfw; + list_for_each_entry(lsfw, &acr->lsfw, head) { + if (lsfw->id == id) + return lsfw; + } + return NULL; +} + +struct nvkm_acr_lsfw * +nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *func, struct nvkm_acr *acr, + struct nvkm_falcon *falcon, enum nvkm_acr_lsf_id id) +{ + struct nvkm_acr_lsfw *lsfw = nvkm_acr_lsfw_get(acr, id); + + if (lsfw && lsfw->func) { + nvkm_error(&acr->subdev, "LSFW %d redefined\n", id); + return ERR_PTR(-EEXIST); + } + + if (!lsfw) { + if (!(lsfw = kzalloc(sizeof(*lsfw), GFP_KERNEL))) + return ERR_PTR(-ENOMEM); + + lsfw->id = id; + list_add_tail(&lsfw->head, &acr->lsfw); + } + + lsfw->func = func; + lsfw->falcon = falcon; + return lsfw; +} + +static struct nvkm_acr_lsfw * +nvkm_acr_lsfw_load_sig_image_desc_(struct nvkm_subdev *subdev, + struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id, + const char *path, int ver, + const struct nvkm_acr_lsf_func *func, + const struct firmware **pdesc) +{ + struct nvkm_acr *acr = subdev->device->acr; + struct nvkm_acr_lsfw *lsfw; + int ret; + + if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id)))) + return lsfw; + + ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig); + if (ret) + goto done; + + ret = nvkm_firmware_load_blob(subdev, path, "image", ver, &lsfw->img); + if (ret) + goto done; + + ret = nvkm_firmware_load_name(subdev, path, "desc", ver, pdesc); +done: + if (ret) { + nvkm_acr_lsfw_del(lsfw); + return ERR_PTR(ret); + } + + return lsfw; +} + +static void +nvkm_acr_lsfw_from_desc(const struct nvfw_ls_desc_head *desc, + struct nvkm_acr_lsfw *lsfw) +{ + lsfw->bootloader_size = ALIGN(desc->bootloader_size, 256); + lsfw->bootloader_imem_offset = desc->bootloader_imem_offset; + + lsfw->app_size = ALIGN(desc->app_size, 256); + lsfw->app_start_offset = desc->app_start_offset; + lsfw->app_imem_entry = desc->app_imem_entry; + 
lsfw->app_resident_code_offset = desc->app_resident_code_offset; + lsfw->app_resident_code_size = desc->app_resident_code_size; + lsfw->app_resident_data_offset = desc->app_resident_data_offset; + lsfw->app_resident_data_size = desc->app_resident_data_size; + + lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) + + lsfw->bootloader_size; + lsfw->data_size = lsfw->app_size + lsfw->bootloader_size - + lsfw->ucode_size; +} + +int +nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *subdev, + struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id, + const char *path, int ver, + const struct nvkm_acr_lsf_func *func) +{ + const struct firmware *fw; + struct nvkm_acr_lsfw *lsfw; + + lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver, + func, &fw); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); + + nvkm_acr_lsfw_from_desc(&nvfw_ls_desc(subdev, fw->data)->head, lsfw); + nvkm_firmware_put(fw); + return 0; +} + +int +nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *subdev, + struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id, + const char *path, int ver, + const struct nvkm_acr_lsf_func *func) +{ + const struct firmware *fw; + struct nvkm_acr_lsfw *lsfw; + + lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver, + func, &fw); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); + + nvkm_acr_lsfw_from_desc(&nvfw_ls_desc_v1(subdev, fw->data)->head, lsfw); + nvkm_firmware_put(fw); + return 0; +} + +int +nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *subdev, + struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id, + const char *path, int ver, + const struct nvkm_acr_lsf_func *func) +{ + struct nvkm_acr *acr = subdev->device->acr; + struct nvkm_acr_lsfw *lsfw; + const struct firmware *bl = NULL, *inst = NULL, *data = NULL; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_bl_desc *desc; + u32 *bldata; + int ret; + + if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id)))) + return PTR_ERR(lsfw); + + ret = nvkm_firmware_load_name(subdev, path, "bl", ver, &bl); + if (ret) + goto done; + + hdr = nvfw_bin_hdr(subdev, bl->data); + desc = nvfw_bl_desc(subdev, bl->data + hdr->header_offset); + bldata = (void *)(bl->data + hdr->data_offset); + + ret = nvkm_firmware_load_name(subdev, path, "inst", ver, &inst); + if (ret) + goto done; + + ret = nvkm_firmware_load_name(subdev, path, "data", ver, &data); + if (ret) + goto done; + + ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig); + if (ret) + goto done; + + lsfw->bootloader_size = ALIGN(desc->code_size, 256); + lsfw->bootloader_imem_offset = desc->start_tag << 8; + + lsfw->app_start_offset = lsfw->bootloader_size; + lsfw->app_imem_entry = 0; + lsfw->app_resident_code_offset = 0; + lsfw->app_resident_code_size = ALIGN(inst->size, 256); + lsfw->app_resident_data_offset = lsfw->app_resident_code_size; + lsfw->app_resident_data_size = ALIGN(data->size, 256); + lsfw->app_size = lsfw->app_resident_code_size + + lsfw->app_resident_data_size; + + lsfw->img.size = lsfw->bootloader_size + lsfw->app_size; + if (!(lsfw->img.data = kzalloc(lsfw->img.size, GFP_KERNEL))) { + ret = -ENOMEM; + goto done; + } + + memcpy(lsfw->img.data, bldata, lsfw->bootloader_size); + memcpy(lsfw->img.data + lsfw->app_start_offset + + lsfw->app_resident_code_offset, inst->data, inst->size); + memcpy(lsfw->img.data + lsfw->app_start_offset + + lsfw->app_resident_data_offset, data->data, data->size); + + lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) + + lsfw->bootloader_size; + lsfw->data_size = 
lsfw->app_size + lsfw->bootloader_size - + lsfw->ucode_size; + +done: + if (ret) + nvkm_acr_lsfw_del(lsfw); + nvkm_firmware_put(data); + nvkm_firmware_put(inst); + nvkm_firmware_put(bl); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h new file mode 100644 index 000000000000..d8ba72806d39 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h @@ -0,0 +1,151 @@ +#ifndef __NVKM_ACR_PRIV_H__ +#define __NVKM_ACR_PRIV_H__ +#include <subdev/acr.h> +struct lsb_header_tail; + +struct nvkm_acr_fwif { + int version; + int (*load)(struct nvkm_acr *, int version, + const struct nvkm_acr_fwif *); + const struct nvkm_acr_func *func; +}; + +int gm20b_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *); +int gp102_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *); + +struct nvkm_acr_lsf; +struct nvkm_acr_func { + const struct nvkm_acr_hsf_fwif *load; + const struct nvkm_acr_hsf_fwif *ahesasc; + const struct nvkm_acr_hsf_fwif *asb; + const struct nvkm_acr_hsf_fwif *unload; + int (*wpr_parse)(struct nvkm_acr *); + u32 (*wpr_layout)(struct nvkm_acr *); + int (*wpr_alloc)(struct nvkm_acr *, u32 wpr_size); + int (*wpr_build)(struct nvkm_acr *, struct nvkm_acr_lsf *rtos); + void (*wpr_patch)(struct nvkm_acr *, s64 adjust); + void (*wpr_check)(struct nvkm_acr *, u64 *start, u64 *limit); + int (*init)(struct nvkm_acr *); + void (*fini)(struct nvkm_acr *); +}; + +int gm200_acr_wpr_parse(struct nvkm_acr *); +u32 gm200_acr_wpr_layout(struct nvkm_acr *); +int gm200_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *); +void gm200_acr_wpr_patch(struct nvkm_acr *, s64); +void gm200_acr_wpr_check(struct nvkm_acr *, u64 *, u64 *); +void gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *, + struct lsb_header_tail *); +int gm200_acr_init(struct nvkm_acr *); + +int gm20b_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size); + +int gp102_acr_wpr_parse(struct nvkm_acr *); +u32 gp102_acr_wpr_layout(struct nvkm_acr *); +int gp102_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size); +int gp102_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *); +int gp102_acr_wpr_build_lsb(struct nvkm_acr *, struct nvkm_acr_lsfw *); +void gp102_acr_wpr_patch(struct nvkm_acr *, s64); + +struct nvkm_acr_hsfw { + const struct nvkm_acr_hsf_func *func; + const char *name; + struct list_head head; + + u32 imem_size; + u32 imem_tag; + u32 *imem; + + u8 *image; + u32 image_size; + u32 non_sec_addr; + u32 non_sec_size; + u32 sec_addr; + u32 sec_size; + u32 data_addr; + u32 data_size; + + struct { + struct { + void *data; + u32 size; + } prod, dbg; + u32 patch_loc; + } sig; +}; + +struct nvkm_acr_hsf_fwif { + int version; + int (*load)(struct nvkm_acr *, const char *bl, const char *fw, + const char *name, int version, + const struct nvkm_acr_hsf_fwif *); + const struct nvkm_acr_hsf_func *func; +}; + +int nvkm_acr_hsfw_load(struct nvkm_acr *, const char *, const char *, + const char *, int, const struct nvkm_acr_hsf_fwif *); +void nvkm_acr_hsfw_del_all(struct nvkm_acr *); + +struct nvkm_acr_hsf { + const struct nvkm_acr_hsf_func *func; + const char *name; + struct list_head head; + + u32 imem_size; + u32 imem_tag; + u32 *imem; + + u32 non_sec_addr; + u32 non_sec_size; + u32 sec_addr; + u32 sec_size; + u32 data_addr; + u32 data_size; + + struct nvkm_memory *ucode; + struct nvkm_vma *vma; + struct nvkm_falcon *falcon; +}; + +struct nvkm_acr_hsf_func { + int (*load)(struct nvkm_acr *, struct nvkm_acr_hsfw *); + int (*boot)(struct nvkm_acr *, struct 
nvkm_acr_hsf *); + void (*bld)(struct nvkm_acr *, struct nvkm_acr_hsf *); +}; + +int gm200_acr_hsfw_load(struct nvkm_acr *, struct nvkm_acr_hsfw *, + struct nvkm_falcon *); +int gm200_acr_hsfw_boot(struct nvkm_acr *, struct nvkm_acr_hsf *, + u32 clear_intr, u32 mbox0_ok); + +int gm200_acr_load_boot(struct nvkm_acr *, struct nvkm_acr_hsf *); + +extern const struct nvkm_acr_hsf_func gm200_acr_unload_0; +int gm200_acr_unload_load(struct nvkm_acr *, struct nvkm_acr_hsfw *); +int gm200_acr_unload_boot(struct nvkm_acr *, struct nvkm_acr_hsf *); +void gm200_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *); + +extern const struct nvkm_acr_hsf_func gm20b_acr_load_0; + +int gp102_acr_load_load(struct nvkm_acr *, struct nvkm_acr_hsfw *); + +extern const struct nvkm_acr_hsf_func gp108_acr_unload_0; +void gp108_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *); + +int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, int, + struct nvkm_acr **); +int nvkm_acr_hsf_boot(struct nvkm_acr *, const char *name); + +struct nvkm_acr_lsf { + const struct nvkm_acr_lsf_func *func; + struct nvkm_falcon *falcon; + enum nvkm_acr_lsf_id id; + struct list_head head; +}; + +struct nvkm_acr_lsfw *nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *, + struct nvkm_acr *, struct nvkm_falcon *, + enum nvkm_acr_lsf_id); +void nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *); +void nvkm_acr_lsfw_del_all(struct nvkm_acr *); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c new file mode 100644 index 000000000000..7f4b89d82d32 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c @@ -0,0 +1,215 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <core/firmware.h> +#include <core/memory.h> +#include <subdev/gsp.h> +#include <subdev/pmu.h> +#include <engine/sec2.h> + +#include <nvfw/acr.h> + +static int +tu102_acr_init(struct nvkm_acr *acr) +{ + int ret = nvkm_acr_hsf_boot(acr, "AHESASC"); + if (ret) + return ret; + + return nvkm_acr_hsf_boot(acr, "ASB"); +} + +static int +tu102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos) +{ + struct nvkm_acr_lsfw *lsfw; + u32 offset = 0; + int ret; + + /*XXX: shared sub-WPR headers, fill terminator for now. */ + nvkm_wo32(acr->wpr, 0x200, 0xffffffff); + + /* Fill per-LSF structures. 
*/ + list_for_each_entry(lsfw, &acr->lsfw, head) { + struct lsf_signature_v1 *sig = (void *)lsfw->sig->data; + struct wpr_header_v1 hdr = { + .falcon_id = lsfw->id, + .lsb_offset = lsfw->offset.lsb, + .bootstrap_owner = NVKM_ACR_LSF_GSPLITE, + .lazy_bootstrap = 1, + .bin_version = sig->version, + .status = WPR_HEADER_V1_STATUS_COPY, + }; + + /* Write WPR header. */ + nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr)); + offset += sizeof(hdr); + + /* Write LSB header. */ + ret = gp102_acr_wpr_build_lsb(acr, lsfw); + if (ret) + return ret; + + /* Write ucode image. */ + nvkm_wobj(acr->wpr, lsfw->offset.img, + lsfw->img.data, + lsfw->img.size); + + /* Write bootloader data. */ + lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw); + } + + /* Finalise WPR. */ + nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID); + return 0; +} + +static int +tu102_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + return gm200_acr_hsfw_boot(acr, hsf, 0, 0); +} + +static int +tu102_acr_hsfw_nofw(struct nvkm_acr *acr, const char *bl, const char *fw, + const char *name, int version, + const struct nvkm_acr_hsf_fwif *fwif) +{ + return 0; +} + +MODULE_FIRMWARE("nvidia/tu102/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/tu102/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/tu104/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin"); + +static const struct nvkm_acr_hsf_fwif +tu102_acr_unload_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 }, + { -1, tu102_acr_hsfw_nofw }, + {} +}; + +static int +tu102_acr_asb_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->gsp->falcon); +} + +static const struct nvkm_acr_hsf_func +tu102_acr_asb_0 = { + .load = tu102_acr_asb_load, + .boot = tu102_acr_hsfw_boot, + .bld = gp108_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin"); +MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin"); +MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin"); + +static const struct nvkm_acr_hsf_fwif +tu102_acr_asb_fwif[] = { + { 0, nvkm_acr_hsfw_load, &tu102_acr_asb_0 }, + { -1, tu102_acr_hsfw_nofw }, + {} +}; + +static const struct nvkm_acr_hsf_func +tu102_acr_ahesasc_0 = { + .load = gp102_acr_load_load, + .boot = tu102_acr_hsfw_boot, + .bld = gp108_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/tu102/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/tu102/acr/ucode_ahesasc.bin"); + +MODULE_FIRMWARE("nvidia/tu104/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin"); + +MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin"); + +static const struct nvkm_acr_hsf_fwif +tu102_acr_ahesasc_fwif[] = { + { 0, nvkm_acr_hsfw_load, &tu102_acr_ahesasc_0 }, + { -1, tu102_acr_hsfw_nofw }, + {} +}; + +static const struct nvkm_acr_func +tu102_acr = { + .ahesasc = tu102_acr_ahesasc_fwif, + .asb = tu102_acr_asb_fwif, + .unload = tu102_acr_unload_fwif, + .wpr_parse = gp102_acr_wpr_parse, + .wpr_layout = gp102_acr_wpr_layout, + .wpr_alloc = gp102_acr_wpr_alloc, + .wpr_patch = gp102_acr_wpr_patch, + .wpr_build = tu102_acr_wpr_build, + .wpr_check = gm200_acr_wpr_check, + .init = tu102_acr_init, +}; + +static int +tu102_acr_load(struct nvkm_acr *acr, int version, + const struct nvkm_acr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvkm_acr_hsf_fwif *hsfwif; + + hsfwif = nvkm_firmware_load(subdev, 
fwif->func->ahesasc, "AcrAHESASC", + acr, "acr/bl", "acr/ucode_ahesasc", + "AHESASC"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + hsfwif = nvkm_firmware_load(subdev, fwif->func->asb, "AcrASB", + acr, "acr/bl", "acr/ucode_asb", "ASB"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload", + acr, "acr/unload_bl", "acr/ucode_unload", + "unload"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + return 0; +} + +static const struct nvkm_acr_fwif +tu102_acr_fwif[] = { + { 0, tu102_acr_load, &tu102_acr }, + {} +}; + +int +tu102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(tu102_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild index 53b9d638f2c8..d65ec719f153 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild @@ -2,5 +2,6 @@ nvkm-y += nvkm/subdev/fault/base.o nvkm-y += nvkm/subdev/fault/user.o nvkm-y += nvkm/subdev/fault/gp100.o +nvkm-y += nvkm/subdev/fault/gp10b.o nvkm-y += nvkm/subdev/fault/gv100.o nvkm-y += nvkm/subdev/fault/tu102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c index ca251560d3e0..f6dca97140d6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c @@ -108,7 +108,7 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id) return ret; /* Pin fault buffer in BAR2. */ - buffer->addr = nvkm_memory_bar2(buffer->mem); + buffer->addr = fault->func->buffer.pin(buffer); if (buffer->addr == ~0ULL) return -EFAULT; @@ -146,6 +146,7 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev) struct nvkm_fault *fault = nvkm_fault(subdev); int i; + nvkm_notify_fini(&fault->nrpfb); nvkm_event_fini(&fault->event); for (i = 0; i < fault->buffer_nr; i++) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c index 4f3c4e091117..f6b189cc4330 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c @@ -21,25 +21,26 @@ */ #include "priv.h" +#include <core/memory.h> #include <subdev/mc.h> #include <nvif/class.h> -static void +void gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable) { struct nvkm_device *device = buffer->fault->subdev.device; nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable); } -static void +void gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer) { struct nvkm_device *device = buffer->fault->subdev.device; nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000); } -static void +void gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer) { struct nvkm_device *device = buffer->fault->subdev.device; @@ -48,7 +49,12 @@ gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer) nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001); } -static void +u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *buffer) +{ + return nvkm_memory_bar2(buffer->mem); +} + +void gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer) { buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78); @@ -56,7 +62,7 @@ gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer) buffer->put = 0x002a80; } -static void +void gp100_fault_intr(struct nvkm_fault *fault) { nvkm_event_send(&fault->event, 1, 0, NULL, 0); @@ -68,6 +74,7 @@ gp100_fault = { 
.buffer.nr = 1, .buffer.entry_size = 32, .buffer.info = gp100_fault_buffer_info, + .buffer.pin = gp100_fault_buffer_pin, .buffer.init = gp100_fault_buffer_init, .buffer.fini = gp100_fault_buffer_fini, .buffer.intr = gp100_fault_buffer_intr, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c new file mode 100644 index 000000000000..9e66d1f7654d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2019 NVIDIA Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "priv.h" + +#include <core/memory.h> + +#include <nvif/class.h> + +u64 +gp10b_fault_buffer_pin(struct nvkm_fault_buffer *buffer) +{ + return nvkm_memory_addr(buffer->mem); +} + +static const struct nvkm_fault_func +gp10b_fault = { + .intr = gp100_fault_intr, + .buffer.nr = 1, + .buffer.entry_size = 32, + .buffer.info = gp100_fault_buffer_info, + .buffer.pin = gp10b_fault_buffer_pin, + .buffer.init = gp100_fault_buffer_init, + .buffer.fini = gp100_fault_buffer_fini, + .buffer.intr = gp100_fault_buffer_intr, + .user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 }, +}; + +int +gp10b_fault_new(struct nvkm_device *device, int index, + struct nvkm_fault **pfault) +{ + return nvkm_fault_new_(&gp10b_fault, device, index, pfault); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c index 6747f09c2dc3..2707be4ffabc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c @@ -214,6 +214,7 @@ gv100_fault = { .buffer.nr = 2, .buffer.entry_size = 32, .buffer.info = gv100_fault_buffer_info, + .buffer.pin = gp100_fault_buffer_pin, .buffer.init = gv100_fault_buffer_init, .buffer.fini = gv100_fault_buffer_fini, .buffer.intr = gv100_fault_buffer_intr, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h index 975e66ac6344..f6f1dd7eee1f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h @@ -30,6 +30,7 @@ struct nvkm_fault_func { int nr; u32 entry_size; void (*info)(struct nvkm_fault_buffer *); + u64 (*pin)(struct nvkm_fault_buffer *); void (*init)(struct nvkm_fault_buffer *); void (*fini)(struct nvkm_fault_buffer *); void (*intr)(struct nvkm_fault_buffer *, bool enable); @@ -40,6 +41,15 @@ struct nvkm_fault_func { } user; }; +void 
gp100_fault_buffer_intr(struct nvkm_fault_buffer *, bool enable); +void gp100_fault_buffer_fini(struct nvkm_fault_buffer *); +void gp100_fault_buffer_init(struct nvkm_fault_buffer *); +u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *); +void gp100_fault_buffer_info(struct nvkm_fault_buffer *); +void gp100_fault_intr(struct nvkm_fault *); + +u64 gp10b_fault_buffer_pin(struct nvkm_fault_buffer *); + int gv100_fault_oneinit(struct nvkm_fault *); int nvkm_ufault_new(struct nvkm_device *, const struct nvkm_oclass *, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c index fa1dfe5692b0..45a6a68b9f48 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c @@ -154,6 +154,7 @@ tu102_fault = { .buffer.nr = 2, .buffer.entry_size = 32, .buffer.info = tu102_fault_buffer_info, + .buffer.pin = gp100_fault_buffer_pin, .buffer.init = tu102_fault_buffer_init, .buffer.fini = tu102_fault_buffer_fini, .buffer.intr = tu102_fault_buffer_intr, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c index b2bb5a3ccb02..d09db7c6b7ee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c @@ -154,6 +154,23 @@ nvkm_fb_init(struct nvkm_subdev *subdev) if (fb->func->init_unkn) fb->func->init_unkn(fb); + + if (fb->func->vpr.scrub_required && + fb->func->vpr.scrub_required(fb)) { + nvkm_debug(subdev, "VPR locked, running scrubber binary\n"); + + ret = fb->func->vpr.scrub(fb); + if (ret) + return ret; + + if (fb->func->vpr.scrub_required(fb)) { + nvkm_error(subdev, "VPR still locked after scrub!\n"); + return -EIO; + } + + nvkm_debug(subdev, "VPR scrubber binary successful\n"); + } + return 0; } @@ -172,6 +189,8 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev) nvkm_mm_fini(&fb->tags); nvkm_ram_del(&fb->ram); + nvkm_blob_dtor(&fb->vpr_scrubber); + if (fb->func->dtor) return fb->func->dtor(fb); return fb; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c index b4d74e815674..9be7316c6642 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c @@ -24,7 +24,81 @@ #include "gf100.h" #include "ram.h" +#include <core/firmware.h> #include <core/memory.h> +#include <nvfw/fw.h> +#include <nvfw/hs.h> +#include <engine/nvdec.h> + +int +gp102_fb_vpr_scrub(struct nvkm_fb *fb) +{ + struct nvkm_subdev *subdev = &fb->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_falcon *falcon = &device->nvdec[0]->falcon; + struct nvkm_blob *blob = &fb->vpr_scrubber; + const struct nvfw_bin_hdr *hsbin_hdr; + const struct nvfw_hs_header *fw_hdr; + const struct nvfw_hs_load_header *lhdr; + void *scrub_data; + u32 patch_loc, patch_sig; + int ret; + + nvkm_falcon_get(falcon, subdev); + + hsbin_hdr = nvfw_bin_hdr(subdev, blob->data); + fw_hdr = nvfw_hs_header(subdev, blob->data + hsbin_hdr->header_offset); + lhdr = nvfw_hs_load_header(subdev, blob->data + fw_hdr->hdr_offset); + scrub_data = blob->data + hsbin_hdr->data_offset; + + patch_loc = *(u32 *)(blob->data + fw_hdr->patch_loc); + patch_sig = *(u32 *)(blob->data + fw_hdr->patch_sig); + if (falcon->debug) { + memcpy(scrub_data + patch_loc, + blob->data + fw_hdr->sig_dbg_offset + patch_sig, + fw_hdr->sig_dbg_size); + } else { + memcpy(scrub_data + patch_loc, + blob->data + fw_hdr->sig_prod_offset + patch_sig, + fw_hdr->sig_prod_size); + } + + 
nvkm_falcon_reset(falcon); + nvkm_falcon_bind_context(falcon, NULL); + + nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off, + lhdr->non_sec_code_size, + lhdr->non_sec_code_off >> 8, 0, false); + nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0], + ALIGN(lhdr->apps[0], 0x100), + lhdr->apps[1], + lhdr->apps[0] >> 8, 0, true); + nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0, + lhdr->data_size, 0); + + nvkm_falcon_set_start_addr(falcon, 0x0); + nvkm_falcon_start(falcon); + + ret = nvkm_falcon_wait_for_halt(falcon, 500); + if (ret < 0) { + ret = -ETIMEDOUT; + goto end; + } + + /* put nvdec in clean state - without reset it will remain in HS mode */ + nvkm_falcon_reset(falcon); +end: + nvkm_falcon_put(falcon, subdev); + return ret; +} + +bool +gp102_fb_vpr_scrub_required(struct nvkm_fb *fb) +{ + struct nvkm_device *device = fb->subdev.device; + nvkm_wr32(device, 0x100cd0, 0x2); + return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0; +} static const struct nvkm_fb_func gp102_fb = { @@ -33,11 +107,31 @@ gp102_fb = { .init = gp100_fb_init, .init_remapper = gp100_fb_init_remapper, .init_page = gm200_fb_init_page, + .vpr.scrub_required = gp102_fb_vpr_scrub_required, + .vpr.scrub = gp102_fb_vpr_scrub, .ram_new = gp100_ram_new, }; int +gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device, + int index, struct nvkm_fb **pfb) +{ + int ret = gf100_fb_new_(func, device, index, pfb); + if (ret) + return ret; + + return nvkm_firmware_load_blob(&(*pfb)->subdev, "nvdec/scrubber", "", 0, + &(*pfb)->vpr_scrubber); +} + +int gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gp102_fb, device, index, pfb); + return gp102_fb_new_(&gp102_fb, device, index, pfb); } + +MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c index 3c5e02e9794a..389bad312bf2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c @@ -35,6 +35,8 @@ gv100_fb = { .init = gp100_fb_init, .init_page = gv100_fb_init_page, .init_unkn = gp100_fb_init_unkn, + .vpr.scrub_required = gp102_fb_vpr_scrub_required, + .vpr.scrub = gp102_fb_vpr_scrub, .ram_new = gp100_ram_new, .default_bigpage = 16, }; @@ -42,5 +44,10 @@ gv100_fb = { int gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gv100_fb, device, index, pfb); + return gp102_fb_new_(&gv100_fb, device, index, pfb); } + +MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index c4e9f55af283..5be9c563350d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -17,6 +17,11 @@ struct nvkm_fb_func { void (*intr)(struct nvkm_fb *); struct { + bool (*scrub_required)(struct nvkm_fb *); + int (*scrub)(struct nvkm_fb *); + } vpr; + + struct { int regions; void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nvkm_fb_tile *); @@ 
-72,4 +77,9 @@ int gm200_fb_init_page(struct nvkm_fb *); void gp100_fb_init_remapper(struct nvkm_fb *); void gp100_fb_init_unkn(struct nvkm_fb *); + +int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int, + struct nvkm_fb **); +bool gp102_fb_vpr_scrub_required(struct nvkm_fb *); +int gp102_fb_vpr_scrub(struct nvkm_fb *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c index ac87a3b6b7c9..ba43fe158b22 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c @@ -655,7 +655,7 @@ gf100_ram_new_(const struct nvkm_ram_func *func, static const struct nvkm_ram_func gf100_ram = { - .upper = 0x0200000000, + .upper = 0x0200000000ULL, .probe_fbp = gf100_ram_probe_fbp, .probe_fbp_amount = gf100_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c index 70a06e3cd55a..d97fa43efb91 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c @@ -43,7 +43,7 @@ gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao, static const struct nvkm_ram_func gf108_ram = { - .upper = 0x0200000000, + .upper = 0x0200000000ULL, .probe_fbp = gf100_ram_probe_fbp, .probe_fbp_amount = gf108_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c index 456aed1f2a02..d350d92852d2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c @@ -1698,7 +1698,7 @@ gk104_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb, static const struct nvkm_ram_func gk104_ram = { - .upper = 0x0200000000, + .upper = 0x0200000000ULL, .probe_fbp = gf100_ram_probe_fbp, .probe_fbp_amount = gf108_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c index 27c68e3f9772..be91da854dca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c @@ -33,7 +33,7 @@ gm107_ram_probe_fbp(const struct nvkm_ram_func *func, static const struct nvkm_ram_func gm107_ram = { - .upper = 0x1000000000, + .upper = 0x1000000000ULL, .probe_fbp = gm107_ram_probe_fbp, .probe_fbp_amount = gf108_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c index 6b0cac1fe7b4..8f91ea91ee25 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c @@ -48,7 +48,7 @@ gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao, static const struct nvkm_ram_func gm200_ram = { - .upper = 0x1000000000, + .upper = 0x1000000000ULL, .probe_fbp = gm107_ram_probe_fbp, .probe_fbp_amount = gm200_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c index adb62a6beb63..378f6fb70990 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c @@ -79,7 +79,7 @@ 
gp100_ram_probe_fbpa(struct nvkm_device *device, int fbpa) static const struct nvkm_ram_func gp100_ram = { - .upper = 0x1000000000, + .upper = 0x1000000000ULL, .probe_fbp = gm107_ram_probe_fbp, .probe_fbp_amount = gm200_ram_probe_fbp_amount, .probe_fbpa_amount = gp100_ram_probe_fbpa, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild index e7c4f068936e..67cc3b320169 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild @@ -1,2 +1,3 @@ # SPDX-License-Identifier: MIT +nvkm-y += nvkm/subdev/gsp/base.o nvkm-y += nvkm/subdev/gsp/gv100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c new file mode 100644 index 000000000000..5a32df0f9992 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -0,0 +1,59 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" +#include <core/falcon.h> +#include <core/firmware.h> +#include <subdev/acr.h> +#include <subdev/top.h> + +static void * +nvkm_gsp_dtor(struct nvkm_subdev *subdev) +{ + struct nvkm_gsp *gsp = nvkm_gsp(subdev); + nvkm_falcon_dtor(&gsp->falcon); + return gsp; +} + +static const struct nvkm_subdev_func +nvkm_gsp = { + .dtor = nvkm_gsp_dtor, +}; + +int +nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, + int index, struct nvkm_gsp **pgsp) +{ + struct nvkm_gsp *gsp; + + if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL))) + return -ENOMEM; + + nvkm_subdev_ctor(&nvkm_gsp, device, index, &gsp->subdev); + + fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp); + if (IS_ERR(fwif)) + return PTR_ERR(fwif); + + return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev, + nvkm_subdev_name[gsp->subdev.index], 0, + &gsp->falcon); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c index dccfaf1162e2..2114f9b00a28 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c @@ -19,44 +19,37 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include <subdev/gsp.h> -#include <subdev/top.h> -#include <engine/falcon.h> +#include "priv.h" + +static const struct nvkm_falcon_func +gv100_gsp_flcn = { + .fbif = 0x600, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = gp102_sec2_flcn_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = gp102_sec2_flcn_enable, + .disable = nvkm_falcon_v1_disable, +}; static int -gv100_gsp_oneinit(struct nvkm_subdev *subdev) -{ - struct nvkm_gsp *gsp = nvkm_gsp(subdev); - - gsp->addr = nvkm_top_addr(subdev->device, subdev->index); - if (!gsp->addr) - return -EINVAL; - - return nvkm_falcon_v1_new(subdev, "GSP", gsp->addr, &gsp->falcon); -} - -static void * -gv100_gsp_dtor(struct nvkm_subdev *subdev) +gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) { - struct nvkm_gsp *gsp = nvkm_gsp(subdev); - nvkm_falcon_del(&gsp->falcon); - return gsp; + return 0; } -static const struct nvkm_subdev_func -gv100_gsp = { - .dtor = gv100_gsp_dtor, - .oneinit = gv100_gsp_oneinit, +struct nvkm_gsp_fwif +gv100_gsp[] = { + { -1, gv100_gsp_nofw, &gv100_gsp_flcn }, + {} }; int gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp) { - struct nvkm_gsp *gsp; - - if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL))) - return -ENOMEM; - - nvkm_subdev_ctor(&gv100_gsp, device, index, &gsp->subdev); - return 0; + return nvkm_gsp_new_(gv100_gsp, device, index, pgsp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h new file mode 100644 index 000000000000..92820fb997c1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_GSP_PRIV_H__ +#define __NVKM_GSP_PRIV_H__ +#include <subdev/gsp.h> +enum nvkm_acr_lsf_id; + +struct nvkm_gsp_fwif { + int version; + int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *); + const struct nvkm_falcon_func *flcn; +}; + +int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, int, + struct nvkm_gsp **); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild index 2b6d36ea7067..728d75010847 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild @@ -6,3 +6,4 @@ nvkm-y += nvkm/subdev/ltc/gm107.o nvkm-y += nvkm/subdev/ltc/gm200.o nvkm-y += nvkm/subdev/ltc/gp100.o nvkm-y += nvkm/subdev/ltc/gp102.o +nvkm-y += nvkm/subdev/ltc/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c new file mode 100644 index 000000000000..c0063c7caa50 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2019 NVIDIA Corporation. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Thierry Reding + */ + +#include "priv.h" + +static void +gp10b_ltc_init(struct nvkm_ltc *ltc) +{ + struct nvkm_device *device = ltc->subdev.device; + struct iommu_fwspec *spec; + + nvkm_wr32(device, 0x17e27c, ltc->ltc_nr); + nvkm_wr32(device, 0x17e000, ltc->ltc_nr); + nvkm_wr32(device, 0x100800, ltc->ltc_nr); + + spec = dev_iommu_fwspec_get(device->dev); + if (spec) { + u32 sid = spec->ids[0] & 0xffff; + + /* stream ID */ + nvkm_wr32(device, 0x160000, sid << 2); + } +} + +static const struct nvkm_ltc_func +gp10b_ltc = { + .oneinit = gp100_ltc_oneinit, + .init = gp10b_ltc_init, + .intr = gp100_ltc_intr, + .cbc_clear = gm107_ltc_cbc_clear, + .cbc_wait = gm107_ltc_cbc_wait, + .zbc = 16, + .zbc_clear_color = gm107_ltc_zbc_clear_color, + .zbc_clear_depth = gm107_ltc_zbc_clear_depth, + .zbc_clear_stencil = gp102_ltc_zbc_clear_stencil, + .invalidate = gf100_ltc_invalidate, + .flush = gf100_ltc_flush, +}; + +int +gp10b_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) +{ + return nvkm_ltc_new_(&gp10b_ltc, device, index, pltc); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h index 2fcf18e46ce3..eca5a711b1b8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h @@ -46,4 +46,6 @@ void gm107_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32); int gp100_ltc_oneinit(struct nvkm_ltc *); void gp100_ltc_init(struct nvkm_ltc *); void gp100_ltc_intr(struct nvkm_ltc *); + +void gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *, int, const u32); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c index 2d075246dc46..2cd5ec81c0d0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c @@ -30,7 +30,7 @@ * The value 0xff represents an invalid storage type. 
*/ const u8 * -gf100_mmu_kind(struct nvkm_mmu *mmu, int *count) +gf100_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) { static const u8 kind[256] = { @@ -69,6 +69,7 @@ gf100_mmu_kind(struct nvkm_mmu *mmu, int *count) }; *count = ARRAY_SIZE(kind); + *invalid = 0xff; return kind; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c index dbf644ebac97..83990c83f9f8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c @@ -27,7 +27,7 @@ #include <nvif/class.h> const u8 * -gm200_mmu_kind(struct nvkm_mmu *mmu, int *count) +gm200_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) { static const u8 kind[256] = { @@ -65,6 +65,7 @@ gm200_mmu_kind(struct nvkm_mmu *mmu, int *count) 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff }; *count = ARRAY_SIZE(kind); + *invalid = 0xff; return kind; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c index db3dfbbb2aa0..c0083ddda65a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c @@ -27,7 +27,7 @@ #include <nvif/class.h> const u8 * -nv50_mmu_kind(struct nvkm_mmu *base, int *count) +nv50_mmu_kind(struct nvkm_mmu *base, int *count, u8 *invalid) { /* 0x01: no bank swizzle * 0x02: bank swizzled @@ -57,6 +57,7 @@ nv50_mmu_kind(struct nvkm_mmu *base, int *count) 0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f }; *count = ARRAY_SIZE(kind); + *invalid = 0x7f; return kind; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h index 07f2fcd18f3d..479b02344271 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h @@ -35,17 +35,17 @@ struct nvkm_mmu_func { u32 pd_offset; } vmm; - const u8 *(*kind)(struct nvkm_mmu *, int *count); + const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid); bool kind_sys; }; extern const struct nvkm_mmu_func nv04_mmu; -const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count); +const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid); -const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count); +const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid); -const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *); +const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *); struct nvkm_mmu_pt { union { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c index c0db0ce10cba..b21e82eb0916 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c @@ -1,5 +1,6 @@ /* * Copyright 2018 Red Hat Inc. + * Copyright 2019 NVIDIA Corporation. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,13 +27,26 @@ #include <nvif/class.h> +const u8 * +tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) +{ + static const u8 + kind[16] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00 */ + 0x06, 0x06, 0x02, 0x01, 0x03, 0x04, 0x05, 0x07, + }; + *count = ARRAY_SIZE(kind); + *invalid = 0x07; + return kind; +} + static const struct nvkm_mmu_func tu102_mmu = { .dma_bits = 47, .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}}, .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map }, .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new }, - .kind = gm200_mmu_kind, + .kind = tu102_mmu_kind, .kind_sys = true, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c index 353f10f92b77..0e4b8941da37 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c @@ -111,15 +111,17 @@ nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc) } *args = argv; const u8 *kind = NULL; int ret = -ENOSYS, count = 0; + u8 kind_inv = 0; if (mmu->func->kind) - kind = mmu->func->kind(mmu, &count); + kind = mmu->func->kind(mmu, &count, &kind_inv); if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) { if (argc != args->v0.count * sizeof(*args->v0.data)) return -EINVAL; if (args->v0.count > count) return -EINVAL; + args->v0.kind_inv = kind_inv; memcpy(args->v0.data, kind, args->v0.count); } else return ret; @@ -157,9 +159,10 @@ nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass, struct nvkm_mmu *mmu = device->mmu; struct nvkm_ummu *ummu; int ret = -ENOSYS, kinds = 0; + u8 unused = 0; if (mmu->func->kind) - mmu->func->kind(mmu, &kinds); + mmu->func->kind(mmu, &kinds, &unused); if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { args->v0.dmabits = mmu->dma_bits; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c index ab6424faf84c..6a2d9eb8e1ea 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c @@ -247,7 +247,7 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, } *args = argv; struct nvkm_device *device = vmm->mmu->subdev.device; struct nvkm_memory *memory = map->memory; - u8 kind, priv, ro, vol; + u8 kind, kind_inv, priv, ro, vol; int kindn, aper, ret = -ENOSYS; const u8 *kindm; @@ -274,8 +274,8 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, if (WARN_ON(aper < 0)) return aper; - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); - if (kind >= kindn || kindm[kind] == 0xff) { + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); + if (kind >= kindn || kindm[kind] == kind_inv) { VMM_DEBUG(vmm, "kind %02x", kind); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c index b4f519768d5e..d86287565542 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c @@ -320,7 +320,7 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, } *args = argv; struct nvkm_device *device = vmm->mmu->subdev.device; struct nvkm_memory *memory = map->memory; - u8 kind, priv, ro, vol; + u8 kind, kind_inv, priv, ro, vol; int kindn, aper, ret = -ENOSYS; const u8 *kindm; @@ -347,8 
+347,8 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, if (WARN_ON(aper < 0)) return aper; - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); - if (kind >= kindn || kindm[kind] == 0xff) { + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); + if (kind >= kindn || kindm[kind] == kind_inv) { VMM_DEBUG(vmm, "kind %02x", kind); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c index c98afe3134ee..2d89e27e8e9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c @@ -235,7 +235,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, struct nvkm_device *device = vmm->mmu->subdev.device; struct nvkm_ram *ram = device->fb->ram; struct nvkm_memory *memory = map->memory; - u8 aper, kind, comp, priv, ro; + u8 aper, kind, kind_inv, comp, priv, ro; int kindn, ret = -ENOSYS; const u8 *kindm; @@ -278,8 +278,8 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, return -EINVAL; } - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); - if (kind >= kindn || kindm[kind] == 0x7f) { + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); + if (kind >= kindn || kindm[kind] == kind_inv) { VMM_DEBUG(vmm, "kind %02x", kind); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild index e37b6e45eaa2..a76c2a7bd696 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild @@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/pmu/gm107.o nvkm-y += nvkm/subdev/pmu/gm20b.o nvkm-y += nvkm/subdev/pmu/gp100.o nvkm-y += nvkm/subdev/pmu/gp102.o +nvkm-y += nvkm/subdev/pmu/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c index ea2e11771bca..a0fe607c9c07 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c @@ -23,7 +23,7 @@ */ #include "priv.h" -#include <core/msgqueue.h> +#include <core/firmware.h> #include <subdev/timer.h> bool @@ -85,6 +85,12 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend) pmu->func->fini(pmu); flush_work(&pmu->recv.work); + + reinit_completion(&pmu->wpr_ready); + + nvkm_falcon_cmdq_fini(pmu->lpq); + nvkm_falcon_cmdq_fini(pmu->hpq); + pmu->initmsg_received = false; return 0; } @@ -133,19 +139,15 @@ nvkm_pmu_init(struct nvkm_subdev *subdev) return ret; } -static int -nvkm_pmu_oneinit(struct nvkm_subdev *subdev) -{ - struct nvkm_pmu *pmu = nvkm_pmu(subdev); - return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon); -} - static void * nvkm_pmu_dtor(struct nvkm_subdev *subdev) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - nvkm_msgqueue_del(&pmu->queue); - nvkm_falcon_del(&pmu->falcon); + nvkm_falcon_msgq_del(&pmu->msgq); + nvkm_falcon_cmdq_del(&pmu->lpq); + nvkm_falcon_cmdq_del(&pmu->hpq); + nvkm_falcon_qmgr_del(&pmu->qmgr); + nvkm_falcon_dtor(&pmu->falcon); return nvkm_pmu(subdev); } @@ -153,29 +155,50 @@ static const struct nvkm_subdev_func nvkm_pmu = { .dtor = nvkm_pmu_dtor, .preinit = nvkm_pmu_preinit, - .oneinit = nvkm_pmu_oneinit, .init = nvkm_pmu_init, .fini = nvkm_pmu_fini, .intr = nvkm_pmu_intr, }; int -nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device, +nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device, int index, struct nvkm_pmu *pmu) { + int ret; + nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev); - 
pmu->func = func; + INIT_WORK(&pmu->recv.work, nvkm_pmu_recv); init_waitqueue_head(&pmu->recv.wait); + + fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu); + if (IS_ERR(fwif)) + return PTR_ERR(fwif); + + pmu->func = fwif->func; + + ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev, + nvkm_subdev_name[pmu->subdev.index], 0x10a000, + &pmu->falcon); + if (ret) + return ret; + + if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) || + (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) || + (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) || + (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq))) + return ret; + + init_completion(&pmu->wpr_ready); return 0; } int -nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device, +nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { struct nvkm_pmu *pmu; if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) return -ENOMEM; - return nvkm_pmu_ctor(func, device, index, *ppmu); + return nvkm_pmu_ctor(fwif, device, index, *ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c index 0b458656e870..3ecb3d9cbcf2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c @@ -42,6 +42,7 @@ gf100_pmu_enabled(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gf100_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gf100_pmu_code, .code.size = sizeof(gf100_pmu_code), .data.data = gf100_pmu_data, @@ -56,7 +57,19 @@ gf100_pmu = { }; int +gf100_pmu_nofw(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif) +{ + return 0; +} + +static const struct nvkm_pmu_fwif +gf100_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gf100_pmu }, + {} +}; + +int gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu); + return nvkm_pmu_new_(gf100_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c index 3dfa79d4fb13..8dd0271aaaee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c @@ -26,6 +26,7 @@ static const struct nvkm_pmu_func gf119_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gf119_pmu_code, .code.size = sizeof(gf119_pmu_code), .data.data = gf119_pmu_data, @@ -39,8 +40,14 @@ gf119_pmu = { .recv = gt215_pmu_recv, }; +static const struct nvkm_pmu_fwif +gf119_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gf119_pmu }, + {} +}; + int gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu); + return nvkm_pmu_new_(gf119_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c index 8f7ec10fd2a4..8b70cc17a634 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c @@ -105,6 +105,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable) static const struct nvkm_pmu_func gk104_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gk104_pmu_code, .code.size = sizeof(gk104_pmu_code), .data.data = gk104_pmu_data, @@ -119,8 +120,14 @@ gk104_pmu = { .pgob = gk104_pmu_pgob, }; +static const struct nvkm_pmu_fwif +gk104_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gk104_pmu }, + {} +}; + int gk104_pmu_new(struct nvkm_device
*device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu); + return nvkm_pmu_new_(gk104_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c index 345741d55a56..0081f2141b10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c @@ -84,6 +84,7 @@ gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable) static const struct nvkm_pmu_func gk110_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gk110_pmu_code, .code.size = sizeof(gk110_pmu_code), .data.data = gk110_pmu_data, @@ -98,8 +99,14 @@ gk110_pmu = { .pgob = gk110_pmu_pgob, }; +static const struct nvkm_pmu_fwif +gk110_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gk110_pmu }, + {} +}; + int gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu); + return nvkm_pmu_new_(gk110_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c index e4acf7876ea1..b227c701a5e7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c @@ -26,6 +26,7 @@ static const struct nvkm_pmu_func gk208_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gk208_pmu_code, .code.size = sizeof(gk208_pmu_code), .data.data = gk208_pmu_data, @@ -40,8 +41,14 @@ gk208_pmu = { .pgob = gk110_pmu_pgob, }; +static const struct nvkm_pmu_fwif +gk208_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gk208_pmu }, + {} +}; + int gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu); + return nvkm_pmu_new_(gk208_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c index 05e81855c367..26c1adf8f44c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c @@ -95,7 +95,7 @@ static void gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, struct gk20a_pmu_dvfs_dev_status *status) { - struct nvkm_falcon *falcon = pmu->base.falcon; + struct nvkm_falcon *falcon = &pmu->base.falcon; status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10)); status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10)); @@ -104,7 +104,7 @@ gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, static void gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu) { - struct nvkm_falcon *falcon = pmu->base.falcon; + struct nvkm_falcon *falcon = &pmu->base.falcon; nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000); nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000); @@ -160,7 +160,7 @@ gk20a_pmu_fini(struct nvkm_pmu *pmu) struct gk20a_pmu *gpmu = gk20a_pmu(pmu); nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm); - nvkm_falcon_put(pmu->falcon, &pmu->subdev); + nvkm_falcon_put(&pmu->falcon, &pmu->subdev); } static int @@ -169,7 +169,7 @@ gk20a_pmu_init(struct nvkm_pmu *pmu) struct gk20a_pmu *gpmu = gk20a_pmu(pmu); struct nvkm_subdev *subdev = &pmu->subdev; struct nvkm_device *device = pmu->subdev.device; - struct nvkm_falcon *falcon = pmu->falcon; + struct nvkm_falcon *falcon = &pmu->falcon; int ret; ret = nvkm_falcon_get(falcon, subdev); @@ -196,25 +196,34 @@ gk20a_dvfs_data= { static const struct nvkm_pmu_func gk20a_pmu = { + .flcn = &gt215_pmu_flcn, .enabled
= gf100_pmu_enabled, .init = gk20a_pmu_init, .fini = gk20a_pmu_fini, .reset = gf100_pmu_reset, }; +static const struct nvkm_pmu_fwif +gk20a_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gk20a_pmu }, + {} +}; + int gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { struct gk20a_pmu *pmu; + int ret; if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) return -ENOMEM; *ppmu = &pmu->base; - nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base); + ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, index, &pmu->base); + if (ret) + return ret; pmu->data = &gk20a_dvfs_data; nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work); - return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c index 459df1ef9e70..5afb55e58b51 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c @@ -28,6 +28,7 @@ static const struct nvkm_pmu_func gm107_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gm107_pmu_code, .code.size = sizeof(gm107_pmu_code), .data.data = gm107_pmu_data, @@ -41,8 +42,14 @@ gm107_pmu = { .recv = gt215_pmu_recv, }; +static const struct nvkm_pmu_fwif +gm107_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gm107_pmu }, + {} +}; + int gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu); + return nvkm_pmu_new_(gm107_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c index 31c843145c7a..6d5a13e4a857 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c @@ -19,38 +19,219 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE.
*/ - -#include <engine/falcon.h> -#include <core/msgqueue.h> #include "priv.h" -static void +#include <core/memory.h> +#include <subdev/acr.h> + +#include <nvfw/flcn.h> +#include <nvfw/pmu.h> + +static int +gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr) +{ + struct nv_pmu_acr_bootstrap_falcon_msg *msg = + container_of(hdr, typeof(*msg), msg.hdr); + return msg->falcon_id; +} + +int +gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id) +{ + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); + struct nv_pmu_acr_bootstrap_falcon_cmd cmd = { + .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, + .cmd.hdr.size = sizeof(cmd), + .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON, + .flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES, + .falcon_id = id, + }; + int ret; + + ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, + gm20b_pmu_acr_bootstrap_falcon_cb, + &pmu->subdev, msecs_to_jiffies(1000)); + if (ret >= 0 && ret != cmd.falcon_id) + ret = -EIO; + return ret; +} + +int +gm20b_pmu_acr_boot(struct nvkm_falcon *falcon) +{ + struct nv_pmu_args args = { .secure_mode = true }; + const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args); + nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0); + nvkm_falcon_start(falcon); + return 0; +} + +void +gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct loader_config hdr; + u64 addr; + + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8); + hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8); + hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8); + addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8); + hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8); + hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8); + addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8); + hdr.overlay_dma_base = lower_32_bits((addr + adjust) << 8); + hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) << 8); + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + + loader_config_dump(&acr->subdev, &hdr); +} + +void +gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const u64 base = lsfw->offset.img + lsfw->app_start_offset; + const u64 code = (base + lsfw->app_resident_code_offset) >> 8; + const u64 data = (base + lsfw->app_resident_data_offset) >> 8; + const struct loader_config hdr = { + .dma_idx = FALCON_DMAIDX_UCODE, + .code_dma_base = lower_32_bits(code), + .code_size_total = lsfw->app_size, + .code_size_to_load = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = lower_32_bits(data), + .data_size = lsfw->app_resident_data_size, + .overlay_dma_base = lower_32_bits(code), + .argc = 1, + .argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args), + .code_dma_base1 = upper_32_bits(code), + .data_dma_base1 = upper_32_bits(data), + .overlay_dma_base1 = upper_32_bits(code), + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +static const struct nvkm_acr_lsf_func +gm20b_pmu_acr = { + .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX, + .bld_size = sizeof(struct loader_config), + .bld_write = gm20b_pmu_acr_bld_write, + .bld_patch = gm20b_pmu_acr_bld_patch, + .boot = gm20b_pmu_acr_boot, + .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon, +}; + +static int +gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr) +{ + struct nv_pmu_acr_init_wpr_region_msg *msg = + container_of(hdr, 
typeof(*msg), msg.hdr); + struct nvkm_pmu *pmu = priv; + struct nvkm_subdev *subdev = &pmu->subdev; + + if (msg->error_code) { + nvkm_error(subdev, "ACR WPR init failure: %d\n", + msg->error_code); + return -EINVAL; + } + + nvkm_debug(subdev, "ACR WPR init complete\n"); + complete_all(&pmu->wpr_ready); + return 0; +} + +static int +gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu) +{ + struct nv_pmu_acr_init_wpr_region_cmd cmd = { + .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, + .cmd.hdr.size = sizeof(cmd), + .cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION, + .region_id = 1, + .wpr_offset = 0, + }; + + return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, + gm20b_pmu_acr_init_wpr_callback, pmu, 0); +} + +int +gm20b_pmu_initmsg(struct nvkm_pmu *pmu) +{ + struct nv_pmu_init_msg msg; + int ret; + + ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg)); + if (ret) + return ret; + + if (msg.hdr.unit_id != NV_PMU_UNIT_INIT || + msg.msg_type != NV_PMU_INIT_MSG_INIT) + return -EINVAL; + + nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index, + msg.queue_info[0].offset, + msg.queue_info[0].size); + nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index, + msg.queue_info[1].offset, + msg.queue_info[1].size); + nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index, + msg.queue_info[4].offset, + msg.queue_info[4].size); + return gm20b_pmu_acr_init_wpr(pmu); +} + +void gm20b_pmu_recv(struct nvkm_pmu *pmu) { - if (!pmu->queue) { - nvkm_warn(&pmu->subdev, - "recv function called while no firmware set!\n"); - return; + if (!pmu->initmsg_received) { + int ret = pmu->func->initmsg(pmu); + if (ret) { + nvkm_error(&pmu->subdev, + "error parsing init message: %d\n", ret); + return; + } + + pmu->initmsg_received = true; } - nvkm_msgqueue_recv(pmu->queue); + nvkm_falcon_msgq_recv(pmu->msgq); } static const struct nvkm_pmu_func gm20b_pmu = { + .flcn = &gt215_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, + .initmsg = gm20b_pmu_initmsg, }; +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin"); +MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin"); +MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin"); +#endif + int -gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif) { - int ret; + return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon, + NVKM_ACR_LSF_PMU, "pmu/", + ver, fwif->acr); +} - ret = nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu); - if (ret) - return ret; +static const struct nvkm_pmu_fwif +gm20b_pmu_fwif[] = { + { 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr }, + {} +}; - return 0; +int +gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +{ + return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c index e210cd6af816..09e05db21ff5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c @@ -25,12 +25,19 @@ static const struct nvkm_pmu_func gp100_pmu = { + .flcn = &gt215_pmu_flcn, .enabled = gf100_pmu_enabled, .reset = gf100_pmu_reset, }; +static const struct nvkm_pmu_fwif +gp100_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gp100_pmu }, + {} +}; + int gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gp100_pmu, device, index, ppmu); + return nvkm_pmu_new_(gp100_pmu_fwif, device, index, 
ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index 98c7a2a8afc4..262b8a3dd507 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -39,12 +39,19 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gp102_pmu = { + .flcn = >215_pmu_flcn, .enabled = gp102_pmu_enabled, .reset = gp102_pmu_reset, }; +static const struct nvkm_pmu_fwif +gp102_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gp102_pmu }, + {} +}; + int gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu); + return nvkm_pmu_new_(gp102_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c new file mode 100644 index 000000000000..39c86bc56310 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include <subdev/acr.h> + +#include <nvfw/flcn.h> +#include <nvfw/pmu.h> + +static int +gp10b_pmu_acr_bootstrap_multiple_falcons_cb(void *priv, + struct nv_falcon_msg *hdr) +{ + struct nv_pmu_acr_bootstrap_multiple_falcons_msg *msg = + container_of(hdr, typeof(*msg), msg.hdr); + return msg->falcon_mask; +} +static int +gp10b_pmu_acr_bootstrap_multiple_falcons(struct nvkm_falcon *falcon, u32 mask) +{ + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); + struct nv_pmu_acr_bootstrap_multiple_falcons_cmd cmd = { + .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, + .cmd.hdr.size = sizeof(cmd), + .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS, + .flags = NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES, + .falcon_mask = mask, + .wpr_lo = 0, /*XXX*/ + .wpr_hi = 0, /*XXX*/ + }; + int ret; + + ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, + gp10b_pmu_acr_bootstrap_multiple_falcons_cb, + &pmu->subdev, msecs_to_jiffies(1000)); + if (ret >= 0 && ret != cmd.falcon_mask) + ret = -EIO; + return ret; +} + +static const struct nvkm_acr_lsf_func +gp10b_pmu_acr = { + .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX, + .bld_size = sizeof(struct loader_config), + .bld_write = gm20b_pmu_acr_bld_write, + .bld_patch = gm20b_pmu_acr_bld_patch, + .boot = gm20b_pmu_acr_boot, + .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon, + .bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons, +}; + +static const struct nvkm_pmu_func +gp10b_pmu = { + .flcn = >215_pmu_flcn, + .enabled = gf100_pmu_enabled, + .intr = gt215_pmu_intr, + .recv = gm20b_pmu_recv, + .initmsg = gm20b_pmu_initmsg, +}; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin"); +MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin"); +MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin"); +#endif + +static const struct nvkm_pmu_fwif +gp10b_pmu_fwif[] = { + { 0, gm20b_pmu_load, &gp10b_pmu, &gp10b_pmu_acr }, + {} +}; + +int +gp10b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +{ + return nvkm_pmu_new_(gp10b_pmu_fwif, device, index, ppmu); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c index e04216daea58..88b909913ff9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c @@ -241,8 +241,27 @@ gt215_pmu_init(struct nvkm_pmu *pmu) return 0; } +const struct nvkm_falcon_func +gt215_pmu_flcn = { + .debug = 0xc08, + .fbif = 0xe00, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, + .cmdq = { 0x4a0, 0x4b0, 4 }, + .msgq = { 0x4c8, 0x4cc, 0 }, +}; + static const struct nvkm_pmu_func gt215_pmu = { + .flcn = >215_pmu_flcn, .code.data = gt215_pmu_code, .code.size = sizeof(gt215_pmu_code), .data.data = gt215_pmu_data, @@ -256,8 +275,14 @@ gt215_pmu = { .recv = gt215_pmu_recv, }; +static const struct nvkm_pmu_fwif +gt215_pmu_fwif[] = { + { -1, gf100_pmu_nofw, >215_pmu }, + {} +}; + int gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(>215_pmu, device, index, ppmu); + return nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu); } 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index 26d73f9cd6d3..f470859244de 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -4,13 +4,12 @@ #define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev) #include <subdev/pmu.h> #include <subdev/pmu/fuc/os.h> - -int nvkm_pmu_ctor(const struct nvkm_pmu_func *, struct nvkm_device *, - int index, struct nvkm_pmu *); -int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *, - int index, struct nvkm_pmu **); +enum nvkm_acr_lsf_id; +struct nvkm_acr_lsfw; struct nvkm_pmu_func { + const struct nvkm_falcon_func *flcn; + struct { u32 *data; u32 size; @@ -29,9 +28,11 @@ struct nvkm_pmu_func { int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process, u32 message, u32 data0, u32 data1); void (*recv)(struct nvkm_pmu *); + int (*initmsg)(struct nvkm_pmu *); void (*pgob)(struct nvkm_pmu *, bool); }; +extern const struct nvkm_falcon_func gt215_pmu_flcn; int gt215_pmu_init(struct nvkm_pmu *); void gt215_pmu_fini(struct nvkm_pmu *); void gt215_pmu_intr(struct nvkm_pmu *); @@ -42,4 +43,26 @@ bool gf100_pmu_enabled(struct nvkm_pmu *); void gf100_pmu_reset(struct nvkm_pmu *); void gk110_pmu_pgob(struct nvkm_pmu *, bool); + +void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64); +void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); +int gm20b_pmu_acr_boot(struct nvkm_falcon *); +int gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *, enum nvkm_acr_lsf_id); +void gm20b_pmu_recv(struct nvkm_pmu *); +int gm20b_pmu_initmsg(struct nvkm_pmu *); + +struct nvkm_pmu_fwif { + int version; + int (*load)(struct nvkm_pmu *, int ver, const struct nvkm_pmu_fwif *); + const struct nvkm_pmu_func *func; + const struct nvkm_acr_lsf_func *acr; +}; + +int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *); +int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *); + +int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *, + int index, struct nvkm_pmu *); +int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *, + int index, struct nvkm_pmu **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild deleted file mode 100644 index f3dee2693c79..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild +++ /dev/null @@ -1,17 +0,0 @@ -# SPDX-License-Identifier: MIT -nvkm-y += nvkm/subdev/secboot/base.o -nvkm-y += nvkm/subdev/secboot/hs_ucode.o -nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o -nvkm-y += nvkm/subdev/secboot/ls_ucode_msgqueue.o -nvkm-y += nvkm/subdev/secboot/acr.o -nvkm-y += nvkm/subdev/secboot/acr_r352.o -nvkm-y += nvkm/subdev/secboot/acr_r361.o -nvkm-y += nvkm/subdev/secboot/acr_r364.o -nvkm-y += nvkm/subdev/secboot/acr_r367.o -nvkm-y += nvkm/subdev/secboot/acr_r370.o -nvkm-y += nvkm/subdev/secboot/acr_r375.o -nvkm-y += nvkm/subdev/secboot/gm200.o -nvkm-y += nvkm/subdev/secboot/gm20b.o -nvkm-y += nvkm/subdev/secboot/gp102.o -nvkm-y += nvkm/subdev/secboot/gp108.o -nvkm-y += nvkm/subdev/secboot/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c deleted file mode 100644 index dc80985cf093..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr.h" - -#include <core/firmware.h> - -/** - * Convenience function to duplicate a firmware file in memory and check that - * it has the required minimum size. - */ -void * -nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name, - size_t min_size) -{ - const struct firmware *fw; - void *blob; - int ret; - - ret = nvkm_firmware_get(subdev, name, &fw); - if (ret) - return ERR_PTR(ret); - if (fw->size < min_size) { - nvkm_error(subdev, "%s is smaller than expected size %zu\n", - name, min_size); - nvkm_firmware_put(fw); - return ERR_PTR(-EINVAL); - } - blob = kmemdup(fw->data, fw->size, GFP_KERNEL); - nvkm_firmware_put(fw); - if (!blob) - return ERR_PTR(-ENOMEM); - - return blob; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h deleted file mode 100644 index 73a2ac81ac69..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ -#ifndef __NVKM_SECBOOT_ACR_H__ -#define __NVKM_SECBOOT_ACR_H__ - -#include "priv.h" - -struct nvkm_acr; - -/** - * struct nvkm_acr_func - properties and functions specific to an ACR - * - * @load: make the ACR ready to run on the given secboot device - * @reset: reset the specified falcon - * @start: start the specified falcon (assumed to have been reset) - */ -struct nvkm_acr_func { - void (*dtor)(struct nvkm_acr *); - int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *); - int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool); - int (*load)(struct nvkm_acr *, struct nvkm_falcon *, - struct nvkm_gpuobj *, u64); - int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long); -}; - -/** - * struct nvkm_acr - instance of an ACR - * - * @boot_falcon: ID of the falcon that will perform secure boot - * @managed_falcons: bitfield of falcons managed by this ACR - * @optional_falcons: bitfield of falcons we can live without - */ -struct nvkm_acr { - const struct nvkm_acr_func *func; - const struct nvkm_subdev *subdev; - - enum nvkm_secboot_falcon boot_falcon; - unsigned long managed_falcons; - unsigned long optional_falcons; -}; - -void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t); - -struct nvkm_acr *acr_r352_new(unsigned long); -struct nvkm_acr *acr_r361_new(unsigned long); -struct nvkm_acr *acr_r364_new(unsigned long); -struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long); -struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long); -struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c deleted file mode 100644 index 7af971db91bc..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c +++ /dev/null @@ -1,1241 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr_r352.h" -#include "hs_ucode.h" - -#include <core/gpuobj.h> -#include <core/firmware.h> -#include <engine/falcon.h> -#include <subdev/pmu.h> -#include <core/msgqueue.h> -#include <engine/sec2.h> - -/** - * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor - * @signature: 16B signature for secure code. 
0s if no secure code - * @ctx_dma: DMA context to be used by BL while loading code/data - * @code_dma_base: 256B-aligned Physical FB Address where code is located - * (falcon's $xcbase register) - * @non_sec_code_off: offset from code_dma_base where the non-secure code is - * located. The offset must be multiple of 256 to help perf - * @non_sec_code_size: the size of the nonSecure code part. - * @sec_code_off: offset from code_dma_base where the secure code is - * located. The offset must be multiple of 256 to help perf - * @sec_code_size: offset from code_dma_base where the secure code is - * located. The offset must be multiple of 256 to help perf - * @code_entry_point: code entry point which will be invoked by BL after - * code is loaded. - * @data_dma_base: 256B aligned Physical FB Address where data is located. - * (falcon's $xdbase register) - * @data_size: size of data block. Should be multiple of 256B - * - * Structure used by the bootloader to load the rest of the code. This has - * to be filled by host and copied into DMEM at offset provided in the - * hsflcn_bl_desc.bl_desc_dmem_load_off. - */ -struct acr_r352_flcn_bl_desc { - u32 reserved[4]; - u32 signature[4]; - u32 ctx_dma; - u32 code_dma_base; - u32 non_sec_code_off; - u32 non_sec_code_size; - u32 sec_code_off; - u32 sec_code_size; - u32 code_entry_point; - u32 data_dma_base; - u32 data_size; - u32 code_dma_base1; - u32 data_dma_base1; -}; - -/** - * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image - */ -static void -acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - struct acr_r352_flcn_bl_desc *desc = _desc; - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - u64 base, addr_code, addr_data; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = (base + pdesc->app_resident_code_offset) >> 8; - addr_data = (base + pdesc->app_resident_data_offset) >> 8; - - desc->ctx_dma = FALCON_DMAIDX_UCODE; - desc->code_dma_base = lower_32_bits(addr_code); - desc->code_dma_base1 = upper_32_bits(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = lower_32_bits(addr_data); - desc->data_dma_base1 = upper_32_bits(addr_data); - desc->data_size = pdesc->app_resident_data_size; -} - - -/** - * struct hsflcn_acr_desc - data section of the HS firmware - * - * This header is to be copied at the beginning of DMEM by the HS bootloader. 
- * - * @signature: signature of ACR ucode - * @wpr_region_id: region ID holding the WPR header and its details - * @wpr_offset: offset from the WPR region holding the wpr header - * @regions: region descriptors - * @nonwpr_ucode_blob_size: size of LS blob - * @nonwpr_ucode_blob_start: FB location of LS blob is - */ -struct hsflcn_acr_desc { - union { - u8 reserved_dmem[0x200]; - u32 signatures[4]; - } ucode_reserved_space; - u32 wpr_region_id; - u32 wpr_offset; - u32 mmu_mem_range; -#define FLCN_ACR_MAX_REGIONS 2 - struct { - u32 no_regions; - struct { - u32 start_addr; - u32 end_addr; - u32 region_id; - u32 read_mask; - u32 write_mask; - u32 client_mask; - } region_props[FLCN_ACR_MAX_REGIONS]; - } regions; - u32 ucode_blob_size; - u64 ucode_blob_base __aligned(8); - struct { - u32 vpr_enabled; - u32 vpr_start; - u32 vpr_end; - u32 hdcp_policies; - } vpr_desc; -}; - - -/* - * Low-secure blob creation - */ - -/** - * struct acr_r352_lsf_lsb_header - LS firmware header - * @signature: signature to verify the firmware against - * @ucode_off: offset of the ucode blob in the WPR region. The ucode - * blob contains the bootloader, code and data of the - * LS falcon - * @ucode_size: size of the ucode blob, including bootloader - * @data_size: size of the ucode blob data - * @bl_code_size: size of the bootloader code - * @bl_imem_off: offset in imem of the bootloader - * @bl_data_off: offset of the bootloader data in WPR region - * @bl_data_size: size of the bootloader data - * @app_code_off: offset of the app code relative to ucode_off - * @app_code_size: size of the app code - * @app_data_off: offset of the app data relative to ucode_off - * @app_data_size: size of the app data - * @flags: flags for the secure bootloader - * - * This structure is written into the WPR region for each managed falcon. Each - * instance is referenced by the lsb_offset member of the corresponding - * lsf_wpr_header. - */ -struct acr_r352_lsf_lsb_header { - /** - * LS falcon signatures - * @prd_keys: signature to use in production mode - * @dgb_keys: signature to use in debug mode - * @b_prd_present: whether the production key is present - * @b_dgb_present: whether the debug key is present - * @falcon_id: ID of the falcon the ucode applies to - */ - struct { - u8 prd_keys[2][16]; - u8 dbg_keys[2][16]; - u32 b_prd_present; - u32 b_dbg_present; - u32 falcon_id; - } signature; - u32 ucode_off; - u32 ucode_size; - u32 data_size; - u32 bl_code_size; - u32 bl_imem_off; - u32 bl_data_off; - u32 bl_data_size; - u32 app_code_off; - u32 app_code_size; - u32 app_data_off; - u32 app_data_size; - u32 flags; -}; - -/** - * struct acr_r352_lsf_wpr_header - LS blob WPR Header - * @falcon_id: LS falcon ID - * @lsb_offset: offset of the lsb_lsf_header in the WPR region - * @bootstrap_owner: secure falcon reponsible for bootstrapping the LS falcon - * @lazy_bootstrap: skip bootstrapping by ACR - * @status: bootstrapping status - * - * An array of these is written at the beginning of the WPR region, one for - * each managed falcon. The array is terminated by an instance which falcon_id - * is LSF_FALCON_ID_INVALID. 
- */ -struct acr_r352_lsf_wpr_header { - u32 falcon_id; - u32 lsb_offset; - u32 bootstrap_owner; - u32 lazy_bootstrap; - u32 status; -#define LSF_IMAGE_STATUS_NONE 0 -#define LSF_IMAGE_STATUS_COPY 1 -#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2 -#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3 -#define LSF_IMAGE_STATUS_VALIDATION_DONE 4 -#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5 -#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6 -}; - -/** - * struct ls_ucode_img_r352 - ucode image augmented with r352 headers - */ -struct ls_ucode_img_r352 { - struct ls_ucode_img base; - - const struct acr_r352_lsf_func *func; - - struct acr_r352_lsf_wpr_header wpr_header; - struct acr_r352_lsf_lsb_header lsb_header; -}; -#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base) - -/** - * ls_ucode_img_load() - create a lsf_ucode_img and load it - */ -struct ls_ucode_img * -acr_r352_ls_ucode_img_load(const struct acr_r352 *acr, - const struct nvkm_secboot *sb, - enum nvkm_secboot_falcon falcon_id) -{ - const struct nvkm_subdev *subdev = acr->base.subdev; - const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id]; - struct ls_ucode_img_r352 *img; - int ret; - - img = kzalloc(sizeof(*img), GFP_KERNEL); - if (!img) - return ERR_PTR(-ENOMEM); - - img->base.falcon_id = falcon_id; - - ret = func->load(sb, func->version_max, &img->base); - if (ret < 0) { - kfree(img->base.ucode_data); - kfree(img->base.sig); - kfree(img); - return ERR_PTR(ret); - } - - img->func = func->version[ret]; - - /* Check that the signature size matches our expectations... */ - if (img->base.sig_size != sizeof(img->lsb_header.signature)) { - nvkm_error(subdev, "invalid signature size for %s falcon!\n", - nvkm_secboot_falcon_name[falcon_id]); - return ERR_PTR(-EINVAL); - } - - /* Copy signature to the right place */ - memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size); - - /* not needed? the signature should already have the right value */ - img->lsb_header.signature.falcon_id = falcon_id; - - return &img->base; -} - -#define LSF_LSB_HEADER_ALIGN 256 -#define LSF_BL_DATA_ALIGN 256 -#define LSF_BL_DATA_SIZE_ALIGN 256 -#define LSF_BL_CODE_SIZE_ALIGN 256 -#define LSF_UCODE_DATA_ALIGN 4096 - -/** - * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image - * @acr: ACR to use - * @img: image to generate for - * @offset: offset in the WPR region where this image starts - * - * Allocate space in the WPR area from offset and write the WPR and LSB headers - * accordingly. - * - * Return: offset at the end of this image. 
- */ -static u32 -acr_r352_ls_img_fill_headers(struct acr_r352 *acr, - struct ls_ucode_img_r352 *img, u32 offset) -{ - struct ls_ucode_img *_img = &img->base; - struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header; - struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header; - struct ls_ucode_img_desc *desc = &_img->ucode_desc; - const struct acr_r352_lsf_func *func = img->func; - - /* Fill WPR header */ - whdr->falcon_id = _img->falcon_id; - whdr->bootstrap_owner = acr->base.boot_falcon; - whdr->status = LSF_IMAGE_STATUS_COPY; - - /* Skip bootstrapping falcons started by someone else than ACR */ - if (acr->lazy_bootstrap & BIT(_img->falcon_id)) - whdr->lazy_bootstrap = 1; - - /* Align, save off, and include an LSB header size */ - offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN); - whdr->lsb_offset = offset; - offset += sizeof(*lhdr); - - /* - * Align, save off, and include the original (static) ucode - * image size - */ - offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN); - _img->ucode_off = lhdr->ucode_off = offset; - offset += _img->ucode_size; - - /* - * For falcons that use a boot loader (BL), we append a loader - * desc structure on the end of the ucode image and consider - * this the boot loader data. The host will then copy the loader - * desc args to this space within the WPR region (before locking - * down) and the HS bin will then copy them to DMEM 0 for the - * loader. - */ - lhdr->bl_code_size = ALIGN(desc->bootloader_size, - LSF_BL_CODE_SIZE_ALIGN); - lhdr->ucode_size = ALIGN(desc->app_resident_data_offset, - LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size; - lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) + - lhdr->bl_code_size - lhdr->ucode_size; - /* - * Though the BL is located at 0th offset of the image, the VA - * is different to make sure that it doesn't collide the actual - * OS VA range - */ - lhdr->bl_imem_off = desc->bootloader_imem_offset; - lhdr->app_code_off = desc->app_start_offset + - desc->app_resident_code_offset; - lhdr->app_code_size = desc->app_resident_code_size; - lhdr->app_data_off = desc->app_start_offset + - desc->app_resident_data_offset; - lhdr->app_data_size = desc->app_resident_data_size; - - lhdr->flags = func->lhdr_flags; - if (_img->falcon_id == acr->base.boot_falcon) - lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX; - - /* Align and save off BL descriptor size */ - lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN); - - /* - * Align, save off, and include the additional BL data - */ - offset = ALIGN(offset, LSF_BL_DATA_ALIGN); - lhdr->bl_data_off = offset; - offset += lhdr->bl_data_size; - - return offset; -} - -/** - * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images - */ -int -acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs) -{ - struct ls_ucode_img_r352 *img; - struct list_head *l; - u32 count = 0; - u32 offset; - - /* Count the number of images to manage */ - list_for_each(l, imgs) - count++; - - /* - * Start with an array of WPR headers at the base of the WPR. - * The expectation here is that the secure falcon will do a single DMA - * read of this array and cache it internally so it's ok to pack these. - * Also, we add 1 to the falcon count to indicate the end of the array. - */ - offset = sizeof(img->wpr_header) * (count + 1); - - /* - * Walk the managed falcons, accounting for the LSB structs - * as well as the ucode images. 
- */ - list_for_each_entry(img, imgs, base.node) { - offset = acr_r352_ls_img_fill_headers(acr, img, offset); - } - - return offset; -} - -/** - * acr_r352_ls_write_wpr - write the WPR blob contents - */ -int -acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, - struct nvkm_gpuobj *wpr_blob, u64 wpr_addr) -{ - struct ls_ucode_img *_img; - u32 pos = 0; - u32 max_desc_size = 0; - u8 *gdesc; - - /* Figure out how large we need gdesc to be. */ - list_for_each_entry(_img, imgs, node) { - struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); - const struct acr_r352_lsf_func *ls_func = img->func; - - max_desc_size = max(max_desc_size, ls_func->bl_desc_size); - } - - gdesc = kmalloc(max_desc_size, GFP_KERNEL); - if (!gdesc) - return -ENOMEM; - - nvkm_kmap(wpr_blob); - - list_for_each_entry(_img, imgs, node) { - struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); - const struct acr_r352_lsf_func *ls_func = img->func; - - nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, - sizeof(img->wpr_header)); - - nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset, - &img->lsb_header, sizeof(img->lsb_header)); - - /* Generate and write BL descriptor */ - memset(gdesc, 0, ls_func->bl_desc_size); - ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc); - - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off, - gdesc, ls_func->bl_desc_size); - - /* Copy ucode */ - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off, - _img->ucode_data, _img->ucode_size); - - pos += sizeof(img->wpr_header); - } - - nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID); - - nvkm_done(wpr_blob); - - kfree(gdesc); - - return 0; -} - -/* Both size and address of WPR need to be 256K-aligned */ -#define WPR_ALIGNMENT 0x40000 -/** - * acr_r352_prepare_ls_blob() - prepare the LS blob - * - * For each securely managed falcon, load the FW, signatures and bootloaders and - * prepare a ucode blob. Then, compute the offsets in the WPR region for each - * blob, and finally write the headers and ucode blobs into a GPU object that - * will be copied into the WPR region by the HS firmware. - */ -static int -acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb) -{ - const struct nvkm_subdev *subdev = acr->base.subdev; - struct list_head imgs; - struct ls_ucode_img *img, *t; - unsigned long managed_falcons = acr->base.managed_falcons; - u64 wpr_addr = sb->wpr_addr; - u32 wpr_size = sb->wpr_size; - int managed_count = 0; - u32 image_wpr_size, ls_blob_size; - int falcon_id; - int ret; - - INIT_LIST_HEAD(&imgs); - - /* Load all LS blobs */ - for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) { - struct ls_ucode_img *img; - - img = acr->func->ls_ucode_img_load(acr, sb, falcon_id); - if (IS_ERR(img)) { - if (acr->base.optional_falcons & BIT(falcon_id)) { - managed_falcons &= ~BIT(falcon_id); - nvkm_info(subdev, "skipping %s falcon...\n", - nvkm_secboot_falcon_name[falcon_id]); - continue; - } - ret = PTR_ERR(img); - goto cleanup; - } - - list_add_tail(&img->node, &imgs); - managed_count++; - } - - /* Commit the actual list of falcons we will manage from now on */ - acr->base.managed_falcons = managed_falcons; - - /* - * If the boot falcon has a firmare, let it manage the bootstrap of other - * falcons. 
- */ - if (acr->func->ls_func[acr->base.boot_falcon] && - (managed_falcons & BIT(acr->base.boot_falcon))) { - for_each_set_bit(falcon_id, &managed_falcons, - NVKM_SECBOOT_FALCON_END) { - if (falcon_id == acr->base.boot_falcon) - continue; - - acr->lazy_bootstrap |= BIT(falcon_id); - } - } - - /* - * Fill the WPR and LSF headers with the right offsets and compute - * required WPR size - */ - image_wpr_size = acr->func->ls_fill_headers(acr, &imgs); - image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT); - - ls_blob_size = image_wpr_size; - - /* - * If we need a shadow area, allocate twice the size and use the - * upper half as WPR - */ - if (wpr_size == 0 && acr->func->shadow_blob) - ls_blob_size *= 2; - - /* Allocate GPU object that will contain the WPR region */ - ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT, - false, NULL, &acr->ls_blob); - if (ret) - goto cleanup; - - nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n", - managed_count, image_wpr_size); - - /* If WPR address and size are not fixed, set them to fit the LS blob */ - if (wpr_size == 0) { - wpr_addr = acr->ls_blob->addr; - if (acr->func->shadow_blob) - wpr_addr += acr->ls_blob->size / 2; - - wpr_size = image_wpr_size; - /* - * But if the WPR region is set by the bootloader, it is illegal for - * the HS blob to be larger than this region. - */ - } else if (image_wpr_size > wpr_size) { - nvkm_error(subdev, "WPR region too small for FW blob!\n"); - nvkm_error(subdev, "required: %dB\n", image_wpr_size); - nvkm_error(subdev, "available: %dB\n", wpr_size); - ret = -ENOSPC; - goto cleanup; - } - - /* Write LS blob */ - ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr); - if (ret) - nvkm_gpuobj_del(&acr->ls_blob); - -cleanup: - list_for_each_entry_safe(img, t, &imgs, node) { - kfree(img->ucode_data); - kfree(img->sig); - kfree(img); - } - - return ret; -} - - - - -void -acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, - void *_desc) -{ - struct hsflcn_acr_desc *desc = _desc; - struct nvkm_gpuobj *ls_blob = acr->ls_blob; - - /* WPR region information if WPR is not fixed */ - if (sb->wpr_size == 0) { - u64 wpr_start = ls_blob->addr; - u64 wpr_end = wpr_start + ls_blob->size; - - desc->wpr_region_id = 1; - desc->regions.no_regions = 2; - desc->regions.region_props[0].start_addr = wpr_start >> 8; - desc->regions.region_props[0].end_addr = wpr_end >> 8; - desc->regions.region_props[0].region_id = 1; - desc->regions.region_props[0].read_mask = 0xf; - desc->regions.region_props[0].write_mask = 0xc; - desc->regions.region_props[0].client_mask = 0x2; - } else { - desc->ucode_blob_base = ls_blob->addr; - desc->ucode_blob_size = ls_blob->size; - } -} - -static void -acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, - u64 offset) -{ - struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc; - u64 addr_code, addr_data; - - addr_code = offset >> 8; - addr_data = (offset + hdr->data_dma_base) >> 8; - - bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; - bl_desc->code_dma_base = lower_32_bits(addr_code); - bl_desc->non_sec_code_off = hdr->non_sec_code_off; - bl_desc->non_sec_code_size = hdr->non_sec_code_size; - bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0); - bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0); - bl_desc->code_entry_point = 0; - bl_desc->data_dma_base = lower_32_bits(addr_data); - bl_desc->data_size = hdr->data_size; -} - -/** - * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor - * - * @sb secure boot 
instance to prepare for - * @fw name of the HS firmware to load - * @blob pointer to gpuobj that will be allocated to receive the HS FW payload - * @bl_desc pointer to the BL descriptor to write for this firmware - * @patch whether we should patch the HS descriptor (only for HS loaders) - */ -static int -acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb, - const char *fw, struct nvkm_gpuobj **blob, - struct hsf_load_header *load_header, bool patch) -{ - struct nvkm_subdev *subdev = &sb->subdev; - void *acr_image; - struct fw_bin_header *hsbin_hdr; - struct hsf_fw_header *fw_hdr; - struct hsf_load_header *load_hdr; - void *acr_data; - int ret; - - acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw); - if (IS_ERR(acr_image)) - return PTR_ERR(acr_image); - - hsbin_hdr = acr_image; - fw_hdr = acr_image + hsbin_hdr->header_offset; - load_hdr = acr_image + fw_hdr->hdr_offset; - acr_data = acr_image + hsbin_hdr->data_offset; - - /* Patch descriptor with WPR information? */ - if (patch) { - struct hsflcn_acr_desc *desc; - - desc = acr_data + load_hdr->data_dma_base; - acr->func->fixup_hs_desc(acr, sb, desc); - } - - if (load_hdr->num_apps > ACR_R352_MAX_APPS) { - nvkm_error(subdev, "more apps (%d) than supported (%d)!", - load_hdr->num_apps, ACR_R352_MAX_APPS); - ret = -EINVAL; - goto cleanup; - } - memcpy(load_header, load_hdr, sizeof(*load_header) + - (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps)); - - /* Create ACR blob and copy HS data to it */ - ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256), - 0x1000, false, NULL, blob); - if (ret) - goto cleanup; - - nvkm_kmap(*blob); - nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size); - nvkm_done(*blob); - -cleanup: - kfree(acr_image); - - return ret; -} - -/** - * acr_r352_load_blobs - load blobs common to all ACR V1 versions. - * - * This includes the LS blob, HS ucode loading blob, and HS bootloader. - * - * The HS ucode unload blob is only used on dGPU if the WPR region is variable. - */ -int -acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb) -{ - struct nvkm_subdev *subdev = &sb->subdev; - int ret; - - /* Firmware already loaded? 
*/ - if (acr->firmware_ok) - return 0; - - /* Load and prepare the managed falcon's firmwares */ - ret = acr_r352_prepare_ls_blob(acr, sb); - if (ret) - return ret; - - /* Load the HS firmware that will load the LS firmwares */ - if (!acr->load_blob) { - ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load", - &acr->load_blob, - &acr->load_bl_header, true); - if (ret) - return ret; - } - - /* If the ACR region is dynamically programmed, we need an unload FW */ - if (sb->wpr_size == 0) { - ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload", - &acr->unload_blob, - &acr->unload_bl_header, false); - if (ret) - return ret; - } - - /* Load the HS firmware bootloader */ - if (!acr->hsbl_blob) { - acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0); - if (IS_ERR(acr->hsbl_blob)) { - ret = PTR_ERR(acr->hsbl_blob); - acr->hsbl_blob = NULL; - return ret; - } - - if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) { - acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev, - "acr/unload_bl", 0); - if (IS_ERR(acr->hsbl_unload_blob)) { - ret = PTR_ERR(acr->hsbl_unload_blob); - acr->hsbl_unload_blob = NULL; - return ret; - } - } else { - acr->hsbl_unload_blob = acr->hsbl_blob; - } - } - - acr->firmware_ok = true; - nvkm_debug(&sb->subdev, "LS blob successfully created\n"); - - return 0; -} - -/** - * acr_r352_load() - prepare HS falcon to run the specified blob, mapped. - * - * Returns the start address to use, or a negative error value. - */ -static int -acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon, - struct nvkm_gpuobj *blob, u64 offset) -{ - struct acr_r352 *acr = acr_r352(_acr); - const u32 bl_desc_size = acr->func->hs_bl_desc_size; - const struct hsf_load_header *load_hdr; - struct fw_bin_header *bl_hdr; - struct fw_bl_desc *hsbl_desc; - void *bl, *blob_data, *hsbl_code, *hsbl_data; - u32 code_size; - u8 *bl_desc; - - bl_desc = kzalloc(bl_desc_size, GFP_KERNEL); - if (!bl_desc) - return -ENOMEM; - - /* Find the bootloader descriptor for our blob and copy it */ - if (blob == acr->load_blob) { - load_hdr = &acr->load_bl_header; - bl = acr->hsbl_blob; - } else if (blob == acr->unload_blob) { - load_hdr = &acr->unload_bl_header; - bl = acr->hsbl_unload_blob; - } else { - nvkm_error(_acr->subdev, "invalid secure boot blob!\n"); - kfree(bl_desc); - return -EINVAL; - } - - bl_hdr = bl; - hsbl_desc = bl + bl_hdr->header_offset; - blob_data = bl + bl_hdr->data_offset; - hsbl_code = blob_data + hsbl_desc->code_off; - hsbl_data = blob_data + hsbl_desc->data_off; - code_size = ALIGN(hsbl_desc->code_size, 256); - - /* - * Copy HS bootloader data - */ - nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0); - - /* Copy HS bootloader code to end of IMEM */ - nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size, - code_size, hsbl_desc->start_tag, 0, false); - - /* Generate the BL header */ - acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset); - - /* - * Copy HS BL header where the HS descriptor expects it to be - */ - nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off, - bl_desc_size, 0); - - kfree(bl_desc); - return hsbl_desc->start_tag << 8; -} - -static int -acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb) -{ - struct nvkm_subdev *subdev = &sb->subdev; - int i; - - /* Run the unload blob to unprotect the WPR region */ - if (acr->unload_blob && sb->wpr_set) { - int ret; - - nvkm_debug(subdev, "running HS unload blob\n"); - ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon); - if (ret < 0) - 
return ret; - /* - * Unload blob will return this error code - it is not an error - * and the expected behavior on RM as well - */ - if (ret && ret != 0x1d) { - nvkm_error(subdev, "HS unload failed, ret 0x%08x\n", ret); - return -EINVAL; - } - nvkm_debug(subdev, "HS unload blob completed\n"); - } - - for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++) - acr->falcon_state[i] = NON_SECURE; - - sb->wpr_set = false; - - return 0; -} - -/** - * Check if the WPR region has been indeed set by the ACR firmware, and - * matches where it should be. - */ -static bool -acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb) -{ - const struct nvkm_subdev *subdev = &sb->subdev; - const struct nvkm_device *device = subdev->device; - u64 wpr_lo, wpr_hi; - u64 wpr_range_lo, wpr_range_hi; - - nvkm_wr32(device, 0x100cd4, 0x2); - wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff); - wpr_lo <<= 8; - nvkm_wr32(device, 0x100cd4, 0x3); - wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff); - wpr_hi <<= 8; - - if (sb->wpr_size != 0) { - wpr_range_lo = sb->wpr_addr; - wpr_range_hi = wpr_range_lo + sb->wpr_size; - } else { - wpr_range_lo = acr->ls_blob->addr; - wpr_range_hi = wpr_range_lo + acr->ls_blob->size; - } - - return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi && - wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi); -} - -static int -acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb) -{ - const struct nvkm_subdev *subdev = &sb->subdev; - unsigned long managed_falcons = acr->base.managed_falcons; - int falcon_id; - int ret; - - if (sb->wpr_set) - return 0; - - /* Make sure all blobs are ready */ - ret = acr_r352_load_blobs(acr, sb); - if (ret) - return ret; - - nvkm_debug(subdev, "running HS load blob\n"); - ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon); - /* clear halt interrupt */ - nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10); - sb->wpr_set = acr_r352_wpr_is_set(acr, sb); - if (ret < 0) { - return ret; - } else if (ret > 0) { - nvkm_error(subdev, "HS load failed, ret 0x%08x\n", ret); - return -EINVAL; - } - nvkm_debug(subdev, "HS load blob completed\n"); - /* WPR must be set at this point */ - if (!sb->wpr_set) { - nvkm_error(subdev, "ACR blob completed but WPR not set!\n"); - return -EINVAL; - } - - /* Run LS firmwares post_run hooks */ - for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) { - const struct acr_r352_ls_func *func = - acr->func->ls_func[falcon_id]; - - if (func->post_run) { - ret = func->post_run(&acr->base, sb); - if (ret) - return ret; - } - } - - return 0; -} - -/** - * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded - * - * Reset is done by re-executing secure boot from scratch, with lazy bootstrap - * disabled. This has the effect of making all managed falcons ready-to-run. - */ -static int -acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb, - unsigned long falcon_mask) -{ - int falcon; - int ret; - - /* - * Perform secure boot each time we are called on FECS. Since only FECS - * and GPCCS are managed and started together, this ought to be safe. 
- */ - if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS))) - goto end; - - ret = acr_r352_shutdown(acr, sb); - if (ret) - return ret; - - ret = acr_r352_bootstrap(acr, sb); - if (ret) - return ret; - -end: - for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) { - acr->falcon_state[falcon] = RESET; - } - return 0; -} - -/* - * acr_r352_reset() - execute secure boot from the prepared state - * - * Load the HS bootloader and ask the falcon to run it. This will in turn - * load the HS firmware and run it, so once the falcon stops all the managed - * falcons should have their LS firmware loaded and be ready to run. - */ -static int -acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb, - unsigned long falcon_mask) -{ - struct acr_r352 *acr = acr_r352(_acr); - struct nvkm_msgqueue *queue; - int falcon; - bool wpr_already_set = sb->wpr_set; - int ret; - - /* Make sure secure boot is performed */ - ret = acr_r352_bootstrap(acr, sb); - if (ret) - return ret; - - /* No PMU interface? */ - if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) { - /* Redo secure boot entirely if it was already done */ - if (wpr_already_set) - return acr_r352_reset_nopmu(acr, sb, falcon_mask); - /* Else return the result of the initial invokation */ - else - return ret; - } - - switch (_acr->boot_falcon) { - case NVKM_SECBOOT_FALCON_PMU: - queue = sb->subdev.device->pmu->queue; - break; - case NVKM_SECBOOT_FALCON_SEC2: - queue = sb->subdev.device->sec2->queue; - break; - default: - return -EINVAL; - } - - /* Otherwise just ask the LS firmware to reset the falcon */ - for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) - nvkm_debug(&sb->subdev, "resetting %s falcon\n", - nvkm_secboot_falcon_name[falcon]); - ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask); - if (ret) { - nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret); - return ret; - } - nvkm_debug(&sb->subdev, "falcon reset done\n"); - - return 0; -} - -static int -acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend) -{ - struct acr_r352 *acr = acr_r352(_acr); - - return acr_r352_shutdown(acr, sb); -} - -static void -acr_r352_dtor(struct nvkm_acr *_acr) -{ - struct acr_r352 *acr = acr_r352(_acr); - - nvkm_gpuobj_del(&acr->unload_blob); - - if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU) - kfree(acr->hsbl_unload_blob); - kfree(acr->hsbl_blob); - nvkm_gpuobj_del(&acr->load_blob); - nvkm_gpuobj_del(&acr->ls_blob); - - kfree(acr); -} - -static const struct acr_r352_lsf_func -acr_r352_ls_fecs_func_0 = { - .generate_bl_desc = acr_r352_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r352_ls_fecs_func = { - .load = acr_ls_ucode_load_fecs, - .version_max = 0, - .version = { - &acr_r352_ls_fecs_func_0, - } -}; - -static const struct acr_r352_lsf_func -acr_r352_ls_gpccs_func_0 = { - .generate_bl_desc = acr_r352_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), - /* GPCCS will be loaded using PRI */ - .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, -}; - -static const struct acr_r352_ls_func -acr_r352_ls_gpccs_func = { - .load = acr_ls_ucode_load_gpccs, - .version_max = 0, - .version = { - &acr_r352_ls_gpccs_func_0, - } -}; - - - -/** - * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor - * @dma_idx: DMA context to be used by BL while loading code/data - * @code_dma_base: 256B-aligned Physical FB Address where code is located - * @total_code_size: total size of the code part in the ucode - * 
@code_size_to_load: size of the code part to load in PMU IMEM. - * @code_entry_point: entry point in the code. - * @data_dma_base: Physical FB address where data part of ucode is located - * @data_size: Total size of the data portion. - * @overlay_dma_base: Physical Fb address for resident code present in ucode - * @argc: Total number of args - * @argv: offset where args are copied into PMU's DMEM. - * - * Structure used by the PMU bootloader to load the rest of the code - */ -struct acr_r352_pmu_bl_desc { - u32 dma_idx; - u32 code_dma_base; - u32 code_size_total; - u32 code_size_to_load; - u32 code_entry_point; - u32 data_dma_base; - u32 data_size; - u32 overlay_dma_base; - u32 argc; - u32 argv; - u16 code_dma_base1; - u16 data_dma_base1; - u16 overlay_dma_base1; -}; - -/** - * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for PMU LS image - * - */ -static void -acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_pmu *pmu = acr->subdev->device->pmu; - struct acr_r352_pmu_bl_desc *desc = _desc; - u64 base; - u64 addr_code; - u64 addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = (base + pdesc->app_resident_code_offset) >> 8; - addr_data = (base + pdesc->app_resident_data_offset) >> 8; - addr_args = pmu->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->dma_idx = FALCON_DMAIDX_UCODE; - desc->code_dma_base = lower_32_bits(addr_code); - desc->code_dma_base1 = upper_32_bits(addr_code); - desc->code_size_total = pdesc->app_size; - desc->code_size_to_load = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = lower_32_bits(addr_data); - desc->data_dma_base1 = upper_32_bits(addr_data); - desc->data_size = pdesc->app_resident_data_size; - desc->overlay_dma_base = lower_32_bits(addr_code); - desc->overlay_dma_base1 = upper_32_bits(addr_code); - desc->argc = 1; - desc->argv = addr_args; -} - -static const struct acr_r352_lsf_func -acr_r352_ls_pmu_func_0 = { - .generate_bl_desc = acr_r352_generate_pmu_bl_desc, - .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc), -}; - -static const struct acr_r352_ls_func -acr_r352_ls_pmu_func = { - .load = acr_ls_ucode_load_pmu, - .post_run = acr_ls_pmu_post_run, - .version_max = 0, - .version = { - &acr_r352_ls_pmu_func_0, - } -}; - -const struct acr_r352_func -acr_r352_func = { - .fixup_hs_desc = acr_r352_fixup_hs_desc, - .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), - .ls_ucode_img_load = acr_r352_ls_ucode_img_load, - .ls_fill_headers = acr_r352_ls_fill_headers, - .ls_write_wpr = acr_r352_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func, - }, -}; - -static const struct nvkm_acr_func -acr_r352_base_func = { - .dtor = acr_r352_dtor, - .fini = acr_r352_fini, - .load = acr_r352_load, - .reset = acr_r352_reset, -}; - -struct nvkm_acr * -acr_r352_new_(const struct acr_r352_func *func, - enum nvkm_secboot_falcon boot_falcon, - unsigned long managed_falcons) -{ - struct acr_r352 *acr; - int i; - - /* Check that all requested falcons are supported */ - for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) { - if (!func->ls_func[i]) - return ERR_PTR(-ENOTSUPP); 
- } - - acr = kzalloc(sizeof(*acr), GFP_KERNEL); - if (!acr) - return ERR_PTR(-ENOMEM); - - acr->base.boot_falcon = boot_falcon; - acr->base.managed_falcons = managed_falcons; - acr->base.func = &acr_r352_base_func; - acr->func = func; - - return &acr->base; -} - -struct nvkm_acr * -acr_r352_new(unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU, - managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h deleted file mode 100644 index e516cab849dd..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ -#ifndef __NVKM_SECBOOT_ACR_R352_H__ -#define __NVKM_SECBOOT_ACR_R352_H__ - -#include "acr.h" -#include "ls_ucode.h" -#include "hs_ucode.h" - -struct ls_ucode_img; - -#define ACR_R352_MAX_APPS 8 - -#define LSF_FLAG_LOAD_CODE_AT_0 1 -#define LSF_FLAG_DMACTL_REQ_CTX 4 -#define LSF_FLAG_FORCE_PRIV_LOAD 8 - -static inline u32 -hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app) -{ - return hdr->apps[app]; -} - -static inline u32 -hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app) -{ - return hdr->apps[hdr->num_apps + app]; -} - -/** - * struct acr_r352_lsf_func - manages a specific LS firmware version - * - * @generate_bl_desc: function called on a block of bl_desc_size to generate the - * proper bootloader descriptor for this LS firmware - * @bl_desc_size: size of the bootloader descriptor - * @lhdr_flags: LS flags - */ -struct acr_r352_lsf_func { - void (*generate_bl_desc)(const struct nvkm_acr *, - const struct ls_ucode_img *, u64, void *); - u32 bl_desc_size; - u32 lhdr_flags; -}; - -/** - * struct acr_r352_ls_func - manages a single LS falcon - * - * @load: load the external firmware into a ls_ucode_img - * @post_run: hook called right after the ACR is executed - */ -struct acr_r352_ls_func { - int (*load)(const struct nvkm_secboot *, int maxver, - struct ls_ucode_img *); - int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *); - int version_max; - const struct acr_r352_lsf_func *version[]; -}; - -struct acr_r352; - -/** - * struct acr_r352_func - manages nuances between ACR versions - * - * @generate_hs_bl_desc: function called on a block of bl_desc_size to generate - * the proper HS bootloader descriptor - * @hs_bl_desc_size: size of the HS bootloader descriptor - */ -struct acr_r352_func { - void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *, - u64); - void (*fixup_hs_desc)(struct acr_r352 *, struct nvkm_secboot *, void *); - u32 hs_bl_desc_size; - bool shadow_blob; - - struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *, - const struct nvkm_secboot *, - enum nvkm_secboot_falcon); - int (*ls_fill_headers)(struct acr_r352 *, struct list_head *); - int (*ls_write_wpr)(struct acr_r352 *, struct list_head *, - struct nvkm_gpuobj *, u64); - - const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END]; -}; - -/** - * struct acr_r352 - ACR data for driver release 352 (and beyond) - */ -struct acr_r352 { - struct nvkm_acr base; - const struct acr_r352_func *func; - - /* - * HS FW - lock WPR region (dGPU only) and load LS FWs - * on Tegra the HS FW copies the LS blob into the fixed WPR instead - */ - struct nvkm_gpuobj *load_blob; - struct { - struct hsf_load_header load_bl_header; - u32 __load_apps[ACR_R352_MAX_APPS * 2]; - }; - - /* HS FW - unlock WPR region (dGPU only) */ - struct nvkm_gpuobj *unload_blob; - struct { - struct hsf_load_header unload_bl_header; - u32 __unload_apps[ACR_R352_MAX_APPS * 2]; - }; - - /* HS bootloader */ - void *hsbl_blob; - - /* HS bootloader for unload blob, if using a different falcon */ - void *hsbl_unload_blob; - - /* LS FWs, to be loaded by the HS ACR */ - struct nvkm_gpuobj *ls_blob; - - /* Firmware already loaded? 
*/ - bool firmware_ok; - - /* Falcons to lazy-bootstrap */ - u32 lazy_bootstrap; - - /* To keep track of the state of all managed falcons */ - enum { - /* In non-secure state, no firmware loaded, no privileges*/ - NON_SECURE = 0, - /* In low-secure mode and ready to be started */ - RESET, - /* In low-secure mode and running */ - RUNNING, - } falcon_state[NVKM_SECBOOT_FALCON_END]; -}; -#define acr_r352(acr) container_of(acr, struct acr_r352, base) - -struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *, - enum nvkm_secboot_falcon, unsigned long); - -struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *, - const struct nvkm_secboot *, - enum nvkm_secboot_falcon); -int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *); -int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *, - struct nvkm_gpuobj *, u64); - -void acr_r352_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c deleted file mode 100644 index f6b2d20d7fc3..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "acr_r361.h" - -#include <engine/falcon.h> -#include <core/msgqueue.h> -#include <subdev/pmu.h> -#include <engine/sec2.h> - -static void -acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - struct acr_r361_flcn_bl_desc *desc = _desc; - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - u64 base, addr_code, addr_data; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = base + pdesc->app_resident_code_offset; - addr_data = base + pdesc->app_resident_data_offset; - - desc->ctx_dma = FALCON_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; -} - -void -acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, - u64 offset) -{ - struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc; - - bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; - bl_desc->code_dma_base = u64_to_flcn64(offset); - bl_desc->non_sec_code_off = hdr->non_sec_code_off; - bl_desc->non_sec_code_size = hdr->non_sec_code_size; - bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0); - bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0); - bl_desc->code_entry_point = 0; - bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base); - bl_desc->data_size = hdr->data_size; -} - -static const struct acr_r352_lsf_func -acr_r361_ls_fecs_func_0 = { - .generate_bl_desc = acr_r361_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r361_ls_fecs_func = { - .load = acr_ls_ucode_load_fecs, - .version_max = 0, - .version = { - &acr_r361_ls_fecs_func_0, - } -}; - -static const struct acr_r352_lsf_func -acr_r361_ls_gpccs_func_0 = { - .generate_bl_desc = acr_r361_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), - /* GPCCS will be loaded using PRI */ - .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, -}; - -const struct acr_r352_ls_func -acr_r361_ls_gpccs_func = { - .load = acr_ls_ucode_load_gpccs, - .version_max = 0, - .version = { - &acr_r361_ls_gpccs_func_0, - } -}; - -struct acr_r361_pmu_bl_desc { - u32 reserved; - u32 dma_idx; - struct flcn_u64 code_dma_base; - u32 total_code_size; - u32 code_size_to_load; - u32 code_entry_point; - struct flcn_u64 data_dma_base; - u32 data_size; - struct flcn_u64 overlay_dma_base; - u32 argc; - u32 argv; -}; - -static void -acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_pmu *pmu = acr->subdev->device->pmu; - struct acr_r361_pmu_bl_desc *desc = _desc; - u64 base, addr_code, addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = base + pdesc->app_resident_code_offset; - addr_data = base + pdesc->app_resident_data_offset; - addr_args = pmu->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->dma_idx = FALCON_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->total_code_size = pdesc->app_size; - desc->code_size_to_load = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - 
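The descriptor generators in this file all derive their DMA addresses the same way: base = wpr_addr + ucode_off + app_start_offset, with code and data at their resident offsets from that base, and each 64-bit address handed to the falcon as a lo/hi pair. A standalone sketch of that arithmetic, assuming the usual lo/hi split for flcn_u64; the sample offsets are illustrative, not taken from real firmware:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Assumed lo/hi layout of the falcon 64-bit DMA address. */
struct flcn_u64 {
	u32 lo;
	u32 hi;
};

static struct flcn_u64 u64_to_flcn64_sketch(u64 addr)
{
	return (struct flcn_u64){ .lo = (u32)addr, .hi = (u32)(addr >> 32) };
}

int main(void)
{
	/* Made-up WPR placement and image offsets. */
	u64 wpr_addr = 0x1f0000000ULL;
	u64 ucode_off = 0x2000, app_start = 0x100;
	u64 code_off = 0x0, data_off = 0x1800;

	u64 base = wpr_addr + ucode_off + app_start;
	struct flcn_u64 code = u64_to_flcn64_sketch(base + code_off);
	struct flcn_u64 data = u64_to_flcn64_sketch(base + data_off);

	printf("code dma base: hi 0x%x lo 0x%x\n", code.hi, code.lo);
	printf("data dma base: hi 0x%x lo 0x%x\n", data.hi, data.lo);
	return 0;
}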
desc->data_size = pdesc->app_resident_data_size; - desc->overlay_dma_base = u64_to_flcn64(addr_code); - desc->argc = 1; - desc->argv = addr_args; -} - -static const struct acr_r352_lsf_func -acr_r361_ls_pmu_func_0 = { - .generate_bl_desc = acr_r361_generate_pmu_bl_desc, - .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r361_ls_pmu_func = { - .load = acr_ls_ucode_load_pmu, - .post_run = acr_ls_pmu_post_run, - .version_max = 0, - .version = { - &acr_r361_ls_pmu_func_0, - } -}; - -static void -acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_sec2 *sec = acr->subdev->device->sec2; - struct acr_r361_pmu_bl_desc *desc = _desc; - u64 base, addr_code, addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - /* For some reason we should not add app_resident_code_offset here */ - addr_code = base; - addr_data = base + pdesc->app_resident_data_offset; - addr_args = sec->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->total_code_size = pdesc->app_size; - desc->code_size_to_load = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; - desc->overlay_dma_base = u64_to_flcn64(addr_code); - desc->argc = 1; - /* args are stored at the beginning of EMEM */ - desc->argv = 0x01000000; -} - -const struct acr_r352_lsf_func -acr_r361_ls_sec2_func_0 = { - .generate_bl_desc = acr_r361_generate_sec2_bl_desc, - .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc), -}; - -static const struct acr_r352_ls_func -acr_r361_ls_sec2_func = { - .load = acr_ls_ucode_load_sec2, - .post_run = acr_ls_sec2_post_run, - .version_max = 0, - .version = { - &acr_r361_ls_sec2_func_0, - } -}; - - -const struct acr_r352_func -acr_r361_func = { - .fixup_hs_desc = acr_r352_fixup_hs_desc, - .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), - .ls_ucode_img_load = acr_r352_ls_ucode_img_load, - .ls_fill_headers = acr_r352_ls_fill_headers, - .ls_write_wpr = acr_r352_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, - [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func, - }, -}; - -struct nvkm_acr * -acr_r361_new(unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU, - managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h deleted file mode 100644 index 38dec93779c8..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_ACR_R361_H__ -#define __NVKM_SECBOOT_ACR_R361_H__ - -#include "acr_r352.h" - -/** - * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor - * @signature: 16B signature for secure code. 0s if no secure code - * @ctx_dma: DMA context to be used by BL while loading code/data - * @code_dma_base: 256B-aligned Physical FB Address where code is located - * (falcon's $xcbase register) - * @non_sec_code_off: offset from code_dma_base where the non-secure code is - * located. The offset must be multiple of 256 to help perf - * @non_sec_code_size: the size of the nonSecure code part. - * @sec_code_off: offset from code_dma_base where the secure code is - * located. The offset must be multiple of 256 to help perf - * @sec_code_size: offset from code_dma_base where the secure code is - * located. The offset must be multiple of 256 to help perf - * @code_entry_point: code entry point which will be invoked by BL after - * code is loaded. - * @data_dma_base: 256B aligned Physical FB Address where data is located. - * (falcon's $xdbase register) - * @data_size: size of data block. Should be multiple of 256B - * - * Structure used by the bootloader to load the rest of the code. This has - * to be filled by host and copied into DMEM at offset provided in the - * hsflcn_bl_desc.bl_desc_dmem_load_off. - */ -struct acr_r361_flcn_bl_desc { - u32 reserved[4]; - u32 signature[4]; - u32 ctx_dma; - struct flcn_u64 code_dma_base; - u32 non_sec_code_off; - u32 non_sec_code_size; - u32 sec_code_off; - u32 sec_code_size; - u32 code_entry_point; - struct flcn_u64 data_dma_base; - u32 data_size; -}; - -void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64); - -extern const struct acr_r352_ls_func acr_r361_ls_fecs_func; -extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func; -extern const struct acr_r352_ls_func acr_r361_ls_pmu_func; -extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0; -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c deleted file mode 100644 index 30cf04109991..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr_r361.h" - -#include <core/gpuobj.h> - -/* - * r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem - * parameter. - */ - -struct acr_r364_hsflcn_desc { - union { - u8 reserved_dmem[0x200]; - u32 signatures[4]; - } ucode_reserved_space; - u32 wpr_region_id; - u32 wpr_offset; - u32 mmu_memory_range; - struct { - u32 no_regions; - struct { - u32 start_addr; - u32 end_addr; - u32 region_id; - u32 read_mask; - u32 write_mask; - u32 client_mask; - u32 shadow_mem_start_addr; - } region_props[2]; - } regions; - u32 ucode_blob_size; - u64 ucode_blob_base __aligned(8); - struct { - u32 vpr_enabled; - u32 vpr_start; - u32 vpr_end; - u32 hdcp_policies; - } vpr_desc; -}; - -static void -acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, - void *_desc) -{ - struct acr_r364_hsflcn_desc *desc = _desc; - struct nvkm_gpuobj *ls_blob = acr->ls_blob; - - /* WPR region information if WPR is not fixed */ - if (sb->wpr_size == 0) { - u64 wpr_start = ls_blob->addr; - u64 wpr_end = ls_blob->addr + ls_blob->size; - - if (acr->func->shadow_blob) - wpr_start += ls_blob->size / 2; - - desc->wpr_region_id = 1; - desc->regions.no_regions = 2; - desc->regions.region_props[0].start_addr = wpr_start >> 8; - desc->regions.region_props[0].end_addr = wpr_end >> 8; - desc->regions.region_props[0].region_id = 1; - desc->regions.region_props[0].read_mask = 0xf; - desc->regions.region_props[0].write_mask = 0xc; - desc->regions.region_props[0].client_mask = 0x2; - if (acr->func->shadow_blob) - desc->regions.region_props[0].shadow_mem_start_addr = - ls_blob->addr >> 8; - else - desc->regions.region_props[0].shadow_mem_start_addr = 0; - } else { - desc->ucode_blob_base = ls_blob->addr; - desc->ucode_blob_size = ls_blob->size; - } -} - -const struct acr_r352_func -acr_r364_func = { - .fixup_hs_desc = acr_r364_fixup_hs_desc, - .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), - .ls_ucode_img_load = acr_r352_ls_ucode_img_load, - .ls_fill_headers = acr_r352_ls_fill_headers, - .ls_write_wpr = acr_r352_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, - }, -}; - - -struct nvkm_acr * -acr_r364_new(unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU, - 
managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c deleted file mode 100644 index 472ced29da7e..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr_r367.h" -#include "acr_r361.h" -#include "acr_r370.h" - -#include <core/gpuobj.h> - -/* - * r367 ACR: new LS signature format requires a rewrite of LS firmware and - * blob creation functions. Also the hsflcn_desc layout has changed slightly. - */ - -#define LSF_LSB_DEPMAP_SIZE 11 - -/** - * struct acr_r367_lsf_lsb_header - LS firmware header - * - * See also struct acr_r352_lsf_lsb_header for documentation. - */ -struct acr_r367_lsf_lsb_header { - /** - * LS falcon signatures - * @prd_keys: signature to use in production mode - * @dgb_keys: signature to use in debug mode - * @b_prd_present: whether the production key is present - * @b_dgb_present: whether the debug key is present - * @falcon_id: ID of the falcon the ucode applies to - */ - struct { - u8 prd_keys[2][16]; - u8 dbg_keys[2][16]; - u32 b_prd_present; - u32 b_dbg_present; - u32 falcon_id; - u32 supports_versioning; - u32 version; - u32 depmap_count; - u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4]; - u8 kdf[16]; - } signature; - u32 ucode_off; - u32 ucode_size; - u32 data_size; - u32 bl_code_size; - u32 bl_imem_off; - u32 bl_data_off; - u32 bl_data_size; - u32 app_code_off; - u32 app_code_size; - u32 app_data_off; - u32 app_data_size; - u32 flags; -}; - -/** - * struct acr_r367_lsf_wpr_header - LS blob WPR Header - * - * See also struct acr_r352_lsf_wpr_header for documentation. 
- */ -struct acr_r367_lsf_wpr_header { - u32 falcon_id; - u32 lsb_offset; - u32 bootstrap_owner; - u32 lazy_bootstrap; - u32 bin_version; - u32 status; -#define LSF_IMAGE_STATUS_NONE 0 -#define LSF_IMAGE_STATUS_COPY 1 -#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2 -#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3 -#define LSF_IMAGE_STATUS_VALIDATION_DONE 4 -#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5 -#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6 -#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED 7 -}; - -/** - * struct ls_ucode_img_r367 - ucode image augmented with r367 headers - */ -struct ls_ucode_img_r367 { - struct ls_ucode_img base; - - const struct acr_r352_lsf_func *func; - - struct acr_r367_lsf_wpr_header wpr_header; - struct acr_r367_lsf_lsb_header lsb_header; -}; -#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base) - -struct ls_ucode_img * -acr_r367_ls_ucode_img_load(const struct acr_r352 *acr, - const struct nvkm_secboot *sb, - enum nvkm_secboot_falcon falcon_id) -{ - const struct nvkm_subdev *subdev = acr->base.subdev; - const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id]; - struct ls_ucode_img_r367 *img; - int ret; - - img = kzalloc(sizeof(*img), GFP_KERNEL); - if (!img) - return ERR_PTR(-ENOMEM); - - img->base.falcon_id = falcon_id; - - ret = func->load(sb, func->version_max, &img->base); - if (ret < 0) { - kfree(img->base.ucode_data); - kfree(img->base.sig); - kfree(img); - return ERR_PTR(ret); - } - - img->func = func->version[ret]; - - /* Check that the signature size matches our expectations... */ - if (img->base.sig_size != sizeof(img->lsb_header.signature)) { - nvkm_error(subdev, "invalid signature size for %s falcon!\n", - nvkm_secboot_falcon_name[falcon_id]); - return ERR_PTR(-EINVAL); - } - - /* Copy signature to the right place */ - memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size); - - /* not needed? the signature should already have the right value */ - img->lsb_header.signature.falcon_id = falcon_id; - - return &img->base; -} - -#define LSF_LSB_HEADER_ALIGN 256 -#define LSF_BL_DATA_ALIGN 256 -#define LSF_BL_DATA_SIZE_ALIGN 256 -#define LSF_BL_CODE_SIZE_ALIGN 256 -#define LSF_UCODE_DATA_ALIGN 4096 - -static u32 -acr_r367_ls_img_fill_headers(struct acr_r352 *acr, - struct ls_ucode_img_r367 *img, u32 offset) -{ - struct ls_ucode_img *_img = &img->base; - struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header; - struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header; - struct ls_ucode_img_desc *desc = &_img->ucode_desc; - const struct acr_r352_lsf_func *func = img->func; - - /* Fill WPR header */ - whdr->falcon_id = _img->falcon_id; - whdr->bootstrap_owner = acr->base.boot_falcon; - whdr->bin_version = lhdr->signature.version; - whdr->status = LSF_IMAGE_STATUS_COPY; - - /* Skip bootstrapping falcons started by someone else than ACR */ - if (acr->lazy_bootstrap & BIT(_img->falcon_id)) - whdr->lazy_bootstrap = 1; - - /* Align, save off, and include an LSB header size */ - offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN); - whdr->lsb_offset = offset; - offset += sizeof(*lhdr); - - /* - * Align, save off, and include the original (static) ucode - * image size - */ - offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN); - _img->ucode_off = lhdr->ucode_off = offset; - offset += _img->ucode_size; - - /* - * For falcons that use a boot loader (BL), we append a loader - * desc structure on the end of the ucode image and consider - * this the boot loader data. 
The host will then copy the loader - * desc args to this space within the WPR region (before locking - * down) and the HS bin will then copy them to DMEM 0 for the - * loader. - */ - lhdr->bl_code_size = ALIGN(desc->bootloader_size, - LSF_BL_CODE_SIZE_ALIGN); - lhdr->ucode_size = ALIGN(desc->app_resident_data_offset, - LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size; - lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) + - lhdr->bl_code_size - lhdr->ucode_size; - /* - * Though the BL is located at 0th offset of the image, the VA - * is different to make sure that it doesn't collide the actual - * OS VA range - */ - lhdr->bl_imem_off = desc->bootloader_imem_offset; - lhdr->app_code_off = desc->app_start_offset + - desc->app_resident_code_offset; - lhdr->app_code_size = desc->app_resident_code_size; - lhdr->app_data_off = desc->app_start_offset + - desc->app_resident_data_offset; - lhdr->app_data_size = desc->app_resident_data_size; - - lhdr->flags = func->lhdr_flags; - if (_img->falcon_id == acr->base.boot_falcon) - lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX; - - /* Align and save off BL descriptor size */ - lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN); - - /* - * Align, save off, and include the additional BL data - */ - offset = ALIGN(offset, LSF_BL_DATA_ALIGN); - lhdr->bl_data_off = offset; - offset += lhdr->bl_data_size; - - return offset; -} - -int -acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs) -{ - struct ls_ucode_img_r367 *img; - struct list_head *l; - u32 count = 0; - u32 offset; - - /* Count the number of images to manage */ - list_for_each(l, imgs) - count++; - - /* - * Start with an array of WPR headers at the base of the WPR. - * The expectation here is that the secure falcon will do a single DMA - * read of this array and cache it internally so it's ok to pack these. - * Also, we add 1 to the falcon count to indicate the end of the array. - */ - offset = sizeof(img->wpr_header) * (count + 1); - - /* - * Walk the managed falcons, accounting for the LSB structs - * as well as the ucode images. 
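The header-filling walk above lays the WPR blob out as: an array of (count + 1) WPR headers at the base (the extra slot terminates the array), then for each image a 256-byte-aligned LSB header, the ucode image aligned to 4096 bytes, and the bootloader data aligned to 256 bytes. A standalone sketch of that offset arithmetic with two images of invented sizes; the 24-byte header size matches the six u32 fields of the r367 WPR header above, the LSB header size is illustrative:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

#define LSF_LSB_HEADER_ALIGN	256
#define LSF_UCODE_DATA_ALIGN	4096
#define LSF_BL_DATA_ALIGN	256
#define WPR_HEADER_SIZE		24	/* six u32 fields */
#define LSB_HEADER_SIZE		0x200	/* illustrative */

int main(void)
{
	u32 ucode_size[] = { 0x9000, 0x4800 };	/* made-up image sizes */
	u32 bl_data_size = 0x100;		/* already 256B aligned */
	u32 count = 2, offset, i;

	/* WPR header array, plus one terminator slot. */
	offset = WPR_HEADER_SIZE * (count + 1);

	for (i = 0; i < count; i++) {
		offset = ALIGN_UP(offset, LSF_LSB_HEADER_ALIGN);
		printf("image %u: lsb header at 0x%x\n", i, offset);
		offset += LSB_HEADER_SIZE;

		offset = ALIGN_UP(offset, LSF_UCODE_DATA_ALIGN);
		printf("image %u: ucode at 0x%x\n", i, offset);
		offset += ucode_size[i];

		offset = ALIGN_UP(offset, LSF_BL_DATA_ALIGN);
		printf("image %u: bl data at 0x%x\n", i, offset);
		offset += bl_data_size;
	}
	printf("total blob size: 0x%x\n", offset);
	return 0;
}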
- */ - list_for_each_entry(img, imgs, base.node) { - offset = acr_r367_ls_img_fill_headers(acr, img, offset); - } - - return offset; -} - -int -acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, - struct nvkm_gpuobj *wpr_blob, u64 wpr_addr) -{ - struct ls_ucode_img *_img; - u32 pos = 0; - u32 max_desc_size = 0; - u8 *gdesc; - - list_for_each_entry(_img, imgs, node) { - struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img); - const struct acr_r352_lsf_func *ls_func = img->func; - - max_desc_size = max(max_desc_size, ls_func->bl_desc_size); - } - - gdesc = kmalloc(max_desc_size, GFP_KERNEL); - if (!gdesc) - return -ENOMEM; - - nvkm_kmap(wpr_blob); - - list_for_each_entry(_img, imgs, node) { - struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img); - const struct acr_r352_lsf_func *ls_func = img->func; - - nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, - sizeof(img->wpr_header)); - - nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset, - &img->lsb_header, sizeof(img->lsb_header)); - - /* Generate and write BL descriptor */ - memset(gdesc, 0, ls_func->bl_desc_size); - ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc); - - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off, - gdesc, ls_func->bl_desc_size); - - /* Copy ucode */ - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off, - _img->ucode_data, _img->ucode_size); - - pos += sizeof(img->wpr_header); - } - - nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID); - - nvkm_done(wpr_blob); - - kfree(gdesc); - - return 0; -} - -struct acr_r367_hsflcn_desc { - u8 reserved_dmem[0x200]; - u32 signatures[4]; - u32 wpr_region_id; - u32 wpr_offset; - u32 mmu_memory_range; -#define FLCN_ACR_MAX_REGIONS 2 - struct { - u32 no_regions; - struct { - u32 start_addr; - u32 end_addr; - u32 region_id; - u32 read_mask; - u32 write_mask; - u32 client_mask; - u32 shadow_mem_start_addr; - } region_props[FLCN_ACR_MAX_REGIONS]; - } regions; - u32 ucode_blob_size; - u64 ucode_blob_base __aligned(8); - struct { - u32 vpr_enabled; - u32 vpr_start; - u32 vpr_end; - u32 hdcp_policies; - } vpr_desc; -}; - -void -acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, - void *_desc) -{ - struct acr_r367_hsflcn_desc *desc = _desc; - struct nvkm_gpuobj *ls_blob = acr->ls_blob; - - /* WPR region information if WPR is not fixed */ - if (sb->wpr_size == 0) { - u64 wpr_start = ls_blob->addr; - u64 wpr_end = ls_blob->addr + ls_blob->size; - - if (acr->func->shadow_blob) - wpr_start += ls_blob->size / 2; - - desc->wpr_region_id = 1; - desc->regions.no_regions = 2; - desc->regions.region_props[0].start_addr = wpr_start >> 8; - desc->regions.region_props[0].end_addr = wpr_end >> 8; - desc->regions.region_props[0].region_id = 1; - desc->regions.region_props[0].read_mask = 0xf; - desc->regions.region_props[0].write_mask = 0xc; - desc->regions.region_props[0].client_mask = 0x2; - if (acr->func->shadow_blob) - desc->regions.region_props[0].shadow_mem_start_addr = - ls_blob->addr >> 8; - else - desc->regions.region_props[0].shadow_mem_start_addr = 0; - } else { - desc->ucode_blob_base = ls_blob->addr; - desc->ucode_blob_size = ls_blob->size; - } -} - -static const struct acr_r352_ls_func -acr_r367_ls_sec2_func = { - .load = acr_ls_ucode_load_sec2, - .post_run = acr_ls_sec2_post_run, - .version_max = 1, - .version = { - &acr_r361_ls_sec2_func_0, - &acr_r370_ls_sec2_func_0, - } -}; - -const struct acr_r352_func -acr_r367_func = { - .fixup_hs_desc = acr_r367_fixup_hs_desc, - .generate_hs_bl_desc = 
acr_r361_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), - .shadow_blob = true, - .ls_ucode_img_load = acr_r367_ls_ucode_img_load, - .ls_fill_headers = acr_r367_ls_fill_headers, - .ls_write_wpr = acr_r367_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, - [NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func, - }, -}; - -struct nvkm_acr * -acr_r367_new(enum nvkm_secboot_falcon boot_falcon, - unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c deleted file mode 100644 index e821d0fd6217..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
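When the WPR region is not fixed by the bootloader (the dGPU case), the fixup_hs_desc() implementations above describe the LS blob to the HS firmware in 256-byte units, and when a shadow copy is kept in the first half of the blob the protected region starts at its midpoint. A minimal sketch of that arithmetic with invented addresses:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

int main(void)
{
	/* Made-up LS blob placement in VRAM. */
	u64 ls_addr = 0x200000000ULL;
	u64 ls_size = 0x100000;
	int shadow_blob = 1;	/* r367/r370-style layout */

	u64 wpr_start = ls_addr, wpr_end = ls_addr + ls_size;

	if (shadow_blob)
		wpr_start += ls_size / 2;

	/* The descriptor stores region bounds in 256-byte units. */
	printf("region start: 0x%llx\n", (unsigned long long)(wpr_start >> 8));
	printf("region end:   0x%llx\n", (unsigned long long)(wpr_end >> 8));
	printf("shadow start: 0x%llx\n",
	       (unsigned long long)(shadow_blob ? ls_addr >> 8 : 0));
	return 0;
}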
- */ - -#include "acr_r370.h" -#include "acr_r367.h" - -#include <core/msgqueue.h> -#include <engine/falcon.h> -#include <engine/sec2.h> - -static void -acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - struct acr_r370_flcn_bl_desc *desc = _desc; - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - u64 base, addr_code, addr_data; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = base + pdesc->app_resident_code_offset; - addr_data = base + pdesc->app_resident_data_offset; - - desc->ctx_dma = FALCON_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; -} - -static const struct acr_r352_lsf_func -acr_r370_ls_fecs_func_0 = { - .generate_bl_desc = acr_r370_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r370_ls_fecs_func = { - .load = acr_ls_ucode_load_fecs, - .version_max = 0, - .version = { - &acr_r370_ls_fecs_func_0, - } -}; - -static const struct acr_r352_lsf_func -acr_r370_ls_gpccs_func_0 = { - .generate_bl_desc = acr_r370_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), - /* GPCCS will be loaded using PRI */ - .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, -}; - -const struct acr_r352_ls_func -acr_r370_ls_gpccs_func = { - .load = acr_ls_ucode_load_gpccs, - .version_max = 0, - .version = { - &acr_r370_ls_gpccs_func_0, - } -}; - -static void -acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_sec2 *sec = acr->subdev->device->sec2; - struct acr_r370_flcn_bl_desc *desc = _desc; - u64 base, addr_code, addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - /* For some reason we should not add app_resident_code_offset here */ - addr_code = base; - addr_data = base + pdesc->app_resident_data_offset; - addr_args = sec->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; - desc->argc = 1; - /* args are stored at the beginning of EMEM */ - desc->argv = 0x01000000; -} - -const struct acr_r352_lsf_func -acr_r370_ls_sec2_func_0 = { - .generate_bl_desc = acr_r370_generate_sec2_bl_desc, - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r370_ls_sec2_func = { - .load = acr_ls_ucode_load_sec2, - .post_run = acr_ls_sec2_post_run, - .version_max = 0, - .version = { - &acr_r370_ls_sec2_func_0, - } -}; - -void -acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, - u64 offset) -{ - struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc; - - bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; - bl_desc->non_sec_code_off = hdr->non_sec_code_off; - bl_desc->non_sec_code_size = hdr->non_sec_code_size; - 
bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0); - bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0); - bl_desc->code_entry_point = 0; - bl_desc->code_dma_base = u64_to_flcn64(offset); - bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base); - bl_desc->data_size = hdr->data_size; -} - -const struct acr_r352_func -acr_r370_func = { - .fixup_hs_desc = acr_r367_fixup_hs_desc, - .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), - .shadow_blob = true, - .ls_ucode_img_load = acr_r367_ls_ucode_img_load, - .ls_fill_headers = acr_r367_ls_fill_headers, - .ls_write_wpr = acr_r367_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func, - [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func, - }, -}; - -struct nvkm_acr * -acr_r370_new(enum nvkm_secboot_falcon boot_falcon, - unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h deleted file mode 100644 index 2efed6f995ad..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef __NVKM_SECBOOT_ACR_R370_H__ -#define __NVKM_SECBOOT_ACR_R370_H__ - -#include "priv.h" -struct hsf_load_header; - -/* Same as acr_r361_flcn_bl_desc, plus argc/argv */ -struct acr_r370_flcn_bl_desc { - u32 reserved[4]; - u32 signature[4]; - u32 ctx_dma; - struct flcn_u64 code_dma_base; - u32 non_sec_code_off; - u32 non_sec_code_size; - u32 sec_code_off; - u32 sec_code_size; - u32 code_entry_point; - struct flcn_u64 data_dma_base; - u32 data_size; - u32 argc; - u32 argv; -}; - -void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64); -extern const struct acr_r352_ls_func acr_r370_ls_fecs_func; -extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func; -extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0; -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c deleted file mode 100644 index 8f0647766038..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "acr_r370.h" -#include "acr_r367.h" - -#include <core/msgqueue.h> -#include <subdev/pmu.h> - -static void -acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_pmu *pmu = acr->subdev->device->pmu; - struct acr_r370_flcn_bl_desc *desc = _desc; - u64 base, addr_code, addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = base + pdesc->app_resident_code_offset; - addr_data = base + pdesc->app_resident_data_offset; - addr_args = pmu->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->ctx_dma = FALCON_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; - desc->argc = 1; - desc->argv = addr_args; -} - -static const struct acr_r352_lsf_func -acr_r375_ls_pmu_func_0 = { - .generate_bl_desc = acr_r375_generate_pmu_bl_desc, - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r375_ls_pmu_func = { - .load = acr_ls_ucode_load_pmu, - .post_run = acr_ls_pmu_post_run, - .version_max = 0, - .version = { - &acr_r375_ls_pmu_func_0, - } -}; - -const struct acr_r352_func -acr_r375_func = { - .fixup_hs_desc = acr_r367_fixup_hs_desc, - .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), - .shadow_blob = true, - .ls_ucode_img_load = acr_r367_ls_ucode_img_load, - .ls_fill_headers = acr_r367_ls_fill_headers, - .ls_write_wpr = acr_r367_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func, - }, -}; - -struct nvkm_acr * -acr_r375_new(enum nvkm_secboot_falcon boot_falcon, - unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r375_func, boot_falcon, managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c deleted file mode 100644 index ee29c6c11afd..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* - * Secure boot is the process by which NVIDIA-signed firmware is loaded into - * some of the falcons of a GPU. For production devices this is the only way - * for the firmware to access useful (but sensitive) registers. - * - * A Falcon microprocessor supporting advanced security modes can run in one of - * three modes: - * - * - Non-secure (NS). In this mode, functionality is similar to Falcon - * architectures before security modes were introduced (pre-Maxwell), but - * capability is restricted. In particular, certain registers may be - * inaccessible for reads and/or writes, and physical memory access may be - * disabled (on certain Falcon instances). This is the only possible mode that - * can be used if you don't have microcode cryptographically signed by NVIDIA. - * - * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's - * not possible to read or write any Falcon internal state or Falcon registers - * from outside the Falcon (for example, from the host system). The only way - * to enable this mode is by loading microcode that has been signed by NVIDIA. - * (The loading process involves tagging the IMEM block as secure, writing the - * signature into a Falcon register, and starting execution. The hardware will - * validate the signature, and if valid, grant HS privileges.) - * - * - Light Secure (LS). In this mode, the microprocessor has more privileges - * than NS but fewer than HS. Some of the microprocessor state is visible to - * host software to ease debugging. The only way to enable this mode is by HS - * microcode enabling LS mode. Some privileges available to HS mode are not - * available here. LS mode is introduced in GM20x. - * - * Secure boot consists in temporarily switching a HS-capable falcon (typically - * PMU) into HS mode in order to validate the LS firmwares of managed falcons, - * load them, and switch managed falcons into LS mode. Once secure boot - * completes, no falcon remains in HS mode. - * - * Secure boot requires a write-protected memory region (WPR) which can only be - * written by the secure falcon. On dGPU, the driver sets up the WPR region in - * video memory. On Tegra, it is set up by the bootloader and its location and - * size written into memory controller registers. - * - * The secure boot process takes place as follows: - * - * 1) A LS blob is constructed that contains all the LS firmwares we want to - * load, along with their signatures and bootloaders. - * - * 2) A HS blob (also called ACR) is created that contains the signed HS - * firmware in charge of loading the LS firmwares into their respective - * falcons. - * - * 3) The HS blob is loaded (via its own bootloader) and executed on the - * HS-capable falcon. It authenticates itself, switches the secure falcon to - * HS mode and setup the WPR region around the LS blob (dGPU) or copies the - * LS blob into the WPR region (Tegra). - * - * 4) The LS blob is now secure from all external tampering. The HS falcon - * checks the signatures of the LS firmwares and, if valid, switches the - * managed falcons to LS mode and makes them ready to run the LS firmware. - * - * 5) The managed falcons remain in LS mode and can be started. 
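The per-falcon bookkeeping in struct acr_r352 above (NON_SECURE / RESET / RUNNING) maps directly onto the five steps described here: once the ACR has validated the LS blob, every managed falcon sits in RESET (LS mode, ready to start) and moves to RUNNING when started. A small standalone sketch of that state bookkeeping; the state names mirror the enum above, everything else is invented for illustration:

#include <stdio.h>

enum falcon_state { NON_SECURE, RESET, RUNNING };

static const char *state_name(enum falcon_state s)
{
	switch (s) {
	case NON_SECURE: return "non-secure";
	case RESET:      return "LS, ready";
	case RUNNING:    return "LS, running";
	}
	return "?";
}

int main(void)
{
	enum falcon_state fecs = NON_SECURE, gpccs = NON_SECURE;

	/* Steps 1-4: ACR validates the LS blob, switches managed falcons to LS. */
	fecs = RESET;
	gpccs = RESET;

	/* Step 5: the managed falcons are started. */
	fecs = RUNNING;
	gpccs = RUNNING;

	printf("FECS: %s, GPCCS: %s\n", state_name(fecs), state_name(gpccs));
	return 0;
}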
- * - */ - -#include "priv.h" -#include "acr.h" - -#include <subdev/mc.h> -#include <subdev/timer.h> -#include <subdev/pmu.h> -#include <engine/sec2.h> - -const char * -nvkm_secboot_falcon_name[] = { - [NVKM_SECBOOT_FALCON_PMU] = "PMU", - [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>", - [NVKM_SECBOOT_FALCON_FECS] = "FECS", - [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS", - [NVKM_SECBOOT_FALCON_SEC2] = "SEC2", - [NVKM_SECBOOT_FALCON_END] = "<invalid>", -}; -/** - * nvkm_secboot_reset() - reset specified falcon - */ -int -nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask) -{ - /* Unmanaged falcon? */ - if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) { - nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n"); - return -EINVAL; - } - - return sb->acr->func->reset(sb->acr, sb, falcon_mask); -} - -/** - * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed - */ -bool -nvkm_secboot_is_managed(struct nvkm_secboot *sb, enum nvkm_secboot_falcon fid) -{ - if (!sb) - return false; - - return sb->acr->managed_falcons & BIT(fid); -} - -static int -nvkm_secboot_oneinit(struct nvkm_subdev *subdev) -{ - struct nvkm_secboot *sb = nvkm_secboot(subdev); - int ret = 0; - - switch (sb->acr->boot_falcon) { - case NVKM_SECBOOT_FALCON_PMU: - sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon; - break; - case NVKM_SECBOOT_FALCON_SEC2: - /* we must keep SEC2 alive forever since ACR will run on it */ - nvkm_engine_ref(&subdev->device->sec2->engine); - sb->boot_falcon = subdev->device->sec2->falcon; - sb->halt_falcon = subdev->device->pmu->falcon; - break; - default: - nvkm_error(subdev, "Unmanaged boot falcon %s!\n", - nvkm_secboot_falcon_name[sb->acr->boot_falcon]); - return -EINVAL; - } - nvkm_debug(subdev, "using %s falcon for ACR\n", sb->boot_falcon->name); - - /* Call chip-specific init function */ - if (sb->func->oneinit) - ret = sb->func->oneinit(sb); - if (ret) { - nvkm_error(subdev, "Secure Boot initialization failed: %d\n", - ret); - return ret; - } - - return 0; -} - -static int -nvkm_secboot_fini(struct nvkm_subdev *subdev, bool suspend) -{ - struct nvkm_secboot *sb = nvkm_secboot(subdev); - int ret = 0; - - if (sb->func->fini) - ret = sb->func->fini(sb, suspend); - - return ret; -} - -static void * -nvkm_secboot_dtor(struct nvkm_subdev *subdev) -{ - struct nvkm_secboot *sb = nvkm_secboot(subdev); - void *ret = NULL; - - if (sb->func->dtor) - ret = sb->func->dtor(sb); - - return ret; -} - -static const struct nvkm_subdev_func -nvkm_secboot = { - .oneinit = nvkm_secboot_oneinit, - .fini = nvkm_secboot_fini, - .dtor = nvkm_secboot_dtor, -}; - -int -nvkm_secboot_ctor(const struct nvkm_secboot_func *func, struct nvkm_acr *acr, - struct nvkm_device *device, int index, - struct nvkm_secboot *sb) -{ - unsigned long fid; - - nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev); - sb->func = func; - sb->acr = acr; - acr->subdev = &sb->subdev; - - nvkm_debug(&sb->subdev, "securely managed falcons:\n"); - for_each_set_bit(fid, &sb->acr->managed_falcons, - NVKM_SECBOOT_FALCON_END) - nvkm_debug(&sb->subdev, "- %s\n", - nvkm_secboot_falcon_name[fid]); - - return 0; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c deleted file mode 100644 index 5e91b3f90065..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
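nvkm_secboot_reset() above rejects a request for any falcon outside the managed set with a single mask comparison: ORing the requested mask into the managed mask must leave it unchanged, i.e. the request must be a subset. A standalone illustration of that test; the bit positions are arbitrary here, not the real falcon IDs:

#include <stdio.h>

int main(void)
{
	/* Invented bit assignments for two managed falcons and one request. */
	unsigned long managed = (1UL << 2) | (1UL << 3);
	unsigned long request = (1UL << 2) | (1UL << 4);

	/*
	 * ORing the request into the managed mask changes nothing only if
	 * every requested falcon is already managed.
	 */
	int ok = (request | managed) == managed;

	printf("reset request %s\n",
	       ok ? "accepted" : "rejected: unmanaged falcon");
	return 0;
}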
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - - -#include "acr.h" -#include "gm200.h" - -#include <core/gpuobj.h> -#include <subdev/fb.h> -#include <engine/falcon.h> -#include <subdev/mc.h> - -/** - * gm200_secboot_run_blob() - run the given high-secure blob - * - */ -int -gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob, - struct nvkm_falcon *falcon) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - struct nvkm_subdev *subdev = &gsb->base.subdev; - struct nvkm_vma *vma = NULL; - u32 start_address; - int ret; - - ret = nvkm_falcon_get(falcon, subdev); - if (ret) - return ret; - - /* Map the HS firmware so the HS bootloader can see it */ - ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma); - if (ret) { - nvkm_falcon_put(falcon, subdev); - return ret; - } - - ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0); - if (ret) - goto end; - - /* Reset and set the falcon up */ - ret = nvkm_falcon_reset(falcon); - if (ret) - goto end; - nvkm_falcon_bind_context(falcon, gsb->inst); - - /* Load the HS bootloader into the falcon's IMEM/DMEM */ - ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr); - if (ret < 0) - goto end; - - start_address = ret; - - /* Disable interrupts as we will poll for the HALT bit */ - nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false); - - /* Set default error value in mailbox register */ - nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5); - - /* Start the HS bootloader */ - nvkm_falcon_set_start_addr(falcon, start_address); - nvkm_falcon_start(falcon); - ret = nvkm_falcon_wait_for_halt(falcon, 100); - if (ret) - goto end; - - /* - * The mailbox register contains the (positive) error code - return this - * to the caller - */ - ret = nvkm_falcon_rd32(falcon, 0x040); - -end: - /* Reenable interrupts */ - nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true); - - /* We don't need the ACR firmware anymore */ - nvkm_vmm_put(gsb->vmm, &vma); - nvkm_falcon_put(falcon, subdev); - - return ret; -} - -int -gm200_secboot_oneinit(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - struct nvkm_device *device = sb->subdev.device; - int ret; - - /* Allocate instance block and VM */ - ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true, - &gsb->inst); - if (ret) - return ret; - - ret = nvkm_vmm_new(device, 0, 600 * 1024, NULL, 0, NULL, "acr", - &gsb->vmm); - if (ret) - return ret; - - atomic_inc(&gsb->vmm->engref[NVKM_SUBDEV_PMU]); - gsb->vmm->debug = 
gsb->base.subdev.debug; - - ret = nvkm_vmm_join(gsb->vmm, gsb->inst); - if (ret) - return ret; - - if (sb->acr->func->oneinit) { - ret = sb->acr->func->oneinit(sb->acr, sb); - if (ret) - return ret; - } - - return 0; -} - -int -gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend) -{ - int ret = 0; - - if (sb->acr->func->fini) - ret = sb->acr->func->fini(sb->acr, sb, suspend); - - return ret; -} - -void * -gm200_secboot_dtor(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - - sb->acr->func->dtor(sb->acr); - - nvkm_vmm_part(gsb->vmm, gsb->inst); - nvkm_vmm_unref(&gsb->vmm); - nvkm_memory_unref(&gsb->inst); - - return gsb; -} - - -static const struct nvkm_secboot_func -gm200_secboot = { - .dtor = gm200_secboot_dtor, - .oneinit = gm200_secboot_oneinit, - .fini = gm200_secboot_fini, - .run_blob = gm200_secboot_run_blob, -}; - -int -gm200_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - int ret; - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_GPCCS)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; - return -ENOMEM; - } - *psb = &gsb->base; - - ret = nvkm_secboot_ctor(&gm200_secboot, acr, device, index, &gsb->base); - if (ret) - return ret; - - return 0; -} - - -MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin"); - -MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin"); - -MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin"); 
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin"); - -MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h deleted file mode 100644 index 62c5e162099a..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_GM200_H__ -#define __NVKM_SECBOOT_GM200_H__ - -#include "priv.h" - -struct gm200_secboot { - struct nvkm_secboot base; - - /* Instance block & address space used for HS FW execution */ - struct nvkm_memory *inst; - struct nvkm_vmm *vmm; -}; -#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base) - -int gm200_secboot_oneinit(struct nvkm_secboot *); -int gm200_secboot_fini(struct nvkm_secboot *, bool); -void *gm200_secboot_dtor(struct nvkm_secboot *); -int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *, - struct nvkm_falcon *); - -/* Tegra-only */ -int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *, u32); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c deleted file mode 100644 index df8b919dcf09..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr.h" -#include "gm200.h" - -#define TEGRA210_MC_BASE 0x70019000 - -#ifdef CONFIG_ARCH_TEGRA -#define MC_SECURITY_CARVEOUT2_CFG0 0xc58 -#define MC_SECURITY_CARVEOUT2_BOM_0 0xc5c -#define MC_SECURITY_CARVEOUT2_BOM_HI_0 0xc60 -#define MC_SECURITY_CARVEOUT2_SIZE_128K 0xc64 -#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED (1 << 1) -/** - * gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra - * - * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region - * is reserved from system memory by the bootloader and irreversibly locked. - * This function reads the address and size of the pre-configured WPR region. - */ -int -gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base) -{ - struct nvkm_secboot *sb = &gsb->base; - void __iomem *mc; - u32 cfg; - - mc = ioremap(mc_base, 0xd00); - if (!mc) { - nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n"); - return -ENOMEM; - } - sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) | - ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32); - sb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K) - << 17; - cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0); - iounmap(mc); - - /* Check that WPR settings are valid */ - if (sb->wpr_size == 0) { - nvkm_error(&sb->subdev, "WPR region is empty\n"); - return -EINVAL; - } - - if (!(cfg & TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED)) { - nvkm_error(&sb->subdev, "WPR region not locked\n"); - return -EINVAL; - } - - return 0; -} -#else -int -gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base) -{ - nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n"); - return -EINVAL; -} -#endif - -static int -gm20b_secboot_oneinit(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - int ret; - - ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA210_MC_BASE); - if (ret) - return ret; - - return gm200_secboot_oneinit(sb); -} - -static const struct nvkm_secboot_func -gm20b_secboot = { - .dtor = gm200_secboot_dtor, - .oneinit = gm20b_secboot_oneinit, - .fini = gm200_secboot_fini, - .run_blob = gm200_secboot_run_blob, -}; - -int -gm20b_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - int ret; - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_PMU)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - 
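gm20b_secboot_tegra_read_wpr() above recovers the carveout from three MC registers: the base is BOM_0 or'd with BOM_HI_0 shifted up 32 bits, and the size register counts 128 KiB units, hence the shift by 17. A standalone sketch of that decoding with invented register contents:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

int main(void)
{
	/* Invented register contents for illustration. */
	u32 bom_lo = 0xb0000000;	/* MC_SECURITY_CARVEOUT2_BOM_0 */
	u32 bom_hi = 0x1;		/* MC_SECURITY_CARVEOUT2_BOM_HI_0 */
	u32 size_128k = 0x20;		/* MC_SECURITY_CARVEOUT2_SIZE_128K */

	u64 wpr_addr = (u64)bom_lo | ((u64)bom_hi << 32);
	u64 wpr_size = (u64)size_128k << 17;	/* 128 KiB granularity */

	printf("WPR at 0x%llx, size 0x%llx (%llu MiB)\n",
	       (unsigned long long)wpr_addr,
	       (unsigned long long)wpr_size,
	       (unsigned long long)(wpr_size >> 20));
	return 0;
}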
/* Support the initial GM20B firmware release without PMU */ - acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU); - - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; - return -ENOMEM; - } - *psb = &gsb->base; - - ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base); - if (ret) - return ret; - - return 0; -} - -#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) -MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin"); -MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin"); -MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin"); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c deleted file mode 100644 index 4695f1c8e33f..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "acr.h" -#include "gm200.h" - -#include "ls_ucode.h" -#include "hs_ucode.h" -#include <subdev/mc.h> -#include <subdev/timer.h> -#include <engine/falcon.h> -#include <engine/nvdec.h> - -static bool -gp102_secboot_scrub_required(struct nvkm_secboot *sb) -{ - struct nvkm_subdev *subdev = &sb->subdev; - struct nvkm_device *device = subdev->device; - u32 reg; - - nvkm_wr32(device, 0x100cd0, 0x2); - reg = nvkm_rd32(device, 0x100cd0); - - return (reg & BIT(4)); -} - -static int -gp102_run_secure_scrub(struct nvkm_secboot *sb) -{ - struct nvkm_subdev *subdev = &sb->subdev; - struct nvkm_device *device = subdev->device; - struct nvkm_engine *engine; - struct nvkm_falcon *falcon; - void *scrub_image; - struct fw_bin_header *hsbin_hdr; - struct hsf_fw_header *fw_hdr; - struct hsf_load_header *lhdr; - void *scrub_data; - int ret; - - nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n"); - - engine = nvkm_engine_ref(&device->nvdec[0]->engine); - if (IS_ERR(engine)) - return PTR_ERR(engine); - falcon = device->nvdec[0]->falcon; - - nvkm_falcon_get(falcon, &sb->subdev); - - scrub_image = hs_ucode_load_blob(subdev, falcon, "nvdec/scrubber"); - if (IS_ERR(scrub_image)) - return PTR_ERR(scrub_image); - - nvkm_falcon_reset(falcon); - nvkm_falcon_bind_context(falcon, NULL); - - hsbin_hdr = scrub_image; - fw_hdr = scrub_image + hsbin_hdr->header_offset; - lhdr = scrub_image + fw_hdr->hdr_offset; - scrub_data = scrub_image + hsbin_hdr->data_offset; - - nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off, - lhdr->non_sec_code_size, - lhdr->non_sec_code_off >> 8, 0, false); - nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0], - ALIGN(lhdr->apps[0], 0x100), - lhdr->apps[1], - lhdr->apps[0] >> 8, 0, true); - nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0, - lhdr->data_size, 0); - - kfree(scrub_image); - - nvkm_falcon_set_start_addr(falcon, 0x0); - nvkm_falcon_start(falcon); - - ret = nvkm_falcon_wait_for_halt(falcon, 500); - if (ret < 0) { - nvkm_error(subdev, "failed to run VPR scrubber binary!\n"); - ret = -ETIMEDOUT; - goto end; - } - - /* put nvdec in clean state - without reset it will remain in HS mode */ - nvkm_falcon_reset(falcon); - - if (gp102_secboot_scrub_required(sb)) { - nvkm_error(subdev, "VPR scrubber binary failed!\n"); - ret = -EINVAL; - goto end; - } - - nvkm_debug(subdev, "VPR scrub successfully completed\n"); - -end: - nvkm_falcon_put(falcon, &sb->subdev); - nvkm_engine_unref(&engine); - return ret; -} - -static int -gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob, - struct nvkm_falcon *falcon) -{ - int ret; - - /* make sure the VPR region is unlocked */ - if (gp102_secboot_scrub_required(sb)) { - ret = gp102_run_secure_scrub(sb); - if (ret) - return ret; - } - - return gm200_secboot_run_blob(sb, blob, falcon); -} - -const struct nvkm_secboot_func -gp102_secboot = { - .dtor = gm200_secboot_dtor, - .oneinit = gm200_secboot_oneinit, - .fini = gm200_secboot_fini, - .run_blob = gp102_secboot_run_blob, -}; - -int -gp102_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - int ret; - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2, - BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_GPCCS) | - BIT(NVKM_SECBOOT_FALCON_SEC2)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; - return -ENOMEM; - } - *psb = &gsb->base; - - ret = 
nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base); - if (ret) - return ret; - - return 0; -} - -MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin"); -MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin"); -MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin"); 
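The gp102 path removed above only runs the NVDEC scrubber when the VPR region still needs scrubbing, and treats a still-dirty region after the run as a hard failure. A condensed, standalone sketch of that gating logic; scrub_required() and run_scrubber() are hypothetical stand-ins for the MC status check and the falcon run, and the two-stage check is folded into one function here.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: dirty on the first check, clean after the run. */
static bool scrub_required(void) { static int calls; return calls++ == 0; }
static int run_scrubber(void) { return 0; }

static int run_blob(void)
{
	if (scrub_required()) {
		int ret = run_scrubber();

		if (ret)
			return ret;
		if (scrub_required())	/* scrubber ran but VPR is still dirty */
			return -EINVAL;
	}
	/* ...hand the HS blob to the boot falcon here... */
	return 0;
}

int main(void)
{
	printf("run_blob() = %d\n", run_blob());
	return 0;
}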
-MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin"); -MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c deleted file mode 100644 index 737a8d50a1f2..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2017 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "gm200.h" -#include "acr.h" - -int -gp108_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2, - BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_GPCCS) | - BIT(NVKM_SECBOOT_FALCON_SEC2)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - - if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) { - acr->func->dtor(acr); - return -ENOMEM; - } - *psb = &gsb->base; - - return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base); -} - -MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin"); - -MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c deleted file mode 100644 index 28ca29d0eeee..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr.h" -#include "gm200.h" - -#define TEGRA186_MC_BASE 0x02c10000 - -static int -gp10b_secboot_oneinit(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - int ret; - - ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA186_MC_BASE); - if (ret) - return ret; - - return gm200_secboot_oneinit(sb); -} - -static const struct nvkm_secboot_func -gp10b_secboot = { - .dtor = gm200_secboot_dtor, - .oneinit = gp10b_secboot_oneinit, - .fini = gm200_secboot_fini, - .run_blob = gm200_secboot_run_blob, -}; - -int -gp10b_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - int ret; - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_GPCCS) | - BIT(NVKM_SECBOOT_FALCON_PMU)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; - return -ENOMEM; - } - *psb = &gsb->base; - - ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base); - if (ret) - return ret; - - return 0; -} - -#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) -MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin"); -MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin"); -MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin"); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c deleted file mode 100644 index 6b33182ddc2f..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "hs_ucode.h" -#include "ls_ucode.h" -#include "acr.h" - -#include <engine/falcon.h> - -/** - * hs_ucode_patch_signature() - patch HS blob with correct signature for - * specified falcon. - */ -static void -hs_ucode_patch_signature(const struct nvkm_falcon *falcon, void *acr_image, - bool new_format) -{ - struct fw_bin_header *hsbin_hdr = acr_image; - struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset; - void *hs_data = acr_image + hsbin_hdr->data_offset; - void *sig; - u32 sig_size; - u32 patch_loc, patch_sig; - - /* - * I had the brilliant idea to "improve" the binary format by - * removing this useless indirection. However to make NVIDIA files - * directly compatible, let's support both format. - */ - if (new_format) { - patch_loc = fw_hdr->patch_loc; - patch_sig = fw_hdr->patch_sig; - } else { - patch_loc = *(u32 *)(acr_image + fw_hdr->patch_loc); - patch_sig = *(u32 *)(acr_image + fw_hdr->patch_sig); - } - - /* Falcon in debug or production mode? */ - if (falcon->debug) { - sig = acr_image + fw_hdr->sig_dbg_offset; - sig_size = fw_hdr->sig_dbg_size; - } else { - sig = acr_image + fw_hdr->sig_prod_offset; - sig_size = fw_hdr->sig_prod_size; - } - - /* Patch signature */ - memcpy(hs_data + patch_loc, sig + patch_sig, sig_size); -} - -void * -hs_ucode_load_blob(struct nvkm_subdev *subdev, const struct nvkm_falcon *falcon, - const char *fw) -{ - void *acr_image; - bool new_format; - - acr_image = nvkm_acr_load_firmware(subdev, fw, 0); - if (IS_ERR(acr_image)) - return acr_image; - - /* detect the format to define how signature should be patched */ - switch (((u32 *)acr_image)[0]) { - case 0x3b1d14f0: - new_format = true; - break; - case 0x000010de: - new_format = false; - break; - default: - nvkm_error(subdev, "unknown header for HS blob %s\n", fw); - return ERR_PTR(-EINVAL); - } - - hs_ucode_patch_signature(falcon, acr_image, new_format); - - return acr_image; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h deleted file mode 100644 index d8cfc6f7752a..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
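The hs_ucode.c helper removed above patches the HS blob signature in place: it selects the debug or production signature depending on the falcon, then copies sig_size bytes from sig + patch_sig into hs_data + patch_loc, with patch_loc/patch_sig either stored directly (new format) or read through one more indirection (old format). A reduced, standalone sketch of the final copy step over plain buffers; the struct below is hypothetical and only carries the fields the sketch needs.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sig_patch {		/* hypothetical subset of the removed hsf_fw_header */
	uint32_t patch_loc;	/* where in the HS data the signature is written */
	uint32_t patch_sig;	/* offset to add into the chosen signature blob */
	uint32_t sig_size;
};

static void patch_signature(uint8_t *hs_data, const uint8_t *sig,
			    const struct sig_patch *p)
{
	memcpy(hs_data + p->patch_loc, sig + p->patch_sig, p->sig_size);
}

int main(void)
{
	uint8_t hs_data[16] = { 0 };
	const uint8_t sig[8] = { 0xaa, 0xbb, 0xcc, 0xdd, 0x11, 0x22, 0x33, 0x44 };
	const struct sig_patch p = { .patch_loc = 4, .patch_sig = 4, .sig_size = 4 };

	patch_signature(hs_data, sig, &p);
	printf("%02x %02x %02x %02x\n",
	       hs_data[4], hs_data[5], hs_data[6], hs_data[7]); /* 11 22 33 44 */
	return 0;
}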
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_HS_UCODE_H__ -#define __NVKM_SECBOOT_HS_UCODE_H__ - -#include <core/os.h> -#include <core/subdev.h> - -struct nvkm_falcon; - -/** - * struct hsf_fw_header - HS firmware descriptor - * @sig_dbg_offset: offset of the debug signature - * @sig_dbg_size: size of the debug signature - * @sig_prod_offset: offset of the production signature - * @sig_prod_size: size of the production signature - * @patch_loc: offset of the offset (sic) of where the signature is - * @patch_sig: offset of the offset (sic) to add to sig_*_offset - * @hdr_offset: offset of the load header (see struct hs_load_header) - * @hdr_size: size of above header - * - * This structure is embedded in the HS firmware image at - * hs_bin_hdr.header_offset. - */ -struct hsf_fw_header { - u32 sig_dbg_offset; - u32 sig_dbg_size; - u32 sig_prod_offset; - u32 sig_prod_size; - u32 patch_loc; - u32 patch_sig; - u32 hdr_offset; - u32 hdr_size; -}; - -/** - * struct hsf_load_header - HS firmware load header - */ -struct hsf_load_header { - u32 non_sec_code_off; - u32 non_sec_code_size; - u32 data_dma_base; - u32 data_size; - u32 num_apps; - /* - * Organized as follows: - * - app0_code_off - * - app1_code_off - * - ... - * - appn_code_off - * - app0_code_size - * - app1_code_size - * - ... - */ - u32 apps[0]; -}; - -void *hs_ucode_load_blob(struct nvkm_subdev *, const struct nvkm_falcon *, - const char *); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h deleted file mode 100644 index d43f906da3a7..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
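The hsf_load_header kernel-doc above describes apps[] as all code offsets first, followed by all code sizes. A small standalone illustration of that indexing with made-up values; for a single-app image this reduces to apps[0] being the offset and apps[1] the size, as the removed scrubber loader assumes.

#include <stdint.h>
#include <stdio.h>

static uint32_t app_code_off(const uint32_t *apps, uint32_t num_apps, uint32_t i)
{
	(void)num_apps;		/* offsets come first */
	return apps[i];
}

static uint32_t app_code_size(const uint32_t *apps, uint32_t num_apps, uint32_t i)
{
	return apps[num_apps + i];	/* sizes follow all of the offsets */
}

int main(void)
{
	const uint32_t num_apps = 2;
	const uint32_t apps[] = { 0x100, 0x900,	  /* offsets of app0, app1 */
				  0x800, 0x200 }; /* sizes of app0, app1 */

	for (uint32_t i = 0; i < num_apps; i++)
		printf("app%u: off=%#x size=%#x\n", i,
		       app_code_off(apps, num_apps, i),
		       app_code_size(apps, num_apps, i));
	return 0;
}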
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_LS_UCODE_H__ -#define __NVKM_SECBOOT_LS_UCODE_H__ - -#include <core/os.h> -#include <core/subdev.h> -#include <subdev/secboot.h> - -struct nvkm_acr; - -/** - * struct ls_ucode_img_desc - descriptor of firmware image - * @descriptor_size: size of this descriptor - * @image_size: size of the whole image - * @bootloader_start_offset: start offset of the bootloader in ucode image - * @bootloader_size: size of the bootloader - * @bootloader_imem_offset: start off set of the bootloader in IMEM - * @bootloader_entry_point: entry point of the bootloader in IMEM - * @app_start_offset: start offset of the LS firmware - * @app_size: size of the LS firmware's code and data - * @app_imem_offset: offset of the app in IMEM - * @app_imem_entry: entry point of the app in IMEM - * @app_dmem_offset: offset of the data in DMEM - * @app_resident_code_offset: offset of app code from app_start_offset - * @app_resident_code_size: size of the code - * @app_resident_data_offset: offset of data from app_start_offset - * @app_resident_data_size: size of data - * - * A firmware image contains the code, data, and bootloader of a given LS - * falcon in a single blob. This structure describes where everything is. - * - * This can be generated from a (bootloader, code, data) set if they have - * been loaded separately, or come directly from a file. - */ -struct ls_ucode_img_desc { - u32 descriptor_size; - u32 image_size; - u32 tools_version; - u32 app_version; - char date[64]; - u32 bootloader_start_offset; - u32 bootloader_size; - u32 bootloader_imem_offset; - u32 bootloader_entry_point; - u32 app_start_offset; - u32 app_size; - u32 app_imem_offset; - u32 app_imem_entry; - u32 app_dmem_offset; - u32 app_resident_code_offset; - u32 app_resident_code_size; - u32 app_resident_data_offset; - u32 app_resident_data_size; - u32 nb_overlays; - struct {u32 start; u32 size; } load_ovl[64]; - u32 compressed; -}; - -/** - * struct ls_ucode_img - temporary storage for loaded LS firmwares - * @node: to link within lsf_ucode_mgr - * @falcon_id: ID of the falcon this LS firmware is for - * @ucode_desc: loaded or generated map of ucode_data - * @ucode_data: firmware payload (code and data) - * @ucode_size: size in bytes of data in ucode_data - * @ucode_off: offset of the ucode in ucode_data - * @sig: signature for this firmware - * @sig:size: size of the signature in bytes - * - * Preparing the WPR LS blob requires information about all the LS firmwares - * (size, etc) to be known. This structure contains all the data of one LS - * firmware. 
- */ -struct ls_ucode_img { - struct list_head node; - enum nvkm_secboot_falcon falcon_id; - - struct ls_ucode_img_desc ucode_desc; - u8 *ucode_data; - u32 ucode_size; - u32 ucode_off; - - u8 *sig; - u32 sig_size; -}; - -/** - * struct fw_bin_header - header of firmware files - * @bin_magic: always 0x3b1d14f0 - * @bin_ver: version of the bin format - * @bin_size: entire image size including this header - * @header_offset: offset of the firmware/bootloader header in the file - * @data_offset: offset of the firmware/bootloader payload in the file - * @data_size: size of the payload - * - * This header is located at the beginning of the HS firmware and HS bootloader - * files, to describe where the headers and data can be found. - */ -struct fw_bin_header { - u32 bin_magic; - u32 bin_ver; - u32 bin_size; - u32 header_offset; - u32 data_offset; - u32 data_size; -}; - -/** - * struct fw_bl_desc - firmware bootloader descriptor - * @start_tag: starting tag of bootloader - * @desc_dmem_load_off: DMEM offset of flcn_bl_dmem_desc - * @code_off: offset of code section - * @code_size: size of code section - * @data_off: offset of data section - * @data_size: size of data section - * - * This structure is embedded in bootloader firmware files at to describe the - * IMEM and DMEM layout expected by the bootloader. - */ -struct fw_bl_desc { - u32 start_tag; - u32 dmem_load_off; - u32 code_off; - u32 code_size; - u32 data_off; - u32 data_size; -}; - -int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int, - struct ls_ucode_img *); -int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int, - struct ls_ucode_img *); -int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int, - struct ls_ucode_img *); -int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); -int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int, - struct ls_ucode_img *); -int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c deleted file mode 100644 index 821d3b2bdb1f..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - - -#include "ls_ucode.h" -#include "acr.h" - -#include <core/firmware.h> - -#define BL_DESC_BLK_SIZE 256 -/** - * Build a ucode image and descriptor from provided bootloader, code and data. - * - * @bl: bootloader image, including 16-bytes descriptor - * @code: LS firmware code segment - * @data: LS firmware data segment - * @desc: ucode descriptor to be written - * - * Return: allocated ucode image with corresponding descriptor information. desc - * is also updated to contain the right offsets within returned image. - */ -static void * -ls_ucode_img_build(const struct firmware *bl, const struct firmware *code, - const struct firmware *data, struct ls_ucode_img_desc *desc) -{ - struct fw_bin_header *bin_hdr = (void *)bl->data; - struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset; - void *bl_data = (void *)bl->data + bin_hdr->data_offset; - u32 pos = 0; - void *image; - - desc->bootloader_start_offset = pos; - desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32)); - desc->bootloader_imem_offset = bl_desc->start_tag * 256; - desc->bootloader_entry_point = bl_desc->start_tag * 256; - - pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE); - desc->app_start_offset = pos; - desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) + - ALIGN(data->size, BL_DESC_BLK_SIZE); - desc->app_imem_offset = 0; - desc->app_imem_entry = 0; - desc->app_dmem_offset = 0; - desc->app_resident_code_offset = 0; - desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE); - - pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE); - desc->app_resident_data_offset = pos - desc->app_start_offset; - desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE); - - desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) + - desc->app_size; - - image = kzalloc(desc->image_size, GFP_KERNEL); - if (!image) - return ERR_PTR(-ENOMEM); - - memcpy(image + desc->bootloader_start_offset, bl_data, - bl_desc->code_size); - memcpy(image + desc->app_start_offset, code->data, code->size); - memcpy(image + desc->app_start_offset + desc->app_resident_data_offset, - data->data, data->size); - - return image; -} - -/** - * ls_ucode_img_load_gr() - load and prepare a LS GR ucode image - * - * Load the LS microcode, bootloader and signature and pack them into a single - * blob. Also generate the corresponding ucode descriptor. 
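ls_ucode_img_build() above lays the blob out as bootloader first, then the code and data segments, each padded to BL_DESC_BLK_SIZE (256-byte) boundaries, and records the resulting offsets in the descriptor. A standalone sketch of just that offset arithmetic, using made-up segment sizes.

#include <stdint.h>
#include <stdio.h>

#define BL_DESC_BLK_SIZE 256u
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Made-up segment sizes; only the padding arithmetic mirrors the
	 * removed builder. */
	uint32_t bl_size = 300, code_size = 1000, data_size = 500;

	uint32_t bl_off   = 0;
	uint32_t app_off  = ALIGN_UP(bl_off + ALIGN_UP(bl_size, 4u),
				     BL_DESC_BLK_SIZE);
	uint32_t code_sz  = ALIGN_UP(code_size, BL_DESC_BLK_SIZE);
	uint32_t data_off = code_sz;	/* data follows the padded code segment */
	uint32_t data_sz  = ALIGN_UP(data_size, BL_DESC_BLK_SIZE);
	uint32_t total    = app_off + code_sz + data_sz;

	printf("bl@%u app@%u data@app+%u total=%u\n",
	       bl_off, app_off, data_off, total);
	return 0;
}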
- */ -static int -ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver, - struct ls_ucode_img *img, const char *falcon_name) -{ - const struct firmware *bl, *code, *data, *sig; - char f[64]; - int ret; - - snprintf(f, sizeof(f), "gr/%s_bl", falcon_name); - ret = nvkm_firmware_get(subdev, f, &bl); - if (ret) - goto error; - - snprintf(f, sizeof(f), "gr/%s_inst", falcon_name); - ret = nvkm_firmware_get(subdev, f, &code); - if (ret) - goto free_bl; - - snprintf(f, sizeof(f), "gr/%s_data", falcon_name); - ret = nvkm_firmware_get(subdev, f, &data); - if (ret) - goto free_inst; - - snprintf(f, sizeof(f), "gr/%s_sig", falcon_name); - ret = nvkm_firmware_get(subdev, f, &sig); - if (ret) - goto free_data; - - img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); - if (!img->sig) { - ret = -ENOMEM; - goto free_sig; - } - img->sig_size = sig->size; - - img->ucode_data = ls_ucode_img_build(bl, code, data, - &img->ucode_desc); - if (IS_ERR(img->ucode_data)) { - kfree(img->sig); - ret = PTR_ERR(img->ucode_data); - goto free_sig; - } - img->ucode_size = img->ucode_desc.image_size; - -free_sig: - nvkm_firmware_put(sig); -free_data: - nvkm_firmware_put(data); -free_inst: - nvkm_firmware_put(code); -free_bl: - nvkm_firmware_put(bl); -error: - return ret; -} - -int -acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver, - struct ls_ucode_img *img) -{ - return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs"); -} - -int -acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver, - struct ls_ucode_img *img) -{ - return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs"); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c deleted file mode 100644 index a84a999445bb..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - - -#include "ls_ucode.h" -#include "acr.h" - -#include <core/firmware.h> -#include <core/msgqueue.h> -#include <subdev/pmu.h> -#include <engine/sec2.h> -#include <subdev/mc.h> -#include <subdev/timer.h> - -/** - * acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw - * - * Load the LS microcode, desc and signature and pack them into a single - * blob. 
- */ -static int -acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name, - int maxver, struct ls_ucode_img *img) -{ - const struct firmware *image, *desc, *sig; - char f[64]; - int ver, ret; - - snprintf(f, sizeof(f), "%s/image", name); - ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image); - if (ver < 0) - return ver; - img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL); - nvkm_firmware_put(image); - if (!img->ucode_data) - return -ENOMEM; - - snprintf(f, sizeof(f), "%s/desc", name); - ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc); - if (ret < 0) - return ret; - memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc)); - img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256); - nvkm_firmware_put(desc); - - snprintf(f, sizeof(f), "%s/sig", name); - ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig); - if (ret < 0) - return ret; - img->sig_size = sig->size; - img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); - nvkm_firmware_put(sig); - if (!img->sig) - return -ENOMEM; - - return ver; -} - -static int -acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue, - struct nvkm_falcon *falcon, u32 addr_args) -{ - struct nvkm_device *device = falcon->owner->device; - u8 buf[NVKM_MSGQUEUE_CMDLINE_SIZE]; - - memset(buf, 0, sizeof(buf)); - nvkm_msgqueue_write_cmdline(queue, buf); - nvkm_falcon_load_dmem(falcon, buf, addr_args, sizeof(buf), 0); - /* rearm the queue so it will wait for the init message */ - nvkm_msgqueue_reinit(queue); - - /* Enable interrupts */ - nvkm_falcon_wr32(falcon, 0x10, 0xff); - nvkm_mc_intr_mask(device, falcon->owner->index, true); - - /* Start LS firmware on boot falcon */ - nvkm_falcon_start(falcon); - - return 0; -} - -int -acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver, - struct ls_ucode_img *img) -{ - struct nvkm_pmu *pmu = sb->subdev.device->pmu; - int ret; - - ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img); - if (ret) - return ret; - - /* Allocate the PMU queue corresponding to the FW version */ - ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon, - sb, &pmu->queue); - if (ret) - return ret; - - return 0; -} - -int -acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb) -{ - struct nvkm_device *device = sb->subdev.device; - struct nvkm_pmu *pmu = device->pmu; - u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE; - int ret; - - ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args); - if (ret) - return ret; - - nvkm_debug(&sb->subdev, "%s started\n", - nvkm_secboot_falcon_name[acr->boot_falcon]); - - return 0; -} - -int -acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver, - struct ls_ucode_img *img) -{ - struct nvkm_sec2 *sec = sb->subdev.device->sec2; - int ver, ret; - - ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img); - if (ver < 0) - return ver; - - /* Allocate the PMU queue corresponding to the FW version */ - ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon, - sb, &sec->queue); - if (ret) - return ret; - - return ver; -} - -int -acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb) -{ - const struct nvkm_subdev *subdev = &sb->subdev; - struct nvkm_device *device = subdev->device; - struct nvkm_sec2 *sec = device->sec2; - /* on SEC arguments are always at the beginning of EMEM */ - const u32 addr_args = 0x01000000; - int ret; - - ret = acr_ls_msgqueue_post_run(sec->queue, 
sec->falcon, addr_args); - if (ret) - return ret; - - nvkm_debug(&sb->subdev, "%s started\n", - nvkm_secboot_falcon_name[acr->boot_falcon]); - - return 0; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h deleted file mode 100644 index 959a7b2dbdc9..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_PRIV_H__ -#define __NVKM_SECBOOT_PRIV_H__ - -#include <subdev/secboot.h> -#include <subdev/mmu.h> -struct nvkm_gpuobj; - -struct nvkm_secboot_func { - int (*oneinit)(struct nvkm_secboot *); - int (*fini)(struct nvkm_secboot *, bool suspend); - void *(*dtor)(struct nvkm_secboot *); - int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *, - struct nvkm_falcon *); -}; - -int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *, - struct nvkm_device *, int, struct nvkm_secboot *); -int nvkm_secboot_falcon_reset(struct nvkm_secboot *); -int nvkm_secboot_falcon_run(struct nvkm_secboot *); - -extern const struct nvkm_secboot_func gp102_secboot; - -struct flcn_u64 { - u32 lo; - u32 hi; -}; - -static inline u64 flcn64_to_u64(const struct flcn_u64 f) -{ - return ((u64)f.hi) << 32 | f.lo; -} - -static inline struct flcn_u64 u64_to_flcn64(u64 u) -{ - struct flcn_u64 ret; - - ret.hi = upper_32_bits(u); - ret.lo = lower_32_bits(u); - - return ret; -} - -#endif diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 714af052fbef..7c70fd31a4c2 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1727,6 +1727,7 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc, { struct tegra_dc *dc = to_tegra_dc(crtc); u32 value; + int err; if (!tegra_dc_idle(dc)) { tegra_dc_stop(dc); @@ -1773,7 +1774,9 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc, spin_unlock_irq(&crtc->dev->event_lock); - pm_runtime_put_sync(dc->dev); + err = host1x_client_suspend(&dc->client); + if (err < 0) + dev_err(dc->dev, "failed to suspend: %d\n", err); } static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, @@ -1783,8 +1786,13 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, struct tegra_dc_state *state = to_dc_state(crtc->state); struct tegra_dc *dc = to_tegra_dc(crtc); u32 value; + int err; - pm_runtime_get_sync(dc->dev); + err = 
host1x_client_resume(&dc->client); + if (err < 0) { + dev_err(dc->dev, "failed to resume: %d\n", err); + return; + } /* initialize display controller */ if (dc->syncpt) { @@ -1996,7 +2004,7 @@ static bool tegra_dc_has_window_groups(struct tegra_dc *dc) static int tegra_dc_init(struct host1x_client *client) { - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED; struct tegra_dc *dc = host1x_client_to_dc(client); struct tegra_drm *tegra = drm->dev_private; @@ -2012,6 +2020,15 @@ static int tegra_dc_init(struct host1x_client *client) if (!tegra_dc_has_window_groups(dc)) return 0; + /* + * Set the display hub as the host1x client parent for the display + * controller. This is needed for the runtime reference counting that + * ensures the display hub is always powered when any of the display + * controllers are. + */ + if (dc->soc->has_nvdisplay) + client->parent = &tegra->hub->client; + dc->syncpt = host1x_syncpt_request(client, flags); if (!dc->syncpt) dev_warn(dc->dev, "failed to allocate syncpoint\n"); @@ -2077,9 +2094,9 @@ static int tegra_dc_init(struct host1x_client *client) /* * Inherit the DMA parameters (such as maximum segment size) from the - * parent device. + * parent host1x device. */ - client->dev->dma_parms = client->parent->dma_parms; + client->dev->dma_parms = client->host->dma_parms; return 0; @@ -2121,9 +2138,74 @@ static int tegra_dc_exit(struct host1x_client *client) return 0; } +static int tegra_dc_runtime_suspend(struct host1x_client *client) +{ + struct tegra_dc *dc = host1x_client_to_dc(client); + struct device *dev = client->dev; + int err; + + err = reset_control_assert(dc->rst); + if (err < 0) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + + if (dc->soc->has_powergate) + tegra_powergate_power_off(dc->powergate); + + clk_disable_unprepare(dc->clk); + pm_runtime_put_sync(dev); + + return 0; +} + +static int tegra_dc_runtime_resume(struct host1x_client *client) +{ + struct tegra_dc *dc = host1x_client_to_dc(client); + struct device *dev = client->dev; + int err; + + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get runtime PM: %d\n", err); + return err; + } + + if (dc->soc->has_powergate) { + err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk, + dc->rst); + if (err < 0) { + dev_err(dev, "failed to power partition: %d\n", err); + goto put_rpm; + } + } else { + err = clk_prepare_enable(dc->clk); + if (err < 0) { + dev_err(dev, "failed to enable clock: %d\n", err); + goto put_rpm; + } + + err = reset_control_deassert(dc->rst); + if (err < 0) { + dev_err(dev, "failed to deassert reset: %d\n", err); + goto disable_clk; + } + } + + return 0; + +disable_clk: + clk_disable_unprepare(dc->clk); +put_rpm: + pm_runtime_put_sync(dev); + return err; +} + static const struct host1x_client_ops dc_client_ops = { .init = tegra_dc_init, .exit = tegra_dc_exit, + .suspend = tegra_dc_runtime_suspend, + .resume = tegra_dc_runtime_resume, }; static const struct tegra_dc_soc_info tegra20_dc_soc_info = { @@ -2535,65 +2617,10 @@ static int tegra_dc_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int tegra_dc_suspend(struct device *dev) -{ - struct tegra_dc *dc = dev_get_drvdata(dev); - int err; - - err = reset_control_assert(dc->rst); - if (err < 0) { - dev_err(dev, "failed to assert reset: %d\n", err); - return err; - } - - if (dc->soc->has_powergate) - 
tegra_powergate_power_off(dc->powergate); - - clk_disable_unprepare(dc->clk); - - return 0; -} - -static int tegra_dc_resume(struct device *dev) -{ - struct tegra_dc *dc = dev_get_drvdata(dev); - int err; - - if (dc->soc->has_powergate) { - err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk, - dc->rst); - if (err < 0) { - dev_err(dev, "failed to power partition: %d\n", err); - return err; - } - } else { - err = clk_prepare_enable(dc->clk); - if (err < 0) { - dev_err(dev, "failed to enable clock: %d\n", err); - return err; - } - - err = reset_control_deassert(dc->rst); - if (err < 0) { - dev_err(dev, "failed to deassert reset: %d\n", err); - return err; - } - } - - return 0; -} -#endif - -static const struct dev_pm_ops tegra_dc_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL) -}; - struct platform_driver tegra_dc_driver = { .driver = { .name = "tegra-dc", .of_match_table = tegra_dc_of_match, - .pm = &tegra_dc_pm_ops, }, .probe = tegra_dc_probe, .remove = tegra_dc_remove, diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index 622cdf1ad246..7dfb50f65067 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -588,7 +588,7 @@ static int tegra_dpaux_remove(struct platform_device *pdev) /* make sure pads are powered down when not in use */ tegra_dpaux_pad_power_down(dpaux); - pm_runtime_put(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); drm_dp_aux_unregister(&dpaux->aux); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index f455ce71e85d..aa9e49f04988 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -905,7 +905,7 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra, int host1x_client_iommu_attach(struct host1x_client *client) { struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev); - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); struct tegra_drm *tegra = drm->dev_private; struct iommu_group *group = NULL; int err; @@ -941,7 +941,7 @@ int host1x_client_iommu_attach(struct host1x_client *client) void host1x_client_iommu_detach(struct host1x_client *client) { - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); struct tegra_drm *tegra = drm->dev_private; struct iommu_domain *domain; diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index d941553f7a3d..ed99b67deb29 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -144,6 +144,8 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output); void tegra_output_exit(struct tegra_output *output); void tegra_output_find_possible_crtcs(struct tegra_output *output, struct drm_device *drm); +int tegra_output_suspend(struct tegra_output *output); +int tegra_output_resume(struct tegra_output *output); int tegra_output_connector_get_modes(struct drm_connector *connector); enum drm_connector_status diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index a5d47e301c5f..88b9d64c77bf 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -840,7 +840,9 @@ static void tegra_dsi_unprepare(struct tegra_dsi *dsi) dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n", err); - pm_runtime_put(dsi->dev); + err = host1x_client_suspend(&dsi->client); + if (err < 0) + dev_err(dsi->dev, "failed to suspend: %d\n", err); } static void 
tegra_dsi_encoder_disable(struct drm_encoder *encoder) @@ -882,11 +884,15 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) tegra_dsi_unprepare(dsi); } -static void tegra_dsi_prepare(struct tegra_dsi *dsi) +static int tegra_dsi_prepare(struct tegra_dsi *dsi) { int err; - pm_runtime_get_sync(dsi->dev); + err = host1x_client_resume(&dsi->client); + if (err < 0) { + dev_err(dsi->dev, "failed to resume: %d\n", err); + return err; + } err = tegra_mipi_enable(dsi->mipi); if (err < 0) @@ -899,6 +905,8 @@ static void tegra_dsi_prepare(struct tegra_dsi *dsi) if (dsi->slave) tegra_dsi_prepare(dsi->slave); + + return 0; } static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) @@ -909,8 +917,13 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) struct tegra_dsi *dsi = to_dsi(output); struct tegra_dsi_state *state; u32 value; + int err; - tegra_dsi_prepare(dsi); + err = tegra_dsi_prepare(dsi); + if (err < 0) { + dev_err(dsi->dev, "failed to prepare: %d\n", err); + return; + } state = tegra_dsi_get_state(dsi); @@ -1030,7 +1043,7 @@ static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = { static int tegra_dsi_init(struct host1x_client *client) { - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); struct tegra_dsi *dsi = host1x_client_to_dsi(client); int err; @@ -1075,9 +1088,89 @@ static int tegra_dsi_exit(struct host1x_client *client) return 0; } +static int tegra_dsi_runtime_suspend(struct host1x_client *client) +{ + struct tegra_dsi *dsi = host1x_client_to_dsi(client); + struct device *dev = client->dev; + int err; + + if (dsi->rst) { + err = reset_control_assert(dsi->rst); + if (err < 0) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + } + + usleep_range(1000, 2000); + + clk_disable_unprepare(dsi->clk_lp); + clk_disable_unprepare(dsi->clk); + + regulator_disable(dsi->vdd); + pm_runtime_put_sync(dev); + + return 0; +} + +static int tegra_dsi_runtime_resume(struct host1x_client *client) +{ + struct tegra_dsi *dsi = host1x_client_to_dsi(client); + struct device *dev = client->dev; + int err; + + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get runtime PM: %d\n", err); + return err; + } + + err = regulator_enable(dsi->vdd); + if (err < 0) { + dev_err(dev, "failed to enable VDD supply: %d\n", err); + goto put_rpm; + } + + err = clk_prepare_enable(dsi->clk); + if (err < 0) { + dev_err(dev, "cannot enable DSI clock: %d\n", err); + goto disable_vdd; + } + + err = clk_prepare_enable(dsi->clk_lp); + if (err < 0) { + dev_err(dev, "cannot enable low-power clock: %d\n", err); + goto disable_clk; + } + + usleep_range(1000, 2000); + + if (dsi->rst) { + err = reset_control_deassert(dsi->rst); + if (err < 0) { + dev_err(dev, "cannot assert reset: %d\n", err); + goto disable_clk_lp; + } + } + + return 0; + +disable_clk_lp: + clk_disable_unprepare(dsi->clk_lp); +disable_clk: + clk_disable_unprepare(dsi->clk); +disable_vdd: + regulator_disable(dsi->vdd); +put_rpm: + pm_runtime_put_sync(dev); + return err; +} + static const struct host1x_client_ops dsi_client_ops = { .init = tegra_dsi_init, .exit = tegra_dsi_exit, + .suspend = tegra_dsi_runtime_suspend, + .resume = tegra_dsi_runtime_resume, }; static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi) @@ -1596,79 +1689,6 @@ static int tegra_dsi_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int tegra_dsi_suspend(struct device *dev) -{ - struct tegra_dsi 
*dsi = dev_get_drvdata(dev); - int err; - - if (dsi->rst) { - err = reset_control_assert(dsi->rst); - if (err < 0) { - dev_err(dev, "failed to assert reset: %d\n", err); - return err; - } - } - - usleep_range(1000, 2000); - - clk_disable_unprepare(dsi->clk_lp); - clk_disable_unprepare(dsi->clk); - - regulator_disable(dsi->vdd); - - return 0; -} - -static int tegra_dsi_resume(struct device *dev) -{ - struct tegra_dsi *dsi = dev_get_drvdata(dev); - int err; - - err = regulator_enable(dsi->vdd); - if (err < 0) { - dev_err(dsi->dev, "failed to enable VDD supply: %d\n", err); - return err; - } - - err = clk_prepare_enable(dsi->clk); - if (err < 0) { - dev_err(dev, "cannot enable DSI clock: %d\n", err); - goto disable_vdd; - } - - err = clk_prepare_enable(dsi->clk_lp); - if (err < 0) { - dev_err(dev, "cannot enable low-power clock: %d\n", err); - goto disable_clk; - } - - usleep_range(1000, 2000); - - if (dsi->rst) { - err = reset_control_deassert(dsi->rst); - if (err < 0) { - dev_err(dev, "cannot assert reset: %d\n", err); - goto disable_clk_lp; - } - } - - return 0; - -disable_clk_lp: - clk_disable_unprepare(dsi->clk_lp); -disable_clk: - clk_disable_unprepare(dsi->clk); -disable_vdd: - regulator_disable(dsi->vdd); - return err; -} -#endif - -static const struct dev_pm_ops tegra_dsi_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL) -}; - static const struct of_device_id tegra_dsi_of_match[] = { { .compatible = "nvidia,tegra210-dsi", }, { .compatible = "nvidia,tegra132-dsi", }, @@ -1682,7 +1702,6 @@ struct platform_driver tegra_dsi_driver = { .driver = { .name = "tegra-dsi", .of_match_table = tegra_dsi_of_match, - .pm = &tegra_dsi_pm_ops, }, .probe = tegra_dsi_probe, .remove = tegra_dsi_remove, diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c index 1fc4e56c7cc5..48363f744bb9 100644 --- a/drivers/gpu/drm/tegra/gr2d.c +++ b/drivers/gpu/drm/tegra/gr2d.c @@ -34,7 +34,7 @@ static inline struct gr2d *to_gr2d(struct tegra_drm_client *client) static int gr2d_init(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct drm_device *dev = dev_get_drvdata(client->parent); + struct drm_device *dev = dev_get_drvdata(client->host); unsigned long flags = HOST1X_SYNCPT_HAS_BASE; struct gr2d *gr2d = to_gr2d(drm); int err; @@ -76,7 +76,7 @@ put: static int gr2d_exit(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct drm_device *dev = dev_get_drvdata(client->parent); + struct drm_device *dev = dev_get_drvdata(client->host); struct tegra_drm *tegra = dev->dev_private; struct gr2d *gr2d = to_gr2d(drm); int err; diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 24fae0f64032..c0a528be0369 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -43,7 +43,7 @@ static inline struct gr3d *to_gr3d(struct tegra_drm_client *client) static int gr3d_init(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct drm_device *dev = dev_get_drvdata(client->parent); + struct drm_device *dev = dev_get_drvdata(client->host); unsigned long flags = HOST1X_SYNCPT_HAS_BASE; struct gr3d *gr3d = to_gr3d(drm); int err; @@ -85,7 +85,7 @@ put: static int gr3d_exit(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct drm_device *dev = dev_get_drvdata(client->parent); + struct drm_device *dev = dev_get_drvdata(client->host); struct gr3d *gr3d = to_gr3d(drm); 
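The runtime-resume callbacks added for the DC, DSI and HDMI host1x clients above all share the same unwind shape: take a runtime PM reference, enable resources in order, and on any failure release what was already enabled through a chain of goto labels in reverse order. A reduced, standalone sketch of that pattern; the enable/disable stubs are hypothetical stand-ins for the regulator, clock and reset calls.

#include <stdio.h>

static int enable_supply(void)   { return 0; }
static int enable_clock(void)    { return 0; }
static int deassert_reset(void)  { return -1; }	/* force the unwind path */
static void disable_clock(void)  { puts("clock disabled"); }
static void disable_supply(void) { puts("supply disabled"); }

static int runtime_resume(void)
{
	int err;

	err = enable_supply();
	if (err < 0)
		return err;

	err = enable_clock();
	if (err < 0)
		goto err_supply;

	err = deassert_reset();
	if (err < 0)
		goto err_clock;

	return 0;

err_clock:
	disable_clock();
err_supply:
	disable_supply();
	return err;
}

int main(void)
{
	printf("runtime_resume() = %d\n", runtime_resume());
	return 0;
}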
int err; diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 21a629adcb51..6f117628f257 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -1146,6 +1146,7 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) struct tegra_dc *dc = to_tegra_dc(encoder->crtc); struct tegra_hdmi *hdmi = to_hdmi(output); u32 value; + int err; /* * The following accesses registers of the display controller, so make @@ -1171,7 +1172,9 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE); tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK); - pm_runtime_put(hdmi->dev); + err = host1x_client_suspend(&hdmi->client); + if (err < 0) + dev_err(hdmi->dev, "failed to suspend: %d\n", err); } static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) @@ -1186,7 +1189,11 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) u32 value; int err; - pm_runtime_get_sync(hdmi->dev); + err = host1x_client_resume(&hdmi->client); + if (err < 0) { + dev_err(hdmi->dev, "failed to resume: %d\n", err); + return; + } /* * Enable and unmask the HDA codec SCRATCH0 register interrupt. This @@ -1424,8 +1431,8 @@ static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = { static int tegra_hdmi_init(struct host1x_client *client) { - struct drm_device *drm = dev_get_drvdata(client->parent); struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); + struct drm_device *drm = dev_get_drvdata(client->host); int err; hdmi->output.dev = client->dev; @@ -1490,9 +1497,66 @@ static int tegra_hdmi_exit(struct host1x_client *client) return 0; } +static int tegra_hdmi_runtime_suspend(struct host1x_client *client) +{ + struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); + struct device *dev = client->dev; + int err; + + err = reset_control_assert(hdmi->rst); + if (err < 0) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + + usleep_range(1000, 2000); + + clk_disable_unprepare(hdmi->clk); + pm_runtime_put_sync(dev); + + return 0; +} + +static int tegra_hdmi_runtime_resume(struct host1x_client *client) +{ + struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); + struct device *dev = client->dev; + int err; + + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get runtime PM: %d\n", err); + return err; + } + + err = clk_prepare_enable(hdmi->clk); + if (err < 0) { + dev_err(dev, "failed to enable clock: %d\n", err); + goto put_rpm; + } + + usleep_range(1000, 2000); + + err = reset_control_deassert(hdmi->rst); + if (err < 0) { + dev_err(dev, "failed to deassert reset: %d\n", err); + goto disable_clk; + } + + return 0; + +disable_clk: + clk_disable_unprepare(hdmi->clk); +put_rpm: + pm_runtime_put_sync(dev); + return err; +} + static const struct host1x_client_ops hdmi_client_ops = { .init = tegra_hdmi_init, .exit = tegra_hdmi_exit, + .suspend = tegra_hdmi_runtime_suspend, + .resume = tegra_hdmi_runtime_resume, }; static const struct tegra_hdmi_config tegra20_hdmi_config = { @@ -1700,58 +1764,10 @@ static int tegra_hdmi_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int tegra_hdmi_suspend(struct device *dev) -{ - struct tegra_hdmi *hdmi = dev_get_drvdata(dev); - int err; - - err = reset_control_assert(hdmi->rst); - if (err < 0) { - dev_err(dev, "failed to assert reset: %d\n", err); - return err; - } - - usleep_range(1000, 2000); - - clk_disable_unprepare(hdmi->clk); - - return 0; -} - -static 
int tegra_hdmi_resume(struct device *dev) -{ - struct tegra_hdmi *hdmi = dev_get_drvdata(dev); - int err; - - err = clk_prepare_enable(hdmi->clk); - if (err < 0) { - dev_err(dev, "failed to enable clock: %d\n", err); - return err; - } - - usleep_range(1000, 2000); - - err = reset_control_deassert(hdmi->rst); - if (err < 0) { - dev_err(dev, "failed to deassert reset: %d\n", err); - clk_disable_unprepare(hdmi->clk); - return err; - } - - return 0; -} -#endif - -static const struct dev_pm_ops tegra_hdmi_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL) -}; - struct platform_driver tegra_hdmi_driver = { .driver = { .name = "tegra-hdmi", .of_match_table = tegra_hdmi_of_match, - .pm = &tegra_hdmi_pm_ops, }, .probe = tegra_hdmi_probe, .remove = tegra_hdmi_remove, diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 47d985ac7cd7..8183e617bf6b 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -95,17 +95,25 @@ static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value, static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp) { + int err = 0; + mutex_lock(&wgrp->lock); if (wgrp->usecount == 0) { - pm_runtime_get_sync(wgrp->parent); + err = host1x_client_resume(wgrp->parent); + if (err < 0) { + dev_err(wgrp->parent->dev, "failed to resume: %d\n", err); + goto unlock; + } + reset_control_deassert(wgrp->rst); } wgrp->usecount++; - mutex_unlock(&wgrp->lock); - return 0; +unlock: + mutex_unlock(&wgrp->lock); + return err; } static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp) @@ -121,7 +129,7 @@ static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp) wgrp->index); } - pm_runtime_put(wgrp->parent); + host1x_client_suspend(wgrp->parent); } wgrp->usecount--; @@ -379,6 +387,7 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, struct tegra_plane *p = to_tegra_plane(plane); struct tegra_dc *dc; u32 value; + int err; /* rien ne va plus */ if (!old_state || !old_state->crtc) @@ -386,6 +395,12 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, dc = to_tegra_dc(old_state->crtc); + err = host1x_client_resume(&dc->client); + if (err < 0) { + dev_err(dc->dev, "failed to resume: %d\n", err); + return; + } + /* * XXX Legacy helpers seem to sometimes call ->atomic_disable() even * on planes that are already disabled. 
Make sure we fallback to the @@ -394,15 +409,13 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, if (WARN_ON(p->dc == NULL)) p->dc = dc; - pm_runtime_get_sync(dc->dev); - value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS); value &= ~WIN_ENABLE; tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS); tegra_dc_remove_shared_plane(dc, p); - pm_runtime_put(dc->dev); + host1x_client_suspend(&dc->client); } static void tegra_shared_plane_atomic_update(struct drm_plane *plane, @@ -415,6 +428,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, struct tegra_plane *p = to_tegra_plane(plane); dma_addr_t base; u32 value; + int err; /* rien ne va plus */ if (!plane->state->crtc || !plane->state->fb) @@ -425,7 +439,11 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, return; } - pm_runtime_get_sync(dc->dev); + err = host1x_client_resume(&dc->client); + if (err < 0) { + dev_err(dc->dev, "failed to resume: %d\n", err); + return; + } tegra_dc_assign_shared_plane(dc, p); @@ -515,7 +533,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, value &= ~CONTROL_CSC_ENABLE; tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL); - pm_runtime_put(dc->dev); + host1x_client_suspend(&dc->client); } static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = { @@ -551,7 +569,7 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm, plane->base.index = index; plane->wgrp = &hub->wgrps[wgrp]; - plane->wgrp->parent = dc->dev; + plane->wgrp->parent = &dc->client; p = &plane->base.base; @@ -656,8 +674,13 @@ int tegra_display_hub_atomic_check(struct drm_device *drm, static void tegra_display_hub_update(struct tegra_dc *dc) { u32 value; + int err; - pm_runtime_get_sync(dc->dev); + err = host1x_client_resume(&dc->client); + if (err < 0) { + dev_err(dc->dev, "failed to resume: %d\n", err); + return; + } value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL); value &= ~LATENCY_EVENT; @@ -672,7 +695,7 @@ static void tegra_display_hub_update(struct tegra_dc *dc) tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL); tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); - pm_runtime_put(dc->dev); + host1x_client_suspend(&dc->client); } void tegra_display_hub_atomic_commit(struct drm_device *drm, @@ -705,7 +728,7 @@ void tegra_display_hub_atomic_commit(struct drm_device *drm, static int tegra_display_hub_init(struct host1x_client *client) { struct tegra_display_hub *hub = to_tegra_display_hub(client); - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); struct tegra_drm *tegra = drm->dev_private; struct tegra_display_hub_state *state; @@ -723,7 +746,7 @@ static int tegra_display_hub_init(struct host1x_client *client) static int tegra_display_hub_exit(struct host1x_client *client) { - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); struct tegra_drm *tegra = drm->dev_private; drm_atomic_private_obj_fini(&tegra->hub->base); @@ -732,9 +755,85 @@ static int tegra_display_hub_exit(struct host1x_client *client) return 0; } +static int tegra_display_hub_runtime_suspend(struct host1x_client *client) +{ + struct tegra_display_hub *hub = to_tegra_display_hub(client); + struct device *dev = client->dev; + unsigned int i = hub->num_heads; + int err; + + err = reset_control_assert(hub->rst); + if (err < 0) + return err; + + while (i--) + clk_disable_unprepare(hub->clk_heads[i]); + + 
clk_disable_unprepare(hub->clk_hub); + clk_disable_unprepare(hub->clk_dsc); + clk_disable_unprepare(hub->clk_disp); + + pm_runtime_put_sync(dev); + + return 0; +} + +static int tegra_display_hub_runtime_resume(struct host1x_client *client) +{ + struct tegra_display_hub *hub = to_tegra_display_hub(client); + struct device *dev = client->dev; + unsigned int i; + int err; + + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get runtime PM: %d\n", err); + return err; + } + + err = clk_prepare_enable(hub->clk_disp); + if (err < 0) + goto put_rpm; + + err = clk_prepare_enable(hub->clk_dsc); + if (err < 0) + goto disable_disp; + + err = clk_prepare_enable(hub->clk_hub); + if (err < 0) + goto disable_dsc; + + for (i = 0; i < hub->num_heads; i++) { + err = clk_prepare_enable(hub->clk_heads[i]); + if (err < 0) + goto disable_heads; + } + + err = reset_control_deassert(hub->rst); + if (err < 0) + goto disable_heads; + + return 0; + +disable_heads: + while (i--) + clk_disable_unprepare(hub->clk_heads[i]); + + clk_disable_unprepare(hub->clk_hub); +disable_dsc: + clk_disable_unprepare(hub->clk_dsc); +disable_disp: + clk_disable_unprepare(hub->clk_disp); +put_rpm: + pm_runtime_put_sync(dev); + return err; +} + static const struct host1x_client_ops tegra_display_hub_ops = { .init = tegra_display_hub_init, .exit = tegra_display_hub_exit, + .suspend = tegra_display_hub_runtime_suspend, + .resume = tegra_display_hub_runtime_resume, }; static int tegra_display_hub_probe(struct platform_device *pdev) @@ -851,6 +950,7 @@ static int tegra_display_hub_probe(struct platform_device *pdev) static int tegra_display_hub_remove(struct platform_device *pdev) { struct tegra_display_hub *hub = platform_get_drvdata(pdev); + unsigned int i; int err; err = host1x_client_unregister(&hub->client); @@ -859,78 +959,17 @@ static int tegra_display_hub_remove(struct platform_device *pdev) err); } - pm_runtime_disable(&pdev->dev); - - return err; -} - -static int __maybe_unused tegra_display_hub_suspend(struct device *dev) -{ - struct tegra_display_hub *hub = dev_get_drvdata(dev); - unsigned int i = hub->num_heads; - int err; - - err = reset_control_assert(hub->rst); - if (err < 0) - return err; - - while (i--) - clk_disable_unprepare(hub->clk_heads[i]); - - clk_disable_unprepare(hub->clk_hub); - clk_disable_unprepare(hub->clk_dsc); - clk_disable_unprepare(hub->clk_disp); - - return 0; -} - -static int __maybe_unused tegra_display_hub_resume(struct device *dev) -{ - struct tegra_display_hub *hub = dev_get_drvdata(dev); - unsigned int i; - int err; - - err = clk_prepare_enable(hub->clk_disp); - if (err < 0) - return err; - - err = clk_prepare_enable(hub->clk_dsc); - if (err < 0) - goto disable_disp; - - err = clk_prepare_enable(hub->clk_hub); - if (err < 0) - goto disable_dsc; + for (i = 0; i < hub->soc->num_wgrps; i++) { + struct tegra_windowgroup *wgrp = &hub->wgrps[i]; - for (i = 0; i < hub->num_heads; i++) { - err = clk_prepare_enable(hub->clk_heads[i]); - if (err < 0) - goto disable_heads; + mutex_destroy(&wgrp->lock); } - err = reset_control_deassert(hub->rst); - if (err < 0) - goto disable_heads; - - return 0; - -disable_heads: - while (i--) - clk_disable_unprepare(hub->clk_heads[i]); + pm_runtime_disable(&pdev->dev); - clk_disable_unprepare(hub->clk_hub); -disable_dsc: - clk_disable_unprepare(hub->clk_dsc); -disable_disp: - clk_disable_unprepare(hub->clk_disp); return err; } -static const struct dev_pm_ops tegra_display_hub_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_display_hub_suspend, - 
tegra_display_hub_resume, NULL) -}; - static const struct tegra_display_hub_soc tegra186_display_hub = { .num_wgrps = 6, .supports_dsc = true, @@ -958,7 +997,6 @@ struct platform_driver tegra_display_hub_driver = { .driver = { .name = "tegra-display-hub", .of_match_table = tegra_display_hub_of_match, - .pm = &tegra_display_hub_pm_ops, }, .probe = tegra_display_hub_probe, .remove = tegra_display_hub_remove, diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h index 767a60d9313c..3efa1be07ff8 100644 --- a/drivers/gpu/drm/tegra/hub.h +++ b/drivers/gpu/drm/tegra/hub.h @@ -17,7 +17,7 @@ struct tegra_windowgroup { struct mutex lock; unsigned int index; - struct device *parent; + struct host1x_client *parent; struct reset_control *rst; }; diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 80ddde4adbae..a264259b97a2 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -250,3 +250,19 @@ void tegra_output_find_possible_crtcs(struct tegra_output *output, output->encoder.possible_crtcs = mask; } + +int tegra_output_suspend(struct tegra_output *output) +{ + if (output->hpd_irq) + disable_irq(output->hpd_irq); + + return 0; +} + +int tegra_output_resume(struct tegra_output *output) +{ + if (output->hpd_irq) + enable_irq(output->hpd_irq); + + return 0; +} diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 1b8087d2dafe..41d24949478e 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -2255,7 +2255,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder) if (err < 0) dev_err(sor->dev, "failed to power off I/O pad: %d\n", err); - pm_runtime_put(sor->dev); + host1x_client_suspend(&sor->client); } static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) @@ -2276,7 +2276,11 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) mode = &encoder->crtc->state->adjusted_mode; pclk = mode->clock * 1000; - pm_runtime_get_sync(sor->dev); + err = host1x_client_resume(&sor->client); + if (err < 0) { + dev_err(sor->dev, "failed to resume: %d\n", err); + return; + } /* switch to safe parent clock */ err = tegra_sor_set_parent_clock(sor, sor->clk_safe); @@ -2722,7 +2726,7 @@ static void tegra_sor_dp_disable(struct drm_encoder *encoder) if (output->panel) drm_panel_unprepare(output->panel); - pm_runtime_put(sor->dev); + host1x_client_suspend(&sor->client); } static void tegra_sor_dp_enable(struct drm_encoder *encoder) @@ -2742,7 +2746,11 @@ static void tegra_sor_dp_enable(struct drm_encoder *encoder) mode = &encoder->crtc->state->adjusted_mode; info = &output->connector.display_info; - pm_runtime_get_sync(sor->dev); + err = host1x_client_resume(&sor->client); + if (err < 0) { + dev_err(sor->dev, "failed to resume: %d\n", err); + return; + } /* switch to safe parent clock */ err = tegra_sor_set_parent_clock(sor, sor->clk_safe); @@ -3053,7 +3061,7 @@ static const struct tegra_sor_ops tegra_sor_dp_ops = { static int tegra_sor_init(struct host1x_client *client) { - struct drm_device *drm = dev_get_drvdata(client->parent); + struct drm_device *drm = dev_get_drvdata(client->host); const struct drm_encoder_helper_funcs *helpers = NULL; struct tegra_sor *sor = host1x_client_to_sor(client); int connector = DRM_MODE_CONNECTOR_Unknown; @@ -3190,9 +3198,80 @@ static int tegra_sor_exit(struct host1x_client *client) return 0; } +static int tegra_sor_runtime_suspend(struct host1x_client *client) +{ + struct tegra_sor *sor = host1x_client_to_sor(client); + struct device *dev = 
client->dev; + int err; + + if (sor->rst) { + err = reset_control_assert(sor->rst); + if (err < 0) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + + reset_control_release(sor->rst); + } + + usleep_range(1000, 2000); + + clk_disable_unprepare(sor->clk); + pm_runtime_put_sync(dev); + + return 0; +} + +static int tegra_sor_runtime_resume(struct host1x_client *client) +{ + struct tegra_sor *sor = host1x_client_to_sor(client); + struct device *dev = client->dev; + int err; + + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get runtime PM: %d\n", err); + return err; + } + + err = clk_prepare_enable(sor->clk); + if (err < 0) { + dev_err(dev, "failed to enable clock: %d\n", err); + goto put_rpm; + } + + usleep_range(1000, 2000); + + if (sor->rst) { + err = reset_control_acquire(sor->rst); + if (err < 0) { + dev_err(dev, "failed to acquire reset: %d\n", err); + goto disable_clk; + } + + err = reset_control_deassert(sor->rst); + if (err < 0) { + dev_err(dev, "failed to deassert reset: %d\n", err); + goto release_reset; + } + } + + return 0; + +release_reset: + reset_control_release(sor->rst); +disable_clk: + clk_disable_unprepare(sor->clk); +put_rpm: + pm_runtime_put_sync(dev); + return err; +} + static const struct host1x_client_ops sor_client_ops = { .init = tegra_sor_init, .exit = tegra_sor_exit, + .suspend = tegra_sor_runtime_suspend, + .resume = tegra_sor_runtime_resume, }; static const u8 tegra124_sor_xbar_cfg[5] = { @@ -3843,10 +3922,9 @@ static int tegra_sor_probe(struct platform_device *pdev) if (!sor->clk_pad) { char *name; - err = pm_runtime_get_sync(&pdev->dev); + err = host1x_client_resume(&sor->client); if (err < 0) { - dev_err(&pdev->dev, "failed to get runtime PM: %d\n", - err); + dev_err(sor->dev, "failed to resume: %d\n", err); goto remove; } @@ -3857,7 +3935,7 @@ static int tegra_sor_probe(struct platform_device *pdev) } sor->clk_pad = tegra_clk_sor_pad_register(sor, name); - pm_runtime_put(&pdev->dev); + host1x_client_suspend(&sor->client); } if (IS_ERR(sor->clk_pad)) { @@ -3913,54 +3991,21 @@ static int tegra_sor_remove(struct platform_device *pdev) return 0; } -static int tegra_sor_runtime_suspend(struct device *dev) -{ - struct tegra_sor *sor = dev_get_drvdata(dev); - int err; - - if (sor->rst) { - err = reset_control_assert(sor->rst); - if (err < 0) { - dev_err(dev, "failed to assert reset: %d\n", err); - return err; - } - - reset_control_release(sor->rst); - } - - usleep_range(1000, 2000); - - clk_disable_unprepare(sor->clk); - - return 0; -} - -static int tegra_sor_runtime_resume(struct device *dev) +static int __maybe_unused tegra_sor_suspend(struct device *dev) { struct tegra_sor *sor = dev_get_drvdata(dev); int err; - err = clk_prepare_enable(sor->clk); + err = tegra_output_suspend(&sor->output); if (err < 0) { - dev_err(dev, "failed to enable clock: %d\n", err); + dev_err(dev, "failed to suspend output: %d\n", err); return err; } - usleep_range(1000, 2000); - - if (sor->rst) { - err = reset_control_acquire(sor->rst); - if (err < 0) { - dev_err(dev, "failed to acquire reset: %d\n", err); - clk_disable_unprepare(sor->clk); - return err; - } - - err = reset_control_deassert(sor->rst); + if (sor->hdmi_supply) { + err = regulator_disable(sor->hdmi_supply); if (err < 0) { - dev_err(dev, "failed to deassert reset: %d\n", err); - reset_control_release(sor->rst); - clk_disable_unprepare(sor->clk); + tegra_output_resume(&sor->output); return err; } } @@ -3968,37 +4013,31 @@ static int tegra_sor_runtime_resume(struct device 
*dev) return 0; } -static int tegra_sor_suspend(struct device *dev) +static int __maybe_unused tegra_sor_resume(struct device *dev) { struct tegra_sor *sor = dev_get_drvdata(dev); int err; if (sor->hdmi_supply) { - err = regulator_disable(sor->hdmi_supply); + err = regulator_enable(sor->hdmi_supply); if (err < 0) return err; } - return 0; -} + err = tegra_output_resume(&sor->output); + if (err < 0) { + dev_err(dev, "failed to resume output: %d\n", err); -static int tegra_sor_resume(struct device *dev) -{ - struct tegra_sor *sor = dev_get_drvdata(dev); - int err; + if (sor->hdmi_supply) + regulator_disable(sor->hdmi_supply); - if (sor->hdmi_supply) { - err = regulator_enable(sor->hdmi_supply); - if (err < 0) - return err; + return err; } return 0; } static const struct dev_pm_ops tegra_sor_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_sor_runtime_suspend, tegra_sor_runtime_resume, - NULL) SET_SYSTEM_SLEEP_PM_OPS(tegra_sor_suspend, tegra_sor_resume) }; diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index 3526c2892ddb..ade56b860cf9 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c @@ -161,7 +161,7 @@ static int vic_boot(struct vic *vic) static int vic_init(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct drm_device *dev = dev_get_drvdata(client->parent); + struct drm_device *dev = dev_get_drvdata(client->host); struct tegra_drm *tegra = dev->dev_private; struct vic *vic = to_vic(drm); int err; @@ -190,9 +190,9 @@ static int vic_init(struct host1x_client *client) /* * Inherit the DMA parameters (such as maximum segment size) from the - * parent device. + * parent host1x device. */ - client->dev->dma_parms = client->parent->dma_parms; + client->dev->dma_parms = client->host->dma_parms; return 0; @@ -209,7 +209,7 @@ detach: static int vic_exit(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct drm_device *dev = dev_get_drvdata(client->parent); + struct drm_device *dev = dev_get_drvdata(client->host); struct tegra_drm *tegra = dev->dev_private; struct vic *vic = to_vic(drm); int err; diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 2c8559ff3481..6a995db51d6d 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -120,7 +120,7 @@ static void host1x_subdev_register(struct host1x_device *device, mutex_lock(&device->clients_lock); list_move_tail(&client->list, &device->clients); list_move_tail(&subdev->list, &device->active); - client->parent = &device->dev; + client->host = &device->dev; subdev->client = client; mutex_unlock(&device->clients_lock); mutex_unlock(&device->subdevs_lock); @@ -156,7 +156,7 @@ static void __host1x_subdev_unregister(struct host1x_device *device, */ mutex_lock(&device->clients_lock); subdev->client = NULL; - client->parent = NULL; + client->host = NULL; list_move_tail(&subdev->list, &device->subdevs); /* * XXX: Perhaps don't do this here, but rather explicitly remove it @@ -710,6 +710,10 @@ int host1x_client_register(struct host1x_client *client) struct host1x *host1x; int err; + INIT_LIST_HEAD(&client->list); + mutex_init(&client->lock); + client->usecount = 0; + mutex_lock(&devices_lock); list_for_each_entry(host1x, &devices, list) { @@ -768,3 +772,74 @@ int host1x_client_unregister(struct host1x_client *client) return 0; } EXPORT_SYMBOL(host1x_client_unregister); + +int host1x_client_suspend(struct host1x_client *client) +{ + int err = 0; + + mutex_lock(&client->lock); + + if 
(client->usecount == 1) { + if (client->ops && client->ops->suspend) { + err = client->ops->suspend(client); + if (err < 0) + goto unlock; + } + } + + client->usecount--; + dev_dbg(client->dev, "use count: %u\n", client->usecount); + + if (client->parent) { + err = host1x_client_suspend(client->parent); + if (err < 0) + goto resume; + } + + goto unlock; + +resume: + if (client->usecount == 0) + if (client->ops && client->ops->resume) + client->ops->resume(client); + + client->usecount++; +unlock: + mutex_unlock(&client->lock); + return err; +} +EXPORT_SYMBOL(host1x_client_suspend); + +int host1x_client_resume(struct host1x_client *client) +{ + int err = 0; + + mutex_lock(&client->lock); + + if (client->parent) { + err = host1x_client_resume(client->parent); + if (err < 0) + goto unlock; + } + + if (client->usecount == 0) { + if (client->ops && client->ops->resume) { + err = client->ops->resume(client); + if (err < 0) + goto suspend; + } + } + + client->usecount++; + dev_dbg(client->dev, "use count: %u\n", client->usecount); + + goto unlock; + +suspend: + if (client->parent) + host1x_client_suspend(client->parent); +unlock: + mutex_unlock(&client->lock); + return err; +} +EXPORT_SYMBOL(host1x_client_resume); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index a738ea55e407..388bcc2889aa 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -339,10 +339,8 @@ static int host1x_probe(struct platform_device *pdev) } syncpt_irq = platform_get_irq(pdev, 0); - if (syncpt_irq < 0) { - dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq); + if (syncpt_irq < 0) return syncpt_irq; - } mutex_init(&host->devices_lock); INIT_LIST_HEAD(&host->devices); diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c index dd1cd0142941..fce7892d5137 100644 --- a/drivers/gpu/host1x/syncpt.c +++ b/drivers/gpu/host1x/syncpt.c @@ -421,7 +421,7 @@ int host1x_syncpt_init(struct host1x *host) struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, unsigned long flags) { - struct host1x *host = dev_get_drvdata(client->parent->parent); + struct host1x *host = dev_get_drvdata(client->host->parent); return host1x_syncpt_alloc(host, client, flags); } diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c index 47188df3080d..ddd64f9e3341 100644 --- a/drivers/mfd/intel_soc_pmic_core.c +++ b/drivers/mfd/intel_soc_pmic_core.c @@ -9,8 +9,6 @@ */ #include <linux/acpi.h> -#include <linux/gpio/consumer.h> -#include <linux/gpio/machine.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/module.h> @@ -25,17 +23,6 @@ #define BYT_CRC_HRV 2 #define CHT_CRC_HRV 3 -/* Lookup table for the Panel Enable/Disable line as GPIO signals */ -static struct gpiod_lookup_table panel_gpio_table = { - /* Intel GFX is consumer */ - .dev_id = "0000:00:02.0", - .table = { - /* Panel EN/DISABLE */ - GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH), - { }, - }, -}; - /* PWM consumed by the Intel GFX */ static struct pwm_lookup crc_pwm_lookup[] = { PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL), @@ -96,9 +83,6 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c, if (ret) dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret); - /* Add lookup table binding for Panel Control to the GPIO Chip */ - gpiod_add_lookup_table(&panel_gpio_table); - /* Add lookup table for crc-pwm */ pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup)); @@ -121,9 +105,6 @@ static int 
intel_soc_pmic_i2c_remove(struct i2c_client *i2c) regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data); - /* Remove lookup table for Panel Control from the GPIO Chip */ - gpiod_remove_lookup_table(&panel_gpio_table); - /* remove crc-pwm lookup table */ pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup)); diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 2bbd8ee93507..b0eea728455d 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1376,8 +1376,15 @@ void devm_pinctrl_put(struct pinctrl *p) } EXPORT_SYMBOL_GPL(devm_pinctrl_put); -int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps, - bool dup) +/** + * pinctrl_register_mappings() - register a set of pin controller mappings + * @maps: the pincontrol mappings table to register. Note the pinctrl-core + * keeps a reference to the passed in maps, so they should _not_ be + * marked with __initdata. + * @num_maps: the number of maps in the mapping table + */ +int pinctrl_register_mappings(const struct pinctrl_map *maps, + unsigned num_maps) { int i, ret; struct pinctrl_maps *maps_node; @@ -1430,17 +1437,8 @@ int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps, if (!maps_node) return -ENOMEM; + maps_node->maps = maps; maps_node->num_maps = num_maps; - if (dup) { - maps_node->maps = kmemdup(maps, sizeof(*maps) * num_maps, - GFP_KERNEL); - if (!maps_node->maps) { - kfree(maps_node); - return -ENOMEM; - } - } else { - maps_node->maps = maps; - } mutex_lock(&pinctrl_maps_mutex); list_add_tail(&maps_node->node, &pinctrl_maps); @@ -1448,22 +1446,14 @@ int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps, return 0; } +EXPORT_SYMBOL_GPL(pinctrl_register_mappings); /** - * pinctrl_register_mappings() - register a set of pin controller mappings - * @maps: the pincontrol mappings table to register. This should probably be - * marked with __initdata so it can be discarded after boot. This - * function will perform a shallow copy for the mapping entries. - * @num_maps: the number of maps in the mapping table + * pinctrl_unregister_mappings() - unregister a set of pin controller mappings + * @maps: the pincontrol mappings table passed to pinctrl_register_mappings() + * when registering the mappings. 
*/ -int pinctrl_register_mappings(const struct pinctrl_map *maps, - unsigned num_maps) -{ - return pinctrl_register_map(maps, num_maps, true); -} -EXPORT_SYMBOL_GPL(pinctrl_register_mappings); - -void pinctrl_unregister_map(const struct pinctrl_map *map) +void pinctrl_unregister_mappings(const struct pinctrl_map *map) { struct pinctrl_maps *maps_node; @@ -1478,6 +1468,7 @@ void pinctrl_unregister_map(const struct pinctrl_map *map) } mutex_unlock(&pinctrl_maps_mutex); } +EXPORT_SYMBOL_GPL(pinctrl_unregister_mappings); /** * pinctrl_force_sleep() - turn a given controller device into sleep state diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h index 7f34167a0405..840103c40c14 100644 --- a/drivers/pinctrl/core.h +++ b/drivers/pinctrl/core.h @@ -236,10 +236,6 @@ extern struct pinctrl_gpio_range * pinctrl_find_gpio_range_from_pin_nolock(struct pinctrl_dev *pctldev, unsigned int pin); -int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps, - bool dup); -void pinctrl_unregister_map(const struct pinctrl_map *map); - extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev); extern int pinctrl_force_default(struct pinctrl_dev *pctldev); diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 674920daac26..9357f7c46cf3 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -51,7 +51,7 @@ void pinctrl_dt_free_maps(struct pinctrl *p) struct pinctrl_dt_map *dt_map, *n1; list_for_each_entry_safe(dt_map, n1, &p->dt_maps, node) { - pinctrl_unregister_map(dt_map->map); + pinctrl_unregister_mappings(dt_map->map); list_del(&dt_map->node); dt_free_map(dt_map->pctldev, dt_map->map, dt_map->num_maps); @@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, dt_map->num_maps = num_maps; list_add_tail(&dt_map->node, &p->dt_maps); - return pinctrl_register_map(map, num_maps, false); + return pinctrl_register_mappings(map, num_maps); err_free_map: dt_free_map(pctldev, map, num_maps); diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 3c82de5f9417..9add0fd5fa6c 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -9,12 +9,54 @@ #include <linux/mailbox_controller.h> #include <linux/soc/mediatek/mtk-cmdq.h> -#define CMDQ_ARG_A_WRITE_MASK 0xffff #define CMDQ_WRITE_ENABLE_MASK BIT(0) +#define CMDQ_POLL_ENABLE_MASK BIT(0) #define CMDQ_EOC_IRQ_EN BIT(0) #define CMDQ_EOC_CMD ((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \ << 32 | CMDQ_EOC_IRQ_EN) +struct cmdq_instruction { + union { + u32 value; + u32 mask; + }; + union { + u16 offset; + u16 event; + }; + u8 subsys; + u8 op; +}; + +int cmdq_dev_get_client_reg(struct device *dev, + struct cmdq_client_reg *client_reg, int idx) +{ + struct of_phandle_args spec; + int err; + + if (!client_reg) + return -ENOENT; + + err = of_parse_phandle_with_fixed_args(dev->of_node, + "mediatek,gce-client-reg", + 3, idx, &spec); + if (err < 0) { + dev_err(dev, + "error %d can't parse gce-client-reg property (%d)", + err, idx); + + return err; + } + + client_reg->subsys = (u8)spec.args[0]; + client_reg->offset = (u16)spec.args[1]; + client_reg->size = (u16)spec.args[2]; + of_node_put(spec.np); + + return 0; +} +EXPORT_SYMBOL(cmdq_dev_get_client_reg); + static void cmdq_client_timeout(struct timer_list *t) { struct cmdq_client *client = from_timer(client, t, timer); @@ -110,10 +152,10 @@ void cmdq_pkt_destroy(struct cmdq_pkt *pkt) } EXPORT_SYMBOL(cmdq_pkt_destroy); -static int 
cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code, - u32 arg_a, u32 arg_b) +static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, + struct cmdq_instruction inst) { - u64 *cmd_ptr; + struct cmdq_instruction *cmd_ptr; if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) { /* @@ -129,8 +171,9 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code, __func__, (u32)pkt->buf_size); return -ENOMEM; } + cmd_ptr = pkt->va_base + pkt->cmd_buf_size; - (*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b; + *cmd_ptr = inst; pkt->cmd_buf_size += CMDQ_INST_SIZE; return 0; @@ -138,24 +181,34 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code, int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value) { - u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) | - (subsys << CMDQ_SUBSYS_SHIFT); + struct cmdq_instruction inst; - return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value); + inst.op = CMDQ_CODE_WRITE; + inst.value = value; + inst.offset = offset; + inst.subsys = subsys; + + return cmdq_pkt_append_command(pkt, inst); } EXPORT_SYMBOL(cmdq_pkt_write); int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value, u32 mask) { - u32 offset_mask = offset; - int err = 0; + struct cmdq_instruction inst = { {0} }; + u16 offset_mask = offset; + int err; if (mask != 0xffffffff) { - err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask); + inst.op = CMDQ_CODE_MASK; + inst.mask = ~mask; + err = cmdq_pkt_append_command(pkt, inst); + if (err < 0) + return err; + offset_mask |= CMDQ_WRITE_ENABLE_MASK; } - err |= cmdq_pkt_write(pkt, subsys, offset_mask, value); + err = cmdq_pkt_write(pkt, subsys, offset_mask, value); return err; } @@ -163,43 +216,85 @@ EXPORT_SYMBOL(cmdq_pkt_write_mask); int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event) { - u32 arg_b; + struct cmdq_instruction inst = { {0} }; if (event >= CMDQ_MAX_EVENT) return -EINVAL; - /* - * WFE arg_b - * bit 0-11: wait value - * bit 15: 1 - wait, 0 - no wait - * bit 16-27: update value - * bit 31: 1 - update, 0 - no update - */ - arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE; + inst.op = CMDQ_CODE_WFE; + inst.value = CMDQ_WFE_OPTION; + inst.event = event; - return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b); + return cmdq_pkt_append_command(pkt, inst); } EXPORT_SYMBOL(cmdq_pkt_wfe); int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event) { + struct cmdq_instruction inst = { {0} }; + if (event >= CMDQ_MAX_EVENT) return -EINVAL; - return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, - CMDQ_WFE_UPDATE); + inst.op = CMDQ_CODE_WFE; + inst.value = CMDQ_WFE_UPDATE; + inst.event = event; + + return cmdq_pkt_append_command(pkt, inst); } EXPORT_SYMBOL(cmdq_pkt_clear_event); +int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys, + u16 offset, u32 value) +{ + struct cmdq_instruction inst = { {0} }; + int err; + + inst.op = CMDQ_CODE_POLL; + inst.value = value; + inst.offset = offset; + inst.subsys = subsys; + err = cmdq_pkt_append_command(pkt, inst); + + return err; +} +EXPORT_SYMBOL(cmdq_pkt_poll); + +int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, + u16 offset, u32 value, u32 mask) +{ + struct cmdq_instruction inst = { {0} }; + int err; + + inst.op = CMDQ_CODE_MASK; + inst.mask = ~mask; + err = cmdq_pkt_append_command(pkt, inst); + if (err < 0) + return err; + + offset = offset | CMDQ_POLL_ENABLE_MASK; + err = cmdq_pkt_poll(pkt, subsys, offset, value); + + return err; +} 
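/*
 * Illustrative sketch only: the function below is not part of this series,
 * and the property index and status bit are placeholders. It shows how a
 * mailbox client might combine the new cmdq_dev_get_client_reg() lookup
 * with cmdq_pkt_poll_mask() so the GCE thread waits for a status bit to
 * assert before the remaining commands in the packet execute.
 */
static int example_queue_masked_poll(struct device *dev, struct cmdq_pkt *pkt)
{
	struct cmdq_client_reg reg;
	int err;

	/* resolve subsys/offset from the "mediatek,gce-client-reg" property */
	err = cmdq_dev_get_client_reg(dev, &reg, 0);
	if (err < 0)
		return err;

	/* block the GCE thread until bit 0 of the client register reads 1 */
	return cmdq_pkt_poll_mask(pkt, reg.subsys, reg.offset, 0x1, BIT(0));
}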
+EXPORT_SYMBOL(cmdq_pkt_poll_mask); + static int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { + struct cmdq_instruction inst = { {0} }; int err; /* insert EOC and generate IRQ for each command iteration */ - err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN); + inst.op = CMDQ_CODE_EOC; + inst.value = CMDQ_EOC_IRQ_EN; + err = cmdq_pkt_append_command(pkt, inst); + if (err < 0) + return err; /* JUMP to end */ - err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS); + inst.op = CMDQ_CODE_JUMP; + inst.value = CMDQ_JUMP_PASS; + err = cmdq_pkt_append_command(pkt, inst); return err; } diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index 306d1efeb5e0..156b122c0ad5 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -78,7 +78,7 @@ struct drm_format_info { * triplet @char_per_block, @block_w, @block_h for better * describing the pixel format. */ - u8 cpp[3]; + u8 cpp[4]; /** * @char_per_block: @@ -104,7 +104,7 @@ struct drm_format_info { * information from their drm_mode_config.get_format_info hook * if they want the core to be validating the pitch. */ - u8 char_per_block[3]; + u8 char_per_block[4]; }; /** @@ -113,7 +113,7 @@ struct drm_format_info { * Block width in pixels, this is intended to be accessed through * drm_format_info_block_width() */ - u8 block_w[3]; + u8 block_w[4]; /** * @block_h: @@ -121,7 +121,7 @@ struct drm_format_info { * Block height in pixels, this is intended to be accessed through * drm_format_info_block_height() */ - u8 block_h[3]; + u8 block_h[4]; /** @hsub: Horizontal chroma subsampling factor */ u8 hsub; diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 6edeb9228c4e..62d216ff1097 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -24,16 +24,20 @@ struct iommu_group; * struct host1x_client_ops - host1x client operations * @init: host1x client initialization code * @exit: host1x client tear down code + * @suspend: host1x client suspend code + * @resume: host1x client resume code */ struct host1x_client_ops { int (*init)(struct host1x_client *client); int (*exit)(struct host1x_client *client); + int (*suspend)(struct host1x_client *client); + int (*resume)(struct host1x_client *client); }; /** * struct host1x_client - host1x client structure * @list: list node for the host1x client - * @parent: pointer to struct device representing the host1x controller + * @host: pointer to struct device representing the host1x controller * @dev: pointer to struct device backing this host1x client * @group: IOMMU group that this client is a member of * @ops: host1x client operations @@ -44,7 +48,7 @@ struct host1x_client_ops { */ struct host1x_client { struct list_head list; - struct device *parent; + struct device *host; struct device *dev; struct iommu_group *group; @@ -55,6 +59,10 @@ struct host1x_client { struct host1x_syncpt **syncpts; unsigned int num_syncpts; + + struct host1x_client *parent; + unsigned int usecount; + struct mutex lock; }; /* @@ -309,6 +317,9 @@ int host1x_device_exit(struct host1x_device *device); int host1x_client_register(struct host1x_client *client); int host1x_client_unregister(struct host1x_client *client); +int host1x_client_suspend(struct host1x_client *client); +int host1x_client_resume(struct host1x_client *client); + struct tegra_mipi_device; struct tegra_mipi_device *tegra_mipi_request(struct device *device); diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index e6f54ef6698b..a4dc45fbec0a 100644 --- 
a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -20,6 +20,16 @@ #define CMDQ_WFE_WAIT BIT(15) #define CMDQ_WFE_WAIT_VALUE 0x1 +/* + * WFE arg_b + * bit 0-11: wait value + * bit 15: 1 - wait, 0 - no wait + * bit 16-27: update value + * bit 31: 1 - update, 0 - no update + */ +#define CMDQ_WFE_OPTION (CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \ + CMDQ_WFE_WAIT_VALUE) + /** cmdq event maximum */ #define CMDQ_MAX_EVENT 0x3ff @@ -45,6 +55,7 @@ enum cmdq_code { CMDQ_CODE_MASK = 0x02, CMDQ_CODE_WRITE = 0x04, + CMDQ_CODE_POLL = 0x08, CMDQ_CODE_JUMP = 0x10, CMDQ_CODE_WFE = 0x20, CMDQ_CODE_EOC = 0x40, diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h index ddd1b2773431..e987dc9fd2af 100644 --- a/include/linux/pinctrl/machine.h +++ b/include/linux/pinctrl/machine.h @@ -153,6 +153,7 @@ struct pinctrl_map { extern int pinctrl_register_mappings(const struct pinctrl_map *map, unsigned num_maps); +extern void pinctrl_unregister_mappings(const struct pinctrl_map *map); extern void pinctrl_provide_dummies(void); #else @@ -162,6 +163,10 @@ static inline int pinctrl_register_mappings(const struct pinctrl_map *map, return 0; } +static inline void pinctrl_unregister_mappings(const struct pinctrl_map *map) +{ +} + static inline void pinctrl_provide_dummies(void) { } diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 9618debb9ceb..a74c1d5acdf3 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -15,6 +15,12 @@ struct cmdq_pkt; +struct cmdq_client_reg { + u8 subsys; + u16 offset; + u16 size; +}; + struct cmdq_client { spinlock_t lock; u32 pkt_cnt; @@ -25,6 +31,21 @@ struct cmdq_client { }; /** + * cmdq_dev_get_client_reg() - parse cmdq client reg from the device + * node of CMDQ client + * @dev: device of CMDQ mailbox client + * @client_reg: CMDQ client reg pointer + * @idx: the index of desired reg + * + * Return: 0 for success; else the error code is returned + * + * Help CMDQ client parsing the cmdq client reg + * from the device node of CMDQ client. + */ +int cmdq_dev_get_client_reg(struct device *dev, + struct cmdq_client_reg *client_reg, int idx); + +/** * cmdq_mbox_create() - create CMDQ mailbox client and channel * @dev: device of CMDQ mailbox client * @index: index of CMDQ mailbox channel @@ -100,6 +121,38 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event); int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event); /** + * cmdq_pkt_poll() - Append polling command to the CMDQ packet, ask GCE to + * execute an instruction that wait for a specified + * hardware register to check for the value w/o mask. + * All GCE hardware threads will be blocked by this + * instruction. + * @pkt: the CMDQ packet + * @subsys: the CMDQ sub system code + * @offset: register offset from CMDQ sub system + * @value: the specified target register value + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys, + u16 offset, u32 value); + +/** + * cmdq_pkt_poll_mask() - Append polling command to the CMDQ packet, ask GCE to + * execute an instruction that wait for a specified + * hardware register to check for the value w/ mask. + * All GCE hardware threads will be blocked by this + * instruction. 
+ * @pkt: the CMDQ packet + * @subsys: the CMDQ sub system code + * @offset: register offset from CMDQ sub system + * @value: the specified target register value + * @mask: the specified target register mask + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, + u16 offset, u32 value, u32 mask); +/** * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ * packet and call back at the end of done packet * @pkt: the CMDQ packet diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 5ba481f49931..8bc0b31597d8 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -422,6 +422,19 @@ extern "C" { #define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS fourcc_mod_code(INTEL, 6) /* + * Intel color control surfaces (CCS) for Gen-12 media compression + * + * The main surface is Y-tiled and at plane index 0, the CCS is linear and + * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in + * main surface. In other words, 4 bits in CCS map to a main surface cache + * line pair. The main surface pitch is required to be a multiple of four + * Y-tile widths. For semi-planar formats like NV12, CCS planes follow the + * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces, + * planes 2 and 3 for the respective CCS. + */ +#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7) + +/* * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks * * Macroblocks are laid in a Z-shape, and each pixel data is following the |