drm fixes for 5.4-rc6
amdgpu:
- clang alignment fixes
- Updated golden settings
- navi: gpuvm, sdma and display fixes
- Freesync fix
- Gamma fix for DCN
- DP dongle detection fix
- vega10: Fix for undervolting

radeon:
- reenable kexec fix for ppc

scheduler:
- set an error if hw job failed

i915:
- fix PCH reference clock for HSW/BDW
- TGL display PLL doc fix

panfrost:
- warning fix
- runtime pm fix
- bad pointer dereference fix

v3d:
- memleak fix

etnaviv:
- memory corruption fix
- deadlock fix
- reintroduce lost debug message

-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJdu786AAoJEAx081l5xIa+O5gQAKW+TAPJzM4iGs0fv7AgprTH
ts5uY9hTk1Toc3Tl9KhRZauy36dcfOvoo3RD+bqbJju6e+ouMlK0uUyJvC30mpEY
9WczrXMYL4qhcnxnkOiLw76RZOuEX5WM2YkVbrB4qdNU3N55ZukeXgtDr+e9hSzy
nI+X+/8sgQOeFy8oE7s9k/I8Tn8CIgXvzWzH3L7P1r+t+rKvHF2i0NdanNk9dwrs
e8WmkzQhw0sdkLxZKh9sWxw0EipqJkP5y+XsUwYvyL97XhMsw59YF/OXk4OXFTbu
uct22gf8nYj1MbW93IQkoN5JQ09G1nnwYsvFERfZWeKgAL5oXGMdZbfEhwxHaymG
fD0ce2brs0chQVK7RbN7UQ30XgZBsG3H5JdPXNHrcqiIKfsLqtUvqLM3CoSfokUu
PGNDDpbU9LLOjI7s6DqX1FulQTYeIs49l0TIcgAGp7fxwU3GQFmFO8COqn28b54y
IHSB8vXwOYxC0NFTE3/H5z9h5PIXKBgnyMHouUehg9dGjEnuX/M7hiDgUZDqPPQW
REAEaSlyUB9wDYQbw92QaMhNSbK7jR+X+TqFkYn3GAwtq8XLrSB6+EzgQhnDgOPs
GQc0X+E0WFE4NgWnDYkHhsylFKdLtNZ2eghtlGpE/S+3PjO2jhhTdyo8o39QoI/Y
agFin9YQphep0HWq6p3Y
=4aG/
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2019-11-01' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"This is the regular drm fixes pull request for 5.4-rc6. It's a bit larger than I'd like, but then last week was quieter than usual.

The main fixes are amdgpu, and the two bigger areas are navi fixes, which are the newest GPU range so still getting actively fixed up, and also a bunch of clang stack alignment fixes (as amdgpu uses double in some places). Otherwise it's all fairly run-of-the-mill fixes: i915, panfrost, etnaviv, v3d and radeon, along with a core scheduler fix.

Summary:

amdgpu:
- clang alignment fixes
- Updated golden settings
- navi: gpuvm, sdma and display fixes
- Freesync fix
- Gamma fix for DCN
- DP dongle detection fix
- vega10: Fix for undervolting

radeon:
- reenable kexec fix for ppc

scheduler:
- set an error if hw job failed

i915:
- fix PCH reference clock for HSW/BDW
- TGL display PLL doc fix

panfrost:
- warning fix
- runtime pm fix
- bad pointer dereference fix

v3d:
- memleak fix

etnaviv:
- memory corruption fix
- deadlock fix
- reintroduce lost debug message"

* tag 'drm-fixes-2019-11-01' of git://anongit.freedesktop.org/drm/drm: (29 commits)
  drm/amdgpu: enable -msse2 for GCC 7.1+ users
  drm/amdgpu: fix stack alignment ABI mismatch for GCC 7.1+
  drm/amdgpu: fix stack alignment ABI mismatch for Clang
  drm/radeon: Fix EEH during kexec
  drm/amdgpu/gmc10: properly set BANK_SELECT and FRAGMENT_SIZE
  drm/amdgpu/powerplay/vega10: allow undervolting in p7
  dc.c:use kzalloc without test
  drm/amd/display: setting the DIG_MODE to the correct value.
  drm/amd/display: Passive DP->HDMI dongle detection fix
  drm/amd/display: add 50us buffer as WA for pstate switch in active
  drm/amd/display: Allow inverted gamma
  drm/amd/display: do not synchronize "drr" displays
  drm/amdgpu: If amdgpu_ib_schedule fails return back the error.
  drm/sched: Set error to s_fence if HW job submission failed.
  drm/amdgpu/gfx10: update gfx golden settings for navi12
  drm/amdgpu/gfx10: update gfx golden settings for navi14
  drm/amdgpu/gfx10: update gfx golden settings
  drm/amd/display: Change Navi14's DWB flag to 1
  drm/amdgpu/sdma5: do not execute 0-sized IBs (v2)
  drm/amdgpu: Fix SDMA hang when performing VKexample test
  ...
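Several of the hunks below (the drm/sched fix and the "If amdgpu_ib_schedule fails return back the error" change) rely on one shared convention: the backend run_job() hook reports a failed hardware submission by returning an ERR_PTR()-encoded fence instead of a bare pointer or NULL, and the scheduler then copies that error onto the job's finished fence with dma_fence_set_error(). What follows is a minimal, self-contained userspace sketch of that convention only; the ERR_PTR()/PTR_ERR()/IS_ERR_OR_NULL() helpers and the fence/job structures are simplified stand-ins for the kernel ones, not the actual driver code in the diffs.

/* Minimal model of the ERR_PTR error-propagation pattern used by the
 * scheduler fix below; simplified stand-ins, not kernel code. */
#include <errno.h>
#include <stdio.h>

/* Simplified versions of the kernel's ERR_PTR()/PTR_ERR()/IS_ERR_OR_NULL(). */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || (unsigned long)ptr >= (unsigned long)-4095;
}

struct fence { int error; };                      /* stands in for dma_fence */
struct job { struct fence finished; struct fence *parent; };

/* Backend run_job(): on failure, return ERR_PTR(r) instead of NULL so the
 * caller can tell "no fence" apart from "submission failed with error r". */
static struct fence *run_job(struct job *job, int hw_error)
{
        static struct fence hw_fence;

        (void)job;
        if (hw_error)
                return ERR_PTR(hw_error);         /* e.g. -ENOMEM from IB submission */
        return &hw_fence;
}

/* Scheduler side: propagate the encoded error to the job's finished fence. */
static void schedule_job(struct job *job, int hw_error)
{
        struct fence *fence = run_job(job, hw_error);

        if (IS_ERR_OR_NULL(fence)) {
                job->parent = NULL;
                job->finished.error = (int)PTR_ERR(fence);
        } else {
                job->parent = fence;
        }
}

int main(void)
{
        struct job job = { .finished = { 0 }, .parent = NULL };

        schedule_job(&job, -ENOMEM);
        printf("finished.error = %d\n", job.finished.error);   /* prints -12 */
        return 0;
}

In the sched_main.c hunks this pattern shows up as the IS_ERR_OR_NULL(fence) checks and the dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)) calls.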
@@ -218,7 +218,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
 	struct dma_fence *fence = NULL, *finished;
 	struct amdgpu_job *job;
-	int r;
+	int r = 0;
 
 	job = to_amdgpu_job(sched_job);
 	finished = &job->base.s_fence->finished;
@@ -243,6 +243,8 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 	job->fence = dma_fence_get(fence);
 
 	amdgpu_job_free_resources(job);
+
+	fence = r ? ERR_PTR(r) : fence;
 	return fence;
 }

@@ -93,7 +93,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
 {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -140,7 +140,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -179,7 +179,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0xc0000100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),

@@ -151,6 +151,15 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
 	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);
 
 	tmp = mmGCVM_L2_CNTL3_DEFAULT;
+	if (adev->gmc.translate_further) {
+		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
+		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+	} else {
+		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
+		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+	}
 	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);
 
 	tmp = mmGCVM_L2_CNTL4_DEFAULT;

@@ -309,6 +309,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 
 	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
 	job->vm_needs_flush = true;
+	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	r = amdgpu_job_submit(job, &adev->mman.entity,
 			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);

@@ -137,6 +137,15 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
 	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);
 
 	tmp = mmMMVM_L2_CNTL3_DEFAULT;
+	if (adev->gmc.translate_further) {
+		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
+		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+	} else {
+		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
+		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+	}
 	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);
 
 	tmp = mmMMVM_L2_CNTL4_DEFAULT;

@@ -254,6 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
 };
 
 static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,

@@ -24,15 +24,20 @@
 # It calculates Bandwidth and Watermarks values for HW programming
 #
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-	cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-	cc_stack_align := -mstack-alignment=16
-endif
+calcs_ccflags := -mhard-float -msse
 
-calcs_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+calcs_ccflags += -mpreferred-stack-boundary=4
+else
 calcs_ccflags += -msse2
 endif

@@ -580,6 +580,10 @@ static bool construct(struct dc *dc,
 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	// Allocate memory for the vm_helper
 	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
+	if (!dc->vm_helper) {
+		dm_error("%s: failed to create dc->vm_helper\n", __func__);
+		goto fail;
+	}
 
 #endif
 	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

@@ -2767,6 +2767,15 @@ void core_link_enable_stream(
 			CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
 			COLOR_DEPTH_UNDEFINED);
 
+	/* This second call is needed to reconfigure the DIG
+	 * as a workaround for the incorrect value being applied
+	 * from transmitter control.
+	 */
+	if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+		stream->link->link_enc->funcs->setup(
+			stream->link->link_enc,
+			pipe_ctx->stream->signal);
+
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	if (pipe_ctx->stream->timing.flags.DSC) {
 		if (dc_is_dp_signal(pipe_ctx->stream->signal) ||

@@ -374,6 +374,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
 	enum display_dongle_type *dongle = &sink_cap->dongle_type;
 	uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
 	bool is_type2_dongle = false;
+	int retry_count = 2;
 	struct dp_hdmi_dongle_signature_data *dongle_signature;
 
 	/* Assume we have no valid DP passive dongle connected */
@@ -386,13 +387,24 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
 		DP_HDMI_DONGLE_ADDRESS,
 		type2_dongle_buf,
 		sizeof(type2_dongle_buf))) {
-		*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
-		sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+		/* Passive HDMI dongles can sometimes fail here without retrying*/
+		while (retry_count > 0) {
+			if (i2c_read(ddc,
+				DP_HDMI_DONGLE_ADDRESS,
+				type2_dongle_buf,
+				sizeof(type2_dongle_buf)))
+				break;
+			retry_count--;
+		}
+		if (retry_count == 0) {
+			*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+			sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
 
-		CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
-				"DP-DVI passive dongle %dMhz: ",
-				DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
-		return;
+			CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+					"DP-DVI passive dongle %dMhz: ",
+					DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+			return;
+		}
 	}
 
 	/* Check if Type 2 dongle.*/

@@ -404,6 +404,9 @@ bool resource_are_streams_timing_synchronizable(
 	if (stream1->view_format != stream2->view_format)
 		return false;
 
+	if (stream1->ignore_msa_timing_param || stream2->ignore_msa_timing_param)
+		return false;
+
 	return true;
 }
 static bool is_dp_and_hdmi_sharable(
@@ -1540,6 +1543,9 @@ bool dc_is_stream_unchanged(
 	if (!are_stream_backends_same(old_stream, stream))
 		return false;
 
+	if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
+		return false;
+
 	return true;
 }

@@ -393,6 +393,10 @@ bool cm_helper_translate_curve_to_hw_format(
 	rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
 	rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+	rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+	rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+	rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
 	// All 3 color channels have same x
 	corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 			dc_fixpt_from_int(region_start));
@@ -464,13 +468,6 @@ bool cm_helper_translate_curve_to_hw_format(
 
 	i = 1;
 	while (i != hw_points + 1) {
-		if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-			rgb_plus_1->red = rgb->red;
-		if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-			rgb_plus_1->green = rgb->green;
-		if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-			rgb_plus_1->blue = rgb->blue;
-
 		rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
 		rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
 		rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
@@ -562,6 +559,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 	rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
 	rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+	rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+	rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+	rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
 	corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 			dc_fixpt_from_int(region_start));
 	corner_points[0].green.x = corner_points[0].red.x;
@@ -624,13 +625,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 
 	i = 1;
 	while (i != hw_points + 1) {
-		if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-			rgb_plus_1->red = rgb->red;
-		if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-			rgb_plus_1->green = rgb->green;
-		if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-			rgb_plus_1->blue = rgb->blue;
-
 		rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
 		rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
 		rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);

@@ -10,15 +10,20 @@ ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 DCN20 += dcn20_dsc.o
 endif
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-	cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-	cc_stack_align := -mstack-alignment=16
-endif
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
 
-CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4
+else
 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2
 endif

@@ -814,7 +814,7 @@ static const struct resource_caps res_cap_nv14 = {
 		.num_audio = 6,
 		.num_stream_encoder = 5,
 		.num_pll = 5,
-		.num_dwb = 0,
+		.num_dwb = 1,
 		.num_ddc = 5,
 };

@@ -3,15 +3,20 @@
 
 DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-	cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-	cc_stack_align := -mstack-alignment=16
-endif
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
 
-CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4
+else
 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
 endif

@@ -24,15 +24,20 @@
 # It provides the general basic services required by other DAL
 # subcomponents.
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-	cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-	cc_stack_align := -mstack-alignment=16
-endif
+dml_ccflags := -mhard-float -msse
 
-dml_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dml_ccflags += -mpreferred-stack-boundary=4
+else
 dml_ccflags += -msse2
 endif

@@ -2577,7 +2577,8 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
 			mode_lib->vba.MinActiveDRAMClockChangeMargin
 			+ mode_lib->vba.DRAMClockChangeLatency;
 
-	if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+	if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+		mode_lib->vba.DRAMClockChangeWatermark += 25;
 		mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
 	} else {
 		if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {

@@ -1,15 +1,20 @@
 #
 # Makefile for the 'dsc' sub-component of DAL.
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-	cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-	cc_stack_align := -mstack-alignment=16
-endif
+dsc_ccflags := -mhard-float -msse
 
-dsc_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dsc_ccflags += -mpreferred-stack-boundary=4
+else
 dsc_ccflags += -msse2
 endif

@@ -5098,9 +5098,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
 
 	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
 		podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
-		for (i = 0; i < podn_vdd_dep->count - 1; i++)
-			od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
-		if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
+		for (i = 0; i < podn_vdd_dep->count; i++)
 			od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
 	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
 		podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;

@@ -180,6 +180,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
 			  etnaviv_cmdbuf_get_va(&submit->cmdbuf,
 					&gpu->mmu_context->cmdbuf_mapping));
 
+	mutex_unlock(&gpu->mmu_context->lock);
+
 	/* Reserve space for the bomap */
 	if (n_bomap_pages) {
 		bomap_start = bomap = iter.data;
@@ -221,8 +223,6 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
 						  obj->base.size);
 	}
 
-	mutex_unlock(&gpu->mmu_context->lock);
-
 	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
 
 	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);

@@ -155,9 +155,11 @@ static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *bu
 
 	memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
 	buf += SZ_4K;
-	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
-		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
 			memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
+			buf += SZ_4K;
+		}
 }
 
 static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,

@@ -328,12 +328,23 @@ etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
 
 	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
 					  global->memory_base);
-	if (ret) {
-		global->ops->free(ctx);
-		return NULL;
+	if (ret)
+		goto out_free;
+
+	if (global->version == ETNAVIV_IOMMU_V1 &&
+	    ctx->cmdbuf_mapping.iova > 0x80000000) {
+		dev_err(global->dev,
+			"command buffer outside valid memory window\n");
+		goto out_unmap;
 	}
 
 	return ctx;
+
+out_unmap:
+	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
+out_free:
+	global->ops->free(ctx);
+	return NULL;
 }
 
 void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,

@@ -9315,7 +9315,6 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
 {
 	struct intel_encoder *encoder;
-	bool pch_ssc_in_use = false;
 	bool has_fdi = false;
 
 	for_each_intel_encoder(&dev_priv->drm, encoder) {
@@ -9343,22 +9342,24 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
 	 * clock hierarchy. That would also allow us to do
 	 * clock bending finally.
 	 */
+	dev_priv->pch_ssc_use = 0;
+
 	if (spll_uses_pch_ssc(dev_priv)) {
 		DRM_DEBUG_KMS("SPLL using PCH SSC\n");
-		pch_ssc_in_use = true;
+		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
 	}
 
 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
 		DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
-		pch_ssc_in_use = true;
+		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
 	}
 
 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
 		DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
-		pch_ssc_in_use = true;
+		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
 	}
 
-	if (pch_ssc_in_use)
+	if (dev_priv->pch_ssc_use)
 		return;
 
 	if (has_fdi) {

@@ -525,16 +525,31 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
 	val = I915_READ(WRPLL_CTL(id));
 	I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
 	POSTING_READ(WRPLL_CTL(id));
+
+	/*
+	 * Try to set up the PCH reference clock once all DPLLs
+	 * that depend on it have been shut down.
+	 */
+	if (dev_priv->pch_ssc_use & BIT(id))
+		intel_init_pch_refclk(dev_priv);
 }
 
 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
 				 struct intel_shared_dpll *pll)
 {
+	enum intel_dpll_id id = pll->info->id;
 	u32 val;
 
 	val = I915_READ(SPLL_CTL);
 	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
 	POSTING_READ(SPLL_CTL);
+
+	/*
+	 * Try to set up the PCH reference clock once all DPLLs
+	 * that depend on it have been shut down.
+	 */
+	if (dev_priv->pch_ssc_use & BIT(id))
+		intel_init_pch_refclk(dev_priv);
 }
 
 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,

@@ -147,11 +147,11 @@ enum intel_dpll_id {
 	 */
 	DPLL_ID_ICL_MGPLL4 = 6,
 	/**
-	 * @DPLL_ID_TGL_TCPLL5: TGL TC PLL port 5 (TC5)
+	 * @DPLL_ID_TGL_MGPLL5: TGL TC PLL port 5 (TC5)
 	 */
 	DPLL_ID_TGL_MGPLL5 = 7,
 	/**
-	 * @DPLL_ID_TGL_TCPLL6: TGL TC PLL port 6 (TC6)
+	 * @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6)
 	 */
 	DPLL_ID_TGL_MGPLL6 = 8,
 };

@@ -1723,6 +1723,8 @@ struct drm_i915_private {
 		struct work_struct idle_work;
 	} gem;
 
+	u8 pch_ssc_use;
+
 	/* For i945gm vblank irq vs. C3 workaround */
 	struct {
 		struct work_struct work;

@@ -556,11 +556,11 @@ static int panfrost_probe(struct platform_device *pdev)
 	return 0;
 
 err_out2:
+	pm_runtime_disable(pfdev->dev);
 	panfrost_devfreq_fini(pfdev);
 err_out1:
 	panfrost_device_fini(pfdev);
 err_out0:
-	pm_runtime_disable(pfdev->dev);
 	drm_dev_put(ddev);
 	return err;
 }

@@ -224,9 +224,9 @@ static size_t get_pgsize(u64 addr, size_t size)
 	return SZ_2M;
 }
 
-void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
-			      struct panfrost_mmu *mmu,
-			      u64 iova, size_t size)
+static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+				     struct panfrost_mmu *mmu,
+				     u64 iova, size_t size)
 {
 	if (mmu->as < 0)
 		return;
@@ -406,11 +406,11 @@ addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
 	spin_lock(&pfdev->as_lock);
 	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
 		if (as == mmu->as)
-			break;
+			goto found_mmu;
 	}
-	if (as != mmu->as)
-		goto out;
+	goto out;
 
+found_mmu:
 	priv = container_of(mmu, struct panfrost_file_priv, mmu);
 
 	spin_lock(&priv->mm_lock);
@@ -432,7 +432,8 @@ addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
 
 #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
 
-int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
+static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+				       u64 addr)
 {
 	int ret, i;
 	struct panfrost_gem_object *bo;

@@ -16,6 +16,7 @@
 #include "panfrost_issues.h"
 #include "panfrost_job.h"
+#include "panfrost_mmu.h"
 #include "panfrost_perfcnt.h"
 #include "panfrost_regs.h"
 
 #define COUNTERS_PER_BLOCK 64

@@ -379,11 +379,25 @@ radeon_pci_remove(struct pci_dev *pdev)
 static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
+#ifdef CONFIG_PPC64
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+#endif
+
 	/* if we are running in a VM, make sure the device
 	 * torn down properly on reboot/shutdown
 	 */
 	if (radeon_device_is_virtual())
 		radeon_pci_remove(pdev);
+
+#ifdef CONFIG_PPC64
+	/* Some adapters need to be suspended before a
+	 * shutdown occurs in order to prevent an error
+	 * during kexec.
+	 * Make this power specific becauase it breaks
+	 * some non-power boards.
+	 */
+	radeon_suspend_kms(ddev, true, true, false);
+#endif
 }
 
 static int radeon_pmops_suspend(struct device *dev)

@@ -479,6 +479,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 	struct drm_sched_job *s_job, *tmp;
 	uint64_t guilty_context;
 	bool found_guilty = false;
+	struct dma_fence *fence;
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
@@ -492,7 +493,16 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
 
 		dma_fence_put(s_job->s_fence->parent);
-		s_job->s_fence->parent = sched->ops->run_job(s_job);
+		fence = sched->ops->run_job(s_job);
+
+		if (IS_ERR_OR_NULL(fence)) {
+			s_job->s_fence->parent = NULL;
+			dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+		} else {
+			s_job->s_fence->parent = fence;
+		}
+
+
 	}
 }
 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
@@ -720,7 +730,7 @@ static int drm_sched_main(void *param)
 		fence = sched->ops->run_job(sched_job);
 		drm_sched_fence_scheduled(s_fence);
 
-		if (fence) {
+		if (!IS_ERR_OR_NULL(fence)) {
 			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &sched_job->cb,
 						   drm_sched_process_job);
@@ -730,8 +740,11 @@ static int drm_sched_main(void *param)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
 			dma_fence_put(fence);
-		} else
+		} else {
+			dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+
 			drm_sched_process_job(NULL, &sched_job->cb);
+		}
 
 		wake_up(&sched->job_scheduled);
 	}

@@ -557,13 +557,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
 	if (args->bcl_start != args->bcl_end) {
 		bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
-		if (!bin)
+		if (!bin) {
+			v3d_job_put(&render->base);
 			return -ENOMEM;
+		}
 
 		ret = v3d_job_init(v3d, file_priv, &bin->base,
 				   v3d_job_free, args->in_sync_bcl);
 		if (ret) {
 			v3d_job_put(&render->base);
 			kfree(bin);
 			return ret;
 		}