Merge branch 'drm-fixes-5.0' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
Fixes for 5.0:
- Fix KFD on ARM64
- Fix KFD topology with mixed APU and dGPU systems
- Powerplay fix for vega12
- DC Raven fixes
- Freesync fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190116224524.3314-1-alexander.deucher@amd.com
commit 586cdb0db7
@@ -531,17 +531,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
         struct drm_gem_object *obj;
         struct amdgpu_framebuffer *amdgpu_fb;
         int ret;
-        int height;
-        struct amdgpu_device *adev = dev->dev_private;
-        int cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
-        int pitch = mode_cmd->pitches[0] / cpp;
-
-        pitch = amdgpu_align_pitch(adev, pitch, cpp, false);
-        if (mode_cmd->pitches[0] != pitch) {
-                DRM_DEBUG_KMS("Invalid pitch: expecting %d but got %d\n",
-                              pitch, mode_cmd->pitches[0]);
-                return ERR_PTR(-EINVAL);
-        }
 
         obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
         if (obj == NULL) {
@@ -556,13 +545,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
                 return ERR_PTR(-EINVAL);
         }
 
-        height = ALIGN(mode_cmd->height, 8);
-        if (obj->size < pitch * height) {
-                DRM_DEBUG_KMS("Invalid GEM size: expecting >= %d but got %zu\n",
-                              pitch * height, obj->size);
-                return ERR_PTR(-EINVAL);
-        }
-
         amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
         if (amdgpu_fb == NULL) {
                 drm_gem_object_put_unlocked(obj);
@@ -4,8 +4,8 @@
 
 config HSA_AMD
         bool "HSA kernel driver for AMD GPU devices"
-        depends on DRM_AMDGPU && X86_64
-        imply AMD_IOMMU_V2
+        depends on DRM_AMDGPU && (X86_64 || ARM64)
+        imply AMD_IOMMU_V2 if X86_64
         select MMU_NOTIFIER
         help
           Enable this if you want to use HSA features on AMD GPU devices.
@@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
         return 0;
 }
 
+#if CONFIG_X86_64
 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
                         uint32_t *num_entries,
                         struct crat_subtype_iolink *sub_type_hdr)
@@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
 
         return 0;
 }
+#endif
 
 /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
  *
@@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
         struct crat_subtype_generic *sub_type_hdr;
         int avail_size = *size;
         int numa_node_id;
+#ifdef CONFIG_X86_64
         uint32_t entries = 0;
+#endif
         int ret = 0;
 
         if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
@@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
                                 sub_type_hdr->length);
 
                 /* Fill in Subtype: IO Link */
+#ifdef CONFIG_X86_64
                 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
                                 &entries,
                                 (struct crat_subtype_iolink *)sub_type_hdr);
@@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 
                 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                                 sub_type_hdr->length * entries);
+#else
+                pr_info("IO link not available for non x86 platforms\n");
+#endif
 
                 crat_table->num_domains++;
         }
@@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
  *      the GPU device is not already present in the topology device
  *      list then return NULL. This means a new topology device has to
  *      be created for this GPU.
- * TODO: Rather than assiging @gpu to first topology device withtout
- *      gpu attached, it will better to have more stringent check.
  */
 static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
 {
@@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
         struct kfd_topology_device *out_dev = NULL;
 
         down_write(&topology_lock);
-        list_for_each_entry(dev, &topology_device_list, list)
+        list_for_each_entry(dev, &topology_device_list, list) {
+                /* Discrete GPUs need their own topology device list
+                 * entries. Don't assign them to CPU/APU nodes.
+                 */
+                if (!gpu->device_info->needs_iommu_device &&
+                    dev->node_props.cpu_cores_count)
+                        continue;
+
                 if (!dev->gpu && (dev->node_props.simd_count > 0)) {
                         dev->gpu = gpu;
                         out_dev = dev;
                         break;
                 }
+        }
         up_write(&topology_lock);
         return out_dev;
 }
@@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
 
 static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
 {
-        const struct cpuinfo_x86 *cpuinfo;
         int first_cpu_of_numa_node;
 
         if (!cpumask || cpumask == cpu_none_mask)
@@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
         first_cpu_of_numa_node = cpumask_first(cpumask);
         if (first_cpu_of_numa_node >= nr_cpu_ids)
                 return -1;
-        cpuinfo = &cpu_data(first_cpu_of_numa_node);
-
-        return cpuinfo->apicid;
+#ifdef CONFIG_X86_64
+        return cpu_data(first_cpu_of_numa_node).apicid;
+#else
+        return first_cpu_of_numa_node;
+#endif
 }
 
 /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
@@ -1772,7 +1772,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
                       + caps.min_input_signal * 0x101;
 
         if (dc_link_set_backlight_level(dm->backlight_link,
-                        brightness, 0, 0))
+                        brightness, 0))
                 return 0;
         else
                 return 1;
@@ -5933,7 +5933,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
                     !new_crtc_state->color_mgmt_changed &&
-                    !new_crtc_state->vrr_enabled)
+                    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
                         continue;
 
                 if (!new_crtc_state->enable)
@@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link)
 
 bool dc_link_set_backlight_level(const struct dc_link *link,
                 uint32_t backlight_pwm_u16_16,
-                uint32_t frame_ramp,
-                const struct dc_stream_state *stream)
+                uint32_t frame_ramp)
 {
         struct dc *core_dc = link->ctx->dc;
         struct abm *abm = core_dc->res_pool->abm;
@@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
                 (abm->funcs->set_backlight_level_pwm == NULL))
                 return false;
 
-        if (stream)
-                ((struct dc_stream_state *)stream)->bl_pwm_level =
-                                backlight_pwm_u16_16;
-
         use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
 
         DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
@@ -2637,11 +2632,6 @@ void core_link_enable_stream(
 
                 if (dc_is_dp_signal(pipe_ctx->stream->signal))
                         enable_stream_features(pipe_ctx);
-
-                dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
-                                pipe_ctx->stream->bl_pwm_level,
-                                0,
-                                pipe_ctx->stream);
         }
 
 }
@@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
  */
 bool dc_link_set_backlight_level(const struct dc_link *dc_link,
                 uint32_t backlight_pwm_u16_16,
-                uint32_t frame_ramp,
-                const struct dc_stream_state *stream);
+                uint32_t frame_ramp);
 
 int dc_link_get_backlight_level(const struct dc_link *dc_link);
 
@@ -91,7 +91,6 @@ struct dc_stream_state {
 
         /* DMCU info */
         unsigned int abm_level;
-        unsigned int bl_pwm_level;
 
         /* from core_stream struct */
         struct dc_context *ctx;
@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
 
                 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
 
-                if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+                if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
                         /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
                         pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
                 /* un-mute audio */
@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
                 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
                                 pipe_ctx->stream_res.stream_enc, true);
         if (pipe_ctx->stream_res.audio) {
+                struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+
                 if (option != KEEP_ACQUIRED_RESOURCE ||
                                 !dc->debug.az_endpoint_mute_only) {
                         /*only disalbe az_endpoint if power down or free*/
@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
                         update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
                         pipe_ctx->stream_res.audio = NULL;
                 }
+                if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+                        /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
+                        pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
 
                 /* TODO: notify audio driver for if audio modes list changed
                  * add audio mode list change flag */
@@ -463,7 +463,7 @@ void dpp1_set_cursor_position(
         if (src_y_offset >= (int)param->viewport.height)
                 cur_en = 0; /* not visible beyond bottom edge*/
 
-        if (src_y_offset < 0)
+        if (src_y_offset + (int)height <= 0)
                 cur_en = 0; /* not visible beyond top edge*/
 
         REG_UPDATE(CURSOR0_CONTROL,
@@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position(
         if (src_y_offset >= (int)param->viewport.height)
                 cur_en = 0; /* not visible beyond bottom edge*/
 
-        if (src_y_offset < 0) //+ (int)hubp->curs_attr.height
+        if (src_y_offset + (int)hubp->curs_attr.height <= 0)
                 cur_en = 0; /* not visible beyond top edge*/
 
         if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
@@ -2355,30 +2355,23 @@ static void dcn10_apply_ctx_for_surface(
                         top_pipe_to_program->plane_state->update_flags.bits.full_update)
                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
+                        tg = pipe_ctx->stream_res.tg;
                         /* Skip inactive pipes and ones already updated */
                         if (!pipe_ctx->stream || pipe_ctx->stream == stream
-                                        || !pipe_ctx->plane_state)
+                                        || !pipe_ctx->plane_state
+                                        || !tg->funcs->is_tg_enabled(tg))
                                 continue;
 
-                        pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+                        tg->funcs->lock(tg);
 
                         pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
                                 pipe_ctx->plane_res.hubp,
                                 &pipe_ctx->dlg_regs,
                                 &pipe_ctx->ttu_regs);
+
+                        tg->funcs->unlock(tg);
                 }
 
-        for (i = 0; i < dc->res_pool->pipe_count; i++) {
-                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-                if (!pipe_ctx->stream || pipe_ctx->stream == stream
-                                || !pipe_ctx->plane_state)
-                        continue;
-
-                dcn10_pipe_control_lock(dc, pipe_ctx, false);
-        }
-
         if (num_planes == 0)
                 false_optc_underflow_wa(dc, stream, tg);
 
@@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le
 #define NUM_POWER_FN_SEGS 8
 #define NUM_BL_CURVE_SEGS 16
 
+#pragma pack(push, 1)
 /* NOTE: iRAM is 256B in size */
 struct iram_table_v_2 {
         /* flags */
@@ -100,6 +101,7 @@ struct iram_table_v_2 {
         uint8_t dummy8; /* 0xfe */
         uint8_t dummy9; /* 0xff */
 };
+#pragma pack(pop)
 
 static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
 {
@@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
         return 0;
 }
 
+static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
+{
+        uint32_t result;
+
+        PP_ASSERT_WITH_CODE(
+                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
+                "[Run_ACG_BTC] Attempt to run ACG BTC failed!",
+                return -EINVAL);
+
+        result = smum_get_argument(hwmgr);
+        PP_ASSERT_WITH_CODE(result == 1,
+                "Failed to run ACG BTC!", return -EINVAL);
+
+        return 0;
+}
+
 static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
 {
         struct vega12_hwmgr *data =
@@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
                         "Failed to initialize SMC table!",
                         result = tmp_result);
 
+        tmp_result = vega12_run_acg_btc(hwmgr);
+        PP_ASSERT_WITH_CODE(!tmp_result,
+                        "Failed to run ACG BTC!",
+                        result = tmp_result);
+
         result = vega12_enable_all_smu_features(hwmgr);
         PP_ASSERT_WITH_CODE(!result,
                         "Failed to enable all smu features!",