drm fixes: msm, sun4i, amdgpu, i915 gvt
-----BEGIN PGP SIGNATURE-----

iQIbBAABAgAGBQJcSjL+AAoJEAx081l5xIa+/XUP+PuG3hqaUaBrqzY62ws3Xqz9
AWhZR5m6EZwWyDWVCaURL54IXeOxX22ZknaVp++neOgzIeY7DUHd/h8JKvRepvew
KfyTbGJKB8qpYl3A0mRvU4qlPDhWoT2S6aTym5RPYtzMbhUkRoP6rFNVvbg+Y1TF
0Ne9Hl/cVYN8KuAJegbRNQRn4UmbJ541Z49hpKPIlpJChwPyIFzFMSHEY68YybJa
uChip1/zf4Ykh4nuFUuldGER1kplu5EQl0Lgl9M22SslHfx4d0p5bNblQHIuuRN1
uH0C/HkaWK6z6ZuWhWDkCvctgxLtLhBKUnQZaWn/rUi77ysFZTzhsDAhsKlm4aDA
3aHzyTMAzuWEQIkOTi40Ozd2U4pEjwg/lFpF8tTWx0q9wkFJBzk6NJgMdw5DhidY
zrZlRSLrgIs26Q4DIbZoPmowwt7TkgWa0xkytZi/GVvESD7e3ql2ueHCv0ZG924X
aKN0gpAfPaQNuwku+3FSOD8/qhvB/8nOWMuO5wC0CVg9fXfP3sKgsvX8fe4Ho1Uv
J6idhzhWjZhCaoL3t/sioV1Af8mkgWjN4FeNKEGv5Vd9ax3oFTqT8itapNOHUiQ5
Yqe4cu3GnaBb/2QAqwEAK70Pza+7rguFSyX48MgBtj048j1chz5y8FGa+52ZenQw
LvNtUOjk+HoyuTrsg3M=
=+TrN
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2019-01-25-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Live from LCA pull, some fixes all over the place,

  i915:
   - GVT workload destruction fix

  msm:
   - A6XX opp-level fix
   - build fixes
   - hard-coded irq removal

  amdgpu:
   - overclocking fix
   - hybrid gfx fix

  sun4i:
   - fix TMDS clock usage"

* tag 'drm-fixes-2019-01-25-1' of git://anongit.freedesktop.org/drm/drm:
  drm/msm: avoid unused function warning
  drm/msm: Add __printf verification
  drm/msm: Fix A6XX support for opp-level
  drm/msm: honor GPU_READONLY flag
  drm/msm: drop interrupt-names
  drm/msm/gpu: Remove hardcoded interrupt name
  drm/msm/gpu: fix building without debugfs
  drm/i915/execlists: Mark up priority boost on preemption
  drm/i915/gvt: release shadow batch buffer and wa_ctx before destroy one workload
  drm/sun4i: hdmi: Fix usage of TMDS clock
  drm/amd/powerplay: OD setting fix on Vega10
  drm/amdgpu: Add APTX quirk for Lenovo laptop
  drm/msm: Unblock writer if reader closes file
commit d73aba1115
@@ -27,7 +27,6 @@ Example:
 		reg = <0x04300000 0x20000>;
 		reg-names = "kgsl_3d0_reg_memory";
 		interrupts = <GIC_SPI 80 0>;
-		interrupt-names = "kgsl_3d0_irq";
 		clock-names =
 		    "core",
 		    "iface",

@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };

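For orientation, here is a minimal standalone sketch of how a sentinel-terminated PCI subsystem-ID quirk table like the one above is typically walked. The helper and flag names are hypothetical, not amdgpu's actual lookup code:

#include <stdint.h>
#include <stdio.h>

#define PX_QUIRK_FORCE_ATPX	(1U << 0)

struct px_quirk {
	uint16_t vendor, device;		/* PCI vendor/device ID */
	uint16_t subsys_vendor, subsys_device;	/* board-specific subsystem ID */
	uint32_t flags;
};

/* Table ends with an all-zero sentinel, like amdgpu_px_quirk_list. */
static const struct px_quirk quirks[] = {
	{ 0x1002, 0x6900, 0x1028, 0x0812, PX_QUIRK_FORCE_ATPX },
	{ 0x1002, 0x6900, 0x17AA, 0x3806, PX_QUIRK_FORCE_ATPX },	/* the Lenovo entry added above */
	{ 0, 0, 0, 0, 0 },
};

static uint32_t px_quirk_flags(uint16_t ven, uint16_t dev,
			       uint16_t sven, uint16_t sdev)
{
	const struct px_quirk *q;

	for (q = quirks; q->vendor != 0; q++)
		if (q->vendor == ven && q->device == dev &&
		    q->subsys_vendor == sven && q->subsys_device == sdev)
			return q->flags;
	return 0;
}

int main(void)
{
	printf("flags: %#x\n", px_quirk_flags(0x1002, 0x6900, 0x17AA, 0x3806));
	return 0;
}
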
@@ -32,6 +32,7 @@
 #include "vega10_pptable.h"

 #define NUM_DSPCLK_LEVELS 8
+#define VEGA10_ENGINECLOCK_HARDMAX 198000

 static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
 		enum phm_platform_caps cap)

@@ -258,7 +259,26 @@ static int init_over_drive_limits(
 		struct pp_hwmgr *hwmgr,
 		const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
 {
-	hwmgr->platform_descriptor.overdriveLimit.engineClock =
-			le32_to_cpu(powerplay_table->ulMaxODEngineClock);
+	const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
+			(const ATOM_Vega10_GFXCLK_Dependency_Table *)
+			(((unsigned long) powerplay_table) +
+			le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
+	bool is_acg_enabled = false;
+	ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
+
+	if (gfxclk_dep_table->ucRevId == 1) {
+		patom_record_v2 =
+			(ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
+		is_acg_enabled =
+			(bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable;
+	}
+
+	if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX &&
+		!is_acg_enabled)
+		hwmgr->platform_descriptor.overdriveLimit.engineClock =
+			VEGA10_ENGINECLOCK_HARDMAX;
+	else
+		hwmgr->platform_descriptor.overdriveLimit.engineClock =
+			le32_to_cpu(powerplay_table->ulMaxODEngineClock);
 	hwmgr->platform_descriptor.overdriveLimit.memoryClock =
 		le32_to_cpu(powerplay_table->ulMaxODMemoryClock);

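A standalone restatement of the new clamp logic follows. The 10 kHz unit, and therefore the reading of 198000 as a 1.98 GHz cap, is my interpretation of the usual PowerPlay clock convention rather than something the hunk itself states:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VEGA10_ENGINECLOCK_HARDMAX 198000	/* presumably 10 kHz units -> 1980 MHz */

/* Cap the BIOS-provided overdrive limit unless ACG is enabled,
 * mirroring the branch added in init_over_drive_limits() above. */
static uint32_t od_engine_clock_limit(uint32_t max_od_engine_clock, bool acg_enabled)
{
	if (max_od_engine_clock > VEGA10_ENGINECLOCK_HARDMAX && !acg_enabled)
		return VEGA10_ENGINECLOCK_HARDMAX;
	return max_od_engine_clock;
}

int main(void)
{
	/* e.g. a table asking for 220000 (2.2 GHz) without ACG gets clamped */
	printf("%u\n", od_engine_clock_limit(220000, false));	/* 198000 */
	printf("%u\n", od_engine_clock_limit(220000, true));	/* 220000 */
	return 0;
}
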
@@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)

 	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+
+	wa_ctx->indirect_ctx.obj = NULL;
+	wa_ctx->indirect_ctx.shadow_va = NULL;
 }

 static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,

@@ -911,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

 	list_del_init(&workload->list);

-	if (!workload->status) {
-		release_shadow_batch_buffer(workload);
-		release_shadow_wa_ctx(&workload->wa_ctx);
-	}
-
 	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
 		/* if workload->status is not successful means HW GPU
 		 * has occurred GPU hang or something wrong with i915/GVT,

@@ -1283,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu_submission *s = &workload->vgpu->submission;

+	release_shadow_batch_buffer(workload);
+	release_shadow_wa_ctx(&workload->wa_ctx);
+
 	if (workload->shadow_mm)
 		intel_vgpu_mm_put(workload->shadow_mm);

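The destroy-path change relies on release_shadow_wa_ctx() being safe to call more than once: it drops its references and then clears the pointers, so a later call (presumably behind an early NULL/empty check in the function, not visible in this hunk) becomes a no-op. A generic, self-contained sketch of that idempotent-release pattern, with hypothetical names rather than the i915 code:

#include <stdio.h>
#include <stdlib.h>

struct shadow_ctx {
	void *obj;		/* stand-in for the pinned shadow object */
	void *shadow_va;
};

/* Safe to call more than once: frees on the first call, no-op afterwards. */
static void release_shadow(struct shadow_ctx *ctx)
{
	if (!ctx->obj)
		return;

	free(ctx->obj);
	ctx->obj = NULL;
	ctx->shadow_va = NULL;
}

int main(void)
{
	struct shadow_ctx ctx = { .obj = malloc(64), .shadow_va = NULL };

	release_shadow(&ctx);	/* e.g. on the completion path */
	release_shadow(&ctx);	/* and again on the destroy path: harmless */
	printf("released cleanly\n");
	return 0;
}
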
@@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 		 */
 		if (!(prio & I915_PRIORITY_NEWCLIENT)) {
 			prio |= I915_PRIORITY_NEWCLIENT;
+			active->sched.attr.priority = prio;
 			list_move_tail(&active->sched.link,
 				       i915_sched_lookup_priolist(engine, prio));
 		}

@@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		int i;

 		priolist_for_each_request_consume(rq, rn, p, i) {
+			GEM_BUG_ON(last &&
+				   need_preempt(engine, last, rq_prio(rq)));
+
 			/*
 			 * Can we combine this request with the current port?
 			 * It has to be the same context/ringbuffer and not

@@ -944,7 +944,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
 	np = dev_pm_opp_get_of_node(opp);

 	if (np) {
-		of_property_read_u32(np, "qcom,level", &val);
+		of_property_read_u32(np, "opp-level", &val);
 		of_node_put(np);
 	}

@@ -765,7 +765,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	adreno_gpu->rev = config->rev;

 	adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
-	adreno_gpu_config.irqname = "kgsl_3d0_irq";

 	adreno_gpu_config.va_start = SZ_16M;
 	adreno_gpu_config.va_end = 0xffffffff;

@@ -365,19 +365,6 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
 			&pdpu->pipe_qos_cfg);
 }

-static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
-{
-	struct dpu_plane *pdpu = to_dpu_plane(plane);
-	struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
-
-	if (!pdpu->is_rt_pipe)
-		return;
-
-	pm_runtime_get_sync(&dpu_kms->pdev->dev);
-	_dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
-	pm_runtime_put_sync(&dpu_kms->pdev->dev);
-}
-
 /**
  * _dpu_plane_set_ot_limit - set OT limit for the given plane
  * @plane: Pointer to drm plane

@@ -1248,6 +1235,19 @@ static void dpu_plane_reset(struct drm_plane *plane)
 }

 #ifdef CONFIG_DEBUG_FS
+static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+	struct dpu_plane *pdpu = to_dpu_plane(plane);
+	struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+
+	if (!pdpu->is_rt_pipe)
+		return;
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	_dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
 static ssize_t _dpu_plane_danger_read(struct file *file,
 			char __user *buff, size_t count, loff_t *ppos)
 {

@@ -250,7 +250,8 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma);
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+		struct msm_gem_vma *vma, int prot,
+		struct sg_table *sgt, int npages);
 void msm_gem_close_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma);

@@ -333,6 +334,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);

+__printf(2, 3)
 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);

 int msm_framebuffer_prepare(struct drm_framebuffer *fb,

@@ -396,12 +398,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
 int msm_debugfs_late_init(struct drm_device *dev);
 int msm_rd_debugfs_init(struct drm_minor *minor);
 void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
+__printf(3, 4)
 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...);
 int msm_perf_debugfs_init(struct drm_minor *minor);
 void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
 #else
 static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
+__printf(3, 4)
 static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...) {}
 static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}

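The __printf(n, m) annotation maps to the compiler's printf-format attribute, so adding it lets GCC/Clang check the format string against the variadic arguments of these helpers at compile time. A minimal userspace sketch of the same idea; the macro here is written out by hand for illustration, not lifted from the kernel headers:

#include <stdarg.h>
#include <stdio.h>

#define __printf(a, b) __attribute__((format(printf, a, b)))

/* Argument 2 is the format string, arguments from 3 on are checked against it. */
__printf(2, 3)
static void object_set_name(void *obj, const char *fmt, ...)
{
	va_list ap;

	(void)obj;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	object_set_name(NULL, "ring%d\n", 0);		/* fine */
	/* object_set_name(NULL, "ring%d\n", "0"); would now warn at compile time */
	return 0;
}
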
@@ -391,6 +391,10 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	struct page **pages;
+	int prot = IOMMU_READ;
+
+	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
+		prot |= IOMMU_WRITE;

 	WARN_ON(!mutex_is_locked(&msm_obj->lock));

@@ -405,8 +409,8 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);

-	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-			obj->size >> PAGE_SHIFT);
+	return msm_gem_map_vma(aspace, vma, prot,
+			msm_obj->sgt, obj->size >> PAGE_SHIFT);
 }

 /* get iova and pin it. Should have a matching put */

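The pattern in the hunk above is a straightforward flag translation: start from read-only and add write permission only when the buffer object was not marked GPU-readonly. A standalone sketch with made-up flag values; the real IOMMU_* and MSM_BO_* constants come from kernel headers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's IOMMU_* and MSM_BO_* flags. */
#define IOMMU_READ	(1U << 0)
#define IOMMU_WRITE	(1U << 1)
#define BO_GPU_READONLY	(1U << 2)

static uint32_t bo_iommu_prot(uint32_t bo_flags)
{
	uint32_t prot = IOMMU_READ;

	if (!(bo_flags & BO_GPU_READONLY))
		prot |= IOMMU_WRITE;	/* writable only when not marked readonly */

	return prot;
}

int main(void)
{
	printf("%#x\n", bo_iommu_prot(0));			/* READ | WRITE */
	printf("%#x\n", bo_iommu_prot(BO_GPU_READONLY));	/* READ only */
	return 0;
}
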
@@ -68,7 +68,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,

 int
 msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+		struct msm_gem_vma *vma, int prot,
+		struct sg_table *sgt, int npages)
 {
 	unsigned size = npages << PAGE_SHIFT;
 	int ret = 0;

@@ -86,7 +87,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,

 	if (aspace->mmu)
 		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
-				size, IOMMU_READ | IOMMU_WRITE);
+				size, prot);

 	if (ret)
 		vma->mapped = false;

@@ -900,7 +900,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	}

 	/* Get Interrupt: */
-	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
+	gpu->irq = platform_get_irq(pdev, 0);
 	if (gpu->irq < 0) {
 		ret = gpu->irq;
 		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);

@@ -31,7 +31,6 @@ struct msm_gpu_state;

 struct msm_gpu_config {
 	const char *ioname;
-	const char *irqname;
 	uint64_t va_start;
 	uint64_t va_end;
 	unsigned int nr_rings;

@@ -63,7 +62,7 @@ struct msm_gpu_funcs {
 	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
 	void (*recover)(struct msm_gpu *gpu);
 	void (*destroy)(struct msm_gpu *gpu);
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
 	/* show GPU status in debugfs: */
 	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
 			struct drm_printer *p);

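The header change widens the guard because the ->show() hook is not purely a debugfs facility; my reading (the hunk alone does not say so) is that the GPU coredump path also uses it, so it must exist whenever either option is enabled. A tiny self-contained illustration of the guard pattern, with made-up config macros:

#include <stdio.h>

/* Pretend coredump support is enabled but debugfs is not. */
#define CONFIG_DEV_COREDUMP 1
/* #define CONFIG_DEBUG_FS 1 */

#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
/* With a plain "#ifdef CONFIG_DEBUG_FS" guard this hook would vanish
 * in this configuration, breaking its remaining (coredump) user. */
static void gpu_show_state(const char *state)
{
	printf("GPU state: %s\n", state);
}
#endif

int main(void)
{
#if defined(CONFIG_DEV_COREDUMP)
	gpu_show_state("idle");		/* the coredump path still needs the hook */
#endif
	return 0;
}
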
@@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
 		char *fptr = &fifo->buf[fifo->head];
 		int n;

-		wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+		wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
+		if (!rd->open)
+			return;

 		/* Note that smp_load_acquire() is not strictly required
 		 * as CIRC_SPACE_TO_END() does not access the tail more

@@ -213,7 +215,10 @@ static int rd_open(struct inode *inode, struct file *file)
 static int rd_release(struct inode *inode, struct file *file)
 {
 	struct msm_rd_state *rd = inode->i_private;
+
 	rd->open = false;
+	wake_up_all(&rd->fifo_event);
+
 	return 0;
 }

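The shape of this fix is a classic one: the blocked writer must also wake up when the reader goes away, so the release path flips a flag and wakes all waiters. A userspace analogue using pthreads (hypothetical names, build with cc -pthread), not the kernel wait_event machinery itself:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t fifo_event = PTHREAD_COND_INITIALIZER;
static bool reader_open = true;
static int space;			/* free slots in the fifo, 0 here */

static void rd_write_sim(void)
{
	pthread_mutex_lock(&lock);
	while (space == 0 && reader_open)	/* was: wait for space only */
		pthread_cond_wait(&fifo_event, &lock);
	if (!reader_open) {
		pthread_mutex_unlock(&lock);
		return;				/* reader is gone, drop the data */
	}
	space--;				/* "write" one record */
	pthread_mutex_unlock(&lock);
}

static void *rd_release_sim(void *arg)
{
	(void)arg;
	sleep(1);
	pthread_mutex_lock(&lock);
	reader_open = false;
	pthread_cond_broadcast(&fifo_event);	/* unblock any stuck writer */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, rd_release_sim, NULL);
	rd_write_sim();				/* would block forever without the wakeup */
	pthread_join(t, NULL);
	printf("writer unblocked after reader closed\n");
	return 0;
}
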
@@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
 	val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
 	val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
 	writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
+
+	clk_disable_unprepare(hdmi->tmds_clk);
 }

 static void sun4i_hdmi_enable(struct drm_encoder *encoder)

@@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder)

 	DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");

+	clk_prepare_enable(hdmi->tmds_clk);
+
 	sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
 	val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
 	val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);