one vc4, some i915, one radeon, and one etnaviv
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJY3gYlAAoJEAx081l5xIa+aCIP/0IaulWIRbmE4qj6A5RA9X2z
sMx0L8Ok25s+Q2bzoH4KOZqt9DJW01EllLAi1jUtZpLQ6rI4JX6PiNKs160HadkU
vH0/5m4w+PxFu/g4QapUacsUxUqrrZLkR5dmQGILTXl06Vj6mcHPeou4VpEydH/1
BfQcIC2JF9lG8Xmo02C9OnCbslXIZE+aLnjIWCQJ0Ms6yHFQc4fjAwCsIMygjyxS
KLOgK3UOcQ38cOXnunq6sCx+zrcEMO/1pNGsi/qVFzzoEZ5ZV4Awdb6NFspmfPGJ
n2JjnKZWttVYRhZnnNiUcU0a9vPCx23NZD1gDo8IK0PtOZLJTzxNlLjjdv2LEfto
FsXrfi5GQe3kdCp3g/8jiDjH4PpmmZ+n7AdKUfLM+iO0xj2K/Xd6z/PkDcqxXqam
AuQF3C4SoCtbQ9LE4zb0Ja55uPE6NiX8hIM4IJYwO2ZgALuSgOTyGY+3npewcKJE
wFgaEamd7TG2id+jcbzVZXWsFgZCD5Y9kJfnUhFDH4SOxCs6dPgPw3BMBfL1H/2b
2uzrkDxsOqNl1JHUjDoW60T3yl2YwMxId1t8qNmNESvCrw9P2SwB3aG2Ir2IFEc4
pK9WBKj1pOXH/lSRMAQCQh8Lnb1UP311kFMz8scjhVxPOL+IlyfXWukNMj5ULi78
OeYCnnBc0+wdZ9jUX26E
=Hq4M
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.11-rc5' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Seems to be quietening down, which means someone will make a liar of
  me for rc6.

  Just one vc4, one etnaviv, one radeon, a few i915 GVT fixes, and one
  normal i915 fix"

* tag 'drm-fixes-for-v4.11-rc5' of git://people.freedesktop.org/~airlied/linux:
  drm/vc4: Allocate the right amount of space for boot-time CRTC state.
  drm/etnaviv: (re-)protect fence allocation with GPU mutex
  drm/radeon: Override fpfn for all VRAM placements in radeon_evict_flags
  drm/i915: Restore marking context objects as dirty on pinning
  drm/i915/gvt: Use force single submit flag to distinguish gvt request from i915 request
  drm/i915/gvt: set shadow entry to scratch page while p2m failed
  drm/i915/gvt: Fix guest fail to read EDID leading to black guest console issue.
  drm/i915/gvt: fix wrong offset when loading RCS mocs
  drm/i915/gvt: add write handler for mmio mbctl
  drm/i915/kvmgt: Hold struct kvm reference
commit 5559394d18
drm/etnaviv: (re-)protect fence allocation with GPU mutex
drivers/gpu/drm/etnaviv/etnaviv_gpu.c

@@ -1311,6 +1311,8 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 		goto out_pm_put;
 	}
 
+	mutex_lock(&gpu->lock);
+
 	fence = etnaviv_gpu_fence_alloc(gpu);
 	if (!fence) {
 		event_free(gpu, event);
@@ -1318,8 +1320,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 		goto out_pm_put;
 	}
 
-	mutex_lock(&gpu->lock);
-
 	gpu->event[event].fence = fence;
 	submit->fence = fence->seqno;
 	gpu->active_fence = submit->fence;
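Why the mutex_lock() moves up: the fence (and its seqno) must be allocated under the same gpu->lock that serializes submission, otherwise two submitters can obtain seqnos in the opposite order to their queue positions. A minimal userspace sketch of that discipline, using pthreads; all names here are illustrative, not the driver's:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t next_seqno;

/* Allocate a sequence number and queue the job atomically with respect
 * to other submitters: both steps happen under queue_lock, so seqno
 * order always matches queue order. */
static uint32_t submit_job(void)
{
	pthread_mutex_lock(&queue_lock);
	uint32_t seqno = ++next_seqno;	/* the "fence allocation" */
	/* ... append the job carrying this seqno to the ring ... */
	pthread_mutex_unlock(&queue_lock);
	return seqno;
}

int main(void)
{
	printf("submitted job %u\n", submit_job());
	return 0;
}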
drm/i915/gvt: Fix guest fail to read EDID leading to black guest console issue.
drivers/gpu/drm/i915/gvt/edid.c

@@ -495,7 +495,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
 			unsigned char val = edid_get_byte(vgpu);
 
 			aux_data_for_write = (val << 16);
-		}
+		} else
+			aux_data_for_write = (0xff << 16);
 	}
 	/* write the return value in AUX_CH_DATA reg which includes:
 	 * ACK of I2C_WRITE
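The new else branch matters because a guest reading past the available EDID data must still receive a well-formed AUX reply; 0xff is the conventional "floating bus / no data" answer on I2C-style links. A toy model of that rule (buffer and function names invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define EDID_SIZE 128

/* Out-of-range reads return 0xff, which is what a floating I2C bus
 * would produce, so the guest sees "no more data" instead of garbage. */
static uint8_t edid_read(const uint8_t *edid, unsigned int pos)
{
	return pos < EDID_SIZE ? edid[pos] : 0xff;
}

int main(void)
{
	uint8_t edid[EDID_SIZE] = { 0x00, 0xff, 0xff, 0xff,
				    0xff, 0xff, 0xff, 0x00 };

	printf("byte 1: 0x%02x, byte 500: 0x%02x\n",
	       edid_read(edid, 1), edid_read(edid, 500));
	return 0;
}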
drm/i915/gvt: set shadow entry to scratch page while p2m failed
drivers/gpu/drm/i915/gvt/gtt.c

@@ -1837,11 +1837,15 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		ret = gtt_entry_p2m(vgpu, &e, &m);
 		if (ret) {
 			gvt_vgpu_err("fail to translate guest gtt entry\n");
-			return ret;
+			/* guest driver may read/write the entry when partial
+			 * update the entry in this situation p2m will fail
+			 * settting the shadow entry to point to a scratch page
+			 */
+			ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
 		}
 	} else {
 		m = e;
 		m.val64 = 0;
 		ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
 	}
 
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
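Rather than failing the whole MMIO write when the guest-to-machine translation cannot be done (the guest may be mid-way through a partial entry update), the shadow entry is pointed at a scratch page so any access through it lands somewhere harmless. A rough standalone illustration of that fallback pattern; the types and the failure rule are made up:

#include <stdint.h>
#include <stdio.h>

#define SCRATCH_PFN 0x1000u	/* stand-in for the scratch page frame */

/* Toy translation: odd guest pfns "fail", purely to exercise the
 * fallback path below. */
static int translate(uint32_t guest_pfn, uint32_t *machine_pfn)
{
	if (guest_pfn & 1)
		return -1;
	*machine_pfn = guest_pfn + 0x100;
	return 0;
}

/* On failure, map the entry to the scratch page instead of propagating
 * the error: the guest keeps running and stray accesses are absorbed. */
static uint32_t shadow_entry(uint32_t guest_pfn)
{
	uint32_t m;

	if (translate(guest_pfn, &m) < 0)
		m = SCRATCH_PFN;
	return m;
}

int main(void)
{
	printf("ok: %#x, fallback: %#x\n", shadow_entry(2), shadow_entry(3));
	return 0;
}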
drm/i915/gvt: add write handler for mmio mbctl
drivers/gpu/drm/i915/gvt/handlers.c

@@ -970,6 +970,14 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	*(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
+	write_vreg(vgpu, offset, p_data, bytes);
+	return 0;
+}
+
 static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
@@ -2238,7 +2246,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x7180, D_ALL);
 	MMIO_D(0x7408, D_ALL);
 	MMIO_D(0x7c00, D_ALL);
-	MMIO_D(GEN6_MBCTL, D_ALL);
+	MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
 	MMIO_D(0x911c, D_ALL);
 	MMIO_D(0x9120, D_ALL);
 	MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
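mbctl_write() is the usual sanitize-then-store shape of an MMIO write trap: clear the bits the guest must never set (here GEN6_MBCTL_ENABLE_BOOT_FETCH), then commit the value to the virtual register; the MMIO_D to MMIO_DH change in the second hunk is what routes guest writes through the new handler. The same shape in a self-contained sketch, where the register file, offset, and mask are invented:

#include <stdint.h>
#include <stdio.h>

#define REG_MBCTL		0x0900c		/* illustrative offset */
#define MBCTL_BOOT_FETCH_BIT	(1u << 4)	/* illustrative mask */

static uint32_t vreg[0x40000 / 4];		/* toy virtual register file */

/* Trap handler for guest writes: strip forbidden bits, then commit the
 * sanitized value to the virtual register. */
static void mbctl_write(uint32_t offset, uint32_t val)
{
	val &= ~MBCTL_BOOT_FETCH_BIT;
	vreg[offset / 4] = val;
}

int main(void)
{
	mbctl_write(REG_MBCTL, 0xffffffff);
	printf("stored: %#x (boot-fetch bit cleared)\n", vreg[REG_MBCTL / 4]);
	return 0;
}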
drm/i915/kvmgt: Hold struct kvm reference
drivers/gpu/drm/i915/gvt/kvmgt.c

@@ -1326,6 +1326,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 	vgpu->handle = (unsigned long)info;
 	info->vgpu = vgpu;
 	info->kvm = kvm;
+	kvm_get_kvm(info->kvm);
 
 	kvmgt_protect_table_init(info);
 	gvt_cache_init(vgpu);
@@ -1347,6 +1348,7 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 	}
 
 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
+	kvm_put_kvm(info->kvm);
 	kvmgt_protect_table_destroy(info);
 	gvt_cache_destroy(info->vgpu);
 	vfree(info);
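This is the standard get-on-store / put-on-teardown refcount discipline: kvm_get_kvm() pins the struct kvm for exactly as long as the kvmgt_guest_info holds a pointer to it, and kvm_put_kvm() drops that reference on exit. A minimal userspace analogue; the refcount plumbing here is simplified and not thread-safe:

#include <stdio.h>
#include <stdlib.h>

struct kvm {
	int refcount;
};

static void kvm_get(struct kvm *k)
{
	k->refcount++;
}

static void kvm_put(struct kvm *k)
{
	if (--k->refcount == 0) {
		printf("freeing kvm\n");
		free(k);
	}
}

struct guest_info {
	struct kvm *kvm;
};

int main(void)
{
	struct kvm *k = calloc(1, sizeof(*k));

	if (!k)
		return 1;
	k->refcount = 1;		/* creator's reference */

	struct guest_info info = { .kvm = k };
	kvm_get(info.kvm);		/* pin while we store the pointer */

	kvm_put(k);			/* creator drops its reference... */
	kvm_put(info.kvm);		/* ...object lives until ours is dropped */
	return 0;
}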
drm/i915/gvt: fix wrong offset when loading RCS mocs
drivers/gpu/drm/i915/gvt/render.c

@@ -207,7 +207,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
 	l3_offset.reg = 0xb020;
 	for (i = 0; i < 32; i++) {
 		gen9_render_mocs_L3[i] = I915_READ(l3_offset);
-		I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset));
+		I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
 		POSTING_READ(l3_offset);
 		l3_offset.reg += 4;
 	}
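A stale-variable bug: the loop advances l3_offset each iteration but fetched the guest's value with the trap's fixed offset argument, so all 32 L3 MOCS registers were programmed from a single slot. The corrected loop shape, reduced to a toy with invented names:

#include <stdio.h>

#define L3_REGS 32

/* Save the host value and program the guest's value for each register;
 * both sides must use the advancing index, not a fixed one. */
static void load_l3(unsigned *hw, unsigned *saved, const unsigned *guest)
{
	for (int reg = 0; reg < L3_REGS; reg++) {
		saved[reg] = hw[reg];
		hw[reg] = guest[reg];	/* fixed: guest[reg], not guest[0] */
	}
}

int main(void)
{
	unsigned hw[L3_REGS] = { 0 }, saved[L3_REGS], guest[L3_REGS];

	for (int i = 0; i < L3_REGS; i++)
		guest[i] = i;
	load_l3(hw, saved, guest);
	printf("reg 31 = %u\n", hw[31]);	/* prints 31, not 0 */
	return 0;
}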
drm/i915/gvt: Use force single submit flag to distinguish gvt request from i915 request
drivers/gpu/drm/i915/gvt/scheduler.c

@@ -127,6 +127,11 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
+static inline bool is_gvt_request(struct drm_i915_gem_request *req)
+{
+	return i915_gem_context_force_single_submission(req->ctx);
+}
+
 static int shadow_context_status_change(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
@@ -137,7 +142,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	struct intel_vgpu_workload *workload =
 		scheduler->current_workload[req->engine->id];
 
-	if (unlikely(!workload))
+	if (!is_gvt_request(req) || unlikely(!workload))
 		return NOTIFY_OK;
 
 	switch (action) {
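The context-status notifier fires for every i915 request, not only GVT shadow ones, so the callback must first check that a request is actually ours (GVT marks its requests via the force-single-submission context flag) before touching per-workload state. The filter-at-the-top shape of such a shared callback, sketched standalone with invented structures:

#include <stdbool.h>
#include <stdio.h>

struct request {
	bool force_single_submission;	/* stands in for the ctx flag */
};

static bool is_gvt_request(const struct request *req)
{
	return req->force_single_submission;
}

/* Shared notifier callback: bail out early for requests we do not own
 * instead of dereferencing per-workload state that may not exist. */
static int status_change(const struct request *req)
{
	if (!is_gvt_request(req))
		return 0;		/* like NOTIFY_OK: not ours */
	printf("handling gvt request\n");
	return 0;
}

int main(void)
{
	struct request host = { .force_single_submission = false };
	struct request gvt = { .force_single_submission = true };

	status_change(&host);
	status_change(&gvt);
	return 0;
}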
drm/i915: Restore marking context objects as dirty on pinning
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -2024,6 +2024,8 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
 		ret = context_pin(ctx, flags);
 		if (ret)
 			goto error;
+
+		ce->state->obj->mm.dirty = true;
 	}
 
 	/* The kernel context is only used as a placeholder for flushing the
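Marking the context object dirty on pin tells the eviction/writeback machinery that its pages now hold CPU-written state that must be preserved; without the mark, the contents could be dropped and the context later restored from stale data. The flag-gates-writeback idea in miniature, with invented structures:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct object {
	char pages[64];		/* live contents */
	char swap[64];		/* backing copy kept across eviction */
	bool dirty;
};

/* Pin for CPU writes: anything written afterwards must survive
 * eviction, so set the dirty mark up front. */
static void pin_and_write(struct object *obj, const char *state)
{
	obj->dirty = true;
	strcpy(obj->pages, state);
}

/* Eviction writes pages back only when dirty; a missing mark means new
 * contents are silently thrown away. */
static void evict(struct object *obj)
{
	if (obj->dirty)
		memcpy(obj->swap, obj->pages, sizeof(obj->pages));
	memset(obj->pages, 0, sizeof(obj->pages));
}

int main(void)
{
	struct object obj = { .dirty = false };

	pin_and_write(&obj, "context state");
	evict(&obj);
	printf("restored: %s\n", obj.swap);
	return 0;
}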
drm/radeon: Override fpfn for all VRAM placements in radeon_evict_flags
drivers/gpu/drm/radeon/radeon_ttm.c

@@ -213,8 +213,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 		rbo->placement.num_busy_placement = 0;
 		for (i = 0; i < rbo->placement.num_placement; i++) {
 			if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
-				if (rbo->placements[0].fpfn < fpfn)
-					rbo->placements[0].fpfn = fpfn;
+				if (rbo->placements[i].fpfn < fpfn)
+					rbo->placements[i].fpfn = fpfn;
 			} else {
 				rbo->placement.busy_placement =
 					&rbo->placements[i];
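An index bug: the loop tested and clamped placements[0] on every pass instead of placements[i], so only the first VRAM placement had its fpfn (minimum allowed page frame) raised and later VRAM placements could still permit the range being evicted away from. Reduced to its essence:

#include <stdio.h>

struct placement {
	unsigned fpfn;		/* first allowed page frame number */
};

/* Raise the minimum page frame for every placement; the original bug
 * clamped slot 0 on each iteration. */
static void clamp_fpfn(struct placement *p, int n, unsigned fpfn)
{
	for (int i = 0; i < n; i++)
		if (p[i].fpfn < fpfn)
			p[i].fpfn = fpfn;	/* fixed: p[i], not p[0] */
}

int main(void)
{
	struct placement p[3] = { { 0 }, { 0 }, { 0 } };

	clamp_fpfn(p, 3, 256);
	printf("%u %u %u\n", p[0].fpfn, p[1].fpfn, p[2].fpfn);
	return 0;
}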
drm/vc4: Allocate the right amount of space for boot-time CRTC state.
drivers/gpu/drm/vc4/vc4_crtc.c

@@ -846,6 +846,17 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
 	drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
 
+static void
+vc4_crtc_reset(struct drm_crtc *crtc)
+{
+	if (crtc->state)
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+	crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
+	if (crtc->state)
+		crtc->state->crtc = crtc;
+}
+
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.destroy = vc4_crtc_destroy,
@@ -853,7 +864,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
 	.set_property = NULL,
 	.cursor_set = NULL, /* handled by drm_mode_cursor_universal */
 	.cursor_move = NULL, /* handled by drm_mode_cursor_universal */
-	.reset = drm_atomic_helper_crtc_reset,
+	.reset = vc4_crtc_reset,
 	.atomic_duplicate_state = vc4_crtc_duplicate_state,
 	.atomic_destroy_state = vc4_crtc_destroy_state,
 	.gamma_set = vc4_crtc_gamma_set,