Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "I really need to get back to sending these on my Friday, instead of
  my Monday morning, but nothing too amazing in here: a few amdkfd
  fixes, a few radeon fixes, i915 fixes, one tegra fix and one core
  fix"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm: Zero out invalid vblank timestamp in drm_update_vblank_count.
  drm/tegra: Don't use vblank_disable_immediate on incapable driver.
  drm/radeon: stop trying to suspend UVD sessions
  drm/radeon: more strictly validate the UVD codec
  drm/radeon: make UVD handle checking more strict
  drm/radeon: make VCE handle check more strict
  drm/radeon: fix userptr lockup
  drm/radeon: fix userptr BO unpin bug v3
  drm/amdkfd: Initialize sdma vm when creating sdma queue
  drm/amdkfd: Don't report local memory size
  drm/amdkfd: allow unregister process with queues
  drm/i915: Drop PIPE-A quirk for 945GSE HP Mini
  drm/i915: Sink rate read should be saved in deca-kHz
  drm/i915/dp: there is no audio on port A
  drm/i915: Add missing MacBook Pro models with dual channel LVDS
  drm/i915: Assume dual channel LVDS if pixel clock necessitates it
  drm/radeon: don't setup audio on asics that don't support it
  drm/radeon: disable semaphores for UVD V1 (v2)
commit 01d07351f2
@@ -430,9 +430,10 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
 
        BUG_ON(!dqm || !qpd);
 
-       BUG_ON(!list_empty(&qpd->queues_list));
-       pr_debug("In func %s\n", __func__);
 
+       pr_debug("kfd: In func %s\n", __func__);
+       pr_debug("qpd->queues_list is %s\n",
+                       list_empty(&qpd->queues_list) ? "empty" : "not empty");
 
        retval = 0;
        mutex_lock(&dqm->lock);
@@ -882,6 +883,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
                return -ENOMEM;
        }
 
+       init_sdma_vm(dqm, q, qpd);
+
        retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
                                &q->gart_mqd_addr, &q->properties);
        if (retval != 0)
@@ -728,9 +728,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
                        dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(
                                        dev->gpu->kgd));
 
                sysfs_show_64bit_prop(buffer, "local_mem_size",
-                               dev->gpu->kfd2kgd->get_vmem_size(
-                                               dev->gpu->kgd));
+                               (unsigned long long int) 0);
+
                sysfs_show_32bit_prop(buffer, "fw_version",
                        dev->gpu->kfd2kgd->get_fw_version(
@@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 
        /* Reinitialize corresponding vblank timestamp if high-precision query
         * available. Skip this step if query unsupported or failed. Will
-        * reinitialize delayed at next vblank interrupt in that case.
+        * reinitialize delayed at next vblank interrupt in that case and
+        * assign 0 for now, to mark the vblanktimestamp as invalid.
         */
-       if (rc) {
-               tslot = atomic_read(&vblank->count) + diff;
-               vblanktimestamp(dev, crtc, tslot) = t_vblank;
-       }
+       tslot = atomic_read(&vblank->count) + diff;
+       vblanktimestamp(dev, crtc, tslot) = rc ? t_vblank : (struct timeval) {0, 0};
 
        smp_mb__before_atomic();
        atomic_add(diff, &vblank->count);
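A note on the construct used above: (struct timeval) {0, 0} is a C99 compound literal whose members are all zero, which is what marks the slot's timestamp as invalid until the next successful high-precision query. A minimal standalone illustration in plain userspace C (not drm code):

    #include <stdio.h>
    #include <sys/time.h>

    int main(void)
    {
            /* Compound literal: every field of the struct is zero-initialized. */
            struct timeval invalid = (struct timeval) {0, 0};

            printf("%ld.%06ld\n", (long)invalid.tv_sec, (long)invalid.tv_usec);
            return 0;
    }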
@@ -13635,9 +13635,6 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
 };
 
 static struct intel_quirk intel_quirks[] = {
-       /* HP Mini needs pipe A force quirk (LP: #322104) */
-       { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
-
        /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
        { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
 
@@ -1348,7 +1348,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
        pipe_config->has_dp_encoder = true;
        pipe_config->has_drrs = false;
-       pipe_config->has_audio = intel_dp->has_audio;
+       pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
 
        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -2211,8 +2211,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        int dotclock;
 
        tmp = I915_READ(intel_dp->output_reg);
-       if (tmp & DP_AUDIO_OUTPUT_ENABLE)
-               pipe_config->has_audio = true;
+
+       pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
 
        if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
                if (tmp & DP_SYNC_HS_HIGH)
@@ -3812,7 +3812,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
                        if (val == 0)
                                break;
 
-                       intel_dp->sink_rates[i] = val * 200;
+                       /* Value read is in kHz while drm clock is saved in deca-kHz */
+                       intel_dp->sink_rates[i] = (val * 200) / 10;
                }
                intel_dp->num_sink_rates = i;
        }
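For context on the conversion above: the DPCD supported-link-rate entries are in units of 200 kHz, while the sink_rates[] table is kept in deca-kHz (10 kHz) units, so the stored value is val * 200 / 10. A standalone sketch with a hypothetical helper name (not an i915 function):

    /* Hypothetical helper: convert a raw DPCD link-rate entry
     * (200 kHz units) to deca-kHz (10 kHz units).
     * Example: 13500 -> 270000, i.e. the 2.7 GHz HBR link rate.
     */
    static inline int dpcd_rate_to_deca_khz(int val)
    {
            return (val * 200) / 10;
    }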
@@ -813,12 +813,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
 static const struct dmi_system_id intel_dual_link_lvds[] = {
        {
                .callback = intel_dual_link_lvds_callback,
-               .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+               .ident = "Apple MacBook Pro 15\" (2010)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
+               },
+       },
+       {
+               .callback = intel_dual_link_lvds_callback,
+               .ident = "Apple MacBook Pro 15\" (2011)",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
                },
        },
+       {
+               .callback = intel_dual_link_lvds_callback,
+               .ident = "Apple MacBook Pro 15\" (2012)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
+               },
+       },
        { }     /* terminating entry */
 };
 
@@ -848,6 +864,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
        if (i915.lvds_channel_mode > 0)
                return i915.lvds_channel_mode == 2;
 
+       /* single channel LVDS is limited to 112 MHz */
+       if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
+           > 112999)
+               return true;
+
        if (dmi_check_system(intel_dual_link_lvds))
                return true;
 
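For context on the 112999 threshold above: a single LVDS channel tops out around 112 MHz of pixel clock, and drm mode clocks are stored in kHz, so any fixed panel mode above that value can only be driven dual-link. A standalone sketch with a hypothetical helper name (not i915 code):

    #include <stdbool.h>

    /* Hypothetical helper mirroring the check added above; pixel_clock_khz
     * uses the same kHz units as drm_display_mode->clock.
     * Example: a 1920x1200@60 panel (~154000 kHz) needs dual link.
     */
    static bool lvds_needs_dual_link(int pixel_clock_khz)
    {
            return pixel_clock_khz > 112999;
    }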
@@ -1111,6 +1132,8 @@ void intel_lvds_init(struct drm_device *dev)
 out:
        mutex_unlock(&dev->mode_config.mutex);
 
+       intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+
        lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
        DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
                      lvds_encoder->is_dual_link ? "dual" : "single");
@@ -1125,7 +1148,6 @@ void intel_lvds_init(struct drm_device *dev)
        }
        drm_connector_register(connector);
 
-       intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_panel_setup_backlight(connector, INVALID_PIPE);
 
        return;
@@ -1673,7 +1673,6 @@ struct radeon_uvd {
        struct radeon_bo *vcpu_bo;
        void *cpu_addr;
        uint64_t gpu_addr;
-       void *saved_bo;
        atomic_t handles[RADEON_MAX_UVD_HANDLES];
        struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
        unsigned img_size[RADEON_MAX_UVD_HANDLES];
@@ -1202,7 +1202,7 @@ static struct radeon_asic rs780_asic = {
 static struct radeon_asic_ring rv770_uvd_ring = {
        .ib_execute = &uvd_v1_0_ib_execute,
        .emit_fence = &uvd_v2_2_fence_emit,
-       .emit_semaphore = &uvd_v1_0_semaphore_emit,
+       .emit_semaphore = &uvd_v2_2_semaphore_emit,
        .cs_parse = &radeon_uvd_cs_parse,
        .ring_test = &uvd_v1_0_ring_test,
        .ib_test = &uvd_v1_0_ib_test,
@@ -949,6 +949,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int uvd_v2_2_resume(struct radeon_device *rdev);
 void uvd_v2_2_fence_emit(struct radeon_device *rdev,
                         struct radeon_fence *fence);
+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
+                            struct radeon_ring *ring,
+                            struct radeon_semaphore *semaphore,
+                            bool emit_wait);
 
 /* uvd v3.1 */
 bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
@@ -464,6 +464,10 @@ void radeon_audio_detect(struct drm_connector *connector,
                return;
 
+       rdev = connector->encoder->dev->dev_private;
+
+       if (!radeon_audio_chipset_supported(rdev))
+               return;
 
        radeon_encoder = to_radeon_encoder(connector->encoder);
        dig = radeon_encoder->enc_priv;
 
@@ -142,6 +142,9 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 
        list_for_each_entry(bo, &node->bos, mn_list) {
 
+               if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+                       continue;
+
                r = radeon_bo_reserve(bo, true);
                if (r) {
                        DRM_ERROR("(%ld) failed to reserve user bo\n", r);
@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 {
        struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
        struct radeon_ttm_tt *gtt = (void *)ttm;
-       struct scatterlist *sg;
-       int i;
+       struct sg_page_iter sg_iter;
 
        int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
        /* free the sg table and pages again */
        dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
 
-       for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
-               struct page *page = sg_page(sg);
-
+       for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
+               struct page *page = sg_page_iter_page(&sg_iter);
                if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
                        set_page_dirty(page);
 
@@ -204,28 +204,32 @@ void radeon_uvd_fini(struct radeon_device *rdev)
 
 int radeon_uvd_suspend(struct radeon_device *rdev)
 {
-       unsigned size;
-       void *ptr;
-       int i;
+       int i, r;
 
        if (rdev->uvd.vcpu_bo == NULL)
                return 0;
 
-       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
-               if (atomic_read(&rdev->uvd.handles[i]))
-                       break;
+       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+               uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+               if (handle != 0) {
+                       struct radeon_fence *fence;
 
-       if (i == RADEON_MAX_UVD_HANDLES)
-               return 0;
+                       radeon_uvd_note_usage(rdev);
 
-       size = radeon_bo_size(rdev->uvd.vcpu_bo);
-       size -= rdev->uvd_fw->size;
+                       r = radeon_uvd_get_destroy_msg(rdev,
+                               R600_RING_TYPE_UVD_INDEX, handle, &fence);
+                       if (r) {
+                               DRM_ERROR("Error destroying UVD (%d)!\n", r);
+                               continue;
+                       }
 
-       ptr = rdev->uvd.cpu_addr;
-       ptr += rdev->uvd_fw->size;
+                       radeon_fence_wait(fence, false);
+                       radeon_fence_unref(&fence);
 
-       rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-       memcpy(rdev->uvd.saved_bo, ptr, size);
+                       rdev->uvd.filp[i] = NULL;
+                       atomic_set(&rdev->uvd.handles[i], 0);
+               }
+       }
 
        return 0;
 }
@@ -246,12 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
        ptr = rdev->uvd.cpu_addr;
        ptr += rdev->uvd_fw->size;
 
-       if (rdev->uvd.saved_bo != NULL) {
-               memcpy(ptr, rdev->uvd.saved_bo, size);
-               kfree(rdev->uvd.saved_bo);
-               rdev->uvd.saved_bo = NULL;
-       } else
-               memset(ptr, 0, size);
+       memset(ptr, 0, size);
 
        return 0;
 }
@@ -396,6 +395,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
        return 0;
 }
 
+static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
+                                    unsigned stream_type)
+{
+       switch (stream_type) {
+       case 0: /* H264 */
+       case 1: /* VC1 */
+               /* always supported */
+               return 0;
+
+       case 3: /* MPEG2 */
+       case 4: /* MPEG4 */
+               /* only since UVD 3 */
+               if (p->rdev->family >= CHIP_PALM)
+                       return 0;
+
+               /* fall through */
+       default:
+               DRM_ERROR("UVD codec not supported by hardware %d!\n",
+                         stream_type);
+               return -EINVAL;
+       }
+}
+
 static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                             unsigned offset, unsigned buf_sizes[])
 {
@@ -436,50 +458,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                return -EINVAL;
        }
 
-       if (msg_type == 1) {
-               /* it's a decode msg, calc buffer sizes */
-               r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
-               /* calc image size (width * height) */
-               img_size = msg[6] * msg[7];
+       switch (msg_type) {
+       case 0:
+               /* it's a create msg, calc image size (width * height) */
+               img_size = msg[7] * msg[8];
+
+               r = radeon_uvd_validate_codec(p, msg[4]);
                radeon_bo_kunmap(bo);
                if (r)
                        return r;
 
-       } else if (msg_type == 2) {
+               /* try to alloc a new handle */
+               for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+                       if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
+                               DRM_ERROR("Handle 0x%x already in use!\n", handle);
+                               return -EINVAL;
+                       }
+
+                       if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
+                               p->rdev->uvd.filp[i] = p->filp;
+                               p->rdev->uvd.img_size[i] = img_size;
+                               return 0;
+                       }
+               }
+
+               DRM_ERROR("No more free UVD handles!\n");
+               return -EINVAL;
+
+       case 1:
+               /* it's a decode msg, validate codec and calc buffer sizes */
+               r = radeon_uvd_validate_codec(p, msg[4]);
+               if (!r)
+                       r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+               radeon_bo_kunmap(bo);
+               if (r)
+                       return r;
+
+               /* validate the handle */
+               for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+                       if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
+                               if (p->rdev->uvd.filp[i] != p->filp) {
+                                       DRM_ERROR("UVD handle collision detected!\n");
+                                       return -EINVAL;
+                               }
+                               return 0;
+                       }
+               }
+
+               DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+               return -ENOENT;
+
+       case 2:
                /* it's a destroy msg, free the handle */
                for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
                        atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
                radeon_bo_kunmap(bo);
                return 0;
-       } else {
-               /* it's a create msg, calc image size (width * height) */
-               img_size = msg[7] * msg[8];
-               radeon_bo_kunmap(bo);
 
-               if (msg_type != 0) {
-                       DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
-                       return -EINVAL;
-               }
+       default:
 
-               /* it's a create msg, no special handling needed */
+               DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+               return -EINVAL;
        }
 
-       /* create or decode, validate the handle */
-       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-               if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
-                       return 0;
-       }
-
-       /* handle not found try to alloc a new one */
-       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-               if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
-                       p->rdev->uvd.filp[i] = p->filp;
-                       p->rdev->uvd.img_size[i] = img_size;
-                       return 0;
-               }
-       }
-
-       DRM_ERROR("No more free UVD handles!\n");
+       BUG();
        return -EINVAL;
 }
@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
  *
  * @p: parser context
  * @handle: handle to validate
+ * @allocated: allocated a new handle?
  *
  * Validates the handle and return the found session index or -EINVAL
  * we we don't have another free session index.
  */
-int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
+                                     uint32_t handle, bool *allocated)
 {
        unsigned i;
 
+       *allocated = false;
+
        /* validate the handle */
        for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-               if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+               if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
+                       if (p->rdev->vce.filp[i] != p->filp) {
+                               DRM_ERROR("VCE handle collision detected!\n");
+                               return -EINVAL;
+                       }
                        return i;
+               }
        }
 
        /* handle not found try to alloc a new one */
@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
                if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
                        p->rdev->vce.filp[i] = p->filp;
                        p->rdev->vce.img_size[i] = 0;
+                       *allocated = true;
                        return i;
                }
        }
@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
 int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 {
        int session_idx = -1;
-       bool destroyed = false;
+       bool destroyed = false, created = false, allocated = false;
        uint32_t tmp, handle = 0;
        uint32_t *size = &tmp;
-       int i, r;
+       int i, r = 0;
 
        while (p->idx < p->chunk_ib->length_dw) {
                uint32_t len = radeon_get_ib_value(p, p->idx);
@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 
                if ((len < 8) || (len & 3)) {
                        DRM_ERROR("invalid VCE command length (%d)!\n", len);
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto out;
                }
 
                if (destroyed) {
                        DRM_ERROR("No other command allowed after destroy!\n");
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto out;
                }
 
                switch (cmd) {
                case 0x00000001: // session
                        handle = radeon_get_ib_value(p, p->idx + 2);
-                       session_idx = radeon_vce_validate_handle(p, handle);
+                       session_idx = radeon_vce_validate_handle(p, handle,
+                                                                &allocated);
                        if (session_idx < 0)
                                return session_idx;
                        size = &p->rdev->vce.img_size[session_idx];
@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        break;
 
                case 0x01000001: // create
+                       created = true;
+                       if (!allocated) {
+                               DRM_ERROR("Handle already in use!\n");
+                               r = -EINVAL;
+                               goto out;
+                       }
+
                        *size = radeon_get_ib_value(p, p->idx + 8) *
                                radeon_get_ib_value(p, p->idx + 10) *
                                8 * 3 / 2;
@@ -578,12 +598,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
                                                *size);
                        if (r)
-                               return r;
+                               goto out;
 
                        r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
                                                *size / 3);
                        if (r)
-                               return r;
+                               goto out;
                        break;
 
                case 0x02000001: // destroy
@@ -594,7 +614,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
                                                *size * 2);
                        if (r)
-                               return r;
+                               goto out;
                        break;
 
                case 0x05000004: // video bitstream buffer
@@ -602,36 +622,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
                                                tmp);
                        if (r)
-                               return r;
+                               goto out;
                        break;
 
                case 0x05000005: // feedback buffer
                        r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
                                                4096);
                        if (r)
-                               return r;
+                               goto out;
                        break;
 
                default:
                        DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto out;
                }
 
                if (session_idx == -1) {
                        DRM_ERROR("no session command at start of IB\n");
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto out;
                }
 
                p->idx += len / 4;
        }
 
-       if (destroyed) {
-               /* IB contains a destroy msg, free the handle */
+       if (allocated && !created) {
+               DRM_ERROR("New session without create command!\n");
+               r = -ENOENT;
+       }
+
+out:
+       if ((!r && destroyed) || (r && allocated)) {
+               /*
+                * IB contains a destroy msg or we have allocated an
+                * handle and got an error, anyway free the handle
+                */
                for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
                        atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
        }
 
-       return 0;
+       return r;
 }
 
 /**
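The parser change above replaces early returns with a single out: label so that a handle allocated earlier in the IB can always be released on error. A standalone sketch of that cleanup pattern, with hypothetical names (not radeon code):

    #include <stdbool.h>

    /* Hypothetical stand-ins for the real parser state and helpers. */
    struct parser { bool handle_allocated; };

    static int acquire_handle(struct parser *p)
    {
            p->handle_allocated = true;
            return 0;
    }

    static void release_handle(struct parser *p)
    {
            p->handle_allocated = false;
    }

    static int validate_commands(struct parser *p)
    {
            (void)p;
            return -22;     /* pretend a command failed (-EINVAL) */
    }

    /* Every failure sets r and jumps to out, where the handle is released
     * exactly once if (and only if) it was allocated and an error occurred.
     */
    static int parse_ib(struct parser *p)
    {
            int r;

            r = acquire_handle(p);
            if (r)
                    goto out;

            r = validate_commands(p);
            if (r)
                    goto out;

    out:
            if (r && p->handle_allocated)
                    release_handle(p);
            return r;
    }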
@@ -989,6 +989,9 @@
         ((n) & 0x3FFF) << 16)
 
 /* UVD */
+#define UVD_SEMA_ADDR_LOW      0xef00
+#define UVD_SEMA_ADDR_HIGH     0xef04
+#define UVD_SEMA_CMD           0xef08
 #define UVD_GPCOM_VCPU_CMD     0xef0c
 #define UVD_GPCOM_VCPU_DATA0   0xef10
 #define UVD_GPCOM_VCPU_DATA1   0xef14
@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
                             struct radeon_semaphore *semaphore,
                             bool emit_wait)
 {
-       uint64_t addr = semaphore->gpu_addr;
-
-       radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
-       radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
-       radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
-       radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
-       radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
-       radeon_ring_write(ring, emit_wait ? 1 : 0);
-
-       return true;
+       /* disable semaphores for UVD V1 hardware */
+       return false;
 }
 
 /**
@@ -59,6 +59,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, 2);
 }
 
+/**
+ * uvd_v2_2_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
+                            struct radeon_ring *ring,
+                            struct radeon_semaphore *semaphore,
+                            bool emit_wait)
+{
+       uint64_t addr = semaphore->gpu_addr;
+
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+       radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+       radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+       radeon_ring_write(ring, emit_wait ? 1 : 0);
+
+       return true;
+}
+
 /**
  * uvd_v2_2_resume - memory controller programming
  *
@@ -173,7 +173,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
        drm->irq_enabled = true;
 
        /* syncpoints are used for full 32-bit hardware VBLANK counters */
-       drm->vblank_disable_immediate = true;
        drm->max_vblank_count = 0xffffffff;
 
        err = drm_vblank_init(drm, drm->mode_config.num_crtc);