drm/radeon: take exclusive_lock in read mode during ring tests, v5

This is needed for the next commit, because the lockup detection
will need the read lock to run.

v4 (chk): split out forced fence completion, remove unrelated changes,
          add and handle in_reset flag
v5 (agd5f): rebase fix

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 9bb39ff43e (parent eb98c70990)
Author:    Maarten Lankhorst
Date:      2014-08-27 16:45:18 -04:00
Committer: Alex Deucher

4 changed files with 27 additions and 23 deletions
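
For readers skimming the diff, here is a minimal, self-contained sketch of the locking pattern this series works towards, using a bare rw_semaphore in place of rdev->exclusive_lock (the function names below are illustrative and not taken from the driver): submission paths and the ring tests take the lock shared, while a GPU reset takes it exclusive and then downgrades to read mode before running the tests.

#include <linux/rwsem.h>

/* Illustrative only: a bare rw_semaphore standing in for rdev->exclusive_lock. */
static DECLARE_RWSEM(example_lock);

static void example_ring_test_path(void)
{
	down_read(&example_lock);	/* shared with other submitters */
	/* ... emit a test IB and wait for its fence ... */
	up_read(&example_lock);
}

static void example_reset_path(void)
{
	down_write(&example_lock);	/* exclusive while the ASIC is reprogrammed */
	/* ... save rings, reset the ASIC, restore rings ... */
	downgrade_write(&example_lock);	/* keep the lock, but now in read mode */
	/* ... ring tests run here, like any other reader ... */
	up_read(&example_lock);		/* matches the downgraded state */
}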

drivers/gpu/drm/radeon/radeon.h

@@ -2326,7 +2326,7 @@ struct radeon_device {
 	bool need_dma32;
 	bool accel_working;
 	bool fastfb_working; /* IGP feature*/
-	bool needs_reset;
+	bool needs_reset, in_reset;
 	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
 	const struct firmware *me_fw; /* all family ME firmware */
 	const struct firmware *pfp_fw; /* r6/700 PFP firmware */

drivers/gpu/drm/radeon/radeon_cs.c

@@ -653,6 +653,13 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		up_read(&rdev->exclusive_lock);
 		return -EBUSY;
 	}
+	if (rdev->in_reset) {
+		up_read(&rdev->exclusive_lock);
+		r = radeon_gpu_reset(rdev);
+		if (!r)
+			r = -EAGAIN;
+		return r;
+	}
 	/* initialize parser */
 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
 	parser.filp = filp;
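
Seen from the submission side, the new block above turns a pending reset into -EAGAIN: the ioctl drops the read lock so it cannot block the resetting writer, performs or waits out the reset via radeon_gpu_reset(), and asks the caller to resubmit. A minimal sketch of the retry idiom such a caller uses, in the style of libdrm's drmIoctl() wrapper (the helper name is illustrative, not part of this patch):

#include <errno.h>
#include <sys/ioctl.h>

/* Illustrative retry wrapper: resubmit an ioctl that failed with EINTR or
 * EAGAIN, e.g. a radeon CS submission bounced back during a GPU reset. */
static int retry_ioctl(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret;
}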

drivers/gpu/drm/radeon/radeon_device.c

@@ -1673,6 +1673,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		return 0;
 	}
+	rdev->in_reset = true;
 	rdev->needs_reset = false;
 	radeon_save_bios_scratch_regs(rdev);
@@ -1691,7 +1692,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		}
 	}
-retry:
 	r = radeon_asic_reset(rdev);
 	if (!r) {
 		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
@@ -1700,25 +1700,11 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	radeon_restore_bios_scratch_regs(rdev);
-	if (!r) {
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!r && ring_data[i]) {
 			radeon_ring_restore(rdev, &rdev->ring[i],
 					    ring_sizes[i], ring_data[i]);
-			ring_sizes[i] = 0;
-			ring_data[i] = NULL;
-		}
-		r = radeon_ib_ring_tests(rdev);
-		if (r) {
-			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
-			if (saved) {
-				saved = false;
-				radeon_suspend(rdev);
-				goto retry;
-			}
-		}
-	} else {
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		} else {
 			radeon_fence_driver_force_completion(rdev, i);
 			kfree(ring_data[i]);
 		}
@@ -1751,19 +1737,28 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	/* reset hpd state */
 	radeon_hpd_init(rdev);
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+	downgrade_write(&rdev->exclusive_lock);
 	drm_helper_resume_force_mode(rdev->ddev);
 	/* set the power state here in case we are a PX system or headless */
 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
 		radeon_pm_compute_clocks(rdev);
-	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-	if (r) {
+	if (!r) {
+		r = radeon_ib_ring_tests(rdev);
+		if (r && saved)
+			r = -EAGAIN;
+	} else {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(rdev->dev, "GPU reset failed\n");
 	}
-	up_write(&rdev->exclusive_lock);
+	rdev->needs_reset = r == -EAGAIN;
+	rdev->in_reset = false;
+	up_read(&rdev->exclusive_lock);
 	return r;
 }
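
With this rework radeon_gpu_reset() can return -EAGAIN when the post-reset IB ring tests fail but saved ring contents make another attempt worthwhile; needs_reset is re-armed so the following reset call tries again. In-kernel callers are therefore expected to loop, which is exactly what the page-flip worker below is changed to do. A hypothetical helper showing that calling convention (it assumes the driver-local radeon.h for struct radeon_device and radeon_gpu_reset(); it is not part of this patch):

#include "radeon.h"

/* Illustrative only: retry the reset until it stops asking for another attempt. */
static int radeon_gpu_reset_sync(struct radeon_device *rdev)
{
	int r;

	do {
		r = radeon_gpu_reset(rdev);
	} while (r == -EAGAIN);

	return r;
}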

drivers/gpu/drm/radeon/radeon_display.c

@@ -405,7 +405,9 @@ static void radeon_flip_work_func(struct work_struct *__work)
 		r = radeon_fence_wait(work->fence, false);
 		if (r == -EDEADLK) {
 			up_read(&rdev->exclusive_lock);
-			r = radeon_gpu_reset(rdev);
+			do {
+				r = radeon_gpu_reset(rdev);
+			} while (r == -EAGAIN);
 			down_read(&rdev->exclusive_lock);
 		}
 		if (r)