Merge branch 'drm-fixes-3.8' of git://people.freedesktop.org/~agd5f/linux into drm-next

- fix the crashes related to DMA on r6xx
- properly reset DMA on GPU reset
- fix monitor probing with some DP bridges
- misc small bug fixes

* 'drm-fixes-3.8' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: switch to a finer grained reset for SI (v2)
  drm/radeon: switch to a finer grained reset for cayman/TN
  drm/radeon: switch to a finer grained reset for evergreen
  drm/radeon: switch to a finer grained reset for r6xx/7xx
  drm/radeon: add GPU reset flags
  drm/radeon: fix typo in evergreen dma fence
  drm/radeon: Properly handle DDC probe for DP bridges
  drm/radeon: reset dma engine on gpu reset (v2)
  drm/radeon: print dma status reg on lockup (v2)
  drm/radeon: improve ring debugfs printing
  drm/radeon: add debugfs file for dma rings
  drm/radeon/r6xx: fix DMA engine for ttm bo transfers
  drm/radeon: add connector table for Mac G4 Silver
Committed by Dave Airlie on 2013-01-04 12:51:51 +10:00
commit e61e512b5a

14 changed files with 413 additions and 99 deletions
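
The core of this series is a finer-grained reset convention: each ASIC family gains separate GFX and DMA soft-reset helpers, selected by a bit mask passed into the per-family soft-reset entry point. The sketch below only illustrates that convention as a standalone user-space program; the RADEON_RESET_* values mirror the defines added to radeon.h in this merge, while full_reset_mask(), main() and the printf calls are illustrative stand-ins, not kernel code.

#include <stdio.h>

#define RADEON_RESET_GFX     (1 << 0)
#define RADEON_RESET_COMPUTE (1 << 1)
#define RADEON_RESET_DMA     (1 << 2)

/* A full ASIC reset passes all engine flags; a lockup handler could, in
 * principle, pass only the flag for the engine that actually hung. */
static unsigned int full_reset_mask(void)
{
    return RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_DMA;
}

int main(void)
{
    unsigned int mask = full_reset_mask();

    if (mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
        printf("would soft-reset the GFX/compute block\n");
    if (mask & RADEON_RESET_DMA)
        printf("would soft-reset the DMA engine(s)\n");
    return 0;
}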


@@ -2306,22 +2306,20 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
     return radeon_ring_test_lockup(rdev, ring);
 }
 
-static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
 {
-    struct evergreen_mc_save save;
     u32 grbm_reset = 0;
 
     if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-        return 0;
+        return;
 
-    dev_info(rdev->dev, "GPU softreset \n");
-    dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+    dev_info(rdev->dev, "  GRBM_STATUS = 0x%08X\n",
         RREG32(GRBM_STATUS));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+    dev_info(rdev->dev, "  GRBM_STATUS_SE0 = 0x%08X\n",
         RREG32(GRBM_STATUS_SE0));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+    dev_info(rdev->dev, "  GRBM_STATUS_SE1 = 0x%08X\n",
         RREG32(GRBM_STATUS_SE1));
-    dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+    dev_info(rdev->dev, "  SRBM_STATUS = 0x%08X\n",
         RREG32(SRBM_STATUS));
     dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
         RREG32(CP_STALLED_STAT1));
@@ -2331,10 +2329,7 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
         RREG32(CP_BUSY_STAT));
     dev_info(rdev->dev, "  R_008680_CP_STAT = 0x%08X\n",
         RREG32(CP_STAT));
-    evergreen_mc_stop(rdev, &save);
-    if (evergreen_mc_wait_for_idle(rdev)) {
-        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
-    }
+
     /* Disable CP parsing/prefetching */
     WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
 
@@ -2358,15 +2353,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
     udelay(50);
     WREG32(GRBM_SOFT_RESET, 0);
     (void)RREG32(GRBM_SOFT_RESET);
-    /* Wait a little for things to settle down */
-    udelay(50);
-    dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+
+    dev_info(rdev->dev, "  GRBM_STATUS = 0x%08X\n",
         RREG32(GRBM_STATUS));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+    dev_info(rdev->dev, "  GRBM_STATUS_SE0 = 0x%08X\n",
         RREG32(GRBM_STATUS_SE0));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+    dev_info(rdev->dev, "  GRBM_STATUS_SE1 = 0x%08X\n",
         RREG32(GRBM_STATUS_SE1));
-    dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+    dev_info(rdev->dev, "  SRBM_STATUS = 0x%08X\n",
         RREG32(SRBM_STATUS));
     dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
         RREG32(CP_STALLED_STAT1));
@@ -2376,13 +2370,65 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
         RREG32(CP_BUSY_STAT));
     dev_info(rdev->dev, "  R_008680_CP_STAT = 0x%08X\n",
         RREG32(CP_STAT));
+}
+
+static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+    u32 tmp;
+
+    if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+        return;
+
+    dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG = 0x%08X\n",
+        RREG32(DMA_STATUS_REG));
+
+    /* Disable DMA */
+    tmp = RREG32(DMA_RB_CNTL);
+    tmp &= ~DMA_RB_ENABLE;
+    WREG32(DMA_RB_CNTL, tmp);
+
+    /* Reset dma */
+    WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+    RREG32(SRBM_SOFT_RESET);
+    udelay(50);
+    WREG32(SRBM_SOFT_RESET, 0);
+
+    dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG = 0x%08X\n",
+        RREG32(DMA_STATUS_REG));
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+    struct evergreen_mc_save save;
+
+    if (reset_mask == 0)
+        return 0;
+
+    dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+    evergreen_mc_stop(rdev, &save);
+    if (evergreen_mc_wait_for_idle(rdev)) {
+        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+    }
+
+    if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+        evergreen_gpu_soft_reset_gfx(rdev);
+
+    if (reset_mask & RADEON_RESET_DMA)
+        evergreen_gpu_soft_reset_dma(rdev);
+
+    /* Wait a little for things to settle down */
+    udelay(50);
+
     evergreen_mc_resume(rdev, &save);
     return 0;
 }
 
 int evergreen_asic_reset(struct radeon_device *rdev)
 {
-    return evergreen_gpu_soft_reset(rdev);
+    return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+                                           RADEON_RESET_COMPUTE |
+                                           RADEON_RESET_DMA));
 }
 
 /* Interrupts */
@@ -3215,7 +3261,7 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
     radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
     /* flush HDP */
     radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
-    radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL);
+    radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
     radeon_ring_write(ring, 1);
 }


@@ -742,8 +742,9 @@
 #define SOFT_RESET_ROM		(1 << 14)
 #define SOFT_RESET_SEM		(1 << 15)
 #define SOFT_RESET_VMC		(1 << 17)
+#define SOFT_RESET_DMA		(1 << 20)
 #define SOFT_RESET_TST		(1 << 21)
 #define SOFT_RESET_REGBB	(1 << 22)
 #define SOFT_RESET_ORB		(1 << 23)
 
 /* display watermarks */
@@ -2027,4 +2028,15 @@
 /* cayman packet3 addition */
 #define CAYMAN_PACKET3_DEALLOC_STATE	0x14
 
+/* DMA regs common on r6xx/r7xx/evergreen/ni */
+#define DMA_RB_CNTL		0xd000
+#	define DMA_RB_ENABLE			(1 << 0)
+#	define DMA_RB_SIZE(x)			((x) << 1) /* log2 */
+#	define DMA_RB_SWAP_ENABLE		(1 << 9) /* 8IN32 */
+#	define DMA_RPTR_WRITEBACK_ENABLE	(1 << 12)
+#	define DMA_RPTR_WRITEBACK_SWAP_ENABLE	(1 << 13) /* 8IN32 */
+#	define DMA_RPTR_WRITEBACK_TIMER(x)	((x) << 16) /* log2 */
+#define DMA_STATUS_REG		0xd034
+#	define DMA_IDLE				(1 << 0)
+
 #endif


@@ -1306,22 +1306,20 @@ void cayman_dma_fini(struct radeon_device *rdev)
     radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
 }
 
-static int cayman_gpu_soft_reset(struct radeon_device *rdev)
+static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
 {
-    struct evergreen_mc_save save;
     u32 grbm_reset = 0;
 
     if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-        return 0;
+        return;
 
-    dev_info(rdev->dev, "GPU softreset \n");
-    dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+    dev_info(rdev->dev, "  GRBM_STATUS = 0x%08X\n",
         RREG32(GRBM_STATUS));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+    dev_info(rdev->dev, "  GRBM_STATUS_SE0 = 0x%08X\n",
         RREG32(GRBM_STATUS_SE0));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+    dev_info(rdev->dev, "  GRBM_STATUS_SE1 = 0x%08X\n",
         RREG32(GRBM_STATUS_SE1));
-    dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+    dev_info(rdev->dev, "  SRBM_STATUS = 0x%08X\n",
         RREG32(SRBM_STATUS));
     dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
         RREG32(CP_STALLED_STAT1));
@@ -1331,19 +1329,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
         RREG32(CP_BUSY_STAT));
     dev_info(rdev->dev, "  R_008680_CP_STAT = 0x%08X\n",
         RREG32(CP_STAT));
-    dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
-        RREG32(0x14F8));
-    dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
-        RREG32(0x14D8));
-    dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
-        RREG32(0x14FC));
-    dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
-        RREG32(0x14DC));
-    evergreen_mc_stop(rdev, &save);
-    if (evergreen_mc_wait_for_idle(rdev)) {
-        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
-    }
+
     /* Disable CP parsing/prefetching */
     WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
 
@@ -1368,16 +1354,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
     udelay(50);
     WREG32(GRBM_SOFT_RESET, 0);
     (void)RREG32(GRBM_SOFT_RESET);
-    /* Wait a little for things to settle down */
-    udelay(50);
 
-    dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+    dev_info(rdev->dev, "  GRBM_STATUS = 0x%08X\n",
         RREG32(GRBM_STATUS));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+    dev_info(rdev->dev, "  GRBM_STATUS_SE0 = 0x%08X\n",
         RREG32(GRBM_STATUS_SE0));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+    dev_info(rdev->dev, "  GRBM_STATUS_SE1 = 0x%08X\n",
         RREG32(GRBM_STATUS_SE1));
-    dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+    dev_info(rdev->dev, "  SRBM_STATUS = 0x%08X\n",
         RREG32(SRBM_STATUS));
     dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
         RREG32(CP_STALLED_STAT1));
@@ -1387,13 +1371,81 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
         RREG32(CP_BUSY_STAT));
     dev_info(rdev->dev, "  R_008680_CP_STAT = 0x%08X\n",
         RREG32(CP_STAT));
+}
+
+static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+    u32 tmp;
+
+    if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+        return;
+
+    dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG = 0x%08X\n",
+        RREG32(DMA_STATUS_REG));
+
+    /* dma0 */
+    tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+    tmp &= ~DMA_RB_ENABLE;
+    WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
+    /* dma1 */
+    tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+    tmp &= ~DMA_RB_ENABLE;
+    WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+
+    /* Reset dma */
+    WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+    RREG32(SRBM_SOFT_RESET);
+    udelay(50);
+    WREG32(SRBM_SOFT_RESET, 0);
+
+    dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG = 0x%08X\n",
+        RREG32(DMA_STATUS_REG));
+}
+
+static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+    struct evergreen_mc_save save;
+
+    if (reset_mask == 0)
+        return 0;
+
+    dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+    dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
+        RREG32(0x14F8));
+    dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+        RREG32(0x14D8));
+    dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+        RREG32(0x14FC));
+    dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+        RREG32(0x14DC));
+
+    evergreen_mc_stop(rdev, &save);
+    if (evergreen_mc_wait_for_idle(rdev)) {
+        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+    }
+
+    if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+        cayman_gpu_soft_reset_gfx(rdev);
+
+    if (reset_mask & RADEON_RESET_DMA)
+        cayman_gpu_soft_reset_dma(rdev);
+
+    /* Wait a little for things to settle down */
+    udelay(50);
+
     evergreen_mc_resume(rdev, &save);
     return 0;
 }
 
 int cayman_asic_reset(struct radeon_device *rdev)
 {
-    return cayman_gpu_soft_reset(rdev);
+    return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+                                        RADEON_RESET_COMPUTE |
+                                        RADEON_RESET_DMA));
 }
 
 /**


@@ -65,7 +65,7 @@
 #define SOFT_RESET_VMC		(1 << 17)
 #define SOFT_RESET_DMA		(1 << 20)
 #define SOFT_RESET_TST		(1 << 21)
 #define SOFT_RESET_REGBB	(1 << 22)
 #define SOFT_RESET_ORB		(1 << 23)
 
 #define VM_CONTEXT0_REQUEST_RESPONSE	0x1470
@@ -675,4 +675,3 @@
 #define DMA_PACKET_NOP		0xf
 
 #endif


@@ -1258,9 +1258,8 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
  * reset, it's up to the caller to determine if the GPU needs one. We
  * might add an helper function to check that.
  */
-static int r600_gpu_soft_reset(struct radeon_device *rdev)
+static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
 {
-    struct rv515_mc_save save;
     u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
         S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
         S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
@@ -1280,14 +1279,13 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
     u32 tmp;
 
     if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-        return 0;
+        return;
 
-    dev_info(rdev->dev, "GPU softreset \n");
-    dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
+    dev_info(rdev->dev, "  R_008010_GRBM_STATUS = 0x%08X\n",
         RREG32(R_008010_GRBM_STATUS));
-    dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
+    dev_info(rdev->dev, "  R_008014_GRBM_STATUS2 = 0x%08X\n",
         RREG32(R_008014_GRBM_STATUS2));
-    dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
+    dev_info(rdev->dev, "  R_000E50_SRBM_STATUS = 0x%08X\n",
         RREG32(R_000E50_SRBM_STATUS));
     dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
         RREG32(CP_STALLED_STAT1));
@@ -1297,12 +1295,10 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
         RREG32(CP_BUSY_STAT));
     dev_info(rdev->dev, "  R_008680_CP_STAT = 0x%08X\n",
         RREG32(CP_STAT));
-    rv515_mc_stop(rdev, &save);
-    if (r600_mc_wait_for_idle(rdev)) {
-        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
-    }
+
     /* Disable CP parsing/prefetching */
     WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 
     /* Check if any of the rendering block is busy and reset it */
     if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
         (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -1332,13 +1328,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
     RREG32(R_008020_GRBM_SOFT_RESET);
     mdelay(15);
     WREG32(R_008020_GRBM_SOFT_RESET, 0);
-    /* Wait a little for things to settle down */
-    mdelay(1);
-    dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
+
+    dev_info(rdev->dev, "  R_008010_GRBM_STATUS = 0x%08X\n",
         RREG32(R_008010_GRBM_STATUS));
-    dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
+    dev_info(rdev->dev, "  R_008014_GRBM_STATUS2 = 0x%08X\n",
         RREG32(R_008014_GRBM_STATUS2));
-    dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
+    dev_info(rdev->dev, "  R_000E50_SRBM_STATUS = 0x%08X\n",
         RREG32(R_000E50_SRBM_STATUS));
     dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
         RREG32(CP_STALLED_STAT1));
@@ -1348,6 +1343,60 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
         RREG32(CP_BUSY_STAT));
     dev_info(rdev->dev, "  R_008680_CP_STAT = 0x%08X\n",
         RREG32(CP_STAT));
+}
+
+static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+    u32 tmp;
+
+    if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+        return;
+
+    dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG = 0x%08X\n",
+        RREG32(DMA_STATUS_REG));
+
+    /* Disable DMA */
+    tmp = RREG32(DMA_RB_CNTL);
+    tmp &= ~DMA_RB_ENABLE;
+    WREG32(DMA_RB_CNTL, tmp);
+
+    /* Reset dma */
+    if (rdev->family >= CHIP_RV770)
+        WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+    else
+        WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+    RREG32(SRBM_SOFT_RESET);
+    udelay(50);
+    WREG32(SRBM_SOFT_RESET, 0);
+
+    dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG = 0x%08X\n",
+        RREG32(DMA_STATUS_REG));
+}
+
+static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+    struct rv515_mc_save save;
+
+    if (reset_mask == 0)
+        return 0;
+
+    dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+    rv515_mc_stop(rdev, &save);
+    if (r600_mc_wait_for_idle(rdev)) {
+        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+    }
+
+    if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+        r600_gpu_soft_reset_gfx(rdev);
+
+    if (reset_mask & RADEON_RESET_DMA)
+        r600_gpu_soft_reset_dma(rdev);
+
+    /* Wait a little for things to settle down */
+    mdelay(1);
+
     rv515_mc_resume(rdev, &save);
     return 0;
 }
@@ -1395,7 +1444,9 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 
 int r600_asic_reset(struct radeon_device *rdev)
 {
-    return r600_gpu_soft_reset(rdev);
+    return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+                                      RADEON_RESET_COMPUTE |
+                                      RADEON_RESET_DMA));
 }
 
 u32 r6xx_remap_render_backend(struct radeon_device *rdev,
@@ -2636,8 +2687,8 @@ int r600_copy_dma(struct radeon_device *rdev,
 
     for (i = 0; i < num_loops; i++) {
         cur_size_in_dw = size_in_dw;
-        if (cur_size_in_dw > 0xFFFF)
-            cur_size_in_dw = 0xFFFF;
+        if (cur_size_in_dw > 0xFFFE)
+            cur_size_in_dw = 0xFFFE;
         size_in_dw -= cur_size_in_dw;
         radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
         radeon_ring_write(ring, dst_offset & 0xfffffffc);


@@ -132,6 +132,11 @@ extern int radeon_lockup_timeout;
 #define RADEON_VA_RESERVED_SIZE		(8 << 20)
 #define RADEON_IB_VM_MAX_SIZE		(64 << 10)
 
+/* reset flags */
+#define RADEON_RESET_GFX		(1 << 0)
+#define RADEON_RESET_COMPUTE		(1 << 1)
+#define RADEON_RESET_DMA		(1 << 2)
+
 /*
  * Errata workarounds.
  */


@@ -1548,6 +1548,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
            of_machine_is_compatible("PowerBook6,7")) {
         /* ibook */
         rdev->mode_info.connector_table = CT_IBOOK;
+    } else if (of_machine_is_compatible("PowerMac3,5")) {
+        /* PowerMac G4 Silver radeon 7500 */
+        rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
     } else if (of_machine_is_compatible("PowerMac4,4")) {
         /* emac */
         rdev->mode_info.connector_table = CT_EMAC;
@@ -2212,6 +2215,54 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                         CONNECTOR_OBJECT_ID_SVIDEO,
                         &hpd);
         break;
+    case CT_MAC_G4_SILVER:
+        DRM_INFO("Connector Table: %d (mac g4 silver)\n",
+             rdev->mode_info.connector_table);
+        /* DVI-I - tv dac, int tmds */
+        ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+        hpd.hpd = RADEON_HPD_1; /* ??? */
+        radeon_add_legacy_encoder(dev,
+                      radeon_get_encoder_enum(dev,
+                                  ATOM_DEVICE_DFP1_SUPPORT,
+                                  0),
+                      ATOM_DEVICE_DFP1_SUPPORT);
+        radeon_add_legacy_encoder(dev,
+                      radeon_get_encoder_enum(dev,
+                                  ATOM_DEVICE_CRT2_SUPPORT,
+                                  2),
+                      ATOM_DEVICE_CRT2_SUPPORT);
+        radeon_add_legacy_connector(dev, 0,
+                        ATOM_DEVICE_DFP1_SUPPORT |
+                        ATOM_DEVICE_CRT2_SUPPORT,
+                        DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+                        CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+                        &hpd);
+        /* VGA - primary dac */
+        ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+        hpd.hpd = RADEON_HPD_NONE;
+        radeon_add_legacy_encoder(dev,
+                      radeon_get_encoder_enum(dev,
+                                  ATOM_DEVICE_CRT1_SUPPORT,
+                                  1),
+                      ATOM_DEVICE_CRT1_SUPPORT);
+        radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+                        DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+                        CONNECTOR_OBJECT_ID_VGA,
+                        &hpd);
+        /* TV - TV DAC */
+        ddc_i2c.valid = false;
+        hpd.hpd = RADEON_HPD_NONE;
+        radeon_add_legacy_encoder(dev,
+                      radeon_get_encoder_enum(dev,
+                                  ATOM_DEVICE_TV1_SUPPORT,
+                                  2),
+                      ATOM_DEVICE_TV1_SUPPORT);
+        radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+                        DRM_MODE_CONNECTOR_SVIDEO,
+                        &ddc_i2c,
+                        CONNECTOR_OBJECT_ID_SVIDEO,
+                        &hpd);
+        break;
     default:
         DRM_INFO("Connector table: %d (invalid)\n",
              rdev->mode_info.connector_table);


@@ -741,7 +741,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
     ret = connector_status_disconnected;
 
     if (radeon_connector->ddc_bus)
-        dret = radeon_ddc_probe(radeon_connector);
+        dret = radeon_ddc_probe(radeon_connector, false);
     if (dret) {
         radeon_connector->detected_by_load = false;
         if (radeon_connector->edid) {
@@ -947,7 +947,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
         return connector->status;
 
     if (radeon_connector->ddc_bus)
-        dret = radeon_ddc_probe(radeon_connector);
+        dret = radeon_ddc_probe(radeon_connector, false);
     if (dret) {
         radeon_connector->detected_by_load = false;
         if (radeon_connector->edid) {
@@ -1401,7 +1401,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
         if (encoder) {
             /* setup ddc on the bridge */
             radeon_atom_ext_encoder_setup_ddc(encoder);
-            if (radeon_ddc_probe(radeon_connector)) /* try DDC */
+            /* bridge chips are always aux */
+            if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
                 ret = connector_status_connected;
             else if (radeon_connector->dac_load_detect) { /* try load detection */
                 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1419,7 +1420,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
             if (radeon_dp_getdpcd(radeon_connector))
                 ret = connector_status_connected;
         } else {
-            if (radeon_ddc_probe(radeon_connector))
+            /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */
+            if (radeon_ddc_probe(radeon_connector, false))
                 ret = connector_status_connected;
         }
     }


@@ -699,10 +699,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
     if (radeon_connector->router.ddc_valid)
         radeon_router_select_ddc_port(radeon_connector);
 
-    if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
-        (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
-        (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
-         ENCODER_OBJECT_ID_NONE)) {
+    if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+        ENCODER_OBJECT_ID_NONE) {
+        struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+        if (dig->dp_i2c_bus)
+            radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+                                                  &dig->dp_i2c_bus->adapter);
+    } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+               (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
         struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
 
         if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||


@@ -39,7 +39,7 @@ extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
  * radeon_ddc_probe
  *
  */
-bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
 {
     u8 out = 0x0;
     u8 buf[8];
@@ -63,7 +63,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
     if (radeon_connector->router.ddc_valid)
         radeon_router_select_ddc_port(radeon_connector);
 
-    ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+    if (use_aux) {
+        struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+        ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
+    } else {
+        ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+    }
+
     if (ret != 2)
         /* Couldn't find an accessible DDC on this connector */
         return false;
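
For the DP-bridge monitor-probing fix, radeon_ddc_probe() gains a use_aux parameter so bridge chips are probed over the DP aux i2c adapter while everything else keeps using the connector's regular DDC bus. The standalone sketch below only mirrors that selection logic for illustration; struct fake_connector and probe_ddc() are hypothetical stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the connector state the kernel tracks. */
struct fake_connector {
    bool has_dp_bridge; /* a DP to DVI/LVDS bridge encoder is present */
};

/* Mirrors the bus selection in radeon_ddc_probe(connector, use_aux): with
 * use_aux the two-message EDID probe to address 0x50 goes through the DP aux
 * i2c adapter, otherwise through the regular DDC bus. */
static bool probe_ddc(const struct fake_connector *c, bool use_aux)
{
    (void)c;
    printf("probing EDID address 0x50 over %s\n", use_aux ? "DP aux" : "DDC i2c");
    return true;
}

int main(void)
{
    struct fake_connector c = { .has_dp_bridge = true };

    /* bridge chips are always probed over aux, as in this merge */
    probe_ddc(&c, c.has_dp_bridge);
    return 0;
}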


@@ -209,7 +209,8 @@ enum radeon_connector_table {
     CT_RN50_POWER,
     CT_MAC_X800,
     CT_MAC_G5_9600,
-    CT_SAM440EP
+    CT_SAM440EP,
+    CT_MAC_G4_SILVER
 };
 
 enum radeon_dvo_chip {
@@ -558,7 +559,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
                 u8 val);
 extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
 extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
 extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);


@@ -770,22 +770,28 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
     int ridx = *(int*)node->info_ent->data;
     struct radeon_ring *ring = &rdev->ring[ridx];
     unsigned count, i, j;
+    u32 tmp;
 
     radeon_ring_free_size(rdev, ring);
     count = (ring->ring_size / 4) - ring->ring_free_dw;
-    seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
-    seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
+    tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
+    seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
+    tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
+    seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
     if (ring->rptr_save_reg) {
         seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
                RREG32(ring->rptr_save_reg));
     }
-    seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
-    seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
+    seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
+    seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
     seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
     seq_printf(m, "%u dwords in ring\n", count);
-    i = ring->rptr;
-    for (j = 0; j <= count; j++) {
-        seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+    /* print 8 dw before current rptr as often it's the last executed
+     * packet that is the root issue
+     */
+    i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+    for (j = 0; j <= (count + 32); j++) {
+        seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
         i = (i + 1) & ring->ptr_mask;
     }
     return 0;
@@ -794,11 +800,15 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
 static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
 static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
 static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
 
 static struct drm_info_list radeon_debugfs_ring_info_list[] = {
     {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
     {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
     {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
+    {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
+    {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
 };
 
 static int radeon_debugfs_sa_info(struct seq_file *m, void *data)


@@ -2126,15 +2126,13 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
     return radeon_ring_test_lockup(rdev, ring);
 }
 
-static int si_gpu_soft_reset(struct radeon_device *rdev)
+static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
 {
-    struct evergreen_mc_save save;
     u32 grbm_reset = 0;
 
     if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-        return 0;
+        return;
 
-    dev_info(rdev->dev, "GPU softreset \n");
     dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
         RREG32(GRBM_STATUS));
     dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
@@ -2145,10 +2143,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
         RREG32(GRBM_STATUS_SE1));
     dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
         RREG32(SRBM_STATUS));
-    evergreen_mc_stop(rdev, &save);
-    if (radeon_mc_wait_for_idle(rdev)) {
-        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
-    }
+
     /* Disable CP parsing/prefetching */
     WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
 
@@ -2173,8 +2168,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
     udelay(50);
     WREG32(GRBM_SOFT_RESET, 0);
     (void)RREG32(GRBM_SOFT_RESET);
-    /* Wait a little for things to settle down */
-    udelay(50);
+
     dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
         RREG32(GRBM_STATUS));
     dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
@@ -2185,13 +2179,75 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
         RREG32(GRBM_STATUS_SE1));
     dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
         RREG32(SRBM_STATUS));
+}
+
+static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+    u32 tmp;
+
+    if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+        return;
+
+    dev_info(rdev->dev, "  DMA_STATUS_REG = 0x%08X\n",
+        RREG32(DMA_STATUS_REG));
+
+    /* dma0 */
+    tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+    tmp &= ~DMA_RB_ENABLE;
+    WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
+    /* dma1 */
+    tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+    tmp &= ~DMA_RB_ENABLE;
+    WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+
+    /* Reset dma */
+    WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+    RREG32(SRBM_SOFT_RESET);
+    udelay(50);
+    WREG32(SRBM_SOFT_RESET, 0);
+
+    dev_info(rdev->dev, "  DMA_STATUS_REG = 0x%08X\n",
+        RREG32(DMA_STATUS_REG));
+}
+
+static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+    struct evergreen_mc_save save;
+
+    if (reset_mask == 0)
+        return 0;
+
+    dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+    dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+        RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+    dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+        RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+    evergreen_mc_stop(rdev, &save);
+    if (radeon_mc_wait_for_idle(rdev)) {
+        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+    }
+
+    if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+        si_gpu_soft_reset_gfx(rdev);
+
+    if (reset_mask & RADEON_RESET_DMA)
+        si_gpu_soft_reset_dma(rdev);
+
+    /* Wait a little for things to settle down */
+    udelay(50);
+
     evergreen_mc_resume(rdev, &save);
     return 0;
 }
 
 int si_asic_reset(struct radeon_device *rdev)
 {
-    return si_gpu_soft_reset(rdev);
+    return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+                                    RADEON_RESET_COMPUTE |
+                                    RADEON_RESET_DMA));
 }
 
 /* MC */


@@ -62,6 +62,22 @@
 
 #define SRBM_STATUS		0xE50
 
+#define SRBM_SOFT_RESET		0x0E60
+#define SOFT_RESET_BIF		(1 << 1)
+#define SOFT_RESET_DC		(1 << 5)
+#define SOFT_RESET_DMA1		(1 << 6)
+#define SOFT_RESET_GRBM		(1 << 8)
+#define SOFT_RESET_HDP		(1 << 9)
+#define SOFT_RESET_IH		(1 << 10)
+#define SOFT_RESET_MC		(1 << 11)
+#define SOFT_RESET_ROM		(1 << 14)
+#define SOFT_RESET_SEM		(1 << 15)
+#define SOFT_RESET_VMC		(1 << 17)
+#define SOFT_RESET_DMA		(1 << 20)
+#define SOFT_RESET_TST		(1 << 21)
+#define SOFT_RESET_REGBB	(1 << 22)
+#define SOFT_RESET_ORB		(1 << 23)
+
 #define CC_SYS_RB_BACKEND_DISABLE	0xe80
 #define GC_USER_SYS_RB_BACKEND_DISABLE	0xe84
@@ -1013,6 +1029,8 @@
 #	define DATA_SWAP_ENABLE			(1 << 3)
 #	define FENCE_SWAP_ENABLE		(1 << 4)
 #	define CTXEMPTY_INT_ENABLE		(1 << 28)
+#define DMA_STATUS_REG			0xd034
+#	define DMA_IDLE				(1 << 0)
 #define DMA_TILING_CONFIG		0xd0b8
 
 #define DMA_PACKET(cmd, b, t, s, n)	((((cmd) & 0xF) << 28) |	\