Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen.c')
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 422
1 file changed, 140 insertions(+), 282 deletions(-)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 04a5d83..87ff586 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -39,40 +39,7 @@
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
-void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
-extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
- int ring, u32 cp_int_cntl);
-
-void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
- unsigned *bankh, unsigned *mtaspect,
- unsigned *tile_split)
-{
- *bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
- *bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
- *mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
- *tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
- switch (*bankw) {
- default:
- case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
- case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
- case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
- case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
- }
- switch (*bankh) {
- default:
- case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
- case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
- case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
- case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
- }
- switch (*mtaspect) {
- default:
- case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
- case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
- case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
- case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
- }
-}
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
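
The helper removed in the hunk above decodes four values that were packed
into a single tiling_flags word with per-field shifts and masks, then maps
each raw power-of-two value onto its ADDR_SURF register encoding. A
standalone sketch of that decode pattern, using hypothetical shift/mask
values rather than the real RADEON_TILING_EG_* constants:

    #include <stdio.h>

    /* Hypothetical layout: 4 bits per field. The kernel's real
     * RADEON_TILING_EG_* shifts and masks differ; only the pattern
     * matters here. */
    #define BANKW_SHIFT 0
    #define BANKW_MASK  0xf
    #define BANKH_SHIFT 4
    #define BANKH_MASK  0xf

    /* Extract one field and map 1/2/4/8 onto a 0..3 register encoding,
     * defaulting to the x1 encoding exactly as the removed switches do. */
    static unsigned decode_field(unsigned flags, unsigned shift, unsigned mask)
    {
        switch ((flags >> shift) & mask) {
        default:
        case 1: return 0;
        case 2: return 1;
        case 4: return 2;
        case 8: return 3;
        }
    }

    int main(void)
    {
        unsigned flags = (4 << BANKW_SHIFT) | (8 << BANKH_SHIFT);
        printf("bankw=%u bankh=%u\n",
               decode_field(flags, BANKW_SHIFT, BANKW_MASK),
               decode_field(flags, BANKH_SHIFT, BANKH_MASK));
        return 0;
    }
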
@@ -99,25 +66,6 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
}
}
-void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
-{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
- int i;
-
- if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)
- break;
- udelay(1);
- }
- }
-}
-
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
/* enable the pflip int */
@@ -214,57 +162,6 @@ int sumo_get_temp(struct radeon_device *rdev)
return actual_temp * 1000;
}
-void sumo_pm_init_profile(struct radeon_device *rdev)
-{
- int idx;
-
- /* default */
- rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
- rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
- rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
-
- /* low,mid sh/mh */
- if (rdev->flags & RADEON_IS_MOBILITY)
- idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
- else
- idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
-
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
-
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
-
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
-
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
-
- /* high sh/mh */
- idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
- rdev->pm.power_state[idx].num_clock_modes - 1;
-
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
- rdev->pm.power_state[idx].num_clock_modes - 1;
-}
-
void evergreen_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
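
The removed sumo_pm_init_profile() fills one table entry per
PM_PROFILE_*_IDX slot; each entry carries a power-state index and a
clock-mode index for both the dpms-on and dpms-off cases, with the
battery vs. performance state picked by the RADEON_IS_MOBILITY flag.
The shape of each entry, as a stand-in declaration inferred from the
field names in the hunk:

    /* Stand-in for one profile entry; field names follow the hunk above. */
    struct pm_profile_entry {
        int dpms_off_ps_idx;   /* power state while displays are off */
        int dpms_on_ps_idx;    /* power state while displays are on  */
        int dpms_off_cm_idx;   /* clock mode within that power state */
        int dpms_on_cm_idx;
    };
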
@@ -904,7 +801,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
!evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
!evergreen_check_latency_hiding(&wm) ||
(rdev->disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
+ DRM_INFO("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
@@ -1029,7 +926,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
u32 tmp;
int r;
- if (rdev->gart.robj == NULL) {
+ if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
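
The one-line change above tracks a struct layout difference, not a
behavioral one: the GART page-table buffer object lives at
rdev->gart.robj on the "-" side and at the nested
rdev->gart.table.vram.robj on the "+" side. Stand-in declarations
sketching the two layouts:

    struct radeon_bo;

    /* "-" side: the BO hangs directly off the gart struct. */
    struct radeon_gart_a {
        struct radeon_bo *robj;                     /* rdev->gart.robj */
    };

    /* "+" side: the BO is nested under table.vram. */
    struct radeon_gart_b {
        struct {
            struct { struct radeon_bo *robj; } vram;
        } table;                        /* rdev->gart.table.vram.robj */
    };
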
@@ -1076,9 +973,6 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
evergreen_pcie_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(rdev->mc.gtt_size >> 20),
- (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -1086,6 +980,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
+ int r;
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
@@ -1105,7 +1000,14 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
- radeon_gart_table_vram_unpin(rdev);
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
}
void evergreen_pcie_gart_fini(struct radeon_device *rdev)
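
The "+" lines in the hunk above open-code what radeon_gart_table_vram_unpin()
did on the "-" side, using the usual radeon BO teardown sequence: reserve,
kunmap, unpin, unreserve, skipping the whole sequence when the reserve
fails. A minimal sketch of that control flow, with stand-in prototypes
mirroring the calls in the hunk:

    #include <stdbool.h>

    struct radeon_bo;

    /* Stand-in prototypes for the helpers used in the hunk. */
    int  radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
    void radeon_bo_kunmap(struct radeon_bo *bo);
    void radeon_bo_unpin(struct radeon_bo *bo);
    void radeon_bo_unreserve(struct radeon_bo *bo);

    static void gart_table_unpin(struct radeon_bo *robj)
    {
        if (!robj)
            return;
        if (radeon_bo_reserve(robj, false) == 0) {
            radeon_bo_kunmap(robj);     /* drop the CPU mapping       */
            radeon_bo_unpin(robj);      /* release the VRAM pin       */
            radeon_bo_unreserve(robj);  /* touch the BO only reserved */
        }
    }
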
@@ -1304,7 +1206,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
- WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
if (rdev->flags & RADEON_IS_IGP) {
tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
@@ -1340,20 +1242,18 @@ void evergreen_mc_program(struct radeon_device *rdev)
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
-
/* set to DX10/11 mode */
- radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(ring, 1);
+ radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(rdev, 1);
/* FIXME: implement */
- radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(ring, ib->length_dw);
+ radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(rdev, ib->length_dw);
}
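
The rewrite in this hunk, and in the CP-start, CP-resume, and lockup hunks
below, is one mechanical API change applied many times: the multi-ring code
writes through an explicit struct radeon_ring picked out of rdev->ring[],
while the single-ring code restored here writes through the device's one CP
ring via rdev itself. The same call site on the two sides of the diff:

    radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); /* "-": explicit ring    */
    radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0)); /* "+": implicit CP ring */
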
@@ -1391,73 +1291,71 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
static int evergreen_cp_start(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
uint32_t cp_me;
- r = radeon_ring_lock(rdev, ring, 7);
+ r = radeon_ring_lock(rdev, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(ring, 0x1);
- radeon_ring_write(ring, 0x0);
- radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
- radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(rdev, 0x1);
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+ radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_unlock_commit(rdev);
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
- r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
+ r = radeon_ring_lock(rdev, evergreen_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* setup clear context state */
- radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
for (i = 0; i < evergreen_default_size; i++)
- radeon_ring_write(ring, evergreen_default_state[i]);
+ radeon_ring_write(rdev, evergreen_default_state[i]);
- radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
/* set clear context state */
- radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(rdev, 0);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(ring, 0xc0026f00);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(rdev, 0xc0026f00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
/* Clear consts */
- radeon_ring_write(ring, 0xc0036f00);
- radeon_ring_write(ring, 0x00000bc4);
- radeon_ring_write(ring, 0xffffffff);
- radeon_ring_write(ring, 0xffffffff);
- radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(rdev, 0xc0036f00);
+ radeon_ring_write(rdev, 0x00000bc4);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(ring, 0xc0026900);
- radeon_ring_write(ring, 0x00000316);
- radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- radeon_ring_write(ring, 0x00000010); /* */
+ radeon_ring_write(rdev, 0xc0026900);
+ radeon_ring_write(rdev, 0x00000316);
+ radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(rdev, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev);
return 0;
}
int evergreen_cp_resume(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
@@ -1475,14 +1373,13 @@ int evergreen_cp_resume(struct radeon_device *rdev)
RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size / 8);
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_SEM_WAIT_TIMER, 0x0);
- WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1490,11 +1387,14 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- ring->wptr = 0;
- WREG32(CP_RB_WPTR, ring->wptr);
+ rdev->cp.wptr = 0;
+ WREG32(CP_RB_WPTR, rdev->cp.wptr);
/* set the wb address wether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+ RB_RPTR_SWAP(2) |
+#endif
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
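
The "+" side restores a big-endian conditional when programming
CP_RB_RPTR_ADDR: RB_RPTR_SWAP(2) selects 32-bit byte swapping for the
read-pointer writeback so the CPU reads it in host order. For reference,
a 32-bit swap rearranges bytes like this (illustration only):

    #include <stdint.h>

    static uint32_t swap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00) |
               ((v << 8) & 0x00ff0000) | (v << 24);
    }
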
@@ -1509,16 +1409,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+ WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
- ring->rptr = RREG32(CP_RB_RPTR);
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
evergreen_cp_start(rdev);
- ring->ready = true;
- r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+ rdev->cp.ready = true;
+ r = radeon_ring_test(rdev);
if (r) {
- ring->ready = false;
+ rdev->cp.ready = false;
return r;
}
return 0;
@@ -2116,7 +2016,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
- rdev->config.evergreen.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
@@ -2387,7 +2286,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
u32 srbm_status;
u32 grbm_status;
@@ -2400,19 +2299,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, ring);
+ r100_gpu_lockup_update(lockup, &rdev->cp);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
+ r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
}
- ring->rptr = RREG32(CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
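
Both versions of evergreen_gpu_is_lockup() share the same heuristic: if
GRBM_STATUS shows the GUI idle, there is no lockup; otherwise the driver
forces CP activity by committing two PACKET2 NOPs (0x80000000), re-reads
CP_RB_RPTR, and lets r100_gpu_cp_is_lockup() decide from whether the read
pointer advanced since the last check. Schematically (ring lock and commit
elided):

    if (!(grbm_status & GUI_ACTIVE)) {
        r100_gpu_lockup_update(lockup, &rdev->cp);  /* idle: refresh baseline */
        return false;
    }
    radeon_ring_write(rdev, 0x80000000);            /* PACKET2 NOP */
    radeon_ring_write(rdev, 0x80000000);
    rdev->cp.rptr = RREG32(CP_RB_RPTR);
    return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
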
@@ -2504,13 +2403,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
- if (rdev->family >= CHIP_CAYMAN) {
- cayman_cp_int_cntl_setup(rdev, 0,
- CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
- cayman_cp_int_cntl_setup(rdev, 1, 0);
- cayman_cp_int_cntl_setup(rdev, 2, 0);
- } else
- WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2555,7 +2448,6 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
int evergreen_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
- u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
@@ -2580,28 +2472,11 @@ int evergreen_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
- if (rdev->family >= CHIP_CAYMAN) {
- /* enable CP interrupts on all rings */
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
- DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
- cp_int_cntl |= TIME_STAMP_INT_ENABLE;
- }
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
- DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
- cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
- }
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
- DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
- cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
- }
- } else {
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
- DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
- cp_int_cntl |= RB_INT_ENABLE;
- cp_int_cntl |= TIME_STAMP_INT_ENABLE;
- }
+ if (rdev->irq.sw_int) {
+ DRM_DEBUG("evergreen_irq_set: sw int\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
-
if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2661,12 +2536,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
}
- if (rdev->family >= CHIP_CAYMAN) {
- cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
- cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
- cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
- } else
- WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -2701,7 +2571,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
return 0;
}
-static void evergreen_irq_ack(struct radeon_device *rdev)
+static inline void evergreen_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
@@ -2812,7 +2682,7 @@ void evergreen_irq_suspend(struct radeon_device *rdev)
r600_rlc_stop(rdev);
}
-static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
@@ -2858,9 +2728,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
return IRQ_NONE;
}
restart_ih:
- /* Order reading of wptr vs. reading of IH ring data */
- rmb();
-
/* display interrupts */
evergreen_irq_ack(rdev);
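
The "-" lines above drop an explicit read barrier. Its job was to order
the read of the IH write pointer against the subsequent reads of IH ring
entries, so a weakly ordered CPU never parses a slot that wptr does not
yet cover. The general consumer-side pattern, as a sketch with simplified
indexing:

    wptr = evergreen_get_ih_wptr(rdev);  /* how far the GPU has written       */
    rmb();                               /* order wptr read before ring reads */
    while (rptr != wptr) {
        src_id   = ring[rptr];           /* safe: covered by wptr */
        src_data = ring[rptr + 1];
        rptr = (rptr + 4) & rdev->ih.ptr_mask;
    }
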
@@ -3081,24 +2948,11 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
- radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_process(rdev);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
- if (rdev->family >= CHIP_CAYMAN) {
- switch (src_data) {
- case 0:
- radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
- break;
- case 1:
- radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
- break;
- case 2:
- radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
- break;
- }
- } else
- radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_process(rdev);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
@@ -3128,11 +2982,11 @@ restart_ih:
static int evergreen_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
- evergreen_pcie_gen2_enable(rdev);
+ if (!ASIC_IS_DCE5(rdev))
+ evergreen_pcie_gen2_enable(rdev);
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
@@ -3157,10 +3011,6 @@ static int evergreen_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
@@ -3173,8 +3023,8 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_blit_init(rdev);
if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
+ evergreen_blit_fini(rdev);
+ rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
@@ -3183,12 +3033,6 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -3198,9 +3042,7 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
- r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
r = evergreen_cp_load_microcode(rdev);
@@ -3210,23 +3052,6 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
- return r;
- }
-
- r = r600_audio_init(rdev);
- if (r) {
- DRM_ERROR("radeon: audio init failed\n");
- return r;
- }
-
return 0;
}
@@ -3246,11 +3071,15 @@ int evergreen_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
DRM_ERROR("evergreen startup failed on resume\n");
- rdev->accel_working = false;
+ return r;
+ }
+
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
@@ -3260,18 +3089,45 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ int r;
- r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
- radeon_ib_pool_suspend(rdev);
- r600_blit_suspend(rdev);
r700_cp_stop(rdev);
- ring->ready = false;
+ rdev->cp.ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
+ /* unpin shaders bo */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+
+ return 0;
+}
+
+int evergreen_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence *fence)
+{
+ int r;
+
+ mutex_lock(&rdev->r600_blit.mutex);
+ rdev->r600_blit.vb_ib = NULL;
+ r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
+ if (r) {
+ if (rdev->r600_blit.vb_ib)
+ radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+ mutex_unlock(&rdev->r600_blit.mutex);
+ return r;
+ }
+ evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
+ evergreen_blit_done_copy(rdev, fence);
+ mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
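
evergreen_copy_blit(), added above, serializes all blits on
r600_blit.mutex and follows a prepare/copy/done protocol; note that the
error path frees the vertex-buffer IB if prepare allocated one before
failing. A hypothetical call site, copying 16 GPU pages with fence setup
elided:

    r = evergreen_copy_blit(rdev, src_gpu_addr, dst_gpu_addr, 16, fence);
    if (r)
        dev_warn(rdev->dev, "blit copy failed (%d), falling back to memcpy\n", r);
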
@@ -3345,8 +3201,8 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3355,24 +3211,29 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = evergreen_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ }
/* Don't start up if the MC ucode is missing on BTC parts.
* The default clocks and voltages before the MC ucode
@@ -3390,17 +3251,14 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
- r600_audio_fini(rdev);
- r600_blit_fini(rdev);
+ evergreen_blit_fini(rdev);
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
- r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
@@ -3409,7 +3267,7 @@ void evergreen_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
-void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, speed_cntl;