author     Wolfgang Wiedmeyer <wolfgit@wiedmeyer.de>  2015-10-23 01:34:08 +0200
committer  Wolfgang Wiedmeyer <wolfgit@wiedmeyer.de>  2015-10-23 01:34:08 +0200
commit     5864eaf2f7f0ec3366be0efe9e7a5fd0476e25cc (patch)
tree       fd05a3a313d92c74af3854cca137797d76ebad71 /drivers/gpu
parent     786208aedd8f75b0720e36b2ca66b3a411417301 (diff)
remove firmware files, merge more uncritical stuff from 3.0.101
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 7
-rw-r--r--  drivers/gpu/drm/i2c/sil164_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 5
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 25
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 3
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 4
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 6
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 668
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 125
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 283
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 353
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 848
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 290
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 342
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 266
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 488
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 169
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 48
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2407
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 1093
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 101
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 208
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 218
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 145
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 106
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 170
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 101
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 624
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 43
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 416
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo_regs.h | 560
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 664
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 210
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 2
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 31
-rw-r--r--  drivers/gpu/drm/mga/mga_warp.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 79
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 169
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 1390
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 80
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 339
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 185
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 504
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 269
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 831
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 120
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 410
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fb.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 148
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gpio.c | 400
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gpio.h | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hdmi.c | 258
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwsq.h | 115
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 542
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 956
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mxm.c | 723
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 107
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c | 423
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 605
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.h | 57
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 206
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 635
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_temp.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c | 64
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_volt.c | 61
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fb.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_pm.c | 107
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_timer.c | 85
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fb.c | 126
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_gpio.c | 117
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_fb.c | 148
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv31_mpeg.c | 344
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fb.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 112
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c | 394
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 391
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 249
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_gpio.c | 247
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 132
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_mpeg.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c | 900
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 275
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vram.c | 60
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_bsp.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_crypt.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_vp.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_crypt.c | 78
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_ppp.c | 78
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.fuc | 262
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.fuc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_pm.c | 408
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_copy.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_copy.fuc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fb.c | 95
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c | 356
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.fuc | 400
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.h | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grctx.c | 360
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grgpc.fuc | 539
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h | 538
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grhub.fuc | 856
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h | 838
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_instmem.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_pm.c | 392
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vm.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vram.c | 61
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 2098
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 1
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/.gitignore | 3
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 6
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 162
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 2359
-rw-r--r--  drivers/gpu/drm/radeon/atombios_i2c.c | 139
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 422
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 614
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 1655
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 43
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 518
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 445
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 36
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 475
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h | 110
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 282
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 531
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 642
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 697
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 45
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 797
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 1700
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 83
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 352
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 249
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_blit_common.h | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 178
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 304
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 117
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 221
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 75
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 2126
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 313
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 498
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 173
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 111
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_tv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mem.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 125
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 74
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 501
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 189
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 178
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 301
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 379
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/cayman | 26
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 27
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600 | 21
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 97
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 115
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 153
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 25
-rw-r--r--  drivers/gpu/drm/savage/savage_state.c | 5
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 63
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.h | 11
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c | 199
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h | 259
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_escape.h | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_overlay.h | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_reg.h | 304
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 142
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 322
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 360
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 236
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 879
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 96
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 1163
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 120
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 164
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 81
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 46
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 273
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 187
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 1524
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 64
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 334
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_marker.c | 171
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 191
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 1101
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 571
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 5
271 files changed, 17182 insertions, 44506 deletions
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index d3f2e87..08792a7 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -24,8 +24,6 @@
*
*/
-#include <linux/module.h>
-
#include "ch7006_priv.h"
/* DRM encoder functions */
@@ -252,7 +250,10 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
- priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
+ priv->scale_property = drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "scale", 2);
+ priv->scale_property->values[0] = 0;
+ priv->scale_property->values[1] = 2;
drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
priv->select_subconnector);
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index b7d45ab..0b67732 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -24,8 +24,6 @@
*
*/
-#include <linux/module.h>
-
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "drm_encoder_slave.h"
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 2c8a60c..8f371e8 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -99,6 +99,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
buf_priv = buf->dev_private;
vma->vm_flags |= (VM_IO | VM_DONTCOPY);
+ vma->vm_file = filp;
buf_priv->currently_mapped = I810_BUF_MAPPED;
@@ -221,6 +222,8 @@ static int i810_dma_cleanup(struct drm_device *dev)
pci_free_consistent(dev->pdev, PAGE_SIZE,
dev_priv->hw_status_page,
dev_priv->dma_status_page);
+ /* Need to rewrite hardware status page */
+ I810_WRITE(0x02080, 0x1ffff000);
}
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -1207,8 +1210,6 @@ int i810_driver_load(struct drm_device *dev, unsigned long flags)
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
- pci_set_master(dev->pdev);
-
return 0;
}
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index ec12f7d..6f98d05 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -30,8 +30,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include <linux/module.h>
-
#include "drmP.h"
#include "drm.h"
#include "i810_drm.h"
@@ -43,17 +41,6 @@ static struct pci_device_id pciidlist[] = {
i810_PCI_IDS
};
-static const struct file_operations i810_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
-};
-
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
@@ -66,7 +53,17 @@ static struct drm_driver driver = {
.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
.dma_quiescent = i810_driver_dma_quiescent,
.ioctls = i810_ioctls,
- .fops = &i810_driver_fops,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+ },
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ce7fc77..0ae6a7c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,7 +3,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o \
+i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_debugfs.o \
i915_suspend.o \
i915_gem.o \
@@ -28,7 +28,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_dvo.o \
intel_ringbuffer.o \
intel_overlay.o \
- intel_sprite.o \
intel_opregion.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1ca799a..d3e8c54 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -227,7 +227,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
default:
DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
"slave %d.\n",
- val, adapter->name, dvo->slave_addr);
+ val, adapter->name,dvo->slave_addr);
goto fail;
}
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 4a03660..7eaa94e 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -111,7 +111,7 @@ static char *ch7xxx_get_id(uint8_t vid)
/** Reads an 8 bit register */
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
@@ -303,7 +303,7 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
uint8_t val;
- if ((i % 8) == 0)
+ if ((i % 8) == 0 )
DRM_LOG_KMS("\n %02X: ", i);
ch7xxx_readb(dvo, i, &val);
DRM_LOG_KMS("%02X ", val);
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 04f2893..a12ed94 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -344,8 +344,8 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
(adjusted_mode->hdisplay - 1)) >> 2;
y_ratio = (((mode->vdisplay - 1) << 16) /
(adjusted_mode->vdisplay - 1)) >> 2;
- ivch_write(dvo, VR42, x_ratio);
- ivch_write(dvo, VR41, y_ratio);
+ ivch_write (dvo, VR42, x_ratio);
+ ivch_write (dvo, VR41, y_ratio);
} else {
vr01 &= ~VR01_PANEL_FIT_ENABLE;
vr40 &= ~VR40_CLOCK_GATING_ENABLE;
@@ -410,7 +410,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
}
}
-struct intel_dvo_dev_ops ivch_ops = {
+struct intel_dvo_dev_ops ivch_ops= {
.init = ivch_init,
.dpms = ivch_dpms,
.mode_valid = ivch_mode_valid,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index a0b13a6..e4b4091 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -104,7 +104,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
- struct sil164_priv *sil = dvo->dev_priv;
+ struct sil164_priv *sil= dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
uint8_t out_buf[2];
struct i2c_msg msg = {
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index aa2cd3e..8ab2855 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -56,7 +56,7 @@
#define TFP410_CTL_2_MDI (1<<0)
#define TFP410_CTL_3 0x0A
-#define TFP410_CTL_3_DK_MASK (0x7<<5)
+#define TFP410_CTL_3_DK_MASK (0x7<<5)
#define TFP410_CTL_3_DK (1<<5)
#define TFP410_CTL_3_DKEN (1<<4)
#define TFP410_CTL_3_CTL_MASK (0x7<<1)
@@ -225,12 +225,12 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- /* As long as the basics are set up, since we don't have clock dependencies
- * in the mode setup, we can just leave the registers alone and everything
- * will work fine.
- */
- /* don't do much */
- return;
+ /* As long as the basics are set up, since we don't have clock dependencies
+ * in the mode setup, we can just leave the registers alone and everything
+ * will work fine.
+ */
+ /* don't do much */
+ return;
}
/* set the tfp410 power state */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7dd8806..19bab81 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -61,7 +61,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
const struct intel_device_info *info = INTEL_INFO(dev);
seq_printf(m, "gen: %d\n", info->gen);
- seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
B(is_mobile);
B(is_i85x);
@@ -82,7 +81,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
B(supports_tv);
B(has_bsd_ring);
B(has_blt_ring);
- B(has_llc);
#undef B
return 0;
@@ -100,12 +98,12 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj)
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
- switch (obj->tiling_mode) {
- default:
- case I915_TILING_NONE: return " ";
- case I915_TILING_X: return "X";
- case I915_TILING_Y: return "Y";
- }
+ switch (obj->tiling_mode) {
+ default:
+ case I915_TILING_NONE: return " ";
+ case I915_TILING_X: return "X";
+ case I915_TILING_Y: return "Y";
+ }
}
static const char *cache_level_str(int type)
@@ -125,7 +123,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
- obj->base.size / 1024,
+ obj->base.size,
obj->base.read_domains,
obj->base.write_domain,
obj->last_rendering_seqno,
@@ -219,7 +217,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
++mappable_count; \
} \
} \
-} while (0)
+} while(0)
static int i915_gem_object_info(struct seq_file *m, void* data)
{
@@ -501,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
for (i = 0; i < I915_NUM_RINGS; i++) {
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_GEN6(dev)) {
seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
dev_priv->ring[i].name,
I915_READ_IMR(&dev_priv->ring[i]));
@@ -563,6 +561,45 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
+static void i915_dump_object(struct seq_file *m,
+ struct io_mapping *mapping,
+ struct drm_i915_gem_object *obj)
+{
+ int page, page_count, i;
+
+ page_count = obj->base.size / PAGE_SIZE;
+ for (page = 0; page < page_count; page++) {
+ u32 *mem = io_mapping_map_wc(mapping,
+ obj->gtt_offset + page * PAGE_SIZE);
+ for (i = 0; i < PAGE_SIZE; i += 4)
+ seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
+ io_mapping_unmap(mem);
+ }
+}
+
+static int i915_batchbuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+ seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+ i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
+ }
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -598,40 +635,33 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- int ret;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
if (ring->size == 0)
return 0;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
seq_printf(m, "Ring %s:\n", ring->name);
seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
seq_printf(m, " Size : %08x\n", ring->size);
seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_GEN6(dev)) {
seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
}
seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
- mutex_unlock(&dev->struct_mutex);
-
return 0;
}
static const char *ring_str(int ring)
{
switch (ring) {
- case RCS: return "render";
- case VCS: return "bsd";
- case BCS: return "blt";
+ case RING_RENDER: return " render";
+ case RING_BSD: return " bsd";
+ case RING_BLT: return " blt";
default: return "";
}
}
@@ -674,7 +704,7 @@ static void print_error_buffers(struct seq_file *m,
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+ seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
@@ -684,7 +714,6 @@ static void print_error_buffers(struct seq_file *m,
tiling_flag(err->tiling),
dirty_flag(err->dirty),
purgeable_flag(err->purgeable),
- err->ring != -1 ? " " : "",
ring_str(err->ring),
cache_level_str(err->cache_level));
@@ -698,38 +727,6 @@ static void print_error_buffers(struct seq_file *m,
}
}
-static void i915_ring_error_state(struct seq_file *m,
- struct drm_device *dev,
- struct drm_i915_error_state *error,
- unsigned ring)
-{
- seq_printf(m, "%s command stream:\n", ring_str(ring));
- seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
- seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
- seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
- seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
- seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
- seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
- if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
- seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
- seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
- }
- if (INTEL_INFO(dev)->gen >= 4)
- seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
- seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
- if (INTEL_INFO(dev)->gen >= 6) {
- seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
- seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
- seq_printf(m, " SYNC_0: 0x%08x\n",
- error->semaphore_mboxes[ring][0]);
- seq_printf(m, " SYNC_1: 0x%08x\n",
- error->semaphore_mboxes[ring][1]);
- }
- seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
- seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
- seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
-}
-
static int i915_error_state(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -737,7 +734,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
- int i, j, page, offset, elt;
+ int i, page, offset, elt;
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (!dev_priv->first_error) {
@@ -752,20 +749,35 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
-
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, "ERROR: 0x%08x\n", error->error);
- seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+ seq_printf(m, "Blitter command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
+ seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
+ seq_printf(m, "Video (BSD) command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
+ seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
}
+ seq_printf(m, "Render command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
+ }
+ seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+ seq_printf(m, " seqno: 0x%08x\n", error->seqno);
- i915_ring_error_state(m, dev, error, RCS);
- if (HAS_BLT(dev))
- i915_ring_error_state(m, dev, error, BCS);
- if (HAS_BSD(dev))
- i915_ring_error_state(m, dev, error, VCS);
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
if (error->active_bo)
print_error_buffers(m, "Active",
@@ -777,10 +789,10 @@ static int i915_error_state(struct seq_file *m, void *unused)
error->pinned_bo,
error->pinned_bo_count);
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- struct drm_i915_error_object *obj;
+ for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
+ if (error->batchbuffer[i]) {
+ struct drm_i915_error_object *obj = error->batchbuffer[i];
- if ((obj = error->ring[i].batchbuffer)) {
seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
@@ -792,20 +804,11 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
}
+ }
- if (error->ring[i].num_requests) {
- seq_printf(m, "%s --- %d requests\n",
- dev_priv->ring[i].name,
- error->ring[i].num_requests);
- for (j = 0; j < error->ring[i].num_requests; j++) {
- seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
- error->ring[i].requests[j].seqno,
- error->ring[i].requests[j].jiffies,
- error->ring[i].requests[j].tail);
- }
- }
-
- if ((obj = error->ring[i].ringbuffer)) {
+ for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
+ if (error->ringbuffer[i]) {
+ struct drm_i915_error_object *obj = error->ringbuffer[i];
seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
@@ -838,16 +841,7 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u16 crstanddelay;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- crstanddelay = I915_READ16(CRSTANDVID);
-
- mutex_unlock(&dev->struct_mutex);
+ u16 crstanddelay = I915_READ16(CRSTANDVID);
seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
@@ -945,11 +939,7 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 delayfreq;
- int ret, i;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ int i;
for (i = 0; i < 16; i++) {
delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -957,8 +947,6 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
(delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
}
- mutex_unlock(&dev->struct_mutex);
-
return 0;
}
@@ -973,40 +961,24 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 inttoext;
- int ret, i;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ int i;
for (i = 1; i <= 32; i++) {
inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
}
- mutex_unlock(&dev->struct_mutex);
-
return 0;
}
-static int ironlake_drpc_info(struct seq_file *m)
+static int i915_drpc_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 rgvmodectl, rstdbyctl;
- u16 crstandvid;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- rgvmodectl = I915_READ(MEMMODECTL);
- rstdbyctl = I915_READ(RSTDBYCTL);
- crstandvid = I915_READ16(CRSTANDVID);
-
- mutex_unlock(&dev->struct_mutex);
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u32 rstdbyctl = I915_READ(RSTDBYCTL);
+ u16 crstandvid = I915_READ16(CRSTANDVID);
seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
"yes" : "no");
@@ -1056,95 +1028,6 @@ static int ironlake_drpc_info(struct seq_file *m)
return 0;
}
-static int gen6_drpc_info(struct seq_file *m)
-{
-
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rpmodectl1, gt_core_status, rcctl1;
- unsigned forcewake_count;
- int count=0, ret;
-
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- spin_lock_irq(&dev_priv->gt_lock);
- forcewake_count = dev_priv->forcewake_count;
- spin_unlock_irq(&dev_priv->gt_lock);
-
- if (forcewake_count) {
- seq_printf(m, "RC information inaccurate because somebody "
- "holds a forcewake reference \n");
- } else {
- /* NB: we cannot use forcewake, else we read the wrong values */
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
- udelay(10);
- seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
- }
-
- gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
- trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
-
- rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
- mutex_unlock(&dev->struct_mutex);
-
- seq_printf(m, "Video Turbo Mode: %s\n",
- yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rpmodectl1 & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
- yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
- seq_printf(m, "RC1e Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
- seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
- seq_printf(m, "Deep RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
- seq_printf(m, "Deepest RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
- seq_printf(m, "Current RC state: ");
- switch (gt_core_status & GEN6_RCn_MASK) {
- case GEN6_RC0:
- if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
- seq_printf(m, "Core Power Down\n");
- else
- seq_printf(m, "on\n");
- break;
- case GEN6_RC3:
- seq_printf(m, "RC3\n");
- break;
- case GEN6_RC6:
- seq_printf(m, "RC6\n");
- break;
- case GEN6_RC7:
- seq_printf(m, "RC7\n");
- break;
- default:
- seq_printf(m, "Unknown\n");
- break;
- }
-
- seq_printf(m, "Core Power Down: %s\n",
- yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
- return 0;
-}
-
-static int i915_drpc_info(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
-
- if (IS_GEN6(dev) || IS_GEN7(dev))
- return gen6_drpc_info(m);
- else
- return ironlake_drpc_info(m);
-}
-
static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1240,59 +1123,14 @@ static int i915_emon_status(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ring_freq_table(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
- int gpu_freq, ia_freq;
-
- if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
- seq_printf(m, "unsupported on this chipset\n");
- return 0;
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
-
- for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
- gpu_freq++) {
- I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_READ_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode read of freq table timed out\n");
- continue;
- }
- ia_freq = I915_READ(GEN6_PCODE_DATA);
- seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static int i915_gfxec(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
- mutex_unlock(&dev->struct_mutex);
-
return 0;
}
@@ -1391,119 +1229,16 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned forcewake_count;
- spin_lock_irq(&dev_priv->gt_lock);
- forcewake_count = dev_priv->forcewake_count;
- spin_unlock_irq(&dev_priv->gt_lock);
-
- seq_printf(m, "forcewake count = %u\n", forcewake_count);
-
- return 0;
-}
-
-static const char *swizzle_string(unsigned swizzle)
-{
- switch(swizzle) {
- case I915_BIT_6_SWIZZLE_NONE:
- return "none";
- case I915_BIT_6_SWIZZLE_9:
- return "bit9";
- case I915_BIT_6_SWIZZLE_9_10:
- return "bit9/bit10";
- case I915_BIT_6_SWIZZLE_9_11:
- return "bit9/bit11";
- case I915_BIT_6_SWIZZLE_9_10_11:
- return "bit9/bit10/bit11";
- case I915_BIT_6_SWIZZLE_9_17:
- return "bit9/bit17";
- case I915_BIT_6_SWIZZLE_9_10_17:
- return "bit9/bit10/bit17";
- case I915_BIT_6_SWIZZLE_UNKNOWN:
- return "unkown";
- }
-
- return "bug";
-}
-
-static int i915_swizzle_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
- swizzle_string(dev_priv->mm.bit_6_swizzle_x));
- seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
- swizzle_string(dev_priv->mm.bit_6_swizzle_y));
-
- if (IS_GEN3(dev) || IS_GEN4(dev)) {
- seq_printf(m, "DDC = 0x%08x\n",
- I915_READ(DCC));
- seq_printf(m, "C0DRB3 = 0x%04x\n",
- I915_READ16(C0DRB3));
- seq_printf(m, "C1DRB3 = 0x%04x\n",
- I915_READ16(C1DRB3));
- } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
- seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
- I915_READ(MAD_DIMM_C0));
- seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
- I915_READ(MAD_DIMM_C1));
- seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
- I915_READ(MAD_DIMM_C2));
- seq_printf(m, "TILECTL = 0x%08x\n",
- I915_READ(TILECTL));
- seq_printf(m, "ARB_MODE = 0x%08x\n",
- I915_READ(ARB_MODE));
- seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
- I915_READ(DISP_ARB_CTL));
- }
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static int i915_ppgtt_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- int i, ret;
-
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- if (INTEL_INFO(dev)->gen == 6)
- seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
-
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ring = &dev_priv->ring[i];
-
- seq_printf(m, "%s\n", ring->name);
- if (INTEL_INFO(dev)->gen == 7)
- seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
- seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
- seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
- seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
- }
- if (dev_priv->mm.aliasing_ppgtt) {
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
- seq_printf(m, "aliasing PPGTT:\n");
- seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
- }
- seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
- mutex_unlock(&dev->struct_mutex);
+ seq_printf(m, "forcewake count = %d\n",
+ atomic_read(&dev_priv->forcewake_count));
return 0;
}
static int
-i915_debugfs_common_open(struct inode *inode,
- struct file *filp)
+i915_wedged_open(struct inode *inode,
+ struct file *filp)
{
filp->private_data = inode->i_private;
return 0;
@@ -1520,12 +1255,12 @@ i915_wedged_read(struct file *filp,
char buf[80];
int len;
- len = snprintf(buf, sizeof(buf),
+ len = snprintf(buf, sizeof (buf),
"wedged : %d\n",
atomic_read(&dev_priv->mm.wedged));
- if (len > sizeof(buf))
- len = sizeof(buf);
+ if (len > sizeof (buf))
+ len = sizeof (buf);
return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}
@@ -1541,7 +1276,7 @@ i915_wedged_write(struct file *filp,
int val = 1;
if (cnt > 0) {
- if (cnt > sizeof(buf) - 1)
+ if (cnt > sizeof (buf) - 1)
return -EINVAL;
if (copy_from_user(buf, ubuf, cnt))
@@ -1559,145 +1294,12 @@ i915_wedged_write(struct file *filp,
static const struct file_operations i915_wedged_fops = {
.owner = THIS_MODULE,
- .open = i915_debugfs_common_open,
+ .open = i915_wedged_open,
.read = i915_wedged_read,
.write = i915_wedged_write,
.llseek = default_llseek,
};
-static ssize_t
-i915_max_freq_read(struct file *filp,
- char __user *ubuf,
- size_t max,
- loff_t *ppos)
-{
- struct drm_device *dev = filp->private_data;
- drm_i915_private_t *dev_priv = dev->dev_private;
- char buf[80];
- int len;
-
- len = snprintf(buf, sizeof(buf),
- "max freq: %d\n", dev_priv->max_delay * 50);
-
- if (len > sizeof(buf))
- len = sizeof(buf);
-
- return simple_read_from_buffer(ubuf, max, ppos, buf, len);
-}
-
-static ssize_t
-i915_max_freq_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- struct drm_device *dev = filp->private_data;
- struct drm_i915_private *dev_priv = dev->dev_private;
- char buf[20];
- int val = 1;
-
- if (cnt > 0) {
- if (cnt > sizeof(buf) - 1)
- return -EINVAL;
-
- if (copy_from_user(buf, ubuf, cnt))
- return -EFAULT;
- buf[cnt] = 0;
-
- val = simple_strtoul(buf, NULL, 0);
- }
-
- DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
-
- /*
- * Turbo will still be enabled, but won't go above the set value.
- */
- dev_priv->max_delay = val / 50;
-
- gen6_set_rps(dev, val / 50);
-
- return cnt;
-}
-
-static const struct file_operations i915_max_freq_fops = {
- .owner = THIS_MODULE,
- .open = i915_debugfs_common_open,
- .read = i915_max_freq_read,
- .write = i915_max_freq_write,
- .llseek = default_llseek,
-};
-
-static ssize_t
-i915_cache_sharing_read(struct file *filp,
- char __user *ubuf,
- size_t max,
- loff_t *ppos)
-{
- struct drm_device *dev = filp->private_data;
- drm_i915_private_t *dev_priv = dev->dev_private;
- char buf[80];
- u32 snpcr;
- int len;
-
- mutex_lock(&dev_priv->dev->struct_mutex);
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
- mutex_unlock(&dev_priv->dev->struct_mutex);
-
- len = snprintf(buf, sizeof(buf),
- "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
- GEN6_MBC_SNPCR_SHIFT);
-
- if (len > sizeof(buf))
- len = sizeof(buf);
-
- return simple_read_from_buffer(ubuf, max, ppos, buf, len);
-}
-
-static ssize_t
-i915_cache_sharing_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- struct drm_device *dev = filp->private_data;
- struct drm_i915_private *dev_priv = dev->dev_private;
- char buf[20];
- u32 snpcr;
- int val = 1;
-
- if (cnt > 0) {
- if (cnt > sizeof(buf) - 1)
- return -EINVAL;
-
- if (copy_from_user(buf, ubuf, cnt))
- return -EFAULT;
- buf[cnt] = 0;
-
- val = simple_strtoul(buf, NULL, 0);
- }
-
- if (val < 0 || val > 3)
- return -EINVAL;
-
- DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
-
- /* Update the cache sharing policy here as well */
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
- snpcr &= ~GEN6_MBC_SNPCR_MASK;
- snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
- I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
-
- return cnt;
-}
-
-static const struct file_operations i915_cache_sharing_fops = {
- .owner = THIS_MODULE,
- .open = i915_debugfs_common_open,
- .read = i915_cache_sharing_read,
- .write = i915_cache_sharing_write,
- .llseek = default_llseek,
-};
-
/* As the drm_debugfs_init() routines are called before dev->dev_private is
* allocated we need to hook into the minor for release. */
static int
@@ -1716,21 +1318,33 @@ drm_add_fake_info_node(struct drm_minor *minor,
node->minor = minor;
node->dent = ent;
node->info_ent = (void *) key;
-
- mutex_lock(&minor->debugfs_lock);
- list_add(&node->list, &minor->debugfs_list);
- mutex_unlock(&minor->debugfs_lock);
+ list_add(&node->list, &minor->debugfs_nodes.list);
return 0;
}
+static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+
+ ent = debugfs_create_file("i915_wedged",
+ S_IRUGO | S_IWUSR,
+ root, dev,
+ &i915_wedged_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
+}
+
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (INTEL_INFO(dev)->gen < 6)
+ if (!IS_GEN6(dev))
return 0;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1747,7 +1361,7 @@ int i915_forcewake_release(struct inode *inode, struct file *file)
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (INTEL_INFO(dev)->gen < 6)
+ if (!IS_GEN6(dev))
return 0;
/*
@@ -1785,24 +1399,6 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
-static int i915_debugfs_create(struct dentry *root,
- struct drm_minor *minor,
- const char *name,
- const struct file_operations *fops)
-{
- struct drm_device *dev = minor->dev;
- struct dentry *ent;
-
- ent = debugfs_create_file(name,
- S_IRUGO | S_IWUSR,
- root, dev,
- fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
-
- return drm_add_fake_info_node(minor, ent, fops);
-}
-
static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
@@ -1826,6 +1422,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
+ {"i915_batchbuffers", i915_batchbuffer_info, 0},
{"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
@@ -1833,7 +1430,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
- {"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_sr_status", i915_sr_status, 0},
@@ -1841,8 +1437,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
- {"i915_swizzle_info", i915_swizzle_info, 0},
- {"i915_ppgtt_info", i915_ppgtt_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -1850,9 +1444,7 @@ int i915_debugfs_init(struct drm_minor *minor)
{
int ret;
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_wedged",
- &i915_wedged_fops);
+ ret = i915_wedged_create(minor->debugfs_root, minor);
if (ret)
return ret;
@@ -1860,18 +1452,6 @@ int i915_debugfs_init(struct drm_minor *minor)
if (ret)
return ret;
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_max_freq",
- &i915_max_freq_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_cache_sharing",
- &i915_cache_sharing_fops);
- if (ret)
- return ret;
-
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
@@ -1885,10 +1465,6 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
- 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9341eb8..ef16443 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,7 +41,6 @@
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <acpi/video.h>
static void i915_write_hws_pga(struct drm_device *dev)
@@ -781,12 +780,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_RELAXED_DELTA:
value = 1;
break;
- case I915_PARAM_HAS_GEN7_SOL_RESET:
- value = 1;
- break;
- case I915_PARAM_HAS_LLC:
- value = HAS_LLC(dev);
- break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -891,7 +884,7 @@ static int i915_get_bridge_dev(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
if (!dev_priv->bridge_dev) {
DRM_ERROR("bridge device not found\n");
return -1;
@@ -1078,9 +1071,6 @@ static void i915_setup_compression(struct drm_device *dev, int size)
unsigned long cfb_base;
unsigned long ll_base = 0;
- /* Just in case the BIOS is doing something questionable. */
- intel_disable_fbc(dev);
-
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
@@ -1107,6 +1097,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->cfb_size = size;
+ intel_disable_fbc(dev);
dev_priv->compressed_fb = compressed_fb;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
@@ -1196,39 +1187,22 @@ static int i915_load_gem_init(struct drm_device *dev)
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
- mutex_lock(&dev->struct_mutex);
- if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) {
- /* PPGTT pdes are stolen from global gtt ptes, so shrink the
- * aperture accordingly when using aliasing ppgtt. */
- gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
- /* For paranoia keep the guard page in between. */
- gtt_size -= PAGE_SIZE;
-
- i915_gem_do_init(dev, 0, mappable_size, gtt_size);
-
- ret = i915_gem_init_aliasing_ppgtt(dev);
- if (ret)
- return ret;
- } else {
- /* Let GEM Manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch
- * page. There are a number of places where the hardware
- * apparently prefetches past the end of the object, and we've
- * seen multiple hangs with the GPU head pointer stuck in a
- * batchbuffer bound at the last page of the aperture. One page
- * should be enough to keep any prefetching inside of the
- * aperture.
- */
- i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
- }
+ /* Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch page.
+ * There are a number of places where the hardware apparently
+ * prefetches past the end of the object, and we've seen multiple
+ * hangs with the GPU head pointer stuck in a batchbuffer bound
+ * at the last page of the aperture. One page should be enough to
+ * keep any prefetching inside of the aperture.
+ */
+ i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
- ret = i915_gem_init_hw(dev);
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
- if (ret) {
- i915_gem_cleanup_aliasing_ppgtt(dev);
+ if (ret)
return ret;
- }
/* Try to set up FBC with a reasonable compressed buffer size */
if (I915_HAS_FBC(dev) && i915_powersave) {
@@ -1315,7 +1289,6 @@ cleanup_gem:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
- i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_vga_switcheroo:
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
@@ -1765,10 +1738,10 @@ static DEFINE_SPINLOCK(mchdev_lock);
*/
unsigned long i915_read_mch_val(void)
{
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv;
unsigned long chipset_val, graphics_val, ret = 0;
- spin_lock(&mchdev_lock);
+ spin_lock(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
@@ -1779,9 +1752,9 @@ unsigned long i915_read_mch_val(void)
ret = chipset_val + graphics_val;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock(&mchdev_lock);
- return ret;
+ return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
@@ -1792,10 +1765,10 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val);
*/
bool i915_gpu_raise(void)
{
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
@@ -1806,9 +1779,9 @@ bool i915_gpu_raise(void)
dev_priv->max_delay--;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock(&mchdev_lock);
- return ret;
+ return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
@@ -1820,10 +1793,10 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise);
*/
bool i915_gpu_lower(void)
{
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
@@ -1834,9 +1807,9 @@ bool i915_gpu_lower(void)
dev_priv->max_delay++;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock(&mchdev_lock);
- return ret;
+ return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
@@ -1847,10 +1820,10 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
*/
bool i915_gpu_busy(void)
{
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv;
bool ret = false;
- spin_lock(&mchdev_lock);
+ spin_lock(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
@@ -1858,9 +1831,9 @@ bool i915_gpu_busy(void)
ret = dev_priv->busy;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock(&mchdev_lock);
- return ret;
+ return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
@@ -1872,10 +1845,10 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy);
*/
bool i915_gpu_turbo_disable(void)
{
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
@@ -1888,9 +1861,9 @@ bool i915_gpu_turbo_disable(void)
ret = false;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock(&mchdev_lock);
- return ret;
+ return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
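
All five exported helpers above, i915_read_mch_val through i915_gpu_turbo_disable, share one shape: a file-scope device pointer published under mchdev_lock so an external module (intel_ips) can call into the driver safely whether or not it is loaded. A module-context sketch of that shape, with illustrative names:

    #include <linux/spinlock.h>
    #include <linux/export.h>
    #include <linux/types.h>

    struct example_state { int max_delay; };

    static DEFINE_SPINLOCK(example_lock);
    static struct example_state *example_dev;   /* NULL until the driver loads */

    bool example_gpu_lower(void)
    {
            bool ret = true;

            spin_lock(&example_lock);
            if (!example_dev) {             /* driver absent: fail gracefully */
                    ret = false;
                    goto out_unlock;
            }
            example_dev->max_delay++;       /* lower the frequency ceiling */
    out_unlock:
            spin_unlock(&example_lock);
            return ret;
    }
    EXPORT_SYMBOL_GPL(example_gpu_lower);
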
@@ -1951,8 +1924,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
- pci_set_master(dev->pdev);
-
/* overlay on gen2 is broken and can't address above 1G */
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
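
DMA_BIT_MASK(30) caps coherent allocations at 1 GiB, which is exactly what the gen2 overlay comment above requires. The macro below matches the kernel's definition; the program just shows what the mask works out to:

    #include <stdio.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
            printf("30-bit coherent mask: 0x%llx (%llu MiB reachable)\n",
                   DMA_BIT_MASK(30), (DMA_BIT_MASK(30) + 1) >> 20);
            return 0;
    }
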
@@ -1985,7 +1956,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
- dev_priv->mm.gtt_mapping =
+ dev_priv->mm.gtt_mapping =
io_mapping_create_wc(dev->agp->base, agp_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
@@ -2068,14 +2039,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- spin_lock_init(&dev_priv->gt_lock);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
- if (IS_IVYBRIDGE(dev))
- dev_priv->num_pipe = 3;
- else if (IS_MOBILE(dev) || !IS_GEN2(dev))
+ if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
else
dev_priv->num_pipe = 1;
@@ -2152,7 +2120,7 @@ int i915_driver_unload(struct drm_device *dev)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
mutex_unlock(&dev->struct_mutex);
@@ -2205,7 +2173,6 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
- i915_gem_cleanup_aliasing_ppgtt(dev);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
@@ -2271,12 +2238,18 @@ void i915_driver_lastclose(struct drm_device * dev)
i915_gem_lastclose(dev);
+ if (dev_priv->agp_heap)
+ i915_mem_takedown(&(dev_priv->agp_heap));
+
i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
i915_gem_release(dev, file_priv);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -2295,11 +2268,11 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
@@ -2327,8 +2300,6 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2b8c1f2..111686a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -35,84 +35,46 @@
#include "intel_drv.h"
#include <linux/console.h>
-#include <linux/module.h>
#include "drm_crtc_helper.h"
-static int i915_modeset __read_mostly = -1;
+static int i915_modeset = -1;
module_param_named(modeset, i915_modeset, int, 0400);
-MODULE_PARM_DESC(modeset,
- "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
- "1=on, -1=force vga console preference [default])");
-unsigned int i915_fbpercrtc __always_unused = 0;
+unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-int i915_panel_ignore_lid __read_mostly = 0;
+int i915_panel_ignore_lid = 0;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
-MODULE_PARM_DESC(panel_ignore_lid,
- "Override lid status (0=autodetect [default], 1=lid open, "
- "-1=lid closed)");
-unsigned int i915_powersave __read_mostly = 1;
+unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);
-MODULE_PARM_DESC(powersave,
- "Enable powersavings, fbc, downclocking, etc. (default: true)");
-int i915_semaphores __read_mostly = -1;
+unsigned int i915_semaphores = 0;
module_param_named(semaphores, i915_semaphores, int, 0600);
-MODULE_PARM_DESC(semaphores,
- "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-int i915_enable_rc6 __read_mostly = -1;
+unsigned int i915_enable_rc6 = 0;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
-MODULE_PARM_DESC(i915_enable_rc6,
- "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
-int i915_enable_fbc __read_mostly = -1;
+unsigned int i915_enable_fbc = 0;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
-MODULE_PARM_DESC(i915_enable_fbc,
- "Enable frame buffer compression for power savings "
- "(default: -1 (use per-chip default))");
-unsigned int i915_lvds_downclock __read_mostly = 0;
+unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
- "Use panel (LVDS/eDP) downclocking for power savings "
- "(default: false)");
-int i915_panel_use_ssc __read_mostly = -1;
+unsigned int i915_panel_use_ssc = 1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
-MODULE_PARM_DESC(lvds_use_ssc,
- "Use Spread Spectrum Clock with panels [LVDS/eDP] "
- "(default: auto from VBT)");
-int i915_vbt_sdvo_panel_type __read_mostly = -1;
+int i915_vbt_sdvo_panel_type = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
-MODULE_PARM_DESC(vbt_sdvo_panel_type,
- "Override selection of SDVO panel mode in the VBT "
- "(default: auto)");
-static bool i915_try_reset __read_mostly = true;
+static bool i915_try_reset = true;
module_param_named(reset, i915_try_reset, bool, 0600);
-MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
-
-bool i915_enable_hangcheck __read_mostly = true;
-module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
-MODULE_PARM_DESC(enable_hangcheck,
- "Periodically check GPU activity for detecting hangs. "
- "WARNING: Disabling this can cause system wide hangs. "
- "(default: true)");
-
-bool i915_enable_ppgtt __read_mostly = 1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600);
-MODULE_PARM_DESC(i915_enable_ppgtt,
- "Enable PPGTT (default: true)");
static struct drm_driver driver;
extern int intel_agp_enabled;
#define INTEL_VGA_DEVICE(id, info) { \
- .class = PCI_BASE_CLASS_DISPLAY << 16, \
+ .class = PCI_CLASS_DISPLAY_VGA << 8, \
.class_mask = 0xff0000, \
.vendor = 0x8086, \
.device = id, \
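
Both spellings of .class above evaluate to the same 24-bit match value, 0x030000; with .class_mask = 0xff0000 only the base-class byte is compared, so the two forms bind to the same set of display devices. A quick check:

    #include <stdio.h>

    #define PCI_BASE_CLASS_DISPLAY  0x03    /* base class byte */
    #define PCI_CLASS_DISPLAY_VGA   0x0300  /* base class + subclass */

    int main(void)
    {
            printf("new form: 0x%06x\n", PCI_BASE_CLASS_DISPLAY << 16);
            printf("old form: 0x%06x\n", PCI_CLASS_DISPLAY_VGA << 8);
            return 0;   /* both print 0x030000 */
    }
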
@@ -203,7 +165,7 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5,
- .need_gfx_hws = 1, .has_hotplug = 1,
+ .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
};
@@ -219,7 +181,6 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
- .has_llc = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -228,7 +189,6 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_fbc = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
- .has_llc = 1,
};
static const struct intel_device_info intel_ivybridge_d_info = {
@@ -236,7 +196,6 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
- .has_llc = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@@ -245,7 +204,6 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
.has_bsd_ring = 1,
.has_blt_ring = 1,
- .has_llc = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -304,7 +262,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-void intel_detect_pch(struct drm_device *dev)
+void intel_detect_pch (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pch;
@@ -337,7 +295,7 @@ void intel_detect_pch(struct drm_device *dev)
}
}
-void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -353,22 +311,6 @@ void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
-void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
-{
- int count;
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
- udelay(10);
-
- I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
- POSTING_READ(FORCEWAKE_MT);
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
- udelay(10);
-}
-
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
@@ -377,35 +319,17 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
- if (dev_priv->forcewake_count++ == 0)
- dev_priv->display.force_wake_get(dev_priv);
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
+ WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
-static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
-{
- u32 gtfifodbg;
- gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
- if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
- "MMIO read or write has been dropped %x\n", gtfifodbg))
- I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+ /* Forcewake is atomic in case we get in here without the lock */
+ if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
+ __gen6_gt_force_wake_get(dev_priv);
}
-void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
+ POSTING_READ(FORCEWAKE);
}
/*
@@ -413,32 +337,20 @@ void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
*/
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
- unsigned long irqflags;
+ WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
- if (--dev_priv->forcewake_count == 0)
- dev_priv->display.force_wake_put(dev_priv);
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ if (atomic_dec_and_test(&dev_priv->forcewake_count))
+ __gen6_gt_force_wake_put(dev_priv);
}
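
The replacement code above swaps the gt_lock/counter pair for a bare atomic_t: the first getter (the 0 to 1 transition) wakes the GT, the last putter (1 to 0) lets it sleep, and the WARN_ON still expects callers to hold struct_mutex; per the in-tree comment, the atomic only guards a stray unlocked entry. The idiom in isolation, with an illustrative hardware hook:

    #include <linux/atomic.h>
    #include <linux/types.h>

    static atomic_t example_fw_count = ATOMIC_INIT(0);

    static void example_hw_wake(bool on)
    {
            /* MMIO handshake in the real driver; stubbed out here */
    }

    static void example_fw_get(void)
    {
            /* only the 0 -> 1 transition touches hardware */
            if (atomic_add_return(1, &example_fw_count) == 1)
                    example_hw_wake(true);
    }

    static void example_fw_put(void)
    {
            /* only the 1 -> 0 transition touches hardware */
            if (atomic_dec_and_test(&example_fw_count))
                    example_hw_wake(false);
    }
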
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
- int ret = 0;
-
- if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
- int loop = 500;
- u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
- udelay(10);
- fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- }
- if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
- ++ret;
- dev_priv->gt_fifo_count = fifo;
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo < 20 && loop--) {
+ udelay(10);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
}
- dev_priv->gt_fifo_count--;
-
- return ret;
}
static int i915_drm_freeze(struct drm_device *dev)
@@ -523,12 +435,9 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
- error = i915_gem_init_hw(dev);
+ error = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
- if (HAS_PCH_SPLIT(dev))
- ironlake_init_pch_refclk(dev);
-
drm_mode_config_reset(dev);
drm_irq_install(dev);
@@ -634,40 +543,13 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
static int gen6_do_reset(struct drm_device *dev, u8 flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
- unsigned long irqflags;
-
- /* Hold gt_lock across reset to prevent any register access
- * with forcewake not set correctly
- */
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
-
- /* Reset the chip */
-
- /* GEN6_GDRST is not in the gt power well, no need to check
- * for fifo space for the write or forcewake the chip for
- * the read
- */
- I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
-
- /* Spin waiting for the device to ack the reset request */
- ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
-
- /* If reset with a user forcewake, try to restore, otherwise turn it off */
- if (dev_priv->forcewake_count)
- dev_priv->display.force_wake_get(dev_priv);
- else
- dev_priv->display.force_wake_put(dev_priv);
- /* Restore fifo count */
- dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
- return ret;
+ I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
+ return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
}
/**
- * i915_reset - reset chip after a hang
+ * i965_reset - reset chip after a hang
* @dev: drm device to reset
* @flags: reset domains
*
@@ -707,6 +589,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
case 7:
case 6:
ret = gen6_do_reset(dev, flags);
+ /* If reset with a user forcewake, try to restore */
+ if (atomic_read(&dev_priv->forcewake_count))
+ __gen6_gt_force_wake_get(dev_priv);
break;
case 5:
ret = ironlake_do_reset(dev, flags);
@@ -743,16 +628,12 @@ int i915_reset(struct drm_device *dev, u8 flags)
!dev_priv->mm.suspended) {
dev_priv->mm.suspended = 0;
- i915_gem_init_swizzling(dev);
-
dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
if (HAS_BSD(dev))
dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
if (HAS_BLT(dev))
dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
- i915_gem_init_ppgtt(dev);
-
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_mode_config_reset(dev);
@@ -861,12 +742,12 @@ static int i915_pm_poweroff(struct device *dev)
}
static const struct dev_pm_ops i915_pm_ops = {
- .suspend = i915_pm_suspend,
- .resume = i915_pm_resume,
- .freeze = i915_pm_freeze,
- .thaw = i915_pm_thaw,
- .poweroff = i915_pm_poweroff,
- .restore = i915_pm_resume,
+ .suspend = i915_pm_suspend,
+ .resume = i915_pm_resume,
+ .freeze = i915_pm_freeze,
+ .thaw = i915_pm_thaw,
+ .poweroff = i915_pm_poweroff,
+ .restore = i915_pm_resume,
};
static struct vm_operations_struct i915_gem_vm_ops = {
@@ -875,24 +756,9 @@ static struct vm_operations_struct i915_gem_vm_ops = {
.close = drm_gem_vm_close,
};
-static const struct file_operations i915_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_gem_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = i915_compat_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
static struct drm_driver driver = {
- /* Don't use MTRRs here; the Xserver or userspace app should
- * deal with them for Intel hardware.
+ /* don't use mtrr's here, the Xserver or user space app should
+ * deal with them for intel hardware.
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
@@ -923,7 +789,21 @@ static struct drm_driver driver = {
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = i915_gem_dumb_destroy,
.ioctls = i915_ioctls,
- .fops = &i915_driver_fops,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = i915_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+ },
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -987,46 +867,3 @@ module_exit(i915_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
-
-#define __i915_read(x, y) \
-u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
- u##x val = 0; \
- if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- unsigned long irqflags; \
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
- if (dev_priv->forcewake_count == 0) \
- dev_priv->display.force_wake_get(dev_priv); \
- val = read##y(dev_priv->regs + reg); \
- if (dev_priv->forcewake_count == 0) \
- dev_priv->display.force_wake_put(dev_priv); \
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
- } else { \
- val = read##y(dev_priv->regs + reg); \
- } \
- trace_i915_reg_rw(false, reg, val, sizeof(val)); \
- return val; \
-}
-
-__i915_read(8, b)
-__i915_read(16, w)
-__i915_read(32, l)
-__i915_read(64, q)
-#undef __i915_read
-
-#define __i915_write(x, y) \
-void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
- u32 __fifo_ret = 0; \
- trace_i915_reg_rw(true, reg, val, sizeof(val)); \
- if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
- } \
- write##y(val, dev_priv->regs + reg); \
- if (unlikely(__fifo_ret)) { \
- gen6_gt_check_fifodbg(dev_priv); \
- } \
-}
-__i915_write(8, b)
-__i915_write(16, w)
-__i915_write(32, l)
-__i915_write(64, q)
-#undef __i915_write
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c0f19f5..b570415 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -35,9 +35,7 @@
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
-#include <linux/backlight.h>
/* General customization:
*/
@@ -108,7 +106,6 @@ struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
-struct drm_i915_private;
struct intel_opregion {
struct opregion_header *header;
@@ -128,15 +125,11 @@ struct drm_i915_master_private {
struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
-#define I915_MAX_NUM_FENCES 16
-/* 16 fences + sign bit for FENCE_REG_NONE */
-#define I915_MAX_NUM_FENCE_BITS 5
struct drm_i915_fence_reg {
struct list_head lru_list;
struct drm_i915_gem_object *obj;
uint32_t setup_seqno;
- int pin_count;
};
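
The literal 5 that replaces I915_MAX_NUM_FENCE_BITS in the bitfields further down exists because 16 fence registers need values 0..15 plus a sign bit for I915_FENCE_REG_NONE (-1); a signed 5-bit field spans -16..15. A runnable check:

    #include <assert.h>

    struct fence_field { signed int fence_reg : 5; };

    int main(void)
    {
            struct fence_field f;

            f.fence_reg = -1;       /* I915_FENCE_REG_NONE round-trips */
            assert(f.fence_reg == -1);
            f.fence_reg = 15;       /* highest of the 16 fences */
            assert(f.fence_reg == 15);
            return 0;
    }
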
struct sdvo_device_mapping {
@@ -145,6 +138,7 @@ struct sdvo_device_mapping {
u8 slave_addr;
u8 dvo_wiring;
u8 i2c_pin;
+ u8 i2c_speed;
u8 ddc_pin;
};
@@ -154,40 +148,33 @@ struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
u32 pipestat[I915_MAX_PIPES];
- u32 tail[I915_NUM_RINGS];
- u32 head[I915_NUM_RINGS];
- u32 ipeir[I915_NUM_RINGS];
- u32 ipehr[I915_NUM_RINGS];
- u32 instdone[I915_NUM_RINGS];
- u32 acthd[I915_NUM_RINGS];
- u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
- /* our own tracking of ring head and tail */
- u32 cpu_ring_head[I915_NUM_RINGS];
- u32 cpu_ring_tail[I915_NUM_RINGS];
+ u32 ipeir;
+ u32 ipehr;
+ u32 instdone;
+ u32 acthd;
u32 error; /* gen6+ */
- u32 instpm[I915_NUM_RINGS];
- u32 instps[I915_NUM_RINGS];
+ u32 bcs_acthd; /* gen6+ blt engine */
+ u32 bcs_ipehr;
+ u32 bcs_ipeir;
+ u32 bcs_instdone;
+ u32 bcs_seqno;
+ u32 vcs_acthd; /* gen6+ bsd engine */
+ u32 vcs_ipehr;
+ u32 vcs_ipeir;
+ u32 vcs_instdone;
+ u32 vcs_seqno;
+ u32 instpm;
+ u32 instps;
u32 instdone1;
- u32 seqno[I915_NUM_RINGS];
+ u32 seqno;
u64 bbaddr;
- u32 fault_reg[I915_NUM_RINGS];
- u32 done_reg;
- u32 faddr[I915_NUM_RINGS];
- u64 fence[I915_MAX_NUM_FENCES];
+ u64 fence[16];
struct timeval time;
- struct drm_i915_error_ring {
- struct drm_i915_error_object {
- int page_count;
- u32 gtt_offset;
- u32 *pages[0];
- } *ringbuffer, *batchbuffer;
- struct drm_i915_error_request {
- long jiffies;
- u32 seqno;
- u32 tail;
- } *requests;
- int num_requests;
- } ring[I915_NUM_RINGS];
+ struct drm_i915_error_object {
+ int page_count;
+ u32 gtt_offset;
+ u32 *pages[0];
+ } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
u32 name;
@@ -195,12 +182,12 @@ struct drm_i915_error_state {
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
- s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+ s32 fence_reg:5;
s32 pinned:2;
u32 tiling:2;
u32 dirty:1;
u32 purgeable:1;
- s32 ring:4;
+ u32 ring:4;
u32 cache_level:2;
} *active_bo, *pinned_bo;
u32 active_bo_count, pinned_bo_count;
@@ -216,25 +203,17 @@ struct drm_i915_display_funcs {
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
void (*update_wm)(struct drm_device *dev);
- void (*update_sprite_wm)(struct drm_device *dev, int pipe,
- uint32_t sprite_width, int pixel_size);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
- void (*write_eld)(struct drm_connector *connector,
- struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
void (*init_pch_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
- int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y);
- void (*force_wake_get)(struct drm_i915_private *dev_priv);
- void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -244,37 +223,26 @@ struct drm_i915_display_funcs {
struct intel_device_info {
u8 gen;
- u8 is_mobile:1;
- u8 is_i85x:1;
- u8 is_i915g:1;
- u8 is_i945gm:1;
- u8 is_g33:1;
- u8 need_gfx_hws:1;
- u8 is_g4x:1;
- u8 is_pineview:1;
- u8 is_broadwater:1;
- u8 is_crestline:1;
- u8 is_ivybridge:1;
- u8 has_fbc:1;
- u8 has_pipe_cxsr:1;
- u8 has_hotplug:1;
- u8 cursor_needs_physical:1;
- u8 has_overlay:1;
- u8 overlay_needs_physical:1;
- u8 supports_tv:1;
- u8 has_bsd_ring:1;
- u8 has_blt_ring:1;
- u8 has_llc:1;
-};
-
-#define I915_PPGTT_PD_ENTRIES 512
-#define I915_PPGTT_PT_ENTRIES 1024
-struct i915_hw_ppgtt {
- unsigned num_pd_entries;
- struct page **pt_pages;
- uint32_t pd_offset;
- dma_addr_t *pt_dma_addr;
- dma_addr_t scratch_page_dma_addr;
+ u8 is_mobile : 1;
+ u8 is_i85x : 1;
+ u8 is_i915g : 1;
+ u8 is_i945gm : 1;
+ u8 is_g33 : 1;
+ u8 need_gfx_hws : 1;
+ u8 is_g4x : 1;
+ u8 is_pineview : 1;
+ u8 is_broadwater : 1;
+ u8 is_crestline : 1;
+ u8 is_ivybridge : 1;
+ u8 has_fbc : 1;
+ u8 has_pipe_cxsr : 1;
+ u8 has_hotplug : 1;
+ u8 cursor_needs_physical : 1;
+ u8 has_overlay : 1;
+ u8 overlay_needs_physical : 1;
+ u8 supports_tv : 1;
+ u8 has_bsd_ring : 1;
+ u8 has_blt_ring : 1;
};
enum no_fbc_reason {
@@ -297,17 +265,6 @@ enum intel_pch {
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
struct intel_fbdev;
-struct intel_fbc_work;
-
-struct intel_gmbus {
- struct i2c_adapter adapter;
- bool force_bit;
- bool has_gpio;
- u32 reg0;
- u32 gpio_reg;
- struct i2c_algo_bit_data bit_algo;
- struct drm_i915_private *dev_priv;
-};
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -318,19 +275,12 @@ typedef struct drm_i915_private {
int relative_constants_mode;
void __iomem *regs;
- /** gt_fifo_count and the subsequent register write are synchronized
- * with dev->struct_mutex. */
- unsigned gt_fifo_count;
- /** forcewake_count is protected by gt_lock */
- unsigned forcewake_count;
- /** gt_lock is also taken in irq contexts. */
- struct spinlock gt_lock;
-
- struct intel_gmbus *gmbus;
- /** gmbus_mutex protects against concurrent usage of the single hw gmbus
- * controller on different i2c buses. */
- struct mutex gmbus_mutex;
+ struct intel_gmbus {
+ struct i2c_adapter adapter;
+ struct i2c_adapter *force_bit;
+ u32 reg0;
+ } *gmbus;
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
@@ -365,6 +315,7 @@ typedef struct drm_i915_private {
int tex_lru_log_granularity;
int allow_batchbuffer;
+ struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
@@ -380,20 +331,21 @@ typedef struct drm_i915_private {
uint32_t last_instdone1;
unsigned long cfb_size;
- unsigned int cfb_fb;
- enum plane cfb_plane;
+ unsigned long cfb_pitch;
+ unsigned long cfb_offset;
+ int cfb_fence;
+ int cfb_plane;
int cfb_y;
- struct intel_fbc_work *fbc_work;
struct intel_opregion opregion;
/* overlay */
struct intel_overlay *overlay;
- bool sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
+ struct drm_display_mode *panel_fixed_mode;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -403,7 +355,6 @@ typedef struct drm_i915_private {
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
- unsigned int display_clock_mode:1;
int lvds_ssc_freq;
struct {
int rate;
@@ -421,7 +372,7 @@ typedef struct drm_i915_private {
struct notifier_block lid_notifier;
int crt_ddc_pin;
- struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+ struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -552,7 +503,7 @@ typedef struct drm_i915_private {
u8 saveAR[21];
u8 saveDACMASK;
u8 saveCR[37];
- uint64_t saveFENCE[I915_MAX_NUM_FENCES];
+ uint64_t saveFENCE[16];
u32 saveCURACNTR;
u32 saveCURAPOS;
u32 saveCURABASE;
@@ -613,9 +564,6 @@ typedef struct drm_i915_private {
struct io_mapping *gtt_mapping;
int gtt_mtrr;
- /** PPGTT used for aliasing the PPGTT with the GTT */
- struct i915_hw_ppgtt *aliasing_ppgtt;
-
struct shrinker inactive_shrinker;
/**
@@ -722,9 +670,10 @@ typedef struct drm_i915_private {
unsigned int lvds_border_bits;
/* Panel fitter placement and size for Ironlake+ */
u32 pch_pf_pos, pch_pf_size;
+ int panel_t3, panel_t12;
- struct drm_crtc *plane_to_crtc_mapping[3];
- struct drm_crtc *pipe_to_crtc_mapping[3];
+ struct drm_crtc *plane_to_crtc_mapping[2];
+ struct drm_crtc *pipe_to_crtc_mapping[2];
wait_queue_head_t pending_flip_queue;
bool flip_pending_is_done;
@@ -740,7 +689,6 @@ typedef struct drm_i915_private {
int child_dev_num;
struct child_device_config *child_dev;
struct drm_connector *int_lvds_connector;
- struct drm_connector *int_edp_connector;
bool mchbar_need_disable;
@@ -775,18 +723,11 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
- struct backlight_device *backlight;
-
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
-} drm_i915_private_t;
-enum hdmi_force_audio {
- HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
- HDMI_AUDIO_OFF, /* force turn off HDMI audio */
- HDMI_AUDIO_AUTO, /* trust EDID */
- HDMI_AUDIO_ON, /* force turn on HDMI audio */
-};
+ atomic_t forcewake_count;
+} drm_i915_private_t;
enum i915_cache_level {
I915_CACHE_NONE,
@@ -814,37 +755,39 @@ struct drm_i915_gem_object {
* (has pending rendering), and is not set if it's on inactive (ready
* to be unbound).
*/
- unsigned int active:1;
+ unsigned int active : 1;
/**
* This is set if the object has been written to since last bound
* to the GTT
*/
- unsigned int dirty:1;
+ unsigned int dirty : 1;
/**
* This is set if the object has been written to since the last
* GPU flush.
*/
- unsigned int pending_gpu_write:1;
+ unsigned int pending_gpu_write : 1;
/**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
+ *
+ * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
*/
- signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
+ signed int fence_reg : 5;
/**
* Advice: are the backing pages purgeable?
*/
- unsigned int madv:2;
+ unsigned int madv : 2;
/**
* Current tiling mode for the object.
*/
- unsigned int tiling_mode:2;
- unsigned int tiling_changed:1;
+ unsigned int tiling_mode : 2;
+ unsigned int tiling_changed : 1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -855,22 +798,22 @@ struct drm_i915_gem_object {
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
- unsigned int pin_count:4;
+ unsigned int pin_count : 4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
/**
* Is the object at the current location in the gtt mappable and
* fenceable? Used to avoid costly recalculations.
*/
- unsigned int map_and_fenceable:1;
+ unsigned int map_and_fenceable : 1;
/**
* Whether the current gtt mapping needs to be mappable (and isn't just
* mappable by accident). Track pin and fault separate for a more
* accurate mappable working set.
*/
- unsigned int fault_mappable:1;
- unsigned int pin_mappable:1;
+ unsigned int fault_mappable : 1;
+ unsigned int pin_mappable : 1;
/*
* Is the GPU currently using a fence to access this buffer,
@@ -880,8 +823,6 @@ struct drm_i915_gem_object {
unsigned int cache_level:2;
- unsigned int has_aliasing_ppgtt_mapping:1;
-
struct page **pages;
/**
@@ -959,9 +900,6 @@ struct drm_i915_gem_request {
/** GEM sequence number associated with this request. */
uint32_t seqno;
- /** Postion in the ringbuffer of the end of the request */
- u32 tail;
-
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
@@ -1018,11 +956,8 @@ struct drm_i915_file_private {
#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
-#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6)
-
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -1055,17 +990,15 @@ struct drm_i915_file_private {
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
-extern unsigned int i915_fbpercrtc __always_unused;
-extern int i915_panel_ignore_lid __read_mostly;
-extern unsigned int i915_powersave __read_mostly;
-extern int i915_semaphores __read_mostly;
-extern unsigned int i915_lvds_downclock __read_mostly;
-extern int i915_panel_use_ssc __read_mostly;
-extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern int i915_enable_rc6 __read_mostly;
-extern int i915_enable_fbc __read_mostly;
-extern bool i915_enable_hangcheck __read_mostly;
-extern bool i915_enable_ppgtt __read_mostly;
+extern unsigned int i915_fbpercrtc;
+extern int i915_panel_ignore_lid;
+extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
+extern unsigned int i915_lvds_downclock;
+extern unsigned int i915_panel_use_ssc;
+extern int i915_vbt_sdvo_panel_type;
+extern unsigned int i915_enable_rc6;
+extern unsigned int i915_enable_fbc;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -1118,7 +1051,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
-void intel_enable_asle(struct drm_device *dev);
+void intel_enable_asle (struct drm_device *dev);
#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1127,6 +1060,18 @@ extern void i915_destroy_error_state(struct drm_device *dev);
#endif
+/* i915_mem.c */
+extern int i915_mem_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_mem_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_mem_init_heap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern void i915_mem_takedown(struct mem_block **heap);
+extern void i915_mem_release(struct drm_device * dev,
+ struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1196,7 +1141,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle);
+ uint32_t handle);
/**
* Returns true if seq1 is later than seq2.
*/
@@ -1206,62 +1151,43 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
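
The signed-subtraction comparison above is what keeps request ordering correct when the 32-bit seqno wraps around. A userspace check of the same arithmetic:

    #include <assert.h>
    #include <stdint.h>

    static int seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
            assert(seqno_passed(5, 3));             /* ordinary ordering */
            assert(seqno_passed(3, 0xfffffffe));    /* 3 is "after" the wrap */
            assert(!seqno_passed(0xfffffffe, 3));
            return 0;
    }
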
-u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+static inline u32
+i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ return ring->outstanding_lazy_request = dev_priv->next_seqno;
+}
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-static inline void
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- dev_priv->fence_regs[obj->fence_reg].pin_count++;
- }
-}
-
-static inline void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- dev_priv->fence_regs[obj->fence_reg].pin_count--;
- }
-}
-
void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
-
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
-int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_init_ppgtt(struct drm_device *dev);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
void i915_gem_do_init(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
+int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno,
- bool do_retire);
+ uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check
-i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
- u32 alignment,
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -1277,22 +1203,9 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
uint32_t size,
int tiling_mode);
-int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
-
/* i915_gem_gtt.c */
-int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj);
-
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
/* i915_gem_evict.c */
@@ -1374,19 +1287,17 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void i8xx_disable_fbc(struct drm_device *dev);
+extern void g4x_disable_fbc(struct drm_device *dev);
+extern void ironlake_disable_fbc(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern bool intel_fbc_enabled(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
-extern void ironlake_init_pch_refclk(struct drm_device *dev);
extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void intel_detect_pch(struct drm_device *dev);
-extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
-
-extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+extern void intel_detect_pch (struct drm_device *dev);
+extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
/* overlay */
#ifdef CONFIG_DEBUG_FS
@@ -1427,16 +1338,27 @@ extern void intel_display_print_error_state(struct seq_file *m,
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE))
#define __i915_read(x, y) \
- u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
+static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ u##x val = 0; \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ gen6_gt_force_wake_get(dev_priv); \
+ val = read##y(dev_priv->regs + reg); \
+ gen6_gt_force_wake_put(dev_priv); \
+ } else { \
+ val = read##y(dev_priv->regs + reg); \
+ } \
+ trace_i915_reg_rw(false, reg, val, sizeof(val)); \
+ return val; \
+}
__i915_read(8, b)
__i915_read(16, w)
@@ -1445,8 +1367,13 @@ __i915_read(64, q)
#undef __i915_read
#define __i915_write(x, y) \
- void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
-
+static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ __gen6_gt_wait_for_fifo(dev_priv); \
+ } \
+ write##y(val, dev_priv->regs + reg); \
+}
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
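
The __i915_read/__i915_write pairs above stamp out one accessor per register width via token pasting. The same pattern in a self-contained userspace mock (no MMIO, illustrative names):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t regs[4096];      /* stand-in for the mapped register BAR */

    #define DEFINE_REG_READ(x, type)                            \
    static inline type mock_read##x(uint32_t reg)               \
    {                                                           \
            return *(volatile type *)(regs + reg);              \
    }

    DEFINE_REG_READ(8,  uint8_t)
    DEFINE_REG_READ(16, uint16_t)
    DEFINE_REG_READ(32, uint32_t)
    #undef DEFINE_REG_READ

    int main(void)
    {
            regs[0x10] = 0xab;
            printf("read8(0x10) = 0x%x\n", mock_read8(0x10));
            return 0;
    }
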
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 723d1ae..46e04a1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -58,7 +58,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
-static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -180,7 +179,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->mm.gtt_total;
- args->aper_available_size = args->aper_size - pinned;
+ args->aper_available_size = args->aper_size -pinned;
return 0;
}
@@ -196,8 +195,6 @@ i915_gem_create(struct drm_file *file,
u32 handle;
size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
/* Allocate the new object */
obj = i915_gem_alloc_object(dev, size);
@@ -259,6 +256,73 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
obj->tiling_mode != I915_TILING_NONE;
}
+static inline void
+slow_shmem_copy(struct page *dst_page,
+ int dst_offset,
+ struct page *src_page,
+ int src_offset,
+ int length)
+{
+ char *dst_vaddr, *src_vaddr;
+
+ dst_vaddr = kmap(dst_page);
+ src_vaddr = kmap(src_page);
+
+ memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
+
+ kunmap(src_page);
+ kunmap(dst_page);
+}
+
+static inline void
+slow_shmem_bit17_copy(struct page *gpu_page,
+ int gpu_offset,
+ struct page *cpu_page,
+ int cpu_offset,
+ int length,
+ int is_read)
+{
+ char *gpu_vaddr, *cpu_vaddr;
+
+ /* Use the unswizzled path if this page isn't affected. */
+ if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
+ if (is_read)
+ return slow_shmem_copy(cpu_page, cpu_offset,
+ gpu_page, gpu_offset, length);
+ else
+ return slow_shmem_copy(gpu_page, gpu_offset,
+ cpu_page, cpu_offset, length);
+ }
+
+ gpu_vaddr = kmap(gpu_page);
+ cpu_vaddr = kmap(cpu_page);
+
+ /* Copy the data, XORing A6 with A17 (1). The user already knows he's
+ * XORing with the other bits (A9 for Y, A9 and A10 for X)
+ */
+ while (length > 0) {
+ int cacheline_end = ALIGN(gpu_offset + 1, 64);
+ int this_length = min(cacheline_end - gpu_offset, length);
+ int swizzled_gpu_offset = gpu_offset ^ 64;
+
+ if (is_read) {
+ memcpy(cpu_vaddr + cpu_offset,
+ gpu_vaddr + swizzled_gpu_offset,
+ this_length);
+ } else {
+ memcpy(gpu_vaddr + swizzled_gpu_offset,
+ cpu_vaddr + cpu_offset,
+ this_length);
+ }
+ cpu_offset += this_length;
+ gpu_offset += this_length;
+ length -= this_length;
+ }
+
+ kunmap(cpu_page);
+ kunmap(gpu_page);
+}
+
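
The gpu_offset ^ 64 in the restored helper above flips address bit 6 within each pair of 64-byte cachelines, undoing the hardware's A6-with-A17 swizzle on pages whose physical bit 17 is set; that is also why the loop advances in cacheline-bounded chunks via ALIGN(gpu_offset + 1, 64). The offset mapping by itself:

    #include <stdio.h>

    int main(void)
    {
            int offsets[] = { 0, 32, 64, 96, 128 };

            for (int i = 0; i < 5; i++)     /* bit 6 toggles, rest unchanged */
                    printf("linear %4d -> swizzled %4d\n",
                           offsets[i], offsets[i] ^ 64);
            return 0;
    }
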
/**
* This is the fast shmem pread path, which attempts to copy_from_user directly
* from the backing pages of the object to the user's address space. On a
@@ -319,58 +383,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
return 0;
}
-static inline int
-__copy_to_user_swizzled(char __user *cpu_vaddr,
- const char *gpu_vaddr, int gpu_offset,
- int length)
-{
- int ret, cpu_offset = 0;
-
- while (length > 0) {
- int cacheline_end = ALIGN(gpu_offset + 1, 64);
- int this_length = min(cacheline_end - gpu_offset, length);
- int swizzled_gpu_offset = gpu_offset ^ 64;
-
- ret = __copy_to_user(cpu_vaddr + cpu_offset,
- gpu_vaddr + swizzled_gpu_offset,
- this_length);
- if (ret)
- return ret + length;
-
- cpu_offset += this_length;
- gpu_offset += this_length;
- length -= this_length;
- }
-
- return 0;
-}
-
-static inline int
-__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
- const char *cpu_vaddr,
- int length)
-{
- int ret, cpu_offset = 0;
-
- while (length > 0) {
- int cacheline_end = ALIGN(gpu_offset + 1, 64);
- int this_length = min(cacheline_end - gpu_offset, length);
- int swizzled_gpu_offset = gpu_offset ^ 64;
-
- ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
- cpu_vaddr + cpu_offset,
- this_length);
- if (ret)
- return ret + length;
-
- cpu_offset += this_length;
- gpu_offset += this_length;
- length -= this_length;
- }
-
- return 0;
-}
-
/**
* This is the fallback shmem pread path, which allocates temporary storage
* in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -384,34 +396,72 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- char __user *user_data;
+ struct mm_struct *mm = current->mm;
+ struct page **user_pages;
ssize_t remain;
- loff_t offset;
- int shmem_page_offset, page_length, ret;
- int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ loff_t offset, pinned_pages, i;
+ loff_t first_data_page, last_data_page, num_pages;
+ int shmem_page_offset;
+ int data_page_index, data_page_offset;
+ int page_length;
+ int ret;
+ uint64_t data_ptr = args->data_ptr;
+ int do_bit17_swizzling;
- user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ /* Pin the user pages containing the data. We can't fault while
+ * holding the struct mutex, yet we want to hold it while
+ * dereferencing the user data.
+ */
+ first_data_page = data_ptr / PAGE_SIZE;
+ last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ num_pages = last_data_page - first_data_page + 1;
- offset = args->offset;
+ user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
+ if (user_pages == NULL)
+ return -ENOMEM;
mutex_unlock(&dev->struct_mutex);
+ down_read(&mm->mmap_sem);
+ pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ num_pages, 1, 0, user_pages, NULL);
+ up_read(&mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
+ if (pinned_pages < num_pages) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = i915_gem_object_set_cpu_read_domain_range(obj,
+ args->offset,
+ args->size);
+ if (ret)
+ goto out;
+
+ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+ offset = args->offset;
while (remain > 0) {
struct page *page;
- char *vaddr;
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
+ * data_page_index = page number in get_user_pages return
+ * data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
shmem_page_offset = offset_in_page(offset);
+ data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+ data_page_offset = offset_in_page(data_ptr);
+
page_length = remain;
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
+ if ((data_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - data_page_offset;
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
@@ -419,38 +469,36 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
goto out;
}
- page_do_bit17_swizzling = obj_do_bit17_swizzling &&
- (page_to_phys(page) & (1 << 17)) != 0;
-
- vaddr = kmap(page);
- if (page_do_bit17_swizzling)
- ret = __copy_to_user_swizzled(user_data,
- vaddr, shmem_page_offset,
- page_length);
- else
- ret = __copy_to_user(user_data,
- vaddr + shmem_page_offset,
- page_length);
- kunmap(page);
+ if (do_bit17_swizzling) {
+ slow_shmem_bit17_copy(page,
+ shmem_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length,
+ 1);
+ } else {
+ slow_shmem_copy(user_pages[data_page_index],
+ data_page_offset,
+ page,
+ shmem_page_offset,
+ page_length);
+ }
mark_page_accessed(page);
page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
- goto out;
- }
-
remain -= page_length;
- user_data += page_length;
+ data_ptr += page_length;
offset += page_length;
}
out:
- mutex_lock(&dev->struct_mutex);
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
+ for (i = 0; i < pinned_pages; i++) {
+ SetPageDirty(user_pages[i]);
+ mark_page_accessed(user_pages[i]);
+ page_cache_release(user_pages[i]);
+ }
+ drm_free_large(user_pages);
return ret;
}
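
The restored slow path above follows a fixed locking dance: drop struct_mutex, pin the user buffer with get_user_pages() (faulting while the mutex is held could deadlock against our own fault handler), then retake the mutex before touching object state. A condensed kernel-context sketch, using the same pre-3.1 get_user_pages() signature this tree uses; on failure the caller must still release whatever was pinned, as the out: label above does:

    #include <linux/mm.h>
    #include <linux/mutex.h>
    #include <linux/sched.h>
    #include <linux/errno.h>

    static int pin_user_buffer(struct mutex *lock, unsigned long uaddr,
                               int num_pages, struct page **pages)
    {
            struct mm_struct *mm = current->mm;
            int pinned;

            mutex_unlock(lock);             /* never fault with it held */
            down_read(&mm->mmap_sem);
            pinned = get_user_pages(current, mm, uaddr, num_pages,
                                    1 /* write */, 0 /* force */, pages, NULL);
            up_read(&mm->mmap_sem);
            mutex_lock(lock);

            /* partial pins count as failure; caller unwinds pages[0..pinned) */
            return pinned < num_pages ? -EFAULT : 0;
    }
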
@@ -752,11 +800,11 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
if (IS_ERR(page))
return PTR_ERR(page);
- vaddr = kmap_atomic(page);
+ vaddr = kmap_atomic(page, KM_USER0);
ret = __copy_from_user_inatomic(vaddr + page_offset,
user_data,
page_length);
- kunmap_atomic(vaddr);
+ kunmap_atomic(vaddr, KM_USER0);
set_page_dirty(page);
mark_page_accessed(page);
@@ -791,36 +839,71 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ struct mm_struct *mm = current->mm;
+ struct page **user_pages;
ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int shmem_page_offset, page_length, ret;
- int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ loff_t offset, pinned_pages, i;
+ loff_t first_data_page, last_data_page, num_pages;
+ int shmem_page_offset;
+ int data_page_index, data_page_offset;
+ int page_length;
+ int ret;
+ uint64_t data_ptr = args->data_ptr;
+ int do_bit17_swizzling;
- user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ /* Pin the user pages containing the data. We can't fault while
+ * holding the struct mutex, and all of the pwrite implementations
+ * want to hold it while dereferencing the user data.
+ */
+ first_data_page = data_ptr / PAGE_SIZE;
+ last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ num_pages = last_data_page - first_data_page + 1;
- offset = args->offset;
- obj->dirty = 1;
+ user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
+ if (user_pages == NULL)
+ return -ENOMEM;
mutex_unlock(&dev->struct_mutex);
+ down_read(&mm->mmap_sem);
+ pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ num_pages, 0, 0, user_pages, NULL);
+ up_read(&mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
+ if (pinned_pages < num_pages) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
+ goto out;
+
+ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+ offset = args->offset;
+ obj->dirty = 1;
while (remain > 0) {
struct page *page;
- char *vaddr;
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
+ * data_page_index = page number in get_user_pages return
+ * data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
shmem_page_offset = offset_in_page(offset);
+ data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+ data_page_offset = offset_in_page(data_ptr);
page_length = remain;
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
+ if ((data_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - data_page_offset;
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
@@ -828,45 +911,34 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
goto out;
}
- page_do_bit17_swizzling = obj_do_bit17_swizzling &&
- (page_to_phys(page) & (1 << 17)) != 0;
-
- vaddr = kmap(page);
- if (page_do_bit17_swizzling)
- ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
- user_data,
- page_length);
- else
- ret = __copy_from_user(vaddr + shmem_page_offset,
- user_data,
- page_length);
- kunmap(page);
+ if (do_bit17_swizzling) {
+ slow_shmem_bit17_copy(page,
+ shmem_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length,
+ 0);
+ } else {
+ slow_shmem_copy(page,
+ shmem_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
+ }
set_page_dirty(page);
mark_page_accessed(page);
page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
- goto out;
- }
-
remain -= page_length;
- user_data += page_length;
+ data_ptr += page_length;
offset += page_length;
}
out:
- mutex_lock(&dev->struct_mutex);
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
- /* and flush dirty cachelines in case the object isn't in the cpu write
- * domain anymore. */
- if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
- }
+ for (i = 0; i < pinned_pages; i++)
+ page_cache_release(user_pages[i]);
+ drm_free_large(user_pages);
return ret;
}
@@ -922,13 +994,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj->phys_obj) {
+ if (obj->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file);
- goto out;
- }
-
- if (obj->gtt_space &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ else if (obj->gtt_space &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_object_pin(obj, 0, true);
if (ret)
goto out;
@@ -947,23 +1016,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
out_unpin:
i915_gem_object_unpin(obj);
-
- if (ret != -EFAULT)
+ } else {
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
goto out;
- /* Fall through to the shmfs paths because the gtt paths might
- * fail with non-page-backed user pointers (e.g. gtt mappings
- * when moving data between textures). */
- }
-
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret)
- goto out;
- ret = -EFAULT;
- if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
- if (ret == -EFAULT)
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+ ret = -EFAULT;
+ if (!i915_gem_object_needs_bit17_swizzle(obj))
+ ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+ if (ret == -EFAULT)
+ ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+ }
out:
drm_gem_object_unreference(&obj->base);
@@ -1076,6 +1139,7 @@ int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
unsigned long addr;
@@ -1087,6 +1151,11 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;
+ if (obj->size > dev_priv->mm.gtt_mappable_end) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -E2BIG;
+ }
+
down_write(&current->mm->mmap_sem);
addr = do_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -1201,6 +1270,74 @@ out:
}
/**
+ * i915_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+static int
+i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list;
+ struct drm_local_map *map;
+ int ret = 0;
+
+ /* Set the object up for mmap'ing */
+ list = &obj->base.map_list;
+ list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+ if (!list->map)
+ return -ENOMEM;
+
+ map = list->map;
+ map->type = _DRM_GEM;
+ map->size = obj->base.size;
+ map->handle = obj;
+
+ /* Get a DRM GEM mmap offset allocated... */
+ list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+ obj->base.size / PAGE_SIZE,
+ 0, 0);
+ if (!list->file_offset_node) {
+ DRM_ERROR("failed to allocate offset for bo %d\n",
+ obj->base.name);
+ ret = -ENOSPC;
+ goto out_free_list;
+ }
+
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ obj->base.size / PAGE_SIZE,
+ 0);
+ if (!list->file_offset_node) {
+ ret = -ENOMEM;
+ goto out_free_list;
+ }
+
+ list->hash.key = list->file_offset_node->start;
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
+ DRM_ERROR("failed to add to map hash\n");
+ goto out_free_mm;
+ }
+
+ return 0;
+
+out_free_mm:
+ drm_mm_put_block(list->file_offset_node);
+out_free_list:
+ kfree(list->map);
+ list->map = NULL;
+
+ return ret;
+}
+
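
Review note: from userspace, the fake offset allocated above is consumed in two steps; a hedged sketch using the real UAPI struct (fd, handle and size are assumed to exist, error handling abbreviated):

        struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
        void *ptr;

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                return -errno;
        /* arg.offset is the fake offset created by the code above */
        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, arg.offset);

The offset never corresponds to file contents; it only keys the hash-table lookup (mm->offset_hash) so the fault handler can find the object.
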
+/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
*
@@ -1228,6 +1365,19 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false;
}
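
Review note: "remove physical page mappings" means zapping the user PTEs so the next CPU access faults back into the driver, which is how domain tracking stays honest. A sketch of what this function's body does on this kernel (dev_mapping is the shared address_space of the DRM device node):

        if (!obj->fault_mappable)
                return;

        if (obj->base.dev->dev_mapping)
                unmap_mapping_range(obj->base.dev->dev_mapping,
                                    (loff_t)obj->base.map_list.hash.key << PAGE_SHIFT,
                                    obj->base.size, 1);

        obj->fault_mappable = false;
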
+static void
+i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list = &obj->base.map_list;
+
+ drm_ht_remove_item(&mm->offset_hash, &list->hash);
+ drm_mm_put_block(list->file_offset_node);
+ kfree(list->map);
+ list->map = NULL;
+}
+
static uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
@@ -1340,7 +1490,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
}
if (!obj->base.map_list.map) {
- ret = drm_gem_create_mmap_offset(&obj->base);
+ ret = i915_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
@@ -1412,7 +1562,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
obj->pages[i] = page;
}
- if (i915_gem_object_needs_bit17_swizzle(obj))
+ if (obj->tiling_mode != I915_TILING_NONE)
i915_gem_object_do_bit_17_swizzle(obj);
return 0;
@@ -1434,7 +1584,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
BUG_ON(obj->madv == __I915_MADV_PURGED);
- if (i915_gem_object_needs_bit17_swizzle(obj))
+ if (obj->tiling_mode != I915_TILING_NONE)
i915_gem_object_save_bit_17_swizzle(obj);
if (obj->madv == I915_MADV_DONTNEED)
@@ -1581,28 +1731,6 @@ i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
}
}
-static u32
-i915_gem_get_seqno(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno = dev_priv->next_seqno;
-
- /* reserve 0 for non-seqno */
- if (++dev_priv->next_seqno == 0)
- dev_priv->next_seqno = 1;
-
- return seqno;
-}
-
-u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
-{
- if (ring->outstanding_lazy_request == 0)
- ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
-
- return ring->outstanding_lazy_request;
-}
-
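
Review note: the removed allocator reserves seqno 0 to mean "no seqno", so completion tests elsewhere can rely on a signed-difference comparison that stays correct across u32 wraparound. This is the real helper the driver uses for those tests:

        /* true if seq1 is at or after seq2, robust across u32 wraparound */
        static inline bool
        i915_seqno_passed(uint32_t seq1, uint32_t seq2)
        {
                return (int32_t)(seq1 - seq2) >= 0;
        }
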
int
i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
@@ -1610,19 +1738,10 @@ i915_add_request(struct intel_ring_buffer *ring,
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
uint32_t seqno;
- u32 request_ring_position;
int was_empty;
int ret;
BUG_ON(request == NULL);
- seqno = i915_gem_next_request_seqno(ring);
-
- /* Record the position of the start of the request so that
- * should we detect the updated seqno part-way through the
- * GPU processing the request, we never over-estimate the
- * position of the head.
- */
- request_ring_position = intel_ring_get_tail(ring);
ret = ring->add_request(ring, &seqno);
if (ret)
@@ -1632,7 +1751,6 @@ i915_add_request(struct intel_ring_buffer *ring,
request->seqno = seqno;
request->ring = ring;
- request->tail = request_ring_position;
request->emitted_jiffies = jiffies;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
@@ -1647,14 +1765,11 @@ i915_add_request(struct intel_ring_buffer *ring,
spin_unlock(&file_priv->mm.lock);
}
- ring->outstanding_lazy_request = 0;
+ ring->outstanding_lazy_request = false;
if (!dev_priv->mm.suspended) {
- if (i915_enable_hangcheck) {
- mod_timer(&dev_priv->hangcheck_timer,
- jiffies +
- msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
- }
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
if (was_empty)
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
@@ -1711,7 +1826,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ for (i = 0; i < 16; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct drm_i915_gem_object *obj = reg->obj;
@@ -1743,7 +1858,7 @@ void i915_gem_reset(struct drm_device *dev)
* lost bo to the inactive list.
*/
while (!list_empty(&dev_priv->mm.flushing_list)) {
- obj = list_first_entry(&dev_priv->mm.flushing_list,
+ obj = list_first_entry(&dev_priv->mm.flushing_list,
struct drm_i915_gem_object,
mm_list);
@@ -1769,7 +1884,7 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
*/
-void
+static void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
@@ -1797,12 +1912,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
break;
trace_i915_gem_request_retire(ring, request->seqno);
- /* We know the GPU must have read the request to have
- * sent us the seqno + interrupt, so use the position
- * of tail of the request to update the last known position
- * of the GPU head.
- */
- ring->last_retired_head = request->tail;
list_del(&request->list);
i915_gem_request_remove_from_client(request);
@@ -1815,7 +1924,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
- obj = list_first_entry(&ring->active_list,
+ obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
@@ -1915,8 +2024,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
*/
int
i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno,
- bool do_retire)
+ uint32_t seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 ier;
@@ -1979,9 +2087,9 @@ i915_wait_request(struct intel_ring_buffer *ring,
|| atomic_read(&dev_priv->mm.wedged));
ring->irq_put(ring);
- } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000))
+ } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000))
ret = -EBUSY;
ring->waiting_seqno = 0;
@@ -1990,12 +2098,17 @@ i915_wait_request(struct intel_ring_buffer *ring,
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
+ if (ret && ret != -ERESTARTSYS)
+ DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+ __func__, ret, seqno, ring->get_seqno(ring),
+ dev_priv->next_seqno);
+
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
* buffer to have made it to the inactive list, and we would need
* a separate wait queue to handle that.
*/
- if (ret == 0 && do_retire)
+ if (ret == 0)
i915_gem_retire_requests_ring(ring);
return ret;
@@ -2019,8 +2132,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
- true);
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
}
@@ -2028,37 +2140,12 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
return 0;
}
-static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
-{
- u32 old_write_domain, old_read_domains;
-
- /* Act a barrier for all accesses through the GTT */
- mb();
-
- /* Force a pagefault for domain tracking on next user access */
- i915_gem_release_mmap(obj);
-
- if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
- return;
-
- old_read_domains = obj->base.read_domains;
- old_write_domain = obj->base.write_domain;
-
- obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
- obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- old_write_domain);
-}
-
/**
* Unbinds an object from the GTT aperture.
*/
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret = 0;
if (obj->gtt_space == NULL)
@@ -2069,28 +2156,23 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return -EINVAL;
}
- ret = i915_gem_object_finish_gpu(obj);
- if (ret == -ERESTARTSYS)
- return ret;
- /* Continue on if we fail due to EIO, the GPU is hung so we
- * should be safe and we need to cleanup or else we might
- * cause memory corruption through use-after-free.
- */
-
- i915_gem_object_finish_gtt(obj);
+ /* blow away mappings if mapped through GTT */
+ i915_gem_release_mmap(obj);
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
- * are flushed when we go to remap it.
+ * are flushed when we go to remap it. This will
+ * also ensure that all pending GPU writes are finished
+ * before we unbind.
*/
- if (ret == 0)
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret == -ERESTARTSYS)
return ret;
+ /* Continue on if we fail due to EIO, the GPU is hung so we
+ * should be safe and we need to cleanup or else we might
+ * cause memory corruption through use-after-free.
+ */
if (ret) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
i915_gem_clflush_object(obj);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -2103,11 +2185,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
trace_i915_gem_object_unbind(obj);
i915_gem_gtt_unbind_object(obj);
- if (obj->has_aliasing_ppgtt_mapping) {
- i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
- obj->has_aliasing_ppgtt_mapping = 0;
- }
-
i915_gem_object_put_pages_gtt(obj);
list_del_init(&obj->gtt_list);
@@ -2147,7 +2224,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
return 0;
}
-static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
{
int ret;
@@ -2161,18 +2238,24 @@ static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
return ret;
}
- return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
- do_retire);
+ return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
-int i915_gpu_idle(struct drm_device *dev, bool do_retire)
+int
+i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ bool lists_empty;
int ret, i;
+ lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ if (lists_empty)
+ return 0;
+
/* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) {
- ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
+ ret = i915_ring_idle(&dev_priv->ring[i]);
if (ret)
return ret;
}
@@ -2375,8 +2458,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
if (!ring_passed_seqno(obj->last_fenced_ring,
obj->last_fenced_seqno)) {
ret = i915_wait_request(obj->last_fenced_ring,
- obj->last_fenced_seqno,
- true);
+ obj->last_fenced_seqno);
if (ret)
return ret;
}
@@ -2408,8 +2490,6 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
- WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
i915_gem_clear_fence_reg(obj->base.dev,
&dev_priv->fence_regs[obj->fence_reg]);
@@ -2434,7 +2514,7 @@ i915_find_fence_reg(struct drm_device *dev,
if (!reg->obj)
return reg;
- if (!reg->pin_count)
+ if (!reg->obj->pin_count)
avail = reg;
}
@@ -2444,7 +2524,7 @@ i915_find_fence_reg(struct drm_device *dev,
/* None available, try to steal one or wait for a user to finish */
avail = first = NULL;
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
- if (reg->pin_count)
+ if (reg->obj->pin_count)
continue;
if (first == NULL)
@@ -2519,8 +2599,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (!ring_passed_seqno(obj->last_fenced_ring,
reg->setup_seqno)) {
ret = i915_wait_request(obj->last_fenced_ring,
- reg->setup_seqno,
- true);
+ reg->setup_seqno);
if (ret)
return ret;
}
@@ -2539,7 +2618,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
reg = i915_find_fence_reg(dev, pipelined);
if (reg == NULL)
- return -EDEADLK;
+ return -ENOSPC;
ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
@@ -2639,7 +2718,6 @@ i915_gem_clear_fence_reg(struct drm_device *dev,
list_del_init(&reg->lru_list);
reg->obj = NULL;
reg->setup_seqno = 0;
- reg->pin_count = 0;
}
/**
@@ -2777,7 +2855,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
fenceable =
obj->gtt_space->size == fence_size &&
- (obj->gtt_space->start & (fence_alignment - 1)) == 0;
+ (obj->gtt_space->start & (fence_alignment - 1)) == 0;
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -2923,144 +3001,51 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
-int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
-{
- struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
-
- if (obj->cache_level == cache_level)
- return 0;
-
- if (obj->pin_count) {
- DRM_DEBUG("can not change the cache level of pinned objects\n");
- return -EBUSY;
- }
-
- if (obj->gtt_space) {
- ret = i915_gem_object_finish_gpu(obj);
- if (ret)
- return ret;
-
- i915_gem_object_finish_gtt(obj);
-
- /* Before SandyBridge, you could not use tiling or fence
- * registers with snooped memory, so relinquish any fences
- * currently pointing to our region in the aperture.
- */
- if (INTEL_INFO(obj->base.dev)->gen < 6) {
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- return ret;
- }
-
- i915_gem_gtt_rebind_object(obj, cache_level);
- if (obj->has_aliasing_ppgtt_mapping)
- i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
- obj, cache_level);
- }
-
- if (cache_level == I915_CACHE_NONE) {
- u32 old_read_domains, old_write_domain;
-
- /* If we're coming from LLC cached, then we haven't
- * actually been tracking whether the data is in the
- * CPU cache or not, since we only allow one bit set
- * in obj->write_domain and have been skipping the clflushes.
- * Just set it to the CPU cache for now.
- */
- WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
- WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
-
- old_read_domains = obj->base.read_domains;
- old_write_domain = obj->base.write_domain;
-
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- old_write_domain);
- }
-
- obj->cache_level = cache_level;
- return 0;
-}
-
/*
- * Prepare buffer for display plane (scanout, cursors, etc).
- * Can be called from an uninterruptible phase (modesetting) and allows
- * any flushes to be pipelined (for pageflips).
- *
- * For the display plane, we want to be in the GTT but out of any write
- * domains. So in many ways this looks like set_to_gtt_domain() apart from the
- * ability to pipeline the waits, pinning and any additional subtleties
- * that may differentiate the display plane from ordinary buffers.
+ * Prepare buffer for display plane. Use uninterruptible for possible flush
+ * wait, as in modesetting process we're not supposed to be interrupted.
*/
int
-i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
- u32 alignment,
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined)
{
- u32 old_read_domains, old_write_domain;
+ uint32_t old_read_domains;
int ret;
+ /* Not valid to be called on unbound objects. */
+ if (obj->gtt_space == NULL)
+ return -EINVAL;
+
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
+
+ /* Currently, we are always called from a non-interruptible context. */
if (pipelined != obj->ring) {
ret = i915_gem_object_wait_rendering(obj);
- if (ret == -ERESTARTSYS)
+ if (ret)
return ret;
}
- /* The display engine is not coherent with the LLC cache on gen6. As
- * a result, we make sure that the pinning that is about to occur is
- * done with uncached PTEs. This is lowest common denominator for all
- * chipsets.
- *
- * However for gen6+, we could do better by using the GFDT bit instead
- * of uncaching, which would allow us to flush all the LLC-cached data
- * with that bit in the PTE to main memory with just one PIPE_CONTROL.
- */
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
- if (ret)
- return ret;
-
- /* As the user may map the buffer once pinned in the display plane
- * (e.g. libkms for the bootup splash), we have to ensure that we
- * always use map_and_fenceable for all scanout buffers.
- */
- ret = i915_gem_object_pin(obj, alignment, true);
- if (ret)
- return ret;
-
i915_gem_object_flush_cpu_write_domain(obj);
- old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- old_write_domain);
+ obj->base.write_domain);
return 0;
}
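
Review note: this function flips domains but does not pin; modeset callers are expected to pin first. A hedged sketch of the intended calling sequence (alignment and pipelined are placeholders for whatever the caller computes):

        ret = i915_gem_object_pin(obj, alignment, true /* map_and_fenceable */);
        if (ret)
                return ret;

        ret = i915_gem_object_set_to_display_plane(obj, pipelined);
        if (ret) {
                i915_gem_object_unpin(obj);
                return ret;
        }
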
int
-i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
{
int ret;
- if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
+ if (!obj->active)
return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
@@ -3069,13 +3054,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
return ret;
}
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
-
- /* Ensure that we invalidate the GPU's caches and TLBs. */
- obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
- return 0;
+ return i915_gem_object_wait_rendering(obj);
}
/**
@@ -3297,10 +3276,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
ret = -EIO;
- } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000)) {
- ret = -EBUSY;
}
}
@@ -3504,11 +3479,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* so emit a request to do so.
*/
request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request) {
- ret = i915_add_request(obj->ring, NULL, request);
- if (ret)
- kfree(request);
- } else
+ if (request)
+ ret = i915_add_request(obj->ring, NULL, request);
+ else
ret = -ENOMEM;
}
@@ -3532,7 +3505,7 @@ int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- return i915_gem_ring_throttle(dev, file_priv);
+ return i915_gem_ring_throttle(dev, file_priv);
}
int
@@ -3607,23 +3580,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- if (HAS_LLC(dev)) {
- /* On some devices, we can have the GPU use the LLC (the CPU
- * cache) for about a 10% performance improvement
- * compared to uncached. Graphics requests other than
- * display scanout are coherent with the CPU in
- * accessing this cache. This means in this mode we
- * don't need to clflush on the CPU side, and on the
- * GPU side we only need to flush internal caches to
- * get data visible to the CPU.
- *
- * However, we maintain the display planes as UC, and so
- * need to rebind when first used as such.
- */
- obj->cache_level = I915_CACHE_LLC;
- } else
- obj->cache_level = I915_CACHE_NONE;
-
+ obj->cache_level = I915_CACHE_NONE;
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj->mm_list);
@@ -3661,7 +3618,7 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
trace_i915_gem_object_destroy(obj);
if (obj->base.map_list.map)
- drm_gem_free_mmap_offset(&obj->base);
+ i915_gem_free_mmap_offset(obj);
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -3698,7 +3655,7 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -3733,71 +3690,12 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
-void i915_gem_init_swizzling(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen < 5 ||
- dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
- return;
-
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
- DISP_TILE_SURFACE_SWIZZLING);
-
- if (IS_GEN5(dev))
- return;
-
- I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
- if (IS_GEN6(dev))
- I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else
- I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
-}
-
-void i915_gem_init_ppgtt(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t pd_offset;
- struct intel_ring_buffer *ring;
- int i;
-
- if (!dev_priv->mm.aliasing_ppgtt)
- return;
-
- pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset;
- pd_offset /= 64; /* in cachelines, */
- pd_offset <<= 16;
-
- if (INTEL_INFO(dev)->gen == 6) {
- uint32_t ecochk = I915_READ(GAM_ECOCHK);
- I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
- ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
- } else if (INTEL_INFO(dev)->gen >= 7) {
- I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
- /* GFX_MODE is per-ring on gen7+ */
- }
-
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ring = &dev_priv->ring[i];
-
- if (INTEL_INFO(dev)->gen >= 7)
- I915_WRITE(RING_MODE_GEN7(ring),
- GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
-
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
- }
-}
-
int
-i915_gem_init_hw(struct drm_device *dev)
+i915_gem_init_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- i915_gem_init_swizzling(dev);
-
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
@@ -3816,8 +3714,6 @@ i915_gem_init_hw(struct drm_device *dev)
dev_priv->next_seqno = 1;
- i915_gem_init_ppgtt(dev);
-
return 0;
cleanup_bsd_ring:
@@ -3855,7 +3751,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
- ret = i915_gem_init_hw(dev);
+ ret = i915_gem_init_ringbuffer(dev);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -3932,7 +3828,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
- for (i = 0; i < I915_MAX_NUM_FENCES; i++)
+ for (i = 0; i < 16; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
@@ -4250,7 +4146,7 @@ rescan:
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
- if (i915_gpu_idle(dev, true) == 0)
+ if (i915_gpu_idle(dev) == 0)
goto rescan;
}
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index cc93cac..8da1899 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -72,7 +72,7 @@ i915_verify_lists(struct drm_device *dev)
break;
} else if (!obj->active ||
(obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
- list_empty(&obj->gpu_write_list)) {
+ list_empty(&obj->gpu_write_list)) {
DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
obj,
obj->active,
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
- backing_map = kmap_atomic(obj->pages[page]);
+ backing_map = kmap_atomic(obj->pages[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
@@ -181,13 +181,13 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
}
}
}
- kunmap_atomic(backing_map);
+ kunmap_atomic(backing_map, KM_USER0);
backing_map = NULL;
}
out:
if (backing_map != NULL)
- kunmap_atomic(backing_map);
+ kunmap_atomic(backing_map, KM_USER0);
iounmap(gtt_mapping);
/* give syslog time to catch up */
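
Review note: the hunks above restore the pre-3.1 atomic-kmap API, in which the caller names an explicit per-CPU slot; later kernels infer the slot and take only the page. The restored idiom in isolation (page and buf are assumed):

        void *vaddr = kmap_atomic(page, KM_USER0);      /* map into the KM_USER0 slot */
        memcpy(buf, vaddr, PAGE_SIZE);                  /* no sleeping while mapped */
        kunmap_atomic(vaddr, KM_USER0);                 /* release the same slot */
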
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 21a8271..da05a26 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -36,6 +36,7 @@ static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
list_add(&obj->exec_list, unwind);
+ drm_gem_object_reference(&obj->base);
return drm_mm_scan_add_block(obj->gtt_space);
}
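
Review note: mark_free() feeds the drm_mm eviction scan, which has a strict protocol: initialise a scan, add candidate blocks, and remove every added block again (most recently added first) before any node may be freed or unbound. The reference taken above keeps each candidate alive across that window. A condensed sketch of the protocol as this file uses it:

        drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

        list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
                if (mark_free(obj, &unwind_list))
                        goto found;     /* a big-enough hole can now be made */

        /* nothing found: every scanned block must be removed again, newest first */
        while (!list_empty(&unwind_list)) {
                obj = list_first_entry(&unwind_list,
                                       struct drm_i915_gem_object, exec_list);
                BUG_ON(drm_mm_scan_remove_block(obj->gtt_space));
                list_del_init(&obj->exec_list);
                drm_gem_object_unreference(&obj->base); /* drop mark_free()'s ref */
        }
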
@@ -48,6 +49,21 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
struct drm_i915_gem_object *obj;
int ret = 0;
+ i915_gem_retire_requests(dev);
+
+ /* Re-check for free space after retiring requests */
+ if (mappable) {
+ if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0))
+ return 0;
+ } else {
+ if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0))
+ return 0;
+ }
+
trace_i915_gem_evict(dev, min_size, alignment, mappable);
/*
@@ -106,7 +122,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto found;
}
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (!obj->base.write_domain || obj->pin_count)
+ if (!obj->base.write_domain || obj->pin_count)
continue;
if (mark_free(obj, &unwind_list))
@@ -123,6 +139,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
BUG_ON(ret);
list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -141,10 +158,10 @@ found:
exec_list);
if (drm_mm_scan_remove_block(obj->gtt_space)) {
list_move(&obj->exec_list, &eviction_list);
- drm_gem_object_reference(&obj->base);
continue;
}
list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
/* Unbinding will emit any required flushes */
@@ -178,7 +195,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
trace_i915_gem_evict_everything(dev, purgeable_only);
/* Flush everything (on to the inactive lists) and evict */
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 97b47c5..1ca53ff 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,7 +32,6 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
-#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
@@ -203,9 +202,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
cd->invalidate_domains |= invalidate_domains;
cd->flush_domains |= flush_domains;
if (flush_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= intel_ring_flag(obj->ring);
+ cd->flush_rings |= obj->ring->id;
if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= intel_ring_flag(ring);
+ cd->flush_rings |= ring->id;
}
struct eb_objects {
@@ -287,14 +286,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
* exec_object list, so it should have a GTT space bound by now.
*/
if (unlikely(target_offset == 0)) {
- DRM_DEBUG("No GTT space found for object %d\n",
+ DRM_ERROR("No GTT space found for object %d\n",
reloc->target_handle);
return ret;
}
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
- DRM_DEBUG("reloc with multiple write domains: "
+ DRM_ERROR("reloc with multiple write domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
@@ -303,9 +302,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
reloc->write_domain);
return ret;
}
- if (unlikely((reloc->write_domain | reloc->read_domains)
- & ~I915_GEM_GPU_DOMAINS)) {
- DRM_DEBUG("reloc with read/write non-GPU domains: "
+ if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
+ DRM_ERROR("reloc with read/write CPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
@@ -316,7 +314,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
}
if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain)) {
- DRM_DEBUG("Write domain conflict: "
+ DRM_ERROR("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
obj, reloc->target_handle,
@@ -337,7 +335,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset > obj->base.size - 4)) {
- DRM_DEBUG("Relocation beyond object bounds: "
+ DRM_ERROR("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
(int) reloc->offset,
@@ -345,7 +343,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
if (unlikely(reloc->offset & 3)) {
- DRM_DEBUG("Relocation not 4-byte aligned: "
+ DRM_ERROR("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
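
Review note: once an entry survives the checks above, applying the relocation is just a 32-bit store of the target's GTT address plus delta at reloc->offset inside the batch object. A sketch of the CPU write path, using this tree's two-argument kmap_atomic (the driver also has a GTT-mapped path for objects not in the CPU domain):

        char *vaddr;

        vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT], KM_USER0);
        *(uint32_t *)(vaddr + (reloc->offset & ~PAGE_MASK)) =
                reloc->delta + target_offset;
        kunmap_atomic(vaddr, KM_USER0);

The 4-byte-alignment check above exists precisely so this store can never straddle a page boundary.
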
@@ -462,60 +460,11 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
return ret;
}
-#define __EXEC_OBJECT_HAS_FENCE (1<<31)
-
-static int
-pin_and_fence_object(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
-{
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
- bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
- bool need_fence, need_mappable;
- int ret;
-
- need_fence =
- has_fenced_gpu_access &&
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
-
- ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
- if (ret)
- return ret;
-
- if (has_fenced_gpu_access) {
- if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- if (obj->tiling_mode) {
- ret = i915_gem_object_get_fence(obj, ring);
- if (ret)
- goto err_unpin;
-
- entry->flags |= __EXEC_OBJECT_HAS_FENCE;
- i915_gem_object_pin_fence(obj);
- } else {
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto err_unpin;
- }
- }
- obj->pending_fenced_gpu_access = need_fence;
- }
-
- entry->offset = obj->gtt_offset;
- return 0;
-
-err_unpin:
- i915_gem_object_unpin(obj);
- return ret;
-}
-
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
int ret, retry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
@@ -568,7 +517,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
-
if (!obj->gtt_space)
continue;
@@ -583,55 +531,58 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
- ret = pin_and_fence_object(obj, ring);
+ ret = i915_gem_object_pin(obj,
+ entry->alignment,
+ need_mappable);
if (ret)
goto err;
+
+ entry++;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
- if (obj->gtt_space)
- continue;
-
- ret = pin_and_fence_object(obj, ring);
- if (ret) {
- int ret_ignore;
-
- /* This can potentially raise a harmless
- * -EINVAL if we failed to bind in the above
- * call. It cannot raise -EINTR since we know
- * that the bo is freshly bound and so will
- * not need to be flushed or waited upon.
- */
- ret_ignore = i915_gem_object_unbind(obj);
- (void)ret_ignore;
- WARN_ON(obj->gtt_space);
- break;
- }
- }
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ bool need_fence;
- /* Decrement pin count for bound objects */
- list_for_each_entry(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry;
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
- if (!obj->gtt_space)
- continue;
+ if (!obj->gtt_space) {
+ bool need_mappable =
+ entry->relocation_count ? true : need_fence;
- entry = obj->exec_entry;
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- i915_gem_object_unpin_fence(obj);
- entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+ ret = i915_gem_object_pin(obj,
+ entry->alignment,
+ need_mappable);
+ if (ret)
+ break;
}
- i915_gem_object_unpin(obj);
+ if (has_fenced_gpu_access) {
+ if (need_fence) {
+ ret = i915_gem_object_get_fence(obj, ring);
+ if (ret)
+ break;
+ } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode == I915_TILING_NONE) {
+ /* XXX pipelined! */
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ break;
+ }
+ obj->pending_fenced_gpu_access = need_fence;
+ }
- /* ... and ensure ppgtt mapping exist if needed. */
- if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
- i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
- obj, obj->cache_level);
+ entry->offset = obj->gtt_offset;
+ }
- obj->has_aliasing_ppgtt_mapping = 1;
- }
+ /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, objects, exec_list) {
+ if (obj->gtt_space)
+ i915_gem_object_unpin(obj);
}
if (ret != -ENOSPC || retry > 1)
@@ -648,19 +599,16 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
} while (1);
err:
- list_for_each_entry_continue_reverse(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry;
-
- if (!obj->gtt_space)
- continue;
-
- entry = obj->exec_entry;
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- i915_gem_object_unpin_fence(obj);
- entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
- }
+ obj = list_entry(obj->exec_list.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+ while (objects != &obj->exec_list) {
+ if (obj->gtt_space)
+ i915_gem_object_unpin(obj);
- i915_gem_object_unpin(obj);
+ obj = list_entry(obj->exec_list.prev,
+ struct drm_i915_gem_object,
+ exec_list);
}
return ret;
@@ -754,7 +702,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
- DRM_DEBUG("Invalid object handle %d at index %d\n",
+ DRM_ERROR("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
goto err;
@@ -819,22 +767,6 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
-static bool
-intel_enable_semaphores(struct drm_device *dev)
-{
- if (INTEL_INFO(dev)->gen < 6)
- return 0;
-
- if (i915_semaphores >= 0)
- return i915_semaphores;
-
- /* Disable semaphores on SNB */
- if (INTEL_INFO(dev)->gen == 6)
- return 0;
-
- return 1;
-}
-
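
Review note: the removed helper treated i915_semaphores as a tri-state with a hardware default, while the restored check below reads it as a plain boolean. The tri-state module-parameter idiom, as a standalone sketch (not this driver's exact code):

        /* -1 = auto-detect, 0 = force off, 1 = force on */
        static bool use_semaphores(struct drm_device *dev)
        {
                if (i915_semaphores >= 0)
                        return i915_semaphores; /* explicit user override */
                return INTEL_INFO(dev)->gen > 6; /* default off on gen6 (SNB) */
        }
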
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
@@ -847,7 +779,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (!intel_enable_semaphores(obj->base.dev))
+ if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
@@ -873,8 +805,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
}
from->sync_seqno[idx] = seqno;
-
- return to->sync_to(to, from, seqno - 1);
+ return intel_ring_sync(to, from, seqno - 1);
}
static int
@@ -1048,31 +979,6 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
}
static int
-i915_reset_gen7_sol_offsets(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret, i;
-
- if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
- return 0;
-
- ret = intel_ring_begin(ring, 4 * 3);
- if (ret)
- return ret;
-
- for (i = 0; i < 4; i++) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
- intel_ring_emit(ring, 0);
- }
-
- intel_ring_advance(ring);
-
- return 0;
-}
-
-static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
@@ -1086,11 +992,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_ring_buffer *ring;
u32 exec_start, exec_len;
u32 seqno;
- u32 mask;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
- DRM_DEBUG("execbuf with invalid offset/length\n");
+ DRM_ERROR("execbuf with invalid offset/length\n");
return -EINVAL;
}
@@ -1105,26 +1010,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
- DRM_DEBUG("execbuf with invalid ring (BSD)\n");
+ DRM_ERROR("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
if (!HAS_BLT(dev)) {
- DRM_DEBUG("execbuf with invalid ring (BLT)\n");
+ DRM_ERROR("execbuf with invalid ring (BLT)\n");
return -EINVAL;
}
ring = &dev_priv->ring[BCS];
break;
default:
- DRM_DEBUG("execbuf with unknown ring: %d\n",
+ DRM_ERROR("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
- mask = I915_EXEC_CONSTANTS_MASK;
switch (mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
@@ -1138,24 +1042,33 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
mode == I915_EXEC_CONSTANTS_REL_SURFACE)
return -EINVAL;
- /* The HW changed the meaning on this bit on gen6 */
- if (INTEL_INFO(dev)->gen >= 6)
- mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring,
+ I915_EXEC_CONSTANTS_MASK << 16 | mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = mode;
}
break;
default:
- DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
+ DRM_ERROR("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
}
if (args->buffer_count < 1) {
- DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
- DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+ DRM_ERROR("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
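
Review note: the INSTPM write emitted in the restored block above follows the "masked bit" convention of many i915 registers: the high 16 bits of the payload select which of the low 16 bits the hardware actually latches, so unrelated bits are left untouched. The pattern in isolation (ring and mode as in the surrounding code):

        ret = intel_ring_begin(ring, 4);        /* reserve four dwords */
        if (ret)
                return ret;
        intel_ring_emit(ring, MI_NOOP);         /* pad to an even dword count */
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, INSTPM);
        intel_ring_emit(ring, I915_EXEC_CONSTANTS_MASK << 16 | mode);
        intel_ring_advance(ring);
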
@@ -1205,7 +1118,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
- DRM_DEBUG("Invalid object handle %d at index %d\n",
+ DRM_ERROR("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
@@ -1213,7 +1126,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (!list_empty(&obj->exec_list)) {
- DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
+ DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
@@ -1251,7 +1164,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Set the pending read domains for the batch buffer to COMMAND */
if (batch_obj->base.pending_write_domain) {
- DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+ DRM_ERROR("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL;
goto err;
}
@@ -1268,7 +1181,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
goto err;
@@ -1276,27 +1189,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
- if (ring == &dev_priv->ring[RCS] &&
- mode != dev_priv->relative_constants_mode) {
- ret = intel_ring_begin(ring, 4);
- if (ret)
- goto err;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, INSTPM);
- intel_ring_emit(ring, mask << 16 | mode);
- intel_ring_advance(ring);
-
- dev_priv->relative_constants_mode = mode;
- }
-
- if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
- ret = i915_reset_gen7_sol_offsets(dev, ring);
- if (ret)
- goto err;
- }
-
trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
@@ -1356,7 +1248,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int ret, i;
if (args->buffer_count < 1) {
- DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
@@ -1364,7 +1256,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec_list == NULL || exec2_list == NULL) {
- DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
drm_free_large(exec_list);
drm_free_large(exec2_list);
@@ -1375,7 +1267,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
- DRM_DEBUG("copy %d exec entries failed %d\n",
+ DRM_ERROR("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec_list);
drm_free_large(exec2_list);
@@ -1416,7 +1308,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
- DRM_DEBUG("failed to copy %d exec entries "
+ DRM_ERROR("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
@@ -1437,7 +1329,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
if (args->buffer_count < 1 ||
args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
- DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+ DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
@@ -1447,7 +1339,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
if (exec2_list == NULL) {
- DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
@@ -1456,7 +1348,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
(uintptr_t) args->buffers_ptr,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
- DRM_DEBUG("copy %d exec entries failed %d\n",
+ DRM_ERROR("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec2_list);
return -EFAULT;
@@ -1471,7 +1363,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
- DRM_DEBUG("failed to copy %d exec entries "
+ DRM_ERROR("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2eacd78..e46b645 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -29,279 +29,6 @@
#include "i915_trace.h"
#include "intel_drv.h"
-/* PPGTT support for Sandybdrige/Gen6 and later */
-static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
- unsigned first_entry,
- unsigned num_entries)
-{
- uint32_t *pt_vaddr;
- uint32_t scratch_pte;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- unsigned last_pte, i;
-
- scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
- scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
-
- while (num_entries) {
- last_pte = first_pte + num_entries;
- if (last_pte > I915_PPGTT_PT_ENTRIES)
- last_pte = I915_PPGTT_PT_ENTRIES;
-
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
- for (i = first_pte; i < last_pte; i++)
- pt_vaddr[i] = scratch_pte;
-
- kunmap_atomic(pt_vaddr);
-
- num_entries -= last_pte - first_pte;
- first_pte = 0;
- act_pd++;
- }
-}
-
-int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt;
- uint32_t pd_entry;
- unsigned first_pd_entry_in_global_pt;
- uint32_t __iomem *pd_addr;
- int i;
- int ret = -ENOMEM;
-
- /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
- * entries. For aliasing ppgtt support we just steal them at the end for
- * now. */
- first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ret;
-
- ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
- ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
- GFP_KERNEL);
- if (!ppgtt->pt_pages)
- goto err_ppgtt;
-
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
- if (!ppgtt->pt_pages[i])
- goto err_pt_alloc;
- }
-
- if (dev_priv->mm.gtt->needs_dmar) {
- ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
- *ppgtt->num_pd_entries,
- GFP_KERNEL);
- if (!ppgtt->pt_dma_addr)
- goto err_pt_alloc;
- }
-
- pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt;
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
- if (dev_priv->mm.gtt->needs_dmar) {
- pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
- 0, 4096,
- PCI_DMA_BIDIRECTIONAL);
-
- if (pci_dma_mapping_error(dev->pdev,
- pt_addr)) {
- ret = -EIO;
- goto err_pd_pin;
-
- }
- ppgtt->pt_dma_addr[i] = pt_addr;
- } else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
- pd_entry |= GEN6_PDE_VALID;
-
- writel(pd_entry, pd_addr + i);
- }
- readl(pd_addr);
-
- ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
-
- i915_ppgtt_clear_range(ppgtt, 0,
- ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
-
- ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
-
- dev_priv->mm.aliasing_ppgtt = ppgtt;
-
- return 0;
-
-err_pd_pin:
- if (ppgtt->pt_dma_addr) {
- for (i--; i >= 0; i--)
- pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
- 4096, PCI_DMA_BIDIRECTIONAL);
- }
-err_pt_alloc:
- kfree(ppgtt->pt_dma_addr);
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- if (ppgtt->pt_pages[i])
- __free_page(ppgtt->pt_pages[i]);
- }
- kfree(ppgtt->pt_pages);
-err_ppgtt:
- kfree(ppgtt);
-
- return ret;
-}
-
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int i;
-
- if (!ppgtt)
- return;
-
- if (ppgtt->pt_dma_addr) {
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
- 4096, PCI_DMA_BIDIRECTIONAL);
- }
-
- kfree(ppgtt->pt_dma_addr);
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- __free_page(ppgtt->pt_pages[i]);
- kfree(ppgtt->pt_pages);
- kfree(ppgtt);
-}
-
-static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
- struct scatterlist *sg_list,
- unsigned sg_len,
- unsigned first_entry,
- uint32_t pte_flags)
-{
- uint32_t *pt_vaddr, pte;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- unsigned i, j, m, segment_len;
- dma_addr_t page_addr;
- struct scatterlist *sg;
-
- /* init sg walking */
- sg = sg_list;
- i = 0;
- segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
- m = 0;
-
- while (i < sg_len) {
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
- for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
- page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- pte = GEN6_PTE_ADDR_ENCODE(page_addr);
- pt_vaddr[j] = pte | pte_flags;
-
- /* grab the next page */
- m++;
- if (m == segment_len) {
- sg = sg_next(sg);
- i++;
- if (i == sg_len)
- break;
-
- segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
- m = 0;
- }
- }
-
- kunmap_atomic(pt_vaddr);
-
- first_pte = 0;
- act_pd++;
- }
-}
-
-static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
- unsigned first_entry, unsigned num_entries,
- struct page **pages, uint32_t pte_flags)
-{
- uint32_t *pt_vaddr, pte;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- unsigned last_pte, i;
- dma_addr_t page_addr;
-
- while (num_entries) {
- last_pte = first_pte + num_entries;
- last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
-
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
- for (i = first_pte; i < last_pte; i++) {
- page_addr = page_to_phys(*pages);
- pte = GEN6_PTE_ADDR_ENCODE(page_addr);
- pt_vaddr[i] = pte | pte_flags;
-
- pages++;
- }
-
- kunmap_atomic(pt_vaddr);
-
- num_entries -= last_pte - first_pte;
- first_pte = 0;
- act_pd++;
- }
-}
-
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t pte_flags = GEN6_PTE_VALID;
-
- switch (cache_level) {
- case I915_CACHE_LLC_MLC:
- pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
- break;
- case I915_CACHE_LLC:
- pte_flags |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte_flags |= GEN6_PTE_UNCACHED;
- break;
- default:
- BUG();
- }
-
- if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
- } else
- i915_ppgtt_insert_pages(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- pte_flags);
-}
-
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj)
-{
- i915_ppgtt_clear_range(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
-}
-
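
Review note: every removed PPGTT helper above decomposes a GPU virtual address the same way: the page-frame index picks a page directory slot, and the remainder picks a PTE within that page table (I915_PPGTT_PT_ENTRIES is 1024 on gen6). The shared arithmetic, pulled out as a sketch (gtt_start and page are placeholders):

        unsigned first_entry = gtt_start >> PAGE_SHIFT; /* page-frame index */
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;  /* directory slot */
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; /* PTE in table */

        uint32_t pte = GEN6_PTE_ADDR_ENCODE(page_to_phys(page));
        pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;     /* flags as used above */
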
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
enum i915_cache_level cache_level)
@@ -322,28 +49,6 @@ static unsigned int cache_level_to_agp_type(struct drm_device *dev,
}
}
-static bool do_idling(struct drm_i915_private *dev_priv)
-{
- bool ret = dev_priv->mm.interruptible;
-
- if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
- dev_priv->mm.interruptible = false;
- if (i915_gpu_idle(dev_priv->dev, false)) {
- DRM_ERROR("Couldn't idle GPU\n");
- /* Wait a bit, in hopes it avoids the hang */
- udelay(10);
- }
- }
-
- return ret;
-}
-
-static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
-{
- if (unlikely(dev_priv->mm.gtt->do_idle_maps))
- dev_priv->mm.interruptible = interruptible;
-}
-
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -354,8 +59,24 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ unsigned int agp_type =
+ cache_level_to_agp_type(dev, obj->cache_level);
+
i915_gem_clflush_object(obj);
- i915_gem_gtt_rebind_object(obj, obj->cache_level);
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ BUG_ON(!obj->sg_list);
+
+ intel_gtt_insert_sg_entries(obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
+ } else
+ intel_gtt_insert_pages(obj->gtt_space->start
+ >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ agp_type);
}
intel_gtt_chipset_flush();
@@ -389,35 +110,8 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
return 0;
}
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
-
- if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
-}
-
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- bool interruptible;
-
- interruptible = do_idling(dev_priv);
-
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
@@ -425,6 +119,4 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
}
-
- undo_idling(dev_priv, interruptible);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 1a93066..99c4faa 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -92,25 +92,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (INTEL_INFO(dev)->gen >= 6) {
- uint32_t dimm_c0, dimm_c1;
- dimm_c0 = I915_READ(MAD_DIMM_C0);
- dimm_c1 = I915_READ(MAD_DIMM_C1);
- dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- /* Enable swizzling when the channels are populated with
- * identically sized dimms. We don't need to check the 3rd
- * channel because no cpu with gpu attached ships in that
- * configuration. Also, swizzling only makes sense for 2
- * channels anyway. */
- if (dimm_c0 == dimm_c1) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- }
- } else if (IS_GEN5(dev)) {
+ if (INTEL_INFO(dev)->gen >= 5) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
*/
@@ -122,10 +104,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ } else if (IS_MOBILE(dev)) {
uint32_t dcc;
- /* On 9xx chipsets, channel interleave by the CPU is
+ /* On mobile 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
@@ -458,9 +440,14 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
+ if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+ return;
+
if (obj->bit_17 == NULL)
return;
@@ -477,9 +464,14 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
+ if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+ return;
+
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);
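
Review note: the swizzle modes this file selects describe which physical-address bits the memory controller XORs into bit 6 of a tiled address. The 9_10_17 variants are the ones needing the save/restore helpers guarded above, because bit 17 of a page's physical address can change when the page is swapped out and back in. A sketch of the plain 9/10 swizzle (hypothetical helper, not driver code):

        /* flip bit 6 by the XOR of bits 9 and 10 (I915_BIT_6_SWIZZLE_9_10) */
        static unsigned long swizzle_addr_9_10(unsigned long addr)
        {
                return addr ^ (((addr >> 3) ^ (addr >> 4)) & (1ul << 6));
        }
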
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f57e5cf..d05f03c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -364,12 +364,10 @@ static void notify_ring(struct drm_device *dev,
ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
- if (i915_enable_hangcheck) {
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer,
- jiffies +
- msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
- }
+
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
static void gen6_pm_rps_work(struct work_struct *work)
@@ -383,7 +381,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
pm_iir = dev_priv->pm_iir;
dev_priv->pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
- I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock);
if (!pm_iir)
@@ -421,6 +418,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
* an *extremely* unlikely race with gen6_rps_enable() that is prevented
* by holding struct_mutex for the duration of the write.
*/
+ I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir);
mutex_unlock(&dev_priv->dev->struct_mutex);
}
@@ -533,9 +531,8 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
unsigned long flags;
spin_lock_irqsave(&dev_priv->rps_lock, flags);
WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
+ I915_WRITE(GEN6_PMIMR, pm_iir);
dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
- POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
queue_work(dev_priv->wq, &dev_priv->rps_work);
}
@@ -647,9 +644,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
unsigned long flags;
spin_lock_irqsave(&dev_priv->rps_lock, flags);
WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
+ I915_WRITE(GEN6_PMIMR, pm_iir);
dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
- POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
queue_work(dev_priv->wq, &dev_priv->rps_work);
}
@@ -710,13 +706,14 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
page_count = src->base.size / PAGE_SIZE;
- dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
+ dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
reloc_offset = src->gtt_offset;
for (page = 0; page < page_count; page++) {
unsigned long flags;
+ void __iomem *s;
void *d;
d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
@@ -724,29 +721,10 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
- if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
- void __iomem *s;
-
- /* Simply ignore tiling or any overlapping fence.
- * It's part of the error state, and this hopefully
- * captures what the GPU read.
- */
-
- s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc_offset);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s);
- } else {
- void *s;
-
- drm_clflush_pages(&src->pages[page], 1);
-
- s = kmap_atomic(src->pages[page]);
- memcpy(d, s, PAGE_SIZE);
- kunmap_atomic(s);
-
- drm_clflush_pages(&src->pages[page], 1);
- }
+ s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc_offset);
+ memcpy_fromio(d, s, PAGE_SIZE);
+ io_mapping_unmap_atomic(s);
local_irq_restore(flags);
dst->pages[page] = d;
@@ -785,11 +763,11 @@ i915_error_state_free(struct drm_device *dev,
{
int i;
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- i915_error_object_free(error->ring[i].batchbuffer);
- i915_error_object_free(error->ring[i].ringbuffer);
- kfree(error->ring[i].requests);
- }
+ for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
+ i915_error_object_free(error->batchbuffer[i]);
+
+ for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
+ i915_error_object_free(error->ringbuffer[i]);
kfree(error->active_bo);
kfree(error->overlay);
@@ -819,7 +797,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
- err->ring = obj->ring ? obj->ring->id : -1;
+ err->ring = obj->ring ? obj->ring->id : 0;
err->cache_level = obj->cache_level;
if (++i == count)
@@ -891,92 +869,6 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
return NULL;
}
-static void i915_record_ring_state(struct drm_device *dev,
- struct drm_i915_error_state *error,
- struct intel_ring_buffer *ring)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen >= 6) {
- error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
- error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
- error->semaphore_mboxes[ring->id][0]
- = I915_READ(RING_SYNC_0(ring->mmio_base));
- error->semaphore_mboxes[ring->id][1]
- = I915_READ(RING_SYNC_1(ring->mmio_base));
- }
-
- if (INTEL_INFO(dev)->gen >= 4) {
- error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
- error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
- error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
- error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
- if (ring->id == RCS) {
- error->instdone1 = I915_READ(INSTDONE1);
- error->bbaddr = I915_READ64(BB_ADDR);
- }
- } else {
- error->ipeir[ring->id] = I915_READ(IPEIR);
- error->ipehr[ring->id] = I915_READ(IPEHR);
- error->instdone[ring->id] = I915_READ(INSTDONE);
- }
-
- error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
- error->seqno[ring->id] = ring->get_seqno(ring);
- error->acthd[ring->id] = intel_ring_get_active_head(ring);
- error->head[ring->id] = I915_READ_HEAD(ring);
- error->tail[ring->id] = I915_READ_TAIL(ring);
-
- error->cpu_ring_head[ring->id] = ring->head;
- error->cpu_ring_tail[ring->id] = ring->tail;
-}
-
-static void i915_gem_record_rings(struct drm_device *dev,
- struct drm_i915_error_state *error)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_request *request;
- int i, count;
-
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
- if (ring->obj == NULL)
- continue;
-
- i915_record_ring_state(dev, error, ring);
-
- error->ring[i].batchbuffer =
- i915_error_first_batchbuffer(dev_priv, ring);
-
- error->ring[i].ringbuffer =
- i915_error_object_create(dev_priv, ring->obj);
-
- count = 0;
- list_for_each_entry(request, &ring->request_list, list)
- count++;
-
- error->ring[i].num_requests = count;
- error->ring[i].requests =
- kmalloc(count*sizeof(struct drm_i915_error_request),
- GFP_ATOMIC);
- if (error->ring[i].requests == NULL) {
- error->ring[i].num_requests = 0;
- continue;
- }
-
- count = 0;
- list_for_each_entry(request, &ring->request_list, list) {
- struct drm_i915_error_request *erq;
-
- erq = &error->ring[i].requests[count++];
- erq->seqno = request->seqno;
- erq->jiffies = request->emitted_jiffies;
- erq->tail = request->tail;
- }
- }
-}
-
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -1001,7 +893,7 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
/* Account for pipe specific data like PIPE*STAT */
- error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
@@ -1010,18 +902,59 @@ static void i915_capture_error_state(struct drm_device *dev)
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
dev->primary->index);
+ error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-
+ error->instpm = I915_READ(INSTPM);
+ error->error = 0;
if (INTEL_INFO(dev)->gen >= 6) {
error->error = I915_READ(ERROR_GEN6);
- error->done_reg = I915_READ(DONE_REG);
- }
+ error->bcs_acthd = I915_READ(BCS_ACTHD);
+ error->bcs_ipehr = I915_READ(BCS_IPEHR);
+ error->bcs_ipeir = I915_READ(BCS_IPEIR);
+ error->bcs_instdone = I915_READ(BCS_INSTDONE);
+ error->bcs_seqno = 0;
+ if (dev_priv->ring[BCS].get_seqno)
+ error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
+
+ error->vcs_acthd = I915_READ(VCS_ACTHD);
+ error->vcs_ipehr = I915_READ(VCS_IPEHR);
+ error->vcs_ipeir = I915_READ(VCS_IPEIR);
+ error->vcs_instdone = I915_READ(VCS_INSTDONE);
+ error->vcs_seqno = 0;
+ if (dev_priv->ring[VCS].get_seqno)
+ error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
+ }
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->ipeir = I915_READ(IPEIR_I965);
+ error->ipehr = I915_READ(IPEHR_I965);
+ error->instdone = I915_READ(INSTDONE_I965);
+ error->instps = I915_READ(INSTPS);
+ error->instdone1 = I915_READ(INSTDONE1);
+ error->acthd = I915_READ(ACTHD_I965);
+ error->bbaddr = I915_READ64(BB_ADDR);
+ } else {
+ error->ipeir = I915_READ(IPEIR);
+ error->ipehr = I915_READ(IPEHR);
+ error->instdone = I915_READ(INSTDONE);
+ error->acthd = I915_READ(ACTHD);
+ error->bbaddr = 0;
+ }
i915_gem_record_fences(dev, error);
- i915_gem_record_rings(dev, error);
+
+ /* Record the active batch and ring buffers */
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ error->batchbuffer[i] =
+ i915_error_first_batchbuffer(dev_priv,
+ &dev_priv->ring[i]);
+
+ error->ringbuffer[i] =
+ i915_error_object_create(dev_priv,
+ dev_priv->ring[i].obj);
+ }
/* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
@@ -1077,12 +1010,11 @@ void i915_destroy_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
+ spin_lock(&dev_priv->error_lock);
error = dev_priv->first_error;
dev_priv->first_error = NULL;
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_unlock(&dev_priv->error_lock);
if (error)
i915_error_state_free(dev, error);
@@ -1266,7 +1198,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
- crtc->y * crtc->fb->pitches[0] +
+ crtc->y * crtc->fb->pitch +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -1557,7 +1489,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -1605,7 +1537,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -1710,6 +1642,13 @@ static bool kick_ring(struct intel_ring_buffer *ring)
I915_WRITE_CTL(ring, tmp);
return true;
}
+ if (IS_GEN6(dev) &&
+ (tmp & RING_WAIT_SEMAPHORE)) {
+ DRM_ERROR("Kicking stuck semaphore on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return true;
+ }
return false;
}
@@ -1726,9 +1665,6 @@ void i915_hangcheck_elapsed(unsigned long data)
uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
bool err = false;
- if (!i915_enable_hangcheck)
- return;
-
/* If all work is done then ACTHD clearly hasn't advanced. */
if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
@@ -1759,7 +1695,6 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 == instdone1) {
if (dev_priv->hangcheck_count++ > 1) {
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
- i915_handle_error(dev, true);
if (!IS_GEN2(dev)) {
/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -1767,6 +1702,7 @@ void i915_hangcheck_elapsed(unsigned long data)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
+
if (kick_ring(&dev_priv->ring[RCS]))
goto repeat;
@@ -1779,6 +1715,7 @@ void i915_hangcheck_elapsed(unsigned long data)
goto repeat;
}
+ i915_handle_error(dev, true);
return;
}
} else {
@@ -1811,6 +1748,17 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ /* Workaround stalls observed on Sandy Bridge GPUs by
+ * making the blitter command streamer generate a
+ * write to the Hardware Status Page for
+ * MI_USER_INTERRUPT. This appears to serialize the
+ * previous seqno write out before the interrupt
+ * happens.
+ */
+ I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
+ I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
+ }
/* XXX hotplug from PCH */
@@ -1829,26 +1777,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-/*
- * Enable digital hotplug on the PCH, and configure the DP short pulse
- * duration to 2ms (which is the minimum in the Display Port spec)
- *
- * This register is the same on all known PCH chips.
- */
-
-static void ironlake_enable_pch_hotplug(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 hotplug;
-
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
- hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
- hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
- hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
- hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
-}
-
static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1911,8 +1839,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);
- ironlake_enable_pch_hotplug(dev);
-
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
I915_WRITE(DEIIR, DE_PCU_EVENT);
@@ -1970,8 +1896,6 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);
- ironlake_enable_pch_hotplug(dev);
-
return 0;
}
@@ -2096,10 +2020,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
I915_WRITE(GTIIR, I915_READ(GTIIR));
-
- I915_WRITE(SDEIMR, 0xffffffff);
- I915_WRITE(SDEIER, 0x0);
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}
static void i915_driver_irq_uninstall(struct drm_device * dev)
@@ -2138,10 +2058,8 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
- else
- dev->driver->get_vblank_timestamp = NULL;
+
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4528d3f..5dc3b6d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -80,53 +80,12 @@
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
-#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
-#define GEN6_MBC_SNPCR_SHIFT 21
-#define GEN6_MBC_SNPCR_MASK (3<<21)
-#define GEN6_MBC_SNPCR_MAX (0<<21)
-#define GEN6_MBC_SNPCR_MED (1<<21)
-#define GEN6_MBC_SNPCR_LOW (2<<21)
-#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
-
-#define GEN6_MBCTL 0x0907c
-#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
-#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
-#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
-#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
-#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
-
#define GEN6_GDRST 0x941c
#define GEN6_GRDOM_FULL (1 << 0)
#define GEN6_GRDOM_RENDER (1 << 1)
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
-/* PPGTT stuff */
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-
-#define GEN6_PDE_VALID (1 << 0)
-#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
-#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
-#define GEN6_PTE_CACHE_BITS (3 << 1)
-#define GEN6_PTE_GFDT (1 << 3)
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
-#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
-#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
-#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
-#define PP_DIR_DCLV_2G 0xffffffff
-
-#define GAM_ECOCHK 0x4090
-#define ECOCHK_SNB_BIT (1<<10)
-#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
-#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
-
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@@ -191,7 +150,7 @@
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
-#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
+#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
#define MI_OVERLAY_OFF (0x2<<21)
@@ -229,13 +188,6 @@
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)
#define MI_SEMAPHORE_REGISTER (1<<18)
-#define MI_SEMAPHORE_SYNC_RV (2<<16)
-#define MI_SEMAPHORE_SYNC_RB (0<<16)
-#define MI_SEMAPHORE_SYNC_VR (0<<16)
-#define MI_SEMAPHORE_SYNC_VB (2<<16)
-#define MI_SEMAPHORE_SYNC_BR (2<<16)
-#define MI_SEMAPHORE_SYNC_BV (0<<16)
-#define MI_SEMAPHORE_SYNC_INVALID (1<<0)
/*
* 3D instructions used by the kernel
*/
@@ -277,22 +229,16 @@
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
-#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
-#define PIPE_CONTROL_CS_STALL (1<<20)
-#define PIPE_CONTROL_QW_WRITE (1<<14)
-#define PIPE_CONTROL_DEPTH_STALL (1<<13)
-#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
-#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
-#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */
-#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
-#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
-#define PIPE_CONTROL_NOTIFY (1<<8)
-#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
-#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
-#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
-#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
-#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
+#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WC_FLUSH (1<<12)
+#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
+#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_ISP_DIS (1<<9)
+#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
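/* Hedged sketch (ring emit helpers assumed from intel_ringbuffer.c):
 * with the fixed-length GFX_OP_PIPE_CONTROL restored above, a 4-dword
 * qword write with notify looks roughly like: */
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
		PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);	/* low dword of the qword payload */
intel_ring_emit(ring, 0);	/* high dword of the qword payload */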
/*
@@ -330,12 +276,6 @@
#define FENCE_REG_SANDYBRIDGE_0 0x100000
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
-/* control register for cpu gtt access */
-#define TILECTL 0x101000
-#define TILECTL_SWZCTL (1 << 0)
-#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
-#define TILECTL_BACKSNOOP_DIS (1 << 3)
-
/*
* Instruction and interrupt control regs
*/
@@ -350,23 +290,10 @@
#define RING_CTL(base) ((base)+0x3c)
#define RING_SYNC_0(base) ((base)+0x40)
#define RING_SYNC_1(base) ((base)+0x44)
-#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
-#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
-#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
-#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
-#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
-#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
-#define ARB_MODE 0x04030
-#define ARB_MODE_SWIZZLE_SNB (1<<4)
-#define ARB_MODE_SWIZZLE_IVB (1<<5)
-#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
-#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
#define RENDER_HWS_PGA_GEN7 (0x04080)
-#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
-#define DONE_REG 0x40b0
#define BSD_HWS_PGA_GEN7 (0x04180)
#define BLT_HWS_PGA_GEN7 (0x04280)
#define RING_ACTHD(base) ((base)+0x74)
@@ -400,12 +327,6 @@
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
-#define RING_IPEIR(base) ((base)+0x64)
-#define RING_IPEHR(base) ((base)+0x68)
-#define RING_INSTDONE(base) ((base)+0x6c)
-#define RING_INSTPS(base) ((base)+0x70)
-#define RING_DMA_FADD(base) ((base)+0x78)
-#define RING_INSTPM(base) ((base)+0xc0)
#define INSTPS 0x02070 /* 965+ only */
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
@@ -419,6 +340,14 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
+#define VCS_INSTDONE 0x1206C
+#define VCS_IPEIR 0x12064
+#define VCS_IPEHR 0x12068
+#define VCS_ACTHD 0x12074
+#define BCS_INSTDONE 0x2206C
+#define BCS_IPEIR 0x22064
+#define BCS_IPEHR 0x22068
+#define BCS_ACTHD 0x22074
#define ERROR_GEN6 0x040a0
@@ -438,11 +367,9 @@
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
-# define MI_FLUSH_ENABLE (1 << 12)
+# define MI_FLUSH_ENABLE (1 << 11)
#define GFX_MODE 0x02520
-#define GFX_MODE_GEN7 0x0229c
-#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
#define GFX_RUN_LIST_ENABLE (1<<15)
#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
@@ -450,9 +377,6 @@
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
-#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
-#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
-
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
@@ -490,7 +414,6 @@
#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
be delivered when out of C3. */
-#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
@@ -538,7 +461,7 @@
/* Enables non-sequential data reads through arbiter
*/
-#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
+#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
/* Disable FSB snooping of cacheable write cycles from binner/render
* command stream
@@ -674,7 +597,6 @@
#define DPFC_CTL_PLANEA (0<<30)
#define DPFC_CTL_PLANEB (1<<30)
#define DPFC_CTL_FENCE_EN (1<<29)
-#define DPFC_CTL_PERSISTENT_MODE (1<<25)
#define DPFC_SR_EN (1<<10)
#define DPFC_CTL_LIMIT_1X (0<<6)
#define DPFC_CTL_LIMIT_2X (1<<6)
@@ -709,7 +631,7 @@
#define ILK_DISPLAY_CHICKEN1 0x42000
#define ILK_FBCQ_DIS (1<<22)
-#define ILK_PABSTRETCH_DIS (1<<21)
+#define ILK_PABSTRETCH_DIS (1<<21)
/*
@@ -1100,29 +1022,6 @@
#define C0DRB3 0x10206
#define C1DRB3 0x10606
-/** snb MCH registers for reading the DRAM channel configuration */
-#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
-#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
-#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
-#define MAD_DIMM_ECC_MASK (0x3 << 24)
-#define MAD_DIMM_ECC_OFF (0x0 << 24)
-#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
-#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
-#define MAD_DIMM_ECC_ON (0x3 << 24)
-#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
-#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
-#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
-#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
-#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
-#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
-#define MAD_DIMM_A_SELECT (0x1 << 16)
-/* DIMM sizes are in multiples of 256mb. */
-#define MAD_DIMM_B_SIZE_SHIFT 8
-#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
-#define MAD_DIMM_A_SIZE_SHIFT 0
-#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
-
-
/* Clocking configuration register */
#define CLKCFG 0x10c00
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@@ -1402,7 +1301,6 @@
#define _VSYNC_A 0x60014
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
-#define _VSYNCSHIFT_A 0x60028
/* Pipe B timing regs */
#define _HTOTAL_B 0x61000
@@ -1413,8 +1311,6 @@
#define _VSYNC_B 0x61014
#define _PIPEBSRC 0x6101c
#define _BCLRPAT_B 0x61020
-#define _VSYNCSHIFT_B 0x61028
-
#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
@@ -1423,7 +1319,6 @@
#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
/* VGA port control */
#define ADPA 0x61100
@@ -1432,7 +1327,6 @@
#define ADPA_PIPE_SELECT_MASK (1<<30)
#define ADPA_PIPE_A_SELECT 0
#define ADPA_PIPE_B_SELECT (1<<30)
-#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1575,7 +1469,6 @@
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
#define LVDS_PIPE_MASK (1 << 30)
-#define LVDS_PIPE(pipe) ((pipe) << 30)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
/* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -1615,6 +1508,9 @@
#define LVDS_B0B3_POWER_DOWN (0 << 2)
#define LVDS_B0B3_POWER_UP (3 << 2)
+#define LVDS_PIPE_ENABLED(V, P) \
+ (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))
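/* Hedged usage sketch for this and the matching DP/HDMI/ADPA helpers
 * added below (register read assumed for illustration): */
u32 lvds = I915_READ(PCH_LVDS);
if (LVDS_PIPE_ENABLED(lvds, PIPE_B))
	; /* LVDS port is enabled and sourced from pipe B */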
+
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170
@@ -1627,7 +1523,6 @@
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
#define VIDEO_DIP_SELECT_SPD (3 << 19)
-#define VIDEO_DIP_SELECT_MASK (3 << 19)
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
@@ -1644,21 +1539,12 @@
*/
#define PP_READY (1 << 30)
#define PP_SEQUENCE_NONE (0 << 28)
-#define PP_SEQUENCE_POWER_UP (1 << 28)
-#define PP_SEQUENCE_POWER_DOWN (2 << 28)
-#define PP_SEQUENCE_MASK (3 << 28)
-#define PP_SEQUENCE_SHIFT 28
+#define PP_SEQUENCE_ON (1 << 28)
+#define PP_SEQUENCE_OFF (2 << 28)
+#define PP_SEQUENCE_MASK 0x30000000
#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
+#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
#define PP_SEQUENCE_STATE_MASK 0x0000000f
-#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
-#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
-#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
-#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
-#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
-#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
-#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
-#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
-#define PP_SEQUENCE_STATE_RESET (0xf << 0)
#define PP_CONTROL 0x61204
#define POWER_TARGET_ON (1 << 0)
#define PP_ON_DELAYS 0x61208
@@ -2215,6 +2101,9 @@
#define DP_PIPEB_SELECT (1 << 30)
#define DP_PIPE_MASK (1 << 30)
+#define DP_PIPE_ENABLED(V, P) \
+ (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN))
+
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
#define DP_LINK_TRAIN_PAT_2 (1 << 28)
@@ -2410,21 +2299,9 @@
#define PIPECONF_PALETTE 0
#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
-#define PIPECONF_INTERLACE_MASK (7 << 21)
-/* Note that pre-gen3 does not support interlaced display directly. Panel
- * fitting must be disabled on pre-ilk for interlaced. */
-#define PIPECONF_PROGRESSIVE (0 << 21)
-#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */
-#define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */
+#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
-#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */
-/* Ironlake and later have a complete new set of values for interlaced. PFIT
- * means panel fitter required, PF means progressive fetch, DBL means power
- * saving pixel doubling. */
-#define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21)
-#define PIPECONF_INTERLACED_ILK (3 << 21)
-#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
-#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_BPP_MASK (0x000000e0)
#define PIPECONF_BPP_8 (0<<5)
@@ -2490,7 +2367,7 @@
#define DSPFW1 0x70034
#define DSPFW_SR_SHIFT 23
-#define DSPFW_SR_MASK (0x1ff<<23)
+#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
#define DSPFW_CURSORB_MASK (0x3f<<16)
#define DSPFW_PLANEB_SHIFT 8
@@ -2548,7 +2425,6 @@
#define WM0_PIPE_CURSOR_MASK (0x1f)
#define WM0_PIPEB_ILK 0x45104
-#define WM0_PIPEC_IVB 0x45200
#define WM1_LP_ILK 0x45108
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24
@@ -2563,8 +2439,6 @@
#define WM3_LP_ILK 0x45110
#define WM3_LP_EN (1<<31)
#define WM1S_LP_ILK 0x45120
-#define WM2S_LP_IVB 0x45124
-#define WM3S_LP_IVB 0x45128
#define WM1S_LP_EN (1<<31)
/* Memory latency timer register */
@@ -2781,140 +2655,6 @@
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
-/* Sprite A control */
-#define _DVSACNTR 0x72180
-#define DVS_ENABLE (1<<31)
-#define DVS_GAMMA_ENABLE (1<<30)
-#define DVS_PIXFORMAT_MASK (3<<25)
-#define DVS_FORMAT_YUV422 (0<<25)
-#define DVS_FORMAT_RGBX101010 (1<<25)
-#define DVS_FORMAT_RGBX888 (2<<25)
-#define DVS_FORMAT_RGBX161616 (3<<25)
-#define DVS_SOURCE_KEY (1<<22)
-#define DVS_RGB_ORDER_RGBX (1<<20)
-#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
-#define DVS_YUV_ORDER_YUYV (0<<16)
-#define DVS_YUV_ORDER_UYVY (1<<16)
-#define DVS_YUV_ORDER_YVYU (2<<16)
-#define DVS_YUV_ORDER_VYUY (3<<16)
-#define DVS_DEST_KEY (1<<2)
-#define DVS_TRICKLE_FEED_DISABLE (1<<14)
-#define DVS_TILED (1<<10)
-#define _DVSALINOFF 0x72184
-#define _DVSASTRIDE 0x72188
-#define _DVSAPOS 0x7218c
-#define _DVSASIZE 0x72190
-#define _DVSAKEYVAL 0x72194
-#define _DVSAKEYMSK 0x72198
-#define _DVSASURF 0x7219c
-#define _DVSAKEYMAXVAL 0x721a0
-#define _DVSATILEOFF 0x721a4
-#define _DVSASURFLIVE 0x721ac
-#define _DVSASCALE 0x72204
-#define DVS_SCALE_ENABLE (1<<31)
-#define DVS_FILTER_MASK (3<<29)
-#define DVS_FILTER_MEDIUM (0<<29)
-#define DVS_FILTER_ENHANCING (1<<29)
-#define DVS_FILTER_SOFTENING (2<<29)
-#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
-#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
-#define _DVSAGAMC 0x72300
-
-#define _DVSBCNTR 0x73180
-#define _DVSBLINOFF 0x73184
-#define _DVSBSTRIDE 0x73188
-#define _DVSBPOS 0x7318c
-#define _DVSBSIZE 0x73190
-#define _DVSBKEYVAL 0x73194
-#define _DVSBKEYMSK 0x73198
-#define _DVSBSURF 0x7319c
-#define _DVSBKEYMAXVAL 0x731a0
-#define _DVSBTILEOFF 0x731a4
-#define _DVSBSURFLIVE 0x731ac
-#define _DVSBSCALE 0x73204
-#define _DVSBGAMC 0x73300
-
-#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
-#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
-#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
-#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
-#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
-#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
-#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
-#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
-#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
-#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
-#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
-
-#define _SPRA_CTL 0x70280
-#define SPRITE_ENABLE (1<<31)
-#define SPRITE_GAMMA_ENABLE (1<<30)
-#define SPRITE_PIXFORMAT_MASK (7<<25)
-#define SPRITE_FORMAT_YUV422 (0<<25)
-#define SPRITE_FORMAT_RGBX101010 (1<<25)
-#define SPRITE_FORMAT_RGBX888 (2<<25)
-#define SPRITE_FORMAT_RGBX161616 (3<<25)
-#define SPRITE_FORMAT_YUV444 (4<<25)
-#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
-#define SPRITE_CSC_ENABLE (1<<24)
-#define SPRITE_SOURCE_KEY (1<<22)
-#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
-#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
-#define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
-#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
-#define SPRITE_YUV_ORDER_YUYV (0<<16)
-#define SPRITE_YUV_ORDER_UYVY (1<<16)
-#define SPRITE_YUV_ORDER_YVYU (2<<16)
-#define SPRITE_YUV_ORDER_VYUY (3<<16)
-#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
-#define SPRITE_INT_GAMMA_ENABLE (1<<13)
-#define SPRITE_TILED (1<<10)
-#define SPRITE_DEST_KEY (1<<2)
-#define _SPRA_LINOFF 0x70284
-#define _SPRA_STRIDE 0x70288
-#define _SPRA_POS 0x7028c
-#define _SPRA_SIZE 0x70290
-#define _SPRA_KEYVAL 0x70294
-#define _SPRA_KEYMSK 0x70298
-#define _SPRA_SURF 0x7029c
-#define _SPRA_KEYMAX 0x702a0
-#define _SPRA_TILEOFF 0x702a4
-#define _SPRA_SCALE 0x70304
-#define SPRITE_SCALE_ENABLE (1<<31)
-#define SPRITE_FILTER_MASK (3<<29)
-#define SPRITE_FILTER_MEDIUM (0<<29)
-#define SPRITE_FILTER_ENHANCING (1<<29)
-#define SPRITE_FILTER_SOFTENING (2<<29)
-#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
-#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
-#define _SPRA_GAMC 0x70400
-
-#define _SPRB_CTL 0x71280
-#define _SPRB_LINOFF 0x71284
-#define _SPRB_STRIDE 0x71288
-#define _SPRB_POS 0x7128c
-#define _SPRB_SIZE 0x71290
-#define _SPRB_KEYVAL 0x71294
-#define _SPRB_KEYMSK 0x71298
-#define _SPRB_SURF 0x7129c
-#define _SPRB_KEYMAX 0x712a0
-#define _SPRB_TILEOFF 0x712a4
-#define _SPRB_SCALE 0x71304
-#define _SPRB_GAMC 0x71400
-
-#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
-#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
-#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
-#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
-#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
-#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
-#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
-#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
-#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
-#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
-#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
-#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
-
/* VBIOS regs */
#define VGACNTRL 0x71400
# define VGA_DISP_DISABLE (1 << 31)
@@ -3124,10 +2864,6 @@
#define ILK_DPFC_DIS1 (1<<8)
#define ILK_DPFC_DIS2 (1<<9)
-#define IVB_CHICKEN3 0x4200c
-# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
-# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
-
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
@@ -3200,13 +2936,12 @@
#define SDEIER 0xc400c
/* digital port hotplug */
-#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
+#define PCH_PORT_HOTPLUG 0xc4030
#define PORTD_HOTPLUG_ENABLE (1 << 20)
#define PORTD_PULSE_DURATION_2ms (0)
#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
#define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18)
-#define PORTD_PULSE_DURATION_MASK (3 << 18)
#define PORTD_HOTPLUG_NO_DETECT (0)
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
@@ -3215,7 +2950,6 @@
#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
#define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10)
-#define PORTC_PULSE_DURATION_MASK (3 << 10)
#define PORTC_HOTPLUG_NO_DETECT (0)
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
@@ -3224,7 +2958,6 @@
#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
#define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2)
-#define PORTB_PULSE_DURATION_MASK (3 << 2)
#define PORTB_HOTPLUG_NO_DETECT (0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
@@ -3245,15 +2978,15 @@
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
-#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0)
+#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
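/* For reference, the _PIPE() helper used above is conventionally
 * defined earlier in this header as offset arithmetic between the
 * pipe A and pipe B instances, along the lines of: */
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
/* so PCH_DPLL(1) = 0xc6014 + (0xc6018 - 0xc6014) = _PCH_DPLL_B */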
@@ -3323,7 +3056,6 @@
#define _TRANS_VSYNC_A 0xe0014
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
-#define _TRANS_VSYNCSHIFT_A 0xe0028
#define _TRANSA_DATA_M1 0xe0030
#define _TRANSA_DATA_N1 0xe0034
@@ -3334,27 +3066,12 @@
#define _TRANSA_DP_LINK_M2 0xe0048
#define _TRANSA_DP_LINK_N2 0xe004c
-/* Per-transcoder DIP controls */
-
-#define _VIDEO_DIP_CTL_A 0xe0200
-#define _VIDEO_DIP_DATA_A 0xe0208
-#define _VIDEO_DIP_GCP_A 0xe0210
-
-#define _VIDEO_DIP_CTL_B 0xe1200
-#define _VIDEO_DIP_DATA_B 0xe1208
-#define _VIDEO_DIP_GCP_B 0xe1210
-
-#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
-#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
-#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
#define _TRANS_VTOTAL_B 0xe100c
#define _TRANS_VBLANK_B 0xe1010
#define _TRANS_VSYNC_B 0xe1014
-#define _TRANS_VSYNCSHIFT_B 0xe1028
#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
@@ -3362,8 +3079,6 @@
#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
-#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \
- _TRANS_VSYNCSHIFT_B)
#define _TRANSB_DATA_M1 0xe1030
#define _TRANSB_DATA_N1 0xe1034
@@ -3397,10 +3112,7 @@
#define TRANS_FSYNC_DELAY_HB4 (3<<27)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_VIDEO_AUDIO (0<<26)
-#define TRANS_INTERLACE_MASK (7<<21)
#define TRANS_PROGRESSIVE (0<<21)
-#define TRANS_INTERLACED (3<<21)
-#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
#define TRANS_8BPC (0<<5)
#define TRANS_10BPC (1<<5)
#define TRANS_6BPC (2<<5)
@@ -3562,13 +3274,15 @@
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
+#define ADPA_PIPE_ENABLED(V, P) \
+ (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE))
+
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
-#define TRANSCODER(pipe) ((pipe) << 30)
-#define TRANSCODER_CPT(pipe) ((pipe) << 29)
-#define TRANSCODER_MASK (1 << 30)
-#define TRANSCODER_MASK_CPT (3 << 29)
+#define TRANSCODER_A (0)
+#define TRANSCODER_B (1 << 30)
+#define TRANSCODER_MASK (1 << 30)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3584,6 +3298,9 @@
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
+#define HDMI_PIPE_ENABLED(V, P) \
+ (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE))
+
/* PCH SDVOB multiplex with HDMIB */
#define PCH_SDVOB HDMIB
@@ -3611,35 +3328,15 @@
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
#define PANEL_UNLOCK_REGS (0xabcd << 16)
-#define PANEL_UNLOCK_MASK (0xffff << 16)
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
#define PANEL_POWER_OFF (0 << 0)
#define PANEL_POWER_ON (1 << 0)
#define PCH_PP_ON_DELAYS 0xc7208
-#define PANEL_PORT_SELECT_MASK (3 << 30)
-#define PANEL_PORT_SELECT_LVDS (0 << 30)
-#define PANEL_PORT_SELECT_DPA (1 << 30)
#define EDP_PANEL (1 << 30)
-#define PANEL_PORT_SELECT_DPC (2 << 30)
-#define PANEL_PORT_SELECT_DPD (3 << 30)
-#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
-#define PANEL_POWER_UP_DELAY_SHIFT 16
-#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
-#define PANEL_LIGHT_ON_DELAY_SHIFT 0
-
#define PCH_PP_OFF_DELAYS 0xc720c
-#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
-#define PANEL_POWER_DOWN_DELAY_SHIFT 16
-#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
-#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
-
#define PCH_PP_DIVISOR 0xc7210
-#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
-#define PP_REFERENCE_DIVIDER_SHIFT 8
-#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
-#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
#define PCH_DP_B 0xe4100
#define PCH_DPB_AUX_CH_CTL 0xe4110
@@ -3670,7 +3367,6 @@
#define PORT_TRANS_B_SEL_CPT (1<<29)
#define PORT_TRANS_C_SEL_CPT (2<<29)
#define PORT_TRANS_SEL_MASK (3<<29)
-#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
@@ -3709,39 +3405,10 @@
#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
-/* IVB */
-#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
-#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
-#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
-#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
-
-/* legacy values */
-#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
-#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
-#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
-#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
-#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
-
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
-
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
-#define FORCEWAKE_MT 0xa188 /* multi-threaded */
-#define FORCEWAKE_MT_ACK 0x130040
-#define ECOBUS 0xa180
-#define FORCEWAKE_MT_ENABLE (1<<5)
-
-#define GTFIFODBG 0x120000
-#define GT_FIFO_CPU_ERROR_MASK 7
-#define GT_FIFO_OVFERR (1<<2)
-#define GT_FIFO_IAWRERR (1<<1)
-#define GT_FIFO_IARDERR (1<<0)
#define GT_FIFO_FREE_ENTRIES 0x120008
-#define GT_FIFO_NUM_RESERVED_ENTRIES 20
#define GEN6_UCGCTL2 0x9404
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
@@ -3769,11 +3436,7 @@
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
-#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
-#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
-#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
-#define GEN6_RP_MEDIA_HW_MODE (1<<9)
-#define GEN6_RP_MEDIA_SW_MODE (0<<9)
+#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
#define GEN6_RP_MEDIA_IS_GFX (1<<8)
#define GEN6_RP_ENABLE (1<<7)
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
@@ -3825,60 +3488,7 @@
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
-#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
-#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_DATA 0x138128
-#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
-
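/* Hedged sketch of the mailbox handshake these registers imply
 * (wait_for() as in intel_drv.h, 500ms timeout assumed): write the
 * payload, latch the command with READY set, poll for READY to clear. */
I915_WRITE(GEN6_PCODE_DATA, val);
I915_WRITE(GEN6_PCODE_MAILBOX,
	   GEN6_PCODE_READY | GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500))
	DRM_ERROR("timeout waiting for pcode mailbox\n");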
-#define GEN6_GT_CORE_STATUS 0x138060
-#define GEN6_CORE_CPD_STATE_MASK (7<<4)
-#define GEN6_RCn_MASK 7
-#define GEN6_RC0 0
-#define GEN6_RC3 2
-#define GEN6_RC6 3
-#define GEN6_RC7 4
-
-#define G4X_AUD_VID_DID 0x62020
-#define INTEL_AUDIO_DEVCL 0x808629FB
-#define INTEL_AUDIO_DEVBLC 0x80862801
-#define INTEL_AUDIO_DEVCTG 0x80862802
-
-#define G4X_AUD_CNTL_ST 0x620B4
-#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
-#define G4X_ELDV_DEVCTG (1 << 14)
-#define G4X_ELD_ADDR (0xf << 5)
-#define G4X_ELD_ACK (1 << 4)
-#define G4X_HDMIW_HDMIEDID 0x6210C
-
-#define IBX_HDMIW_HDMIEDID_A 0xE2050
-#define IBX_AUD_CNTL_ST_A 0xE20B4
-#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
-#define IBX_ELD_ADDRESS (0x1f << 5)
-#define IBX_ELD_ACK (1 << 4)
-#define IBX_AUD_CNTL_ST2 0xE20C0
-#define IBX_ELD_VALIDB (1 << 0)
-#define IBX_CP_READYB (1 << 1)
-
-#define CPT_HDMIW_HDMIEDID_A 0xE5050
-#define CPT_AUD_CNTL_ST_A 0xE50B4
-#define CPT_AUD_CNTRL_ST2 0xE50C0
-
-/* These are the 4 32-bit write offset registers for each stream
- * output buffer. It determines the offset from the
- * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
- */
-#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
-
-#define IBX_AUD_CONFIG_A 0xe2000
-#define CPT_AUD_CONFIG_A 0xe5000
-#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
-#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
-#define AUD_CONFIG_UPPER_N_SHIFT 20
-#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20)
-#define AUD_CONFIG_LOWER_N_SHIFT 4
-#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
-#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 0d13778..5ad0b51 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -28,7 +28,6 @@
#include "drm.h"
#include "i915_drm.h"
#include "intel_drv.h"
-#include "i915_reg.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
@@ -40,7 +39,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
return false;
if (HAS_PCH_SPLIT(dev))
- dpll_reg = PCH_DPLL(pipe);
+ dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
@@ -65,7 +64,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
else
array = dev_priv->save_palette_b;
- for (i = 0; i < 256; i++)
+ for(i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
}
@@ -87,7 +86,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
else
array = dev_priv->save_palette_b;
- for (i = 0; i < 256; i++)
+ for(i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
}
@@ -770,13 +769,15 @@ static void i915_restore_display(struct drm_device *dev)
/* FIXME: restore TV & SDVO state */
/* only restore FBC info on the platform that supports FBC*/
- intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
+ ironlake_disable_fbc(dev);
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
+ g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else {
+ i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
@@ -830,7 +831,7 @@ int i915_save_state(struct drm_device *dev)
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
- if (INTEL_INFO(dev)->gen >= 6)
+ if (IS_GEN6(dev))
gen6_disable_rps(dev);
/* Cache mode state */
@@ -881,26 +882,23 @@ int i915_restore_state(struct drm_device *dev)
}
mutex_unlock(&dev->struct_mutex);
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_init_clock_gating(dev);
+ intel_init_clock_gating(dev);
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
}
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (IS_GEN6(dev))
gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
mutex_lock(&dev->struct_mutex);
/* Cache mode state */
- I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
- I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+ I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index dac7bba..d623fef 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -385,29 +385,29 @@ TRACE_EVENT(i915_flip_complete,
);
TRACE_EVENT(i915_reg_rw,
- TP_PROTO(bool write, u32 reg, u64 val, int len),
-
- TP_ARGS(write, reg, val, len),
-
- TP_STRUCT__entry(
- __field(u64, val)
- __field(u32, reg)
- __field(u16, write)
- __field(u16, len)
- ),
-
- TP_fast_assign(
- __entry->val = (u64)val;
- __entry->reg = reg;
- __entry->write = write;
- __entry->len = len;
- ),
-
- TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
- __entry->write ? "write" : "read",
- __entry->reg, __entry->len,
- (u32)(__entry->val & 0xffffffff),
- (u32)(__entry->val >> 32))
+ TP_PROTO(bool write, u32 reg, u64 val, int len),
+
+ TP_ARGS(write, reg, val, len),
+
+ TP_STRUCT__entry(
+ __field(u64, val)
+ __field(u32, reg)
+ __field(u16, write)
+ __field(u16, len)
+ ),
+
+ TP_fast_assign(
+ __entry->val = (u64)val;
+ __entry->reg = reg;
+ __entry->write = write;
+ __entry->len = len;
+ ),
+
+ TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+ __entry->write ? "write" : "read",
+ __entry->reg, __entry->len,
+ (u32)(__entry->val & 0xffffffff),
+ (u32)(__entry->val >> 32))
);
#endif /* _I915_TRACE_H_ */
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index bae3edf..2cb8e0b 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -64,7 +64,7 @@ static int intel_dsm(acpi_handle handle, int func, int arg)
case ACPI_TYPE_BUFFER:
if (obj->buffer.length == 4) {
- result = (obj->buffer.pointer[0] |
+ result =(obj->buffer.pointer[0] |
(obj->buffer.pointer[1] << 8) |
(obj->buffer.pointer[2] << 16) |
(obj->buffer.pointer[3] << 24));
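/* Equivalent little-endian unpack, noted for clarity (assumes
 * <asm/unaligned.h> is available): */
result = get_unaligned_le32(obj->buffer.pointer);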
@@ -208,7 +208,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
if (ret < 0) {
- DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
+ DRM_ERROR("failed to get supported _DSM functions\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b48fc2a..e5fa074 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1,5 +1,5 @@
/*
- * Copyright © 2006 Intel Corporation
+ * Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -75,7 +75,7 @@ get_blocksize(void *p)
static void
fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
- const struct lvds_dvo_timing *dvo_timing)
+ struct lvds_dvo_timing *dvo_timing)
{
panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
dvo_timing->hactive_lo;
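/* Worked example (values assumed): hactive_hi = 0x05 and
 * hactive_lo = 0x00 decode to (0x05 << 8) | 0x00 = 1280 active pixels. */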
@@ -116,75 +116,20 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
drm_mode_set_name(panel_fixed_mode);
}
-static bool
-lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
- const struct lvds_dvo_timing *b)
-{
- if (a->hactive_hi != b->hactive_hi ||
- a->hactive_lo != b->hactive_lo)
- return false;
-
- if (a->hsync_off_hi != b->hsync_off_hi ||
- a->hsync_off_lo != b->hsync_off_lo)
- return false;
-
- if (a->hsync_pulse_width != b->hsync_pulse_width)
- return false;
-
- if (a->hblank_hi != b->hblank_hi ||
- a->hblank_lo != b->hblank_lo)
- return false;
-
- if (a->vactive_hi != b->vactive_hi ||
- a->vactive_lo != b->vactive_lo)
- return false;
-
- if (a->vsync_off != b->vsync_off)
- return false;
-
- if (a->vsync_pulse_width != b->vsync_pulse_width)
- return false;
-
- if (a->vblank_hi != b->vblank_hi ||
- a->vblank_lo != b->vblank_lo)
- return false;
-
- return true;
-}
-
-static const struct lvds_dvo_timing *
-get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
- const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
- int index)
-{
- /*
- * the size of fp_timing varies on the different platform.
- * So calculate the DVO timing relative offset in LVDS data
- * entry to get the DVO timing entry
- */
-
- int lfp_data_size =
- lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
- lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
- int dvo_timing_offset =
- lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
- lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
- char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
-
- return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
-}
-
/* Try to find integrated panel data */
static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
- const struct bdb_lvds_options *lvds_options;
- const struct bdb_lvds_lfp_data *lvds_lfp_data;
- const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
- const struct lvds_dvo_timing *panel_dvo_timing;
+ struct bdb_lvds_options *lvds_options;
+ struct bdb_lvds_lfp_data *lvds_lfp_data;
+ struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+ struct bdb_lvds_lfp_data_entry *entry;
+ struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
- int i, downclock;
+ int lfp_data_size, dvo_timing_offset;
+ int i, temp_downclock;
+ struct drm_display_mode *temp_mode;
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
@@ -206,44 +151,75 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->lvds_vbt = 1;
- panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
- lvds_lfp_data_ptrs,
- lvds_options->panel_type);
+ lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+ entry = (struct bdb_lvds_lfp_data_entry *)
+ ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
+ lvds_options->panel_type));
+ dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
+
+ /*
+ * the size of fp_timing varies across platforms, so compute the DVO
+ * timing's relative offset within the LVDS data entry to locate the
+ * DVO timing entry
+ */
+ dvo_timing = (struct lvds_dvo_timing *)
+ ((unsigned char *)entry + dvo_timing_offset);
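/* Worked example (offsets assumed): with ptr[0].fp_timing_offset = 0,
 * ptr[0].dvo_timing_offset = 46 and ptr[1].dvo_timing_offset = 112,
 * lfp_data_size = 112 - 46 = 66 bytes per entry and the DVO timing of
 * entry i sits at data + 66*i + 46. */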
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing);
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
+ temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
+ temp_downclock = panel_fixed_mode->clock;
/*
- * Iterate over the LVDS panel timing info to find the lowest clock
- * for the native resolution.
+ * walk the LVDS panel timing entries in the VBT to check whether
+ * a lower-clock (downclock) mode for this panel exists.
*/
- downclock = panel_dvo_timing->clock;
for (i = 0; i < 16; i++) {
- const struct lvds_dvo_timing *dvo_timing;
-
- dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
- lvds_lfp_data_ptrs,
- i);
- if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
- dvo_timing->clock < downclock)
- downclock = dvo_timing->clock;
+ entry = (struct bdb_lvds_lfp_data_entry *)
+ ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
+ dvo_timing = (struct lvds_dvo_timing *)
+ ((unsigned char *)entry + dvo_timing_offset);
+
+ fill_detail_timing_data(temp_mode, dvo_timing);
+
+ if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
+ temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
+ temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
+ temp_mode->htotal == panel_fixed_mode->htotal &&
+ temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
+ temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
+ temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
+ temp_mode->vtotal == panel_fixed_mode->vtotal &&
+ temp_mode->clock < temp_downclock) {
+ /*
+ * a downclock was already found, but keep looking
+ * for an even lower one.
+ */
+ temp_downclock = temp_mode->clock;
+ }
+ /* clear it to zero */
+ memset(temp_mode, 0, sizeof(*temp_mode));
}
-
- if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
+ kfree(temp_mode);
+ if (temp_downclock < panel_fixed_mode->clock &&
+ i915_lvds_downclock) {
dev_priv->lvds_downclock_avail = 1;
- dev_priv->lvds_downclock = downclock * 10;
+ dev_priv->lvds_downclock = temp_downclock;
DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
"Normal Clock %dKHz, downclock %dKHz\n",
- panel_fixed_mode->clock, 10*downclock);
+ temp_downclock, panel_fixed_mode->clock);
}
+ return;
}
/* Try to find sdvo panel data */
@@ -310,13 +286,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->lvds_use_ssc = general->enable_ssc;
dev_priv->lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
- dev_priv->display_clock_mode = general->display_clock_mode;
- DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
- dev_priv->int_tv_support,
- dev_priv->int_crt_support,
- dev_priv->lvds_use_ssc,
- dev_priv->lvds_ssc_freq,
- dev_priv->display_clock_mode);
}
}
@@ -389,7 +358,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
if (p_child->dvo_port != DEVICE_PORT_DVOB &&
p_child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
- DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
continue;
}
DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
@@ -404,13 +373,15 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->i2c_speed = p_child->i2c_speed;
p_mapping->initialized = 1;
- DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
p_mapping->dvo_port,
p_mapping->slave_addr,
p_mapping->dvo_wiring,
p_mapping->ddc_pin,
- p_mapping->i2c_pin);
+ p_mapping->i2c_pin,
+ p_mapping->i2c_speed);
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
@@ -570,10 +541,10 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
count++;
}
if (!count) {
- DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+ DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
return;
}
- dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+ dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
if (!dev_priv->child_dev) {
DRM_DEBUG_KMS("No memory space for child device\n");
return;
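
The reverted allocation swaps `kcalloc(count, size, ...)` for `kzalloc(size * count, ...)`. The practical difference is that `kcalloc` rejects a multiplication that would overflow before allocating anything, while the open-coded product silently wraps. A userspace sketch of the checked variant:

```c
#include <stdint.h>
#include <stdlib.h>

/* calloc-style zeroing allocation with an explicit overflow check,
 * mirroring what kcalloc() does in the kernel. */
static void *zalloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;		/* n * size would wrap */
	return calloc(n, size);		/* zeroes, like kzalloc */
}
```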
@@ -616,7 +587,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* Default to using SSC */
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
- DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
+ DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
/* eDP data */
dev_priv->edp.bpp = 18;
@@ -665,7 +636,7 @@ intel_parse_bios(struct drm_device *dev)
if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) {
struct vbt_header *vbt = dev_priv->opregion.vbt;
if (memcmp(vbt->signature, "$VBT", 4) == 0) {
- DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
+ DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
vbt->signature);
bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
} else
@@ -690,7 +661,7 @@ intel_parse_bios(struct drm_device *dev)
}
if (!vbt) {
- DRM_DEBUG_DRIVER("VBT signature missing\n");
+ DRM_ERROR("VBT signature missing\n");
pci_unmap_rom(pdev, bios);
return -1;
}
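
Both VBT sources here hinge on the 4-byte `"$VBT"` signature: the OpRegion copy is validated with a single `memcmp` in the earlier hunk, and the PCI ROM path (mostly outside this hunk) locates the header by scanning for it. A standalone model of such a scan:

```c
#include <stddef.h>
#include <string.h>

/* Scan a mapped ROM image for the "$VBT" header signature; returns its
 * offset, or (size_t)-1 when the signature is absent. */
static size_t find_vbt(const unsigned char *rom, size_t size)
{
	size_t i;

	for (i = 0; i + 4 <= size; i++)
		if (memcmp(rom + i, "$VBT", 4) == 0)
			return i;
	return (size_t)-1;
}
```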
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index dbda6e3..5f8e4ed 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2006 Intel Corporation
+ * Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -120,9 +120,7 @@ struct bdb_general_features {
u8 ssc_freq:1;
u8 enable_lfp_on_override:1;
u8 disable_ssc_ddt:1;
- u8 rsvd7:1;
- u8 display_clock_mode:1;
- u8 rsvd8:1; /* finish byte */
+ u8 rsvd8:3; /* finish byte */
/* bits 3 */
u8 disable_smooth_vision:1;
@@ -135,10 +133,7 @@ struct bdb_general_features {
/* bits 5 */
u8 int_crt_support:1;
u8 int_tv_support:1;
- u8 int_efp_support:1;
- u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
- u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
- u8 rsvd11:3; /* finish byte */
+ u8 rsvd11:6; /* finish byte */
} __attribute__((packed));
/* pre-915 */
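
The two hunks above fold named bits back into reserved fields. The invariant the "finish byte" comments guard is that the bit widths within each flag byte must still sum to 8, so every dropped bit widens the adjacent reserved field by the same amount. In miniature:

```c
/* Each byte of flags must stay exactly 8 bits wide: here 1 + 1 + 6 == 8,
 * so removing a named bit means growing rsvd by the same width. */
struct flags_byte {
	unsigned char a:1;
	unsigned char b:1;
	unsigned char rsvd:6;	/* finish byte */
} __attribute__((packed));
```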
@@ -202,7 +197,8 @@ struct bdb_general_features {
struct child_device_config {
u16 handle;
u16 device_type;
- u8 device_id[10]; /* ascii string */
+ u8 i2c_speed;
+ u8 rsvd[9];
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;
@@ -244,7 +240,7 @@ struct bdb_general_definitions {
* And the device num is related with the size of general definition
* block. It is obtained by using the following formula:
* number = (block_size - sizeof(bdb_general_definitions))/
- * sizeof(child_device_config);
+ * sizeof(child_device_config);
*/
struct child_device_config devices[0];
} __attribute__((packed));
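
The comment above gives the count formula directly: since `devices[]` is a flexible trailing array, the number of entries is whatever fits in the block after the fixed header. As a sketch:

```c
#include <stddef.h>

/* Entries in a trailing flexible array: whatever remains of the block
 * after the fixed-size header, divided by the per-entry size. */
static size_t child_device_count(size_t block_size, size_t header_size,
				 size_t entry_size)
{
	if (block_size < header_size || entry_size == 0)
		return 0;
	return (block_size - header_size) / entry_size;
}
```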
@@ -450,11 +446,11 @@ struct bdb_driver_features {
#define EDP_VSWING_1_2V 3
struct edp_power_seq {
- u16 t1_t3;
- u16 t8;
+ u16 t3;
+ u16 t7;
u16 t9;
u16 t10;
- u16 t11_t12;
+ u16 t12;
} __attribute__ ((packed));
struct edp_link_params {
@@ -467,12 +463,8 @@ struct edp_link_params {
struct bdb_edp {
struct edp_power_seq power_seqs[16];
u32 color_depth;
- struct edp_link_params link_params[16];
u32 sdrrs_msa_timing_delay;
-
- /* ith bit indicates enabled/disabled for (i+1)th panel */
- u16 edp_s3d_feature;
- u16 edp_t3_optimization;
+ struct edp_link_params link_params[16];
} __attribute__ ((packed));
void intel_setup_bios(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4d3d736..0979d88 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -24,7 +24,6 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include "drmP.h"
@@ -70,7 +69,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
- switch (mode) {
+ switch(mode) {
case DRM_MODE_DPMS_ON:
temp |= ADPA_DAC_ENABLE;
break;
@@ -153,13 +152,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
- /* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_CPT(dev))
- adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
- else if (intel_crtc->pipe == 0)
- adpa |= ADPA_PIPE_A_SELECT;
- else
- adpa |= ADPA_PIPE_B_SELECT;
+ if (intel_crtc->pipe == 0) {
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_A_SEL_CPT;
+ else
+ adpa |= ADPA_PIPE_A_SELECT;
+ } else {
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_B_SEL_CPT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
+ }
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
@@ -541,24 +544,6 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
-{
- DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident);
- return 1;
-}
-
-static const struct dmi_system_id intel_no_crt[] = {
- {
- .callback = intel_no_crt_dmi_callback,
- .ident = "ACER ZGB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
- },
- },
- { }
-};
-
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
@@ -566,10 +551,6 @@ void intel_crt_init(struct drm_device *dev)
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- /* Skip machines without VGA that falsely report hotplug events */
- if (dmi_check_system(intel_no_crt))
- return;
-
crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
if (!crt)
return;
@@ -594,10 +575,7 @@ void intel_crt_init(struct drm_device *dev)
1 << INTEL_ANALOG_CLONE_BIT |
1 << INTEL_SDVO_LVDS_CLONE_BIT);
crt->base.crtc_mask = (1 << 0) | (1 << 1);
- if (IS_GEN2(dev))
- connector->interlace_allowed = 0;
- else
- connector->interlace_allowed = 1;
+ connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1e5790f..2e0c24d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,58 +24,56 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
-#include <drm/drm_edid.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
+
#include "drm_crtc_helper.h"
-#include <linux/dma_remapping.h>
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
-bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
+bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
typedef struct {
- /* given values */
- int n;
- int m1, m2;
- int p1, p2;
- /* derived values */
- int dot;
- int vco;
- int m;
- int p;
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
} intel_clock_t;
typedef struct {
- int min, max;
+ int min, max;
} intel_range_t;
typedef struct {
- int dot_limit;
- int p2_slow, p2_fast;
+ int dot_limit;
+ int p2_slow, p2_fast;
} intel_p2_t;
#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
- intel_range_t dot, vco, n, m, m1, m2, p, p1;
- intel_p2_t p2;
- bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
- int, int, intel_clock_t *, intel_clock_t *);
+ intel_range_t dot, vco, n, m, m1, m2, p, p1;
+ intel_p2_t p2;
+ bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
+ int, int, intel_clock_t *);
};
/* FDI */
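
For the i9xx-style PLLs these limit tables constrain, the "derived values" in `intel_clock_t` follow from the given dividers by fixed formulas. A sketch matching the driver's non-Pineview `intel_clock()` helper (that helper sits outside this hunk), with refclk in kHz:

```c
/* Derive m, p, vco and dot from the raw divider values; the +2 offsets
 * come from how the hardware encodes the N/M1/M2 register fields. */
static void intel_clock_calc(int refclk, intel_clock_t *clock)
{
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
```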
@@ -83,21 +81,17 @@ struct intel_limit {
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock);
+ int target, int refclk, intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock);
+ int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock);
+ int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock);
+ int target, int refclk, intel_clock_t *best_clock);
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
@@ -110,56 +104,56 @@ intel_fdi_link_freq(struct drm_device *dev)
}
static const intel_limit_t intel_limits_i8xx_dvo = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 930000, .max = 1400000 },
- .n = { .min = 3, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 2, .max = 33 },
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
.p2 = { .dot_limit = 165000,
.p2_slow = 4, .p2_fast = 2 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_i8xx_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 930000, .max = 1400000 },
- .n = { .min = 3, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 1, .max = 6 },
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 1, .max = 6 },
.p2 = { .dot_limit = 165000,
.p2_slow = 14, .p2_fast = 7 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_i9xx_sdvo = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1400000, .max = 2800000 },
- .n = { .min = 1, .max = 6 },
- .m = { .min = 70, .max = 120 },
- .m1 = { .min = 10, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 10, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_i9xx_lvds = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1400000, .max = 2800000 },
- .n = { .min = 1, .max = 6 },
- .m = { .min = 70, .max = 120 },
- .m1 = { .min = 10, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 7, .max = 98 },
- .p1 = { .min = 1, .max = 8 },
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 10, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 7, .max = 98 },
+ .p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
.p2_slow = 14, .p2_fast = 7 },
.find_pll = intel_find_best_PLL,
@@ -227,44 +221,44 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
};
static const intel_limit_t intel_limits_g4x_display_port = {
- .dot = { .min = 161670, .max = 227000 },
- .vco = { .min = 1750000, .max = 3500000},
- .n = { .min = 1, .max = 2 },
- .m = { .min = 97, .max = 108 },
- .m1 = { .min = 0x10, .max = 0x12 },
- .m2 = { .min = 0x05, .max = 0x06 },
- .p = { .min = 10, .max = 20 },
- .p1 = { .min = 1, .max = 2},
- .p2 = { .dot_limit = 0,
+ .dot = { .min = 161670, .max = 227000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 97, .max = 108 },
+ .m1 = { .min = 0x10, .max = 0x12 },
+ .m2 = { .min = 0x05, .max = 0x06 },
+ .p = { .min = 10, .max = 20 },
+ .p1 = { .min = 1, .max = 2},
+ .p2 = { .dot_limit = 0,
.p2_slow = 10, .p2_fast = 10 },
- .find_pll = intel_find_pll_g4x_dp,
+ .find_pll = intel_find_pll_g4x_dp,
};
static const intel_limit_t intel_limits_pineview_sdvo = {
- .dot = { .min = 20000, .max = 400000},
- .vco = { .min = 1700000, .max = 3500000 },
+ .dot = { .min = 20000, .max = 400000},
+ .vco = { .min = 1700000, .max = 3500000 },
/* Pineview's Ncounter is a ring counter */
- .n = { .min = 3, .max = 6 },
- .m = { .min = 2, .max = 256 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
/* Pineview only has one combined m divider, which we treat as m2. */
- .m1 = { .min = 0, .max = 0 },
- .m2 = { .min = 0, .max = 254 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_pineview_lvds = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1700000, .max = 3500000 },
- .n = { .min = 3, .max = 6 },
- .m = { .min = 2, .max = 256 },
- .m1 = { .min = 0, .max = 0 },
- .m2 = { .min = 0, .max = 254 },
- .p = { .min = 7, .max = 112 },
- .p1 = { .min = 1, .max = 8 },
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1700000, .max = 3500000 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 7, .max = 112 },
+ .p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_find_best_PLL,
@@ -326,7 +320,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
+ .p1 = { .min = 2,.max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_g4x_find_best_PLL,
@@ -340,24 +334,24 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 14, .max = 42 },
- .p1 = { .min = 2, .max = 6 },
+ .p1 = { .min = 2,.max = 6 },
.p2 = { .dot_limit = 225000,
.p2_slow = 7, .p2_fast = 7 },
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_display_port = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000},
- .n = { .min = 1, .max = 2 },
- .m = { .min = 81, .max = 90 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 10, .max = 20 },
- .p1 = { .min = 1, .max = 2},
- .p2 = { .dot_limit = 0,
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000},
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 81, .max = 90 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 10, .max = 20 },
+ .p1 = { .min = 1, .max = 2},
+ .p2 = { .dot_limit = 0,
.p2_slow = 10, .p2_fast = 10 },
- .find_pll = intel_find_pll_ironlake_dp,
+ .find_pll = intel_find_pll_ironlake_dp,
};
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
@@ -409,7 +403,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
limit = &intel_limits_g4x_display_port;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
@@ -493,34 +487,33 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
const intel_clock_t *clock)
{
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- INTELPllInvalid("p1 out of range\n");
+ INTELPllInvalid ("p1 out of range\n");
if (clock->p < limit->p.min || limit->p.max < clock->p)
- INTELPllInvalid("p out of range\n");
+ INTELPllInvalid ("p out of range\n");
if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
- INTELPllInvalid("m2 out of range\n");
+ INTELPllInvalid ("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
- INTELPllInvalid("m1 out of range\n");
+ INTELPllInvalid ("m1 out of range\n");
if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
- INTELPllInvalid("m1 <= m2\n");
+ INTELPllInvalid ("m1 <= m2\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
- INTELPllInvalid("m out of range\n");
+ INTELPllInvalid ("m out of range\n");
if (clock->n < limit->n.min || limit->n.max < clock->n)
- INTELPllInvalid("n out of range\n");
+ INTELPllInvalid ("n out of range\n");
if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- INTELPllInvalid("vco out of range\n");
+ INTELPllInvalid ("vco out of range\n");
/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
* connector, etc., rather than just a single range.
*/
if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- INTELPllInvalid("dot out of range\n");
+ INTELPllInvalid ("dot out of range\n");
return true;
}
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
@@ -548,7 +541,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
clock.p2 = limit->p2.p2_fast;
}
- memset(best_clock, 0, sizeof(*best_clock));
+ memset (best_clock, 0, sizeof (*best_clock));
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
@@ -567,9 +560,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
- if (match_clock &&
- clock.p != match_clock->p)
- continue;
this_err = abs(clock.dot - target);
if (this_err < err) {
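
Condensed, the search restored above is a brute-force sweep: fix p2 from the dot-clock limit (the LVDS dual-channel case is omitted here), enumerate the divider ranges, reject invalid candidates, and keep the one whose computed dot clock lands closest to the target. A compact model reusing the `intel_clock_calc()` sketch from earlier; loop bounds are condensed relative to the real code:

```c
#include <stdbool.h>
#include <stdlib.h>

static bool find_best_pll(const intel_limit_t *limit, int target, int refclk,
			  intel_clock_t *best)
{
	intel_clock_t c;
	int err = target;	/* any valid candidate must beat this */
	bool found = false;

	c.p2 = (target <= limit->p2.dot_limit) ? limit->p2.p2_slow
					       : limit->p2.p2_fast;
	for (c.n = limit->n.min; c.n <= limit->n.max; c.n++)
	  for (c.m1 = limit->m1.min; c.m1 <= limit->m1.max; c.m1++)
	    for (c.m2 = limit->m2.min;
		 c.m2 < c.m1 && c.m2 <= limit->m2.max; c.m2++)	/* m1 > m2 */
	      for (c.p1 = limit->p1.min; c.p1 <= limit->p1.max; c.p1++) {
			intel_clock_calc(refclk, &c);
			if (c.dot < limit->dot.min || c.dot > limit->dot.max)
				continue;	/* stand-in for intel_PLL_is_valid() */
			if (abs(c.dot - target) < err) {
				*best = c;
				err = abs(c.dot - target);
				found = true;
			}
	      }
	return found;
}
```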
@@ -586,8 +576,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -634,9 +623,6 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
- if (match_clock &&
- clock.p != match_clock->p)
- continue;
this_err = abs(clock.dot - target);
if (this_err < err_most) {
@@ -654,8 +640,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
@@ -681,8 +666,7 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, intel_clock_t *best_clock)
{
intel_clock_t clock;
if (target < 200000) {
@@ -817,19 +801,6 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- if (HAS_PCH_CPT(dev_priv->dev)) {
- u32 pch_dpll;
-
- pch_dpll = I915_READ(PCH_DPLL_SEL);
-
- /* Make sure the selected PLL is enabled to the transcoder */
- WARN(!((pch_dpll >> (4 * pipe)) & 8),
- "transcoder %d PLL not enabled\n", pipe);
-
- /* Convert the transcoder pipe number to a pll pipe number */
- pipe = (pch_dpll >> (4 * pipe)) & 1;
- }
-
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
@@ -906,7 +877,7 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
int pp_reg, lvds_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
- bool locked = true;
+ bool locked = locked;
if (HAS_PCH_SPLIT(dev_priv->dev)) {
pp_reg = PCH_PP_CONTROL;
@@ -929,17 +900,13 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-void assert_pipe(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+static void assert_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
- /* if we need the pipe A quirk it must be always on */
- if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
- state = true;
-
reg = PIPECONF(pipe);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
@@ -947,25 +914,22 @@ void assert_pipe(struct drm_i915_private *dev_priv,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-static void assert_plane(struct drm_i915_private *dev_priv,
- enum plane plane, bool state)
+static void assert_plane_enabled(struct drm_i915_private *dev_priv,
+ enum plane plane)
{
int reg;
u32 val;
- bool cur_state;
reg = DSPCNTR(plane);
val = I915_READ(reg);
- cur_state = !!(val & DISPLAY_PLANE_ENABLE);
- WARN(cur_state != state,
- "plane %c assertion failure (expected %s, current %s)\n",
- plane_name(plane), state_string(state), state_string(cur_state));
+ WARN(!(val & DISPLAY_PLANE_ENABLE),
+ "plane %c assertion failure, should be active but is disabled\n",
+ plane_name(plane));
}
-#define assert_plane_enabled(d, p) assert_plane(d, p, true)
-#define assert_plane_disabled(d, p) assert_plane(d, p, false)
-
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -974,14 +938,8 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
int cur_pipe;
/* Planes are fixed to pipes on ILK+ */
- if (HAS_PCH_SPLIT(dev_priv->dev)) {
- reg = DSPCNTR(pipe);
- val = I915_READ(reg);
- WARN((val & DISPLAY_PLANE_ENABLE),
- "plane %c assertion failure, should be disabled but not\n",
- plane_name(pipe));
+ if (HAS_PCH_SPLIT(dev_priv->dev))
return;
- }
/* Need to check both planes against the pipe */
for (i = 0; i < 2; i++) {
@@ -1021,76 +979,11 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 port_sel, u32 val)
-{
- if ((val & DP_PORT_EN) == 0)
- return false;
-
- if (HAS_PCH_CPT(dev_priv->dev)) {
- u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
- u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
- if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
- return false;
- } else {
- if ((val & DP_PIPE_MASK) != (pipe << 30))
- return false;
- }
- return true;
-}
-
-static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
-{
- if ((val & PORT_ENABLE) == 0)
- return false;
-
- if (HAS_PCH_CPT(dev_priv->dev)) {
- if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
- return false;
- } else {
- if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
- return false;
- }
- return true;
-}
-
-static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
-{
- if ((val & LVDS_PORT_EN) == 0)
- return false;
-
- if (HAS_PCH_CPT(dev_priv->dev)) {
- if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
- return false;
- } else {
- if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
- return false;
- }
- return true;
-}
-
-static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
-{
- if ((val & ADPA_DAC_ENABLE) == 0)
- return false;
- if (HAS_PCH_CPT(dev_priv->dev)) {
- if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
- return false;
- } else {
- if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
- return false;
- }
- return true;
-}
-
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg, u32 port_sel)
+ enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
+ WARN(DP_PIPE_ENABLED(val, pipe),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
}
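
All four helpers deleted above share one shape: bail out if the port's enable bit is clear, then compare a routing-select field against the pipe under test, with CPT keeping that field in a transcoder register rather than the port register itself. A generic sketch of the predicate (bit layouts are illustrative, not the real register format):

```c
#include <stdbool.h>
#include <stdint.h>

/* Port is "on this pipe" iff it is enabled and its routing-select field
 * names the pipe; callers supply the per-port bit layout. */
static bool port_routed_to_pipe(uint32_t val, uint32_t enable_bit,
				uint32_t sel_mask, uint32_t pipe_sel)
{
	if ((val & enable_bit) == 0)
		return false;
	return (val & sel_mask) == pipe_sel;
}
```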
@@ -1099,8 +992,8 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
- "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
+ WARN(HDMI_PIPE_ENABLED(val, pipe),
+ "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
}
@@ -1110,19 +1003,19 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D);
reg = PCH_ADPA;
val = I915_READ(reg);
- WARN(adpa_pipe_enabled(dev_priv, val, pipe),
+ WARN(ADPA_PIPE_ENABLED(val, pipe),
"PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
reg = PCH_LVDS;
val = I915_READ(reg);
- WARN(lvds_pipe_enabled(dev_priv, val, pipe),
+ WARN(LVDS_PIPE_ENABLED(val, pipe),
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
@@ -1212,9 +1105,6 @@ static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
int reg;
u32 val;
- if (pipe > 1)
- return;
-
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
@@ -1233,11 +1123,7 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
- u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
- pll_sel = TRANSC_DPLL_ENABLE;
-
- if (pipe > 1)
- return;
+ u32 val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
@@ -1245,15 +1131,6 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
/* Make sure transcoder isn't still depending on us */
assert_transcoder_disabled(dev_priv, pipe);
- if (pipe == 0)
- pll_sel |= TRANSC_DPLLA_SEL;
- else if (pipe == 1)
- pll_sel |= TRANSC_DPLLB_SEL;
-
-
- if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
- return;
-
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
@@ -1266,8 +1143,7 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
- u32 val, pipeconf_val;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ u32 val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
@@ -1281,27 +1157,12 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
reg = TRANSCONF(pipe);
val = I915_READ(reg);
- pipeconf_val = I915_READ(PIPECONF(pipe));
-
- if (HAS_PCH_IBX(dev_priv->dev)) {
- /*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
- */
- val &= ~PIPE_BPC_MASK;
- val |= pipeconf_val & PIPE_BPC_MASK;
- }
-
- val &= ~TRANS_INTERLACE_MASK;
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
- if (HAS_PCH_IBX(dev_priv->dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
- val |= TRANS_LEGACY_INTERLACED_ILK;
- else
- val |= TRANS_INTERLACED;
- else
- val |= TRANS_PROGRESSIVE;
-
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ val &= ~PIPE_BPC_MASK;
+ val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
I915_WRITE(reg, val | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
@@ -1326,7 +1187,7 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
- DRM_ERROR("failed to disable transcoder %d\n", pipe);
+ DRM_ERROR("failed to disable transcoder\n");
}
/**
@@ -1411,17 +1272,6 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
-/*
- * Plane regs are double buffered, going from enabled->disabled needs a
- * trigger in order to latch. The display address reg provides this.
- */
-static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
- enum plane plane)
-{
- I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
- I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
-}
-
/**
* intel_enable_plane - enable a display plane on a given pipe
* @dev_priv: i915 private structure
@@ -1445,10 +1295,20 @@ static void intel_enable_plane(struct drm_i915_private *dev_priv,
return;
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
+/*
+ * Plane regs are double buffered, going from enabled->disabled needs a
+ * trigger in order to latch. The display address reg provides this.
+ */
+static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane)
+{
+ u32 reg = DSPADDR(plane);
+ I915_WRITE(reg, I915_READ(reg));
+}
+
/**
* intel_disable_plane - disable a display plane
* @dev_priv: i915 private structure
@@ -1474,24 +1334,19 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
}
static void disable_pch_dp(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg, u32 port_sel)
+ enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
- DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
+ if (DP_PIPE_ENABLED(val, pipe))
I915_WRITE(reg, val & ~DP_PORT_EN);
- }
}
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
- DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
- reg, pipe);
+ if (HDMI_PIPE_ENABLED(val, pipe))
I915_WRITE(reg, val & ~PORT_ENABLE);
- }
}
/* Disable any ports connected to this transcoder */
@@ -1503,19 +1358,18 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
val = I915_READ(PCH_PP_CONTROL);
I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
- disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
- disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
- disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_B);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_C);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_D);
reg = PCH_ADPA;
val = I915_READ(reg);
- if (adpa_pipe_enabled(dev_priv, val, pipe))
+ if (ADPA_PIPE_ENABLED(val, pipe))
I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
reg = PCH_LVDS;
val = I915_READ(reg);
- if (lvds_pipe_enabled(dev_priv, val, pipe)) {
- DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
+ if (LVDS_PIPE_ENABLED(val, pipe)) {
I915_WRITE(reg, val & ~LVDS_PORT_EN);
POSTING_READ(reg);
udelay(100);
@@ -1526,28 +1380,6 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
disable_pch_hdmi(dev_priv, pipe, HDMID);
}
-static void i8xx_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 fbc_ctl;
-
- /* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
- if ((fbc_ctl & FBC_CTL_EN) == 0)
- return;
-
- fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- /* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
- return;
- }
-
- DRM_DEBUG_KMS("disabled FBC\n");
-}
-
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
@@ -1556,25 +1388,36 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int cfb_pitch;
int plane, i;
u32 fbc_ctl, fbc_ctl2;
- cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
- if (fb->pitches[0] < cfb_pitch)
- cfb_pitch = fb->pitches[0];
+ if (fb->pitch == dev_priv->cfb_pitch &&
+ obj->fence_reg == dev_priv->cfb_fence &&
+ intel_crtc->plane == dev_priv->cfb_plane &&
+ I915_READ(FBC_CONTROL) & FBC_CTL_EN)
+ return;
+
+ i8xx_disable_fbc(dev);
+
+ dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+
+ if (fb->pitch < dev_priv->cfb_pitch)
+ dev_priv->cfb_pitch = fb->pitch;
/* FBC_CTL wants 64B units */
- cfb_pitch = (cfb_pitch / 64) - 1;
- plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
+ plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
/* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= plane;
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
+ if (obj->tiling_mode != I915_TILING_NONE)
+ fbc_ctl2 |= FBC_CTL_CPU_FENCE;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1582,13 +1425,36 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
- fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
- fbc_ctl |= obj->fence_reg;
+ if (obj->tiling_mode != I915_TILING_NONE)
+ fbc_ctl |= dev_priv->cfb_fence;
I915_WRITE(FBC_CONTROL, fbc_ctl);
- DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
- cfb_pitch, crtc->y, intel_crtc->plane);
+ DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
+ dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
+}
+
+void i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
}
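
Both sides of the i8xx hunk encode the compressed framebuffer pitch the same way; the revert merely moves the value back into `dev_priv->cfb_pitch`. The arithmetic: budget one compressed line of `cfb_size / FBC_LL_SIZE` bytes, clamp that to the real scanout pitch, then convert to the register's 64-byte-minus-one units. As a sketch:

```c
/* Compute the FBC_CTL stride field: the compressed-line budget, clamped
 * to the scanout pitch, expressed in 64-byte units minus one. */
static unsigned int fbc_ctl_pitch(unsigned int cfb_size,
				  unsigned int fb_pitch,
				  unsigned int ll_size)
{
	unsigned int pitch = cfb_size / ll_size;

	if (fb_pitch < pitch)
		pitch = fb_pitch;
	return pitch / 64 - 1;	/* FBC_CTL wants 64B units */
}
```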
static bool i8xx_fbc_enabled(struct drm_device *dev)
@@ -1610,9 +1476,30 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
+ dev_priv->cfb_fence == obj->fence_reg &&
+ dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
+ dev_priv->cfb_y = crtc->y;
+
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
- I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+ } else {
+ I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+ }
I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
@@ -1625,7 +1512,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
-static void g4x_disable_fbc(struct drm_device *dev)
+void g4x_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
@@ -1680,12 +1567,32 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
u32 dpfc_ctl;
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
+ dev_priv->cfb_fence == obj->fence_reg &&
+ dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_offset == obj->gtt_offset &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
+ dev_priv->cfb_offset = obj->gtt_offset;
+ dev_priv->cfb_y = crtc->y;
+
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- /* Set persistent mode for front-buffer rendering, ala X. */
- dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
- dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
- I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
+ I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+ } else {
+ I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+ }
I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
@@ -1697,7 +1604,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
if (IS_GEN6(dev)) {
I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
sandybridge_blit_fbc_update(dev);
}
@@ -1705,7 +1612,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
-static void ironlake_disable_fbc(struct drm_device *dev)
+void ironlake_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
@@ -1737,109 +1644,24 @@ bool intel_fbc_enabled(struct drm_device *dev)
return dev_priv->display.fbc_enabled(dev);
}
-static void intel_fbc_work_fn(struct work_struct *__work)
-{
- struct intel_fbc_work *work =
- container_of(to_delayed_work(__work),
- struct intel_fbc_work, work);
- struct drm_device *dev = work->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- if (work == dev_priv->fbc_work) {
- /* Double check that we haven't switched fb without cancelling
- * the prior work.
- */
- if (work->crtc->fb == work->fb) {
- dev_priv->display.enable_fbc(work->crtc,
- work->interval);
-
- dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->cfb_fb = work->crtc->fb->base.id;
- dev_priv->cfb_y = work->crtc->y;
- }
-
- dev_priv->fbc_work = NULL;
- }
- mutex_unlock(&dev->struct_mutex);
-
- kfree(work);
-}
-
-static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->fbc_work == NULL)
- return;
-
- DRM_DEBUG_KMS("cancelling pending FBC enable\n");
-
- /* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc_work, so we can perform the cancellation
- * entirely asynchronously.
- */
- if (cancel_delayed_work(&dev_priv->fbc_work->work))
- /* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc_work);
-
- /* Mark the work as no longer wanted so that if it does
- * wake-up (because the work was already running and waiting
- * for our mutex), it will discover that is no longer
- * necessary to run.
- */
- dev_priv->fbc_work = NULL;
-}
-
-static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
- struct intel_fbc_work *work;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
if (!dev_priv->display.enable_fbc)
return;
- intel_cancel_fbc_work(dev_priv);
-
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (work == NULL) {
- dev_priv->display.enable_fbc(crtc, interval);
- return;
- }
-
- work->crtc = crtc;
- work->fb = crtc->fb;
- work->interval = interval;
- INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
- dev_priv->fbc_work = work;
-
- DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
-
- /* Delay the actual enabling to let pageflipping cease and the
- * display to settle before starting the compression. Note that
- * this delay also serves a second purpose: it allows for a
- * vblank to pass after disabling the FBC before we attempt
- * to modify the control registers.
- *
- * A more complicated solution would involve tracking vblanks
- * following the termination of the page-flipping sequence
- * and indeed performing the enable as a co-routine and not
- * waiting synchronously upon the vblank.
- */
- schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+ dev_priv->display.enable_fbc(crtc, interval);
}
void intel_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_cancel_fbc_work(dev_priv);
-
if (!dev_priv->display.disable_fbc)
return;
dev_priv->display.disable_fbc(dev);
- dev_priv->cfb_plane = -1;
}
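
The machinery deleted above deferred the actual FBC enable behind 50 ms of delayed work, so page flips cease and a vblank passes before the control registers are touched; the revert goes back to enabling synchronously. A minimal sketch of that deferral pattern using the stock workqueue API (struct fields are illustrative):

```c
#include <linux/workqueue.h>
#include <linux/slab.h>

struct fbc_work {
	struct delayed_work work;
	struct drm_crtc *crtc;		/* re-validated in the callback */
	unsigned long interval;
};

static void fbc_work_fn(struct work_struct *w)
{
	struct fbc_work *fw = container_of(to_delayed_work(w),
					   struct fbc_work, work);

	/* re-check that the crtc/fb pairing is still current, then enable */
	kfree(fw);
}

static void schedule_fbc_enable(struct fbc_work *fw)
{
	INIT_DELAYED_WORK(&fw->work, fbc_work_fn);
	schedule_delayed_work(&fw->work, msecs_to_jiffies(50));
}
```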
/**
@@ -1869,7 +1691,6 @@ static void intel_update_fbc(struct drm_device *dev)
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
- int enable_fbc;
DRM_DEBUG_KMS("\n");
@@ -1910,15 +1731,8 @@ static void intel_update_fbc(struct drm_device *dev)
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- enable_fbc = i915_enable_fbc;
- if (enable_fbc < 0) {
- DRM_DEBUG_KMS("fbc set to per-chip default\n");
- enable_fbc = 1;
- if (INTEL_INFO(dev)->gen <= 6)
- enable_fbc = 0;
- }
- if (!enable_fbc) {
- DRM_DEBUG_KMS("fbc disabled per module param\n");
+ if (!i915_enable_fbc) {
+ DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
@@ -1946,13 +1760,8 @@ static void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
-
- /* The use of a CPU fence is mandatory in order to detect writes
- * by the CPU to the scanout and trigger updates to the FBC.
- */
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ if (obj->tiling_mode != I915_TILING_X) {
+ DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
}
@@ -1961,44 +1770,6 @@ static void intel_update_fbc(struct drm_device *dev)
if (in_dbg_master())
goto out_disable;
- /* If the scanout has not changed, don't modify the FBC settings.
- * Note that we make the fundamental assumption that the fb->obj
- * cannot be unpinned (and have its GTT offset and fence revoked)
- * without first being decoupled from the scanout and FBC disabled.
- */
- if (dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_fb == fb->base.id &&
- dev_priv->cfb_y == crtc->y)
- return;
-
- if (intel_fbc_enabled(dev)) {
- /* We update FBC along two paths, after changing fb/crtc
- * configuration (modeswitching) and after page-flipping
- * finishes. For the latter, we know that not only did
- * we disable the FBC at the start of the page-flip
- * sequence, but also more than one vblank has passed.
- *
- * For the former case of modeswitching, it is possible
- * to switch between two FBC valid configurations
- * instantaneously so we do need to disable the FBC
- * before we can modify its control registers. We also
- * have to wait for the next vblank for that to take
- * effect. However, since we delay enabling FBC we can
- * assume that a vblank has passed since disabling and
- * that we can safely alter the registers in the deferred
- * callback.
- *
- * In the scenario that we go from a valid to invalid
- * and then back to valid FBC configuration we have
- * no strict enforcement that a vblank occurred since
- * disabling the FBC. However, along all current pipe
- * disabling paths we do need to wait for a vblank at
- * some point. And we wait before enabling FBC anyway.
- */
- DRM_DEBUG_KMS("disabling active FBC for update\n");
- intel_disable_fbc(dev);
- }
-
intel_enable_fbc(crtc, 500);
return;
@@ -2041,10 +1812,14 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
}
dev_priv->mm.interruptible = false;
- ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+ ret = i915_gem_object_pin(obj, alignment, true);
if (ret)
goto err_interruptible;
+ ret = i915_gem_object_set_to_display_plane(obj, pipelined);
+ if (ret)
+ goto err_unpin;
+
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always install
@@ -2054,8 +1829,6 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
ret = i915_gem_object_get_fence(obj, pipelined);
if (ret)
goto err_unpin;
-
- i915_gem_object_pin_fence(obj);
}
dev_priv->mm.interruptible = true;
@@ -2068,14 +1841,10 @@ err_interruptible:
return ret;
}
-void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_unpin_fence(obj);
- i915_gem_object_unpin(obj);
-}
-
-static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y)
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2118,7 +1887,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
- DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ DRM_ERROR("Unknown color depth\n");
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -2128,14 +1897,18 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr &= ~DISPPLANE_TILED;
}
+ if (HAS_PCH_SPLIT(dev))
+ /* must disable */
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
I915_WRITE(reg, dspcntr);
Start = obj->gtt_offset;
- Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitches[0]);
- I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+ Start, Offset, x, y, fb->pitch);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitch);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
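
The `Start`/`Offset` writes above are plain linear addressing: the surface base comes from the object's GTT offset, and the panning offset is rows times pitch plus columns times bytes per pixel. As a sketch:

```c
/* Byte offset of pixel (x, y) in a linear framebuffer. */
static unsigned long fb_byte_offset(int x, int y, unsigned int pitch,
				    unsigned int bits_per_pixel)
{
	return (unsigned long)y * pitch + x * (bits_per_pixel / 8);
}
```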
@@ -2144,100 +1917,6 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(DSPADDR(plane), Start + Offset);
POSTING_READ(reg);
- return 0;
-}
-
-static int ironlake_update_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
- int plane = intel_crtc->plane;
- unsigned long Start, Offset;
- u32 dspcntr;
- u32 reg;
-
- switch (plane) {
- case 0:
- case 1:
- case 2:
- break;
- default:
- DRM_ERROR("Can't update plane %d in SAREA\n", plane);
- return -EINVAL;
- }
-
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
- reg = DSPCNTR(plane);
- dspcntr = I915_READ(reg);
- /* Mask out pixel format bits in case we change it */
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (fb->bits_per_pixel) {
- case 8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case 16:
- if (fb->depth != 16)
- return -EINVAL;
-
- dspcntr |= DISPPLANE_16BPP;
- break;
- case 24:
- case 32:
- if (fb->depth == 24)
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- else if (fb->depth == 30)
- dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
- else
- return -EINVAL;
- break;
- default:
- DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
- return -EINVAL;
- }
-
- if (obj->tiling_mode != I915_TILING_NONE)
- dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
-
- /* must disable */
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
- I915_WRITE(reg, dspcntr);
-
- Start = obj->gtt_offset;
- Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
-
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitches[0]);
- I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- I915_WRITE(DSPSURF(plane), Start);
- I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPADDR(plane), Offset);
- POSTING_READ(reg);
-
- return 0;
-}
-
-/* Assume fb object is pinned & idle & fenced and just update base pointers */
-static int
-intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = dev_priv->display.update_plane(crtc, fb, x, y);
- if (ret)
- return ret;
-
intel_update_fbc(dev);
intel_increase_pllclock(crtc);
@@ -2255,7 +1934,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
/* no fb bound */
if (!crtc->fb) {
- DRM_ERROR("No FB bound\n");
+ DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -2263,12 +1942,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
case 0:
case 1:
break;
- case 2:
- if (IS_IVYBRIDGE(dev))
- break;
- /* fall through otherwise */
default:
- DRM_ERROR("no plane for crtc\n");
return -EINVAL;
}
@@ -2278,7 +1952,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
NULL);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
- DRM_ERROR("pin & fence failed\n");
return ret;
}
@@ -2298,22 +1971,21 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- ret = i915_gem_object_finish_gpu(obj);
+ ret = i915_gem_object_flush_gpu(obj);
(void) ret;
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
LEAVE_ATOMIC_MODE_SET);
if (ret) {
- intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
- DRM_ERROR("failed to update base address\n");
return ret;
}
if (old_fb) {
intel_wait_for_vblank(dev, intel_crtc->pipe);
- intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
}
mutex_unlock(&dev->struct_mutex);
@@ -2414,18 +2086,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 flags = I915_READ(SOUTH_CHICKEN1);
-
- flags |= FDI_PHASE_SYNC_OVR(pipe);
- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
- flags |= FDI_PHASE_SYNC_EN(pipe);
- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
- POSTING_READ(SOUTH_CHICKEN1);
-}
-
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
@@ -2523,7 +2183,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
}
-static const int snb_b_fdi_train_param[] = {
+static const int snb_b_fdi_train_param [] = {
FDI_LINK_TRAIN_400MV_0DB_SNB_B,
FDI_LINK_TRAIN_400MV_6DB_SNB_B,
FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -2576,10 +2236,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (HAS_PCH_CPT(dev))
- cpt_phase_pointer_enable(dev, pipe);
-
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i++ ) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2628,7 +2285,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i++ ) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2697,10 +2354,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (HAS_PCH_CPT(dev))
- cpt_phase_pointer_enable(dev, pipe);
-
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i++ ) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2742,7 +2396,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i++ ) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2809,17 +2463,6 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
}
}
-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 flags = I915_READ(SOUTH_CHICKEN1);
-
- flags &= ~(FDI_PHASE_SYNC_EN(pipe));
- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
- flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
- POSTING_READ(SOUTH_CHICKEN1);
-}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2849,8 +2492,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_CHICKEN(pipe),
I915_READ(FDI_RX_CHICKEN(pipe) &
~FDI_RX_PHASE_SYNC_POINTER_EN));
- } else if (HAS_PCH_CPT(dev)) {
- cpt_phase_pointer_disable(dev, pipe);
}
/* still set train pattern 1 */
@@ -2951,7 +2592,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 reg, temp, transc_sel;
+ u32 reg, temp;
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
@@ -2959,21 +2600,12 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_enable_pch_pll(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
- transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
- TRANSC_DPLLB_SEL;
-
/* Be sure PCH DPLL SEL is set */
temp = I915_READ(PCH_DPLL_SEL);
- if (pipe == 0) {
- temp &= ~(TRANSA_DPLLB_SEL);
+ if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
- } else if (pipe == 1) {
- temp &= ~(TRANSB_DPLLB_SEL);
+ else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
- } else if (pipe == 2) {
- temp &= ~(TRANSC_DPLLB_SEL);
- temp |= (TRANSC_DPLL_ENABLE | transc_sel);
- }
I915_WRITE(PCH_DPLL_SEL, temp);
}
@@ -2986,15 +2618,12 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
- I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
- u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@ -3002,7 +2631,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
TRANS_DP_BPC_MASK);
temp |= (TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING);
- temp |= bpc << 9; /* same format but at 11:9 */
+ temp |= TRANS_DP_8BPC;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -3031,24 +2660,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_enable_transcoder(dev_priv, pipe);
}
-void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
- u32 temp;
-
- temp = I915_READ(dslreg);
- udelay(500);
- if (wait_for(I915_READ(dslreg) != temp, 5)) {
- /* Without this, mode sets may fail silently on FDI */
- I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
- udelay(250);
- I915_WRITE(tc2reg, 0);
- if (wait_for(I915_READ(dslreg) != temp, 5))
- DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
- }
-}
-
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3131,8 +2742,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_disable_plane(dev_priv, plane, pipe);
- if (dev_priv->cfb_plane == plane)
- intel_disable_fbc(dev);
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
intel_disable_pipe(dev_priv, pipe);
@@ -3163,13 +2775,13 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
temp = I915_READ(PCH_DPLL_SEL);
switch (pipe) {
case 0:
- temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
break;
case 1:
temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
break;
case 2:
- /* C shares PLL A or B */
+ /* FIXME: manage transcoder PLLs? */
temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
break;
default:
@@ -3179,8 +2791,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
}
/* disable PCH DPLL */
- if (!intel_crtc->no_pll)
- intel_disable_pch_pll(dev_priv, pipe);
+ intel_disable_pch_pll(dev_priv, pipe);
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
@@ -3298,8 +2909,9 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
- if (dev_priv->cfb_plane == plane)
- intel_disable_fbc(dev);
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
@@ -3384,12 +2996,10 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
- assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
if (crtc->fb) {
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
}
}
@@ -3422,25 +3032,18 @@ static void ironlake_crtc_commit(struct drm_crtc *crtc)
ironlake_crtc_enable(crtc);
}
-void intel_encoder_prepare(struct drm_encoder *encoder)
+void intel_encoder_prepare (struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
/* lvds has its own version of prepare see intel_lvds_prepare */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
-void intel_encoder_commit(struct drm_encoder *encoder)
+void intel_encoder_commit (struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- struct drm_device *dev = encoder->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
-
/* lvds has its own version of commit see intel_lvds_commit */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-
- if (HAS_PCH_CPT(dev))
- intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
void intel_encoder_destroy(struct drm_encoder *encoder)
@@ -3463,8 +3066,11 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
return false;
}
- /* All interlaced capable intel hw wants timings in frames. */
- drm_mode_set_crtcinfo(adjusted_mode, 0);
+ /* XXX some encoders set the crtcinfo, others don't.
+ * Obviously we need some form of conflict resolution here...
+ */
+ if (adjusted_mode->crtc_htotal == 0)
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
@@ -4579,11 +4185,10 @@ static void ironlake_update_wm(struct drm_device *dev)
*/
}
-void sandybridge_update_wm(struct drm_device *dev)
+static void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
- u32 val;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
@@ -4592,10 +4197,8 @@ void sandybridge_update_wm(struct drm_device *dev)
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEA_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEA_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
@@ -4606,32 +4209,14 @@ void sandybridge_update_wm(struct drm_device *dev)
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEB_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEB_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 2;
}
- /* IVB has 3 pipes */
- if (IS_IVYBRIDGE(dev) &&
- g4x_compute_wm0(dev, 2,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEC_IVB);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEC_IVB, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 3;
- }
-
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
@@ -4646,8 +4231,7 @@ void sandybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- if (!single_plane_enabled(enabled) ||
- dev_priv->sprite_scaling_enabled)
+ if (!single_plane_enabled(enabled))
return;
enabled = ffs(enabled) - 1;
@@ -4697,161 +4281,6 @@ void sandybridge_update_wm(struct drm_device *dev)
cursor_wm);
}
-static bool
-sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int display_latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- int clock;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
- *sprite_wm = display->guard_size;
- return false;
- }
-
- clock = crtc->mode.clock;
-
- /* Use the small buffer method to calculate the sprite watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size -
- sprite_width * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
- if (*sprite_wm > (int)display->max_wm)
- *sprite_wm = display->max_wm;
-
- return true;
-}
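The helper removed above implements the small-buffer method: bytes fetched during the latency window, rounded up to whole cachelines, plus a guard. A stripped-down sketch (the tlb_miss correction and max_wm clamp are omitted; DIV_ROUND_UP is the usual kernel macro):

static int small_buffer_wm(int clock_khz, int cpp, int latency_ns,
			   int cacheline, int guard)
{
	/* bytes the display fetches while riding out the memory latency */
	int bytes = ((clock_khz * cpp / 1000) * latency_ns) / 1000;

	return DIV_ROUND_UP(bytes, cacheline) + guard;
}

With illustrative numbers - a 148500 kHz clock, 4 bytes per pixel, 700 ns latency, 64-byte cachelines and a guard of 2 - this works out to DIV_ROUND_UP(415, 64) + 2 = 9 entries.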
-
-static bool
-sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- unsigned long line_time_us;
- int clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *sprite_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- clock = crtc->mode.clock;
- if (!clock) {
- *sprite_wm = 0;
- return false;
- }
-
- line_time_us = (sprite_width * 1000) / clock;
- if (!line_time_us) {
- *sprite_wm = 0;
- return false;
- }
-
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = sprite_width * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
-
- return *sprite_wm > 0x3ff ? false : true;
-}
-
-static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
- uint32_t sprite_width, int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
- u32 val;
- int sprite_wm, reg;
- int ret;
-
- switch (pipe) {
- case 0:
- reg = WM0_PIPEA_ILK;
- break;
- case 1:
- reg = WM0_PIPEB_ILK;
- break;
- case 2:
- reg = WM0_PIPEC_IVB;
- break;
- default:
- return; /* bad pipe */
- }
-
- ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
- &sandybridge_display_wm_info,
- latency, &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
- pipe);
- return;
- }
-
- val = I915_READ(reg);
- val &= ~WM0_PIPE_SPRITE_MASK;
- I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
- DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
-
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM1_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM1S_LP_ILK, sprite_wm);
-
- /* Only IVB has two more LP watermarks for sprite */
- if (!IS_IVYBRIDGE(dev))
- return;
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM2_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM2S_LP_IVB, sprite_wm);
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM3_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM3S_LP_IVB, sprite_wm);
-}
-
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -4892,239 +4321,12 @@ static void intel_update_watermarks(struct drm_device *dev)
dev_priv->display.update_wm(dev);
}
-void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
- uint32_t sprite_width, int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.update_sprite_wm)
- dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
- pixel_size);
-}
-
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
- if (i915_panel_use_ssc >= 0)
- return i915_panel_use_ssc != 0;
- return dev_priv->lvds_use_ssc
+ return dev_priv->lvds_use_ssc && i915_panel_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
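The removed lines treat i915_panel_use_ssc as a tri-state module parameter (negative means auto-detect from the VBT), whereas the restored one-liner demotes it to a plain boolean AND. The tri-state semantics, restated as an illustrative standalone function:

static bool panel_use_ssc_tristate(int param, bool vbt_wants_ssc, bool ssc_quirk)
{
	if (param >= 0)				/* explicit user override */
		return param != 0;
	return vbt_wants_ssc && !ssc_quirk;	/* auto: follow the VBT unless quirked off */
}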
-/**
- * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
- * @crtc: CRTC structure
- * @mode: requested mode
- *
- * A pipe may be connected to one or more outputs. Based on the depth of the
- * attached framebuffer, choose a good color depth to use on the pipe.
- *
- * If possible, match the pipe depth to the fb depth. In some cases, this
- * isn't ideal, because the connected output supports a lesser or restricted
- * set of depths. Resolve that here:
- * LVDS typically supports only 6bpc, so clamp down in that case
- * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
- * Displays may support a restricted set as well, check EDID and clamp as
- * appropriate.
- * DP may want to dither down to 6bpc to fit larger modes
- *
- * RETURNS:
- * Dithering requirement (i.e. false if display bpc and pipe bpc match,
- * true if they don't match).
- */
-static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
- unsigned int *pipe_bpp,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- unsigned int display_bpc = UINT_MAX, bpc;
-
- /* Walk the encoders & connectors on this crtc, get min bpc */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-
- if (encoder->crtc != crtc)
- continue;
-
- if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
- unsigned int lvds_bpc;
-
- if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
- LVDS_A3_POWER_UP)
- lvds_bpc = 8;
- else
- lvds_bpc = 6;
-
- if (lvds_bpc < display_bpc) {
- DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
- display_bpc = lvds_bpc;
- }
- continue;
- }
-
- if (intel_encoder->type == INTEL_OUTPUT_EDP) {
- /* Use VBT settings if we have an eDP panel */
- unsigned int edp_bpc = dev_priv->edp.bpp / 3;
-
- if (edp_bpc < display_bpc) {
- DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
- display_bpc = edp_bpc;
- }
- continue;
- }
-
- /* Not one of the known troublemakers, check the EDID */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- head) {
- if (connector->encoder != encoder)
- continue;
-
- /* Don't use an invalid EDID bpc value */
- if (connector->display_info.bpc &&
- connector->display_info.bpc < display_bpc) {
- DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
- display_bpc = connector->display_info.bpc;
- }
- }
-
- /*
- * HDMI is either 12 or 8, so if the display lets 10bpc sneak
- * through, clamp it down. (Note: >12bpc will be caught below.)
- */
- if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
- if (display_bpc > 8 && display_bpc < 12) {
- DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
- display_bpc = 12;
- } else {
- DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
- display_bpc = 8;
- }
- }
- }
-
- if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
- DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
- display_bpc = 6;
- }
-
- /*
- * We could just drive the pipe at the highest bpc all the time and
- * enable dithering as needed, but that costs bandwidth. So choose
- * the minimum value that expresses the full color range of the fb but
- * also stays within the max display bpc discovered above.
- */
-
- switch (crtc->fb->depth) {
- case 8:
- bpc = 8; /* since we go through a colormap */
- break;
- case 15:
- case 16:
- bpc = 6; /* min is 18bpp */
- break;
- case 24:
- bpc = 8;
- break;
- case 30:
- bpc = 10;
- break;
- case 48:
- bpc = 12;
- break;
- default:
- DRM_DEBUG("unsupported depth, assuming 24 bits\n");
- bpc = min((unsigned int)8, display_bpc);
- break;
- }
-
- display_bpc = min(display_bpc, bpc);
-
- DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
- bpc, display_bpc);
-
- *pipe_bpp = display_bpc * 3;
-
- return display_bpc != bpc;
-}
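The chooser removed above clamps the pipe depth to the minimum of what the attached sinks report and what the framebuffer depth actually needs. Its depth-to-bpc mapping, restated as an illustrative helper (the default case additionally caps at the sink bpc, omitted here):

static unsigned int fb_depth_to_bpc(unsigned int depth)
{
	switch (depth) {
	case 8:		/* paletted, expanded through the gamma LUT */
	case 24:
		return 8;
	case 15:
	case 16:
		return 6;	/* 18bpp is the floor */
	case 30:
		return 10;
	case 48:
		return 12;
	default:
		return 8;	/* unsupported depth, assume 24bpp */
	}
}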
-
-static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int refclk;
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- refclk = dev_priv->lvds_ssc_freq * 1000;
- DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
- refclk / 1000);
- } else if (!IS_GEN2(dev)) {
- refclk = 96000;
- } else {
- refclk = 48000;
- }
-
- return refclk;
-}
-
-static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
- intel_clock_t *clock)
-{
- /* SDVO TV has fixed PLL values depend on its clock range,
- this mirrors vbios setting. */
- if (adjusted_mode->clock >= 100000
- && adjusted_mode->clock < 140500) {
- clock->p1 = 2;
- clock->p2 = 10;
- clock->n = 3;
- clock->m1 = 16;
- clock->m2 = 8;
- } else if (adjusted_mode->clock >= 140500
- && adjusted_mode->clock <= 200000) {
- clock->p1 = 1;
- clock->p2 = 10;
- clock->n = 6;
- clock->m1 = 12;
- clock->m2 = 8;
- }
-}
-
-static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
- intel_clock_t *clock,
- intel_clock_t *reduced_clock)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- u32 fp, fp2 = 0;
-
- if (IS_PINEVIEW(dev)) {
- fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
- if (reduced_clock)
- fp2 = (1 << reduced_clock->n) << 16 |
- reduced_clock->m1 << 8 | reduced_clock->m2;
- } else {
- fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
- if (reduced_clock)
- fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
- reduced_clock->m2;
- }
-
- I915_WRITE(FP0(pipe), fp);
-
- intel_crtc->lowfreq_avail = false;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
- reduced_clock && i915_powersave) {
- I915_WRITE(FP1(pipe), fp2);
- intel_crtc->lowfreq_avail = true;
- } else {
- I915_WRITE(FP1(pipe), fp);
- }
-}
-
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -5138,7 +4340,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll, dspcntr, pipeconf, vsyncshift;
+ u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -5179,7 +4381,15 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- refclk = i9xx_get_refclk(crtc, num_connectors);
+ if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ refclk = dev_priv->lvds_ssc_freq * 1000;
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ refclk / 1000);
+ } else if (!IS_GEN2(dev)) {
+ refclk = 96000;
+ } else {
+ refclk = 48000;
+ }
/*
* Returns a set of divisors for the desired target clock with the given
@@ -5187,8 +4397,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
- &clock);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
@@ -5198,24 +4407,53 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
- /*
- * Ensure we match the reduced clock's P to the target clock.
- * If the clocks don't match, we can't switch the display clock
- * by using the FP0/FP1. In such case we will disable the LVDS
- * downclock feature.
- */
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
- &clock,
&reduced_clock);
+ if (has_reduced_clock && (clock.p != reduced_clock.p)) {
+ /*
+ * If the different P is found, it means that we can't
+ * switch the display clock by using the FP0/FP1.
+ * In such case we will disable the LVDS downclock
+ * feature.
+ */
+ DRM_DEBUG_KMS("Different P is found for "
+ "LVDS clock/downclock\n");
+ has_reduced_clock = 0;
+ }
+ }
+ /* SDVO TV has fixed PLL values that depend on its clock range,
+ this mirrors the VBIOS setting. */
+ if (is_sdvo && is_tv) {
+ if (adjusted_mode->clock >= 100000
+ && adjusted_mode->clock < 140500) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 3;
+ clock.m1 = 16;
+ clock.m2 = 8;
+ } else if (adjusted_mode->clock >= 140500
+ && adjusted_mode->clock <= 200000) {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 6;
+ clock.m1 = 12;
+ clock.m2 = 8;
+ }
}
- if (is_sdvo && is_tv)
- i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
-
- i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
- &reduced_clock : NULL);
+ if (IS_PINEVIEW(dev)) {
+ fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = (1 << reduced_clock.n) << 16 |
+ reduced_clock.m1 << 8 | reduced_clock.m2;
+ } else {
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+ }
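The FP0/FP1 encoding restored above packs the dividers as n << 16 | m1 << 8 | m2, with (1 << n) substituted for n on Pineview. Plugging in the SDVO-TV divisors from the hunk just above (n=3, m1=16, m2=8) gives 0x00031008; as a sketch:

static u32 fp_encode(u32 n, u32 m1, u32 m2)
{
	return n << 16 | m1 << 8 | m2;	/* 3, 16, 8 -> 0x00031008 */
}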
dpll = DPLL_VGA_MODE_DIS;
@@ -5289,6 +4527,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
+ /* Ironlake's plane is forced to pipe, bit 24 is to
+ enable color space conversion */
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
@@ -5308,21 +4548,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
- /* default to 8bpc */
- pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
- if (is_dp) {
- if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
- pipeconf |= PIPECONF_BPP_6 |
- PIPECONF_DITHER_EN |
- PIPECONF_DITHER_TYPE_SP;
- }
- }
-
dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
+ I915_WRITE(FP0(pipe), fp);
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
@@ -5409,32 +4640,33 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DPLL(pipe), dpll);
}
- if (HAS_PIPE_CXSR(dev)) {
- if (intel_crtc->lowfreq_avail) {
+ intel_crtc->lowfreq_avail = false;
+ if (is_lvds && has_reduced_clock && i915_powersave) {
+ I915_WRITE(FP1(pipe), fp2);
+ intel_crtc->lowfreq_avail = true;
+ if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
- } else {
+ }
+ } else {
+ I915_WRITE(FP1(pipe), fp);
+ if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
- pipeconf &= ~PIPECONF_INTERLACE_MASK;
- if (!IS_GEN2(dev) &&
- adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vdisplay -= 1;
adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_start -= 1;
adjusted_mode->crtc_vblank_end -= 1;
- vsyncshift = adjusted_mode->crtc_hsync_start
- - adjusted_mode->crtc_htotal/2;
- } else {
- pipeconf |= PIPECONF_PROGRESSIVE;
- vsyncshift = 0;
- }
-
- if (!IS_GEN3(dev))
- I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
+ adjusted_mode->crtc_vsync_end -= 1;
+ adjusted_mode->crtc_vsync_start -= 1;
+ } else
+ pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
@@ -5483,156 +4715,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
-/*
- * Initialize reference clocks when the driver loads
- */
-void ironlake_init_pch_refclk(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
- u32 temp;
- bool has_lvds = false;
- bool has_cpu_edp = false;
- bool has_pch_edp = false;
- bool has_panel = false;
- bool has_ck505 = false;
- bool can_ssc = false;
-
- /* We need to take the global config into account */
- list_for_each_entry(encoder, &mode_config->encoder_list,
- base.head) {
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- has_panel = true;
- has_lvds = true;
- break;
- case INTEL_OUTPUT_EDP:
- has_panel = true;
- if (intel_encoder_is_pch_edp(&encoder->base))
- has_pch_edp = true;
- else
- has_cpu_edp = true;
- break;
- }
- }
-
- if (HAS_PCH_IBX(dev)) {
- has_ck505 = dev_priv->display_clock_mode;
- can_ssc = has_ck505;
- } else {
- has_ck505 = false;
- can_ssc = true;
- }
-
- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
- has_panel, has_lvds, has_pch_edp, has_cpu_edp,
- has_ck505);
-
- /* Ironlake: try to setup display ref clock before DPLL
- * enabling. This is only under driver's control after
- * PCH B stepping, previous chipset stepping should be
- * ignoring this setting.
- */
- temp = I915_READ(PCH_DREF_CONTROL);
- /* Always enable nonspread source */
- temp &= ~DREF_NONSPREAD_SOURCE_MASK;
-
- if (has_ck505)
- temp |= DREF_NONSPREAD_CK505_ENABLE;
- else
- temp |= DREF_NONSPREAD_SOURCE_ENABLE;
-
- if (has_panel) {
- temp &= ~DREF_SSC_SOURCE_MASK;
- temp |= DREF_SSC_SOURCE_ENABLE;
-
- /* SSC must be turned on before enabling the CPU output */
- if (intel_panel_use_ssc(dev_priv) && can_ssc) {
- DRM_DEBUG_KMS("Using SSC on panel\n");
- temp |= DREF_SSC1_ENABLE;
- }
-
- /* Get SSC going before enabling the outputs */
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
-
- temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-
- /* Enable CPU source on CPU attached eDP */
- if (has_cpu_edp) {
- if (intel_panel_use_ssc(dev_priv) && can_ssc) {
- DRM_DEBUG_KMS("Using SSC on eDP\n");
- temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- }
- else
- temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- } else
- temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
-
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
- } else {
- DRM_DEBUG_KMS("Disabling SSC entirely\n");
-
- temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-
- /* Turn off CPU output */
- temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
-
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
-
- /* Turn off the SSC source */
- temp &= ~DREF_SSC_SOURCE_MASK;
- temp |= DREF_SSC_SOURCE_DISABLE;
-
- /* Turn off SSC1 */
- temp &= ~ DREF_SSC1_ENABLE;
-
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
- }
-}
-
-static int ironlake_get_refclk(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *edp_encoder = NULL;
- int num_connectors = 0;
- bool is_lvds = false;
-
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
-
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- case INTEL_OUTPUT_EDP:
- edp_encoder = encoder;
- break;
- }
- num_connectors++;
- }
-
- if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
- dev_priv->lvds_ssc_freq);
- return dev_priv->lvds_ssc_freq * 1000;
- }
-
- return 120000;
-}
-
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -5657,9 +4739,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct fdi_m_n m_n = {0};
u32 temp;
u32 lvds_sync = 0;
- int target_clock, pixel_multiplier, lane, link_bw, factor;
- unsigned int pipe_bpp;
- bool dither;
+ int target_clock, pixel_multiplier, lane, link_bw, bpp, factor;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
@@ -5692,7 +4772,16 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- refclk = ironlake_get_refclk(crtc);
+ if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ refclk = dev_priv->lvds_ssc_freq * 1000;
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ refclk / 1000);
+ } else {
+ refclk = 96000;
+ if (!has_edp_encoder ||
+ intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ refclk = 120000; /* 120Mhz refclk */
+ }
/*
* Returns a set of divisors for the desired target clock with the given
@@ -5700,8 +4789,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
- &clock);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
@@ -5711,17 +4799,21 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
- /*
- * Ensure we match the reduced clock's P to the target clock.
- * If the clocks don't match, we can't switch the display clock
- * by using the FP0/FP1. In such case we will disable the LVDS
- * downclock feature.
- */
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
- &clock,
&reduced_clock);
+ if (has_reduced_clock && (clock.p != reduced_clock.p)) {
+ /*
+ * If the different P is found, it means that we can't
+ * switch the display clock by using the FP0/FP1.
+ * In such case we will disable the LVDS downclock
+ * feature.
+ */
+ DRM_DEBUG_KMS("Different P is found for "
+ "LVDS clock/downclock\n");
+ has_reduced_clock = 0;
+ }
}
/* SDVO TV has fixed PLL values that depend on its clock range,
this mirrors the VBIOS setting. */
@@ -5774,38 +4866,56 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
- switch (pipe_bpp) {
- case 18:
- temp |= PIPE_6BPC;
- break;
- case 24:
+ if (is_lvds) {
+ /* the BPC will be 6 if it is an 18-bit LVDS panel */
+ if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ temp |= PIPE_8BPC;
+ else
+ temp |= PIPE_6BPC;
+ } else if (has_edp_encoder) {
+ switch (dev_priv->edp.bpp/3) {
+ case 8:
+ temp |= PIPE_8BPC;
+ break;
+ case 10:
+ temp |= PIPE_10BPC;
+ break;
+ case 6:
+ temp |= PIPE_6BPC;
+ break;
+ case 12:
+ temp |= PIPE_12BPC;
+ break;
+ }
+ } else
temp |= PIPE_8BPC;
+ I915_WRITE(PIPECONF(pipe), temp);
+
+ switch (temp & PIPE_BPC_MASK) {
+ case PIPE_8BPC:
+ bpp = 24;
break;
- case 30:
- temp |= PIPE_10BPC;
+ case PIPE_10BPC:
+ bpp = 30;
break;
- case 36:
- temp |= PIPE_12BPC;
+ case PIPE_6BPC:
+ bpp = 18;
break;
- default:
- WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
- pipe_bpp);
- temp |= PIPE_8BPC;
- pipe_bpp = 24;
+ case PIPE_12BPC:
+ bpp = 36;
break;
+ default:
+ DRM_ERROR("unknown pipe bpc value\n");
+ bpp = 24;
}
- intel_crtc->bpp = pipe_bpp;
- I915_WRITE(PIPECONF(pipe), temp);
-
if (!lane) {
/*
* Account for spread spectrum to avoid
* oversubscribing the link. Max center spread
* is 2.5%; use 5% for safety's sake.
*/
- u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
+ u32 bps = target_clock * bpp * 21 / 20;
lane = bps / (link_bw * 8) + 1;
}
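The lane computation above adds 5% headroom (21/20, twice the maximum 2.5% center spread) and divides by the per-lane payload. Assuming target_clock and link_bw are both in kHz, an illustrative 148500 kHz mode at 24 bpp needs 148500 * 24 * 21 / 20 = 3742200 kbit/s; against a 270000 kHz link carrying 8 data bits per clock (2160000 kbit/s per lane) the +1 rounding lands on 2 lanes:

static int fdi_lane_count(int target_clock_khz, int bpp, int link_bw_khz)
{
	u32 bps = target_clock_khz * bpp * 21 / 20;	/* +5% spread headroom */

	return bps / (link_bw_khz * 8) + 1;
}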
@@ -5813,8 +4923,51 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (pixel_multiplier > 1)
link_bw *= pixel_multiplier;
- ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
- &m_n);
+ ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
+
+ /* Ironlake: try to setup display ref clock before DPLL
+ * enabling. This is only under driver's control after
+ * PCH B stepping, previous chipset stepping should be
+ * ignoring this setting.
+ */
+ temp = I915_READ(PCH_DREF_CONTROL);
+ /* Always enable nonspread source */
+ temp &= ~DREF_NONSPREAD_SOURCE_MASK;
+ temp |= DREF_NONSPREAD_SOURCE_ENABLE;
+ temp &= ~DREF_SSC_SOURCE_MASK;
+ temp |= DREF_SSC_SOURCE_ENABLE;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+
+ if (has_edp_encoder) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ temp |= DREF_SSC1_ENABLE;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
+ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Enable CPU source on CPU attached eDP */
+ if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (intel_panel_use_ssc(dev_priv))
+ temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ else
+ temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else {
+ /* Enable SSC on PCH eDP if needed */
+ if (intel_panel_use_ssc(dev_priv)) {
+ DRM_ERROR("enabling SSC on PCH\n");
+ temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
+ }
+ }
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
@@ -5887,32 +5040,39 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
- DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+ DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
/* PCH eDP needs FDI, but CPU eDP does not */
- if (!intel_crtc->no_pll) {
- if (!has_edp_encoder ||
- intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- I915_WRITE(PCH_FP0(pipe), fp);
- I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ I915_WRITE(PCH_FP0(pipe), fp);
+ I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
- POSTING_READ(PCH_DPLL(pipe));
- udelay(150);
- }
- } else {
- if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(0))) {
- intel_crtc->use_pll_a = true;
- DRM_DEBUG_KMS("using pipe a dpll\n");
- } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(1))) {
- intel_crtc->use_pll_a = false;
- DRM_DEBUG_KMS("using pipe b dpll\n");
- } else {
- DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
- return -EINVAL;
+ POSTING_READ(PCH_DPLL(pipe));
+ udelay(150);
+ }
+
+ /* enable transcoder DPLL */
+ if (HAS_PCH_CPT(dev)) {
+ temp = I915_READ(PCH_DPLL_SEL);
+ switch (pipe) {
+ case 0:
+ temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
+ break;
+ case 1:
+ temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
+ break;
+ case 2:
+ /* FIXME: manage transcoder PLLs? */
+ temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
+ break;
+ default:
+ BUG();
}
+ I915_WRITE(PCH_DPLL_SEL, temp);
+
+ POSTING_READ(PCH_DPLL_SEL);
+ udelay(150);
}
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -5922,16 +5082,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds) {
temp = I915_READ(PCH_LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev)) {
- temp &= ~PORT_TRANS_SEL_MASK;
- temp |= PORT_TRANS_SEL_CPT(pipe);
- } else {
- if (pipe == 1)
+ if (pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+ temp |= PORT_TRANS_B_SEL_CPT;
+ else
temp |= LVDS_PIPEB_SELECT;
+ } else {
+ if (HAS_PCH_CPT(dev))
+ temp &= ~PORT_TRANS_SEL_MASK;
else
temp &= ~LVDS_PIPEB_SELECT;
}
-
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -5965,12 +5126,14 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PCH_LVDS, temp);
}
+ /* set the dithering flag for panels (LVDS/eDP) and clear it otherwise. */
pipeconf &= ~PIPECONF_DITHER_EN;
pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
- if ((is_lvds && dev_priv->lvds_dither) || dither) {
+ if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
pipeconf |= PIPECONF_DITHER_EN;
- pipeconf |= PIPECONF_DITHER_TYPE_SP;
+ pipeconf |= PIPECONF_DITHER_TYPE_ST1;
}
+
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
@@ -5981,9 +5144,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
- if (!intel_crtc->no_pll &&
- (!has_edp_encoder ||
- intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
+ if (!has_edp_encoder ||
+ intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
I915_WRITE(PCH_DPLL(pipe), dpll);
/* Wait for the clocks to stabilize. */
@@ -5999,36 +5161,32 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
intel_crtc->lowfreq_avail = false;
- if (!intel_crtc->no_pll) {
- if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(PCH_FP1(pipe), fp2);
- intel_crtc->lowfreq_avail = true;
- if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG_KMS("enabling CxSR downclocking\n");
- pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
- }
- } else {
- I915_WRITE(PCH_FP1(pipe), fp);
- if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG_KMS("disabling CxSR downclocking\n");
- pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
- }
+ if (is_lvds && has_reduced_clock && i915_powersave) {
+ I915_WRITE(PCH_FP1(pipe), fp2);
+ intel_crtc->lowfreq_avail = true;
+ if (HAS_PIPE_CXSR(dev)) {
+ DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+ pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+ }
+ } else {
+ I915_WRITE(PCH_FP1(pipe), fp);
+ if (HAS_PIPE_CXSR(dev)) {
+ DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+ pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
- pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- pipeconf |= PIPECONF_INTERLACED_ILK;
+ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vdisplay -= 1;
adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_start -= 1;
adjusted_mode->crtc_vblank_end -= 1;
- I915_WRITE(VSYNCSHIFT(pipe),
- adjusted_mode->crtc_hsync_start
- - adjusted_mode->crtc_htotal/2);
- } else {
- pipeconf |= PIPECONF_PROGRESSIVE;
- I915_WRITE(VSYNCSHIFT(pipe), 0);
- }
+ adjusted_mode->crtc_vsync_end -= 1;
+ adjusted_mode->crtc_vsync_start -= 1;
+ } else
+ pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
@@ -6071,6 +5229,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_wait_for_vblank(dev, pipe);
+ if (IS_GEN5(dev)) {
+ /* enable address swizzle for tiling buffer */
+ temp = I915_READ(DISP_ARB_CTL);
+ I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
+ }
+
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
@@ -6097,188 +5261,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
x, y, old_fb);
- drm_vblank_post_modeset(dev, pipe);
- if (ret)
- intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
- else
- intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+ drm_vblank_post_modeset(dev, pipe);
return ret;
}
-static bool intel_eld_uptodate(struct drm_connector *connector,
- int reg_eldv, uint32_t bits_eldv,
- int reg_elda, uint32_t bits_elda,
- int reg_edid)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t i;
-
- i = I915_READ(reg_eldv);
- i &= bits_eldv;
-
- if (!eld[0])
- return !i;
-
- if (!i)
- return false;
-
- i = I915_READ(reg_elda);
- i &= ~bits_elda;
- I915_WRITE(reg_elda, i);
-
- for (i = 0; i < eld[2]; i++)
- if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
- return false;
-
- return true;
-}
-
-static void g4x_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t len;
- uint32_t i;
-
- i = I915_READ(G4X_AUD_VID_DID);
-
- if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
- eldv = G4X_ELDV_DEVCL_DEVBLC;
- else
- eldv = G4X_ELDV_DEVCTG;
-
- if (intel_eld_uptodate(connector,
- G4X_AUD_CNTL_ST, eldv,
- G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
- G4X_HDMIW_HDMIEDID))
- return;
-
- i = I915_READ(G4X_AUD_CNTL_ST);
- i &= ~(eldv | G4X_ELD_ADDR);
- len = (i >> 9) & 0x1f; /* ELD buffer size */
- I915_WRITE(G4X_AUD_CNTL_ST, i);
-
- if (!eld[0])
- return;
-
- len = min_t(uint8_t, eld[2], len);
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
- for (i = 0; i < len; i++)
- I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
-
- i = I915_READ(G4X_AUD_CNTL_ST);
- i |= eldv;
- I915_WRITE(G4X_AUD_CNTL_ST, i);
-}
-
-static void ironlake_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t i;
- int len;
- int hdmiw_hdmiedid;
- int aud_config;
- int aud_cntl_st;
- int aud_cntrl_st2;
-
- if (HAS_PCH_IBX(connector->dev)) {
- hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
- aud_config = IBX_AUD_CONFIG_A;
- aud_cntl_st = IBX_AUD_CNTL_ST_A;
- aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else {
- hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
- aud_config = CPT_AUD_CONFIG_A;
- aud_cntl_st = CPT_AUD_CNTL_ST_A;
- aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
- }
-
- i = to_intel_crtc(crtc)->pipe;
- hdmiw_hdmiedid += i * 0x100;
- aud_cntl_st += i * 0x100;
- aud_config += i * 0x100;
-
- DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
-
- i = I915_READ(aud_cntl_st);
- i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
- if (!i) {
- DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
- /* operate blindly on all ports */
- eldv = IBX_ELD_VALIDB;
- eldv |= IBX_ELD_VALIDB << 4;
- eldv |= IBX_ELD_VALIDB << 8;
- } else {
- DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
- eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
- }
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
- eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
- } else
- I915_WRITE(aud_config, 0);
-
- if (intel_eld_uptodate(connector,
- aud_cntrl_st2, eldv,
- aud_cntl_st, IBX_ELD_ADDRESS,
- hdmiw_hdmiedid))
- return;
-
- i = I915_READ(aud_cntrl_st2);
- i &= ~eldv;
- I915_WRITE(aud_cntrl_st2, i);
-
- if (!eld[0])
- return;
-
- i = I915_READ(aud_cntl_st);
- i &= ~IBX_ELD_ADDRESS;
- I915_WRITE(aud_cntl_st, i);
-
- len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
- for (i = 0; i < len; i++)
- I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
-
- i = I915_READ(aud_cntrl_st2);
- i |= eldv;
- I915_WRITE(aud_cntrl_st2, i);
-}
-
-void intel_write_eld(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_crtc *crtc = encoder->crtc;
- struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- connector = drm_select_eld(encoder, mode);
- if (!connector)
- return;
-
- DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id,
- drm_get_connector_name(connector),
- connector->encoder->base.id,
- drm_get_encoder_name(connector->encoder));
-
- connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
-
- if (dev_priv->display.write_eld)
- dev_priv->display.write_eld(connector, crtc);
-}
-
/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
{
@@ -6495,15 +5483,21 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_locked;
}
- ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret) {
- DRM_ERROR("failed to move cursor bo into the GTT\n");
+ DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked;
}
+ ret = i915_gem_object_set_to_gtt_domain(obj, 0);
+ if (ret) {
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
+ goto fail_unpin;
+ }
+
ret = i915_gem_object_put_fence(obj);
if (ret) {
- DRM_ERROR("failed to release fence for cursor");
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
goto fail_unpin;
}
@@ -6622,7 +5616,7 @@ static struct drm_display_mode load_detect_mode = {
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_mode_fb_cmd *mode_cmd,
struct drm_i915_gem_object *obj)
{
struct intel_framebuffer *intel_fb;
@@ -6664,7 +5658,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd mode_cmd;
obj = i915_gem_alloc_object(dev,
intel_framebuffer_size_for_mode(mode, bpp));
@@ -6673,9 +5667,9 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
mode_cmd.width = mode->hdisplay;
mode_cmd.height = mode->vdisplay;
- mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
- bpp);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+ mode_cmd.depth = depth;
+ mode_cmd.bpp = bpp;
+ mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
return intel_framebuffer_create(dev, &mode_cmd, obj);
}
@@ -6696,11 +5690,11 @@ mode_fits_in_fbdev(struct drm_device *dev,
return NULL;
fb = &dev_priv->fbdev->ifb.base;
- if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
- fb->bits_per_pixel))
+ if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
+ fb->bits_per_pixel))
return NULL;
- if (obj->base.size < mode->vdisplay * fb->pitches[0])
+ if (obj->base.size < mode->vdisplay * fb->pitch)
return NULL;
return fb;
@@ -7032,7 +6026,9 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
DRM_DEBUG_DRIVER("upclocking LVDS\n");
- assert_panel_unlocked(dev_priv, pipe);
+ /* Unlock panel regs */
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
@@ -7041,6 +6037,9 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
+
+ /* ...and lock them again */
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
/* Schedule downclock */
@@ -7070,7 +6069,9 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
DRM_DEBUG_DRIVER("downclocking LVDS\n");
- assert_panel_unlocked(dev_priv, pipe);
+ /* Unlock panel regs */
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+ PANEL_UNLOCK_REGS);
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
@@ -7078,6 +6079,9 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
+
+ /* ...and lock them again */
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
}
@@ -7192,11 +6196,10 @@ static void intel_unpin_work_fn(struct work_struct *__work)
container_of(__work, struct intel_unpin_work, work);
mutex_lock(&work->dev->struct_mutex);
- intel_unpin_fb_obj(work->old_fb_obj);
+ i915_gem_object_unpin(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
- intel_update_fbc(work->dev);
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -7323,7 +6326,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
goto out;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
ret = BEGIN_LP_RING(6);
if (ret)
@@ -7340,9 +6343,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
+ OUT_RING(fb->pitch);
OUT_RING(obj->gtt_offset + offset);
- OUT_RING(0); /* aux display base address, unused */
+ OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
out:
return ret;
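The offset above is plain linear-framebuffer arithmetic: row times pitch plus column times bytes per pixel. For an illustrative 32 bpp scanout with a 7680-byte pitch, a CRTC origin of x=8, y=4 begins 4 * 7680 + 8 * 4 = 30752 bytes into the buffer:

static u32 fb_linear_offset(int x, int y, u32 pitch, int bits_per_pixel)
{
	return y * pitch + x * bits_per_pixel / 8;
}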
@@ -7364,7 +6367,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
goto out;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
ret = BEGIN_LP_RING(6);
if (ret)
@@ -7378,7 +6381,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
+ OUT_RING(fb->pitch);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
@@ -7411,7 +6414,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
+ OUT_RING(fb->pitch);
OUT_RING(obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -7446,7 +6449,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0] | obj->tiling_mode);
+ OUT_RING(fb->pitch | obj->tiling_mode);
OUT_RING(obj->gtt_offset);
pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
@@ -7482,7 +6485,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
goto out;
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
- intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
+ intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
intel_ring_emit(ring, (obj->gtt_offset));
intel_ring_emit(ring, (MI_NOOP));
intel_ring_advance(ring);
@@ -7558,7 +6561,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_pending;
- intel_disable_fbc(dev);
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -7696,11 +6698,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc_reset(&intel_crtc->base);
intel_crtc->active = true; /* force the pipe off on setup_init_config */
- intel_crtc->bpp = 24; /* default for pre-Ironlake */
if (HAS_PCH_SPLIT(dev)) {
- if (pipe == 2 && IS_IVYBRIDGE(dev))
- intel_crtc->no_pll = true;
intel_helper_funcs.prepare = ironlake_crtc_prepare;
intel_helper_funcs.commit = ironlake_crtc_commit;
} else {
@@ -7780,9 +6779,10 @@ static void intel_setup_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
- bool has_lvds;
+ bool has_lvds = false;
- has_lvds = intel_lvds_init(dev);
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ has_lvds = intel_lvds_init(dev);
if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
/* disable the panel fitter on everything but LVDS */
I915_WRITE(PFIT_CONTROL, 0);
@@ -7877,11 +6877,10 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_encoder_clones(dev, encoder->clone_mask);
}
+ intel_panel_setup_backlight(dev);
+
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
-
- if (HAS_PCH_SPLIT(dev))
- ironlake_init_pch_refclk(dev);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -7911,7 +6910,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
- struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_mode_fb_cmd *mode_cmd,
struct drm_i915_gem_object *obj)
{
int ret;
@@ -7919,26 +6918,16 @@ int intel_framebuffer_init(struct drm_device *dev,
if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
- if (mode_cmd->pitches[0] & 63)
+ if (mode_cmd->pitch & 63)
return -EINVAL;
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- /* RGB formats are common across chipsets */
- break;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_VYUY:
+ switch (mode_cmd->bpp) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
break;
default:
- DRM_DEBUG_KMS("unsupported pixel format %u\n",
- mode_cmd->pixel_format);
return -EINVAL;
}
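The restored validation keys on bytes per pixel and requires the pitch to be 64-byte aligned (pitch & 63 must be zero). A 1920-wide 32 bpp buffer (pitch 7680) passes; a 1366-wide one (pitch 5464) fails and would need padding out to 5504. An illustrative check:

static bool legacy_fb_pitch_ok(u32 width, u32 bytes_per_pixel)
{
	u32 pitch = width * bytes_per_pixel;

	return (pitch & 63) == 0;	/* multiple of 64 bytes */
}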
@@ -7956,12 +6945,11 @@ int intel_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
- struct drm_mode_fb_cmd2 *mode_cmd)
+ struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_i915_gem_object *obj;
- obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
- mode_cmd->handles[0]));
+ obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
@@ -8132,10 +7120,6 @@ void gen6_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, 0);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (PMIMR) to mask PM interrupts. The only risk is in leaving
- * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
spin_lock_irq(&dev_priv->rps_lock);
dev_priv->pm_iir = 0;
@@ -8230,37 +7214,11 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
-static bool intel_enable_rc6(struct drm_device *dev)
-{
- /*
- * Respect the kernel parameter if it is set
- */
- if (i915_enable_rc6 >= 0)
- return i915_enable_rc6;
-
- /*
- * Disable RC6 on Ironlake
- */
- if (INTEL_INFO(dev)->gen == 5)
- return 0;
-
- /*
- * Disable rc6 on Sandybridge
- */
- if (INTEL_INFO(dev)->gen == 6) {
- DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
- return 0;
- }
- DRM_DEBUG_DRIVER("RC6 enabled\n");
- return 1;
-}
-
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 pcu_mbox, rc6_mask = 0;
- u32 gtfifodbg;
int cur_freq, min_freq, max_freq;
int i;
@@ -8272,13 +7230,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN6_RC_STATE, 0);
mutex_lock(&dev_priv->dev->struct_mutex);
-
- /* Clear the DBG now so we don't confuse earlier errors */
- if ((gtfifodbg = I915_READ(GTFIFODBG))) {
- DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
gen6_gt_force_wake_get(dev_priv);
/* disable the counters and set deterministic thresholds */
@@ -8299,9 +7250,9 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- if (intel_enable_rc6(dev_priv->dev))
- rc6_mask = GEN6_RC_CTL_RC6_ENABLE |
- ((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0);
+ if (i915_enable_rc6)
+ rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE;
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
@@ -8326,7 +7277,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_MODE |
+ GEN6_RP_USE_NORMAL_FREQ |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
@@ -8387,59 +7338,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->dev->struct_mutex);
}
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
- int min_freq = 15;
- int gpu_freq, ia_freq, max_ia_freq;
- int scaling_factor = 180;
-
- max_ia_freq = cpufreq_quick_get_max(0);
- /*
- * Default to measured freq if none found, PCU will ensure we don't go
- * over
- */
- if (!max_ia_freq)
- max_ia_freq = tsc_khz;
-
- /* Convert from kHz to MHz */
- max_ia_freq /= 1000;
-
- mutex_lock(&dev_priv->dev->struct_mutex);
-
- /*
- * For each potential GPU frequency, load a ring frequency we'd like
- * to use for memory access. We do this by specifying the IA frequency
- * the PCU should use as a reference to determine the ring frequency.
- */
- for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
- gpu_freq--) {
- int diff = dev_priv->max_delay - gpu_freq;
-
- /*
- * For GPU frequencies less than 750MHz, just use the lowest
- * ring freq.
- */
- if (gpu_freq < min_freq)
- ia_freq = 800;
- else
- ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
- ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
-
- I915_WRITE(GEN6_PCODE_DATA,
- (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
- gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode write of freq table timed out\n");
- continue;
- }
- }
-
- mutex_unlock(&dev_priv->dev->struct_mutex);
-}
-
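The dropped loop maps each GPU frequency step to an IA reference as max_ia_freq - diff * 180 / 2 (with a fixed 800 below the 750 MHz floor), then rounds into the 100 MHz units the PCU mailbox expects. With an illustrative 3400 MHz max IA clock, the top GPU step (diff = 0) writes DIV_ROUND_CLOSEST(3400, 100) = 34:

static int ring_freq_entry(int max_ia_freq_mhz, int diff_from_max)
{
	int ia_freq = max_ia_freq_mhz - (diff_from_max * 180) / 2;

	return DIV_ROUND_CLOSEST(ia_freq, 100);	/* PCU mailbox units */
}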
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8563,12 +7461,10 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
- for_each_pipe(pipe) {
+ for_each_pipe(pipe)
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_display_plane(dev_priv, pipe);
- }
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -8621,8 +7517,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_display_plane(dev_priv, pipe);
- }
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -8786,7 +7680,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
- if (!intel_enable_rc6(dev))
+ if (!i915_enable_rc6)
return;
mutex_lock(&dev->struct_mutex);
@@ -8856,11 +7750,9 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
- dev_priv->display.update_plane = ironlake_update_plane;
} else {
dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
- dev_priv->display.update_plane = i9xx_update_plane;
}
if (I915_HAS_FBC(dev)) {
@@ -8881,7 +7773,7 @@ static void intel_init_display(struct drm_device *dev)
}
/* Returns the core display clock speed */
- if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
@@ -8905,34 +7797,6 @@ static void intel_init_display(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
-
- /* IVB configs may use multi-threaded forcewake */
- if (IS_IVYBRIDGE(dev)) {
- u32 ecobus;
-
- /* A small trick here - if the bios hasn't configured MT forcewake,
- * and if the device is in RC6, then force_wake_mt_get will not wake
- * the device and the ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT forcewake being
- * disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- DRM_DEBUG_KMS("Using MT version of forcewake\n");
- dev_priv->display.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->display.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- }
- }
-
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
@@ -8948,11 +7812,9 @@ static void intel_init_display(struct drm_device *dev)
}
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
- dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_GEN6(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
@@ -8960,20 +7822,18 @@ static void intel_init_display(struct drm_device *dev)
}
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
- dev_priv->display.write_eld = ironlake_write_eld;
+
} else
dev_priv->display.update_wm = NULL;
} else if (IS_PINEVIEW(dev)) {
@@ -8984,7 +7844,7 @@ static void intel_init_display(struct drm_device *dev)
DRM_INFO("failed to find known CxSR latency "
"(found ddr%s fsb freq %d, mem freq %d), "
"disabling CxSR\n",
- (dev_priv->is_ddr3 == 1) ? "3" : "2",
+			 (dev_priv->is_ddr3 == 1) ? "3" : "2",
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
pineview_disable_cxsr(dev);
@@ -8993,7 +7853,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_wm = pineview_update_wm;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_G4X(dev)) {
- dev_priv->display.write_eld = g4x_write_eld;
dev_priv->display.update_wm = g4x_update_wm;
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
} else if (IS_GEN4(dev)) {
@@ -9054,7 +7913,7 @@ static void intel_init_display(struct drm_device *dev)
* resume, or other times. This quirk makes sure that's the case for
* affected systems.
*/
-static void quirk_pipea_force(struct drm_device *dev)
+static void quirk_pipea_force(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9079,8 +7938,10 @@ struct intel_quirk {
};
struct intel_quirk intel_quirks[] = {
+ /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
+ { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
/* HP Mini needs pipe A force quirk (LP: #322104) */
- { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
+	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
/* Thinkpad R31 needs pipe A force quirk */
{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
@@ -9100,9 +7961,6 @@ struct intel_quirk intel_quirks[] = {
/* Lenovo U160 cannot use SSC on LVDS */
{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
-
- /* Sony Vaio Y cannot use SSC on LVDS */
- { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -9148,16 +8006,13 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i, ret;
+ int i;
drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- dev->mode_config.preferred_depth = 24;
- dev->mode_config.prefer_shadow = 1;
-
dev->mode_config.funcs = (void *)&intel_mode_funcs;
intel_init_quirks(dev);
@@ -9181,9 +8036,6 @@ void intel_modeset_init(struct drm_device *dev)
for (i = 0; i < dev_priv->num_pipe; i++) {
intel_crtc_init(dev, i);
- ret = intel_plane_init(dev, i);
- if (ret)
- DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
/* Just disable it once at startup */
@@ -9197,10 +8049,8 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_emon(dev);
}
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_GEN6(dev) || IS_GEN7(dev))
gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
@@ -9236,7 +8086,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_increase_pllclock(crtc);
}
- intel_disable_fbc(dev);
+ if (dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
@@ -9252,10 +8103,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
* enqueue unpin/hotplug work. */
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
- cancel_work_sync(&dev_priv->rps_work);
-
- /* flush any delayed tasks or pending work */
- flush_scheduled_work();
/* Shut off idle work before the crtcs get freed. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -9338,7 +8185,7 @@ struct intel_display_error_state {
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
int i;
@@ -9354,7 +8201,7 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
error->plane[i].size = I915_READ(DSPSIZE(i));
- error->plane[i].pos = I915_READ(DSPPOS(i));
+		error->plane[i].pos = I915_READ(DSPPOS(i));
error->plane[i].addr = I915_READ(DSPADDR(i));
if (INTEL_INFO(dev)->gen >= 4) {
error->plane[i].surface = I915_READ(DSPSURF(i));
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 993ec13..57b3728 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -36,7 +36,7 @@
#include "i915_drv.h"
#include "drm_dp_helper.h"
-#define DP_RECEIVER_CAP_SIZE 0xf
+
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
@@ -48,24 +48,16 @@ struct intel_dp {
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
- enum hdmi_force_audio force_audio;
+ int force_audio;
uint32_t color_range;
- int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
- uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+ uint8_t dpcd[4];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
- int panel_power_up_delay;
- int panel_power_down_delay;
- int panel_power_cycle_delay;
- int backlight_on_delay;
- int backlight_off_delay;
- struct drm_display_mode *panel_fixed_mode; /* for eDP */
- struct delayed_work panel_vdd_work;
- bool want_panel_vdd;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
};
/**
@@ -93,17 +85,6 @@ static bool is_pch_edp(struct intel_dp *intel_dp)
return intel_dp->is_pch_edp;
}
-/**
- * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
- * @intel_dp: DP struct
- *
- * Returns true if the given DP struct corresponds to a CPU eDP port.
- */
-static bool is_cpu_edp(struct intel_dp *intel_dp)
-{
- return is_edp(intel_dp) && !is_pch_edp(intel_dp);
-}
-
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dp, base.base);
@@ -139,7 +120,7 @@ static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);
void
-intel_edp_link_config(struct intel_encoder *intel_encoder,
+intel_edp_link_config(struct intel_encoder *intel_encoder,
int *lane_num, int *link_bw)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
@@ -154,12 +135,16 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
- int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
- switch (max_lane_count) {
- case 1: case 2: case 4:
- break;
- default:
- max_lane_count = 4;
+ int max_lane_count = 4;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+ max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+ switch (max_lane_count) {
+ case 1: case 2: case 4:
+ break;
+ default:
+ max_lane_count = 4;
+ }
}
return max_lane_count;
}
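
The restored lane-count logic only trusts DP_MAX_LANE_COUNT once the sink reports DPCD 1.1 or newer, and clamps anything unexpected back to 4 lanes. A standalone sketch of that clamping; the raw DPCD offsets and the sample bytes are spelled out here for illustration:

	#include <stdint.h>
	#include <stdio.h>

	static int max_lane_count(const uint8_t *dpcd)
	{
		int lanes = 4;                  /* default when DPCD is too old */

		if (dpcd[0] >= 0x11) {          /* DP_DPCD_REV >= 1.1 */
			lanes = dpcd[2] & 0x1f; /* DP_MAX_LANE_COUNT field */
			if (lanes != 1 && lanes != 2 && lanes != 4)
				lanes = 4;      /* unknown value: be permissive */
		}
		return lanes;
	}

	int main(void)
	{
		/* rev 1.1, 2.7GHz, 4 lanes + enhanced framing (illustrative) */
		uint8_t dpcd[4] = { 0x11, 0x0a, 0x84, 0x01 };

		printf("max lanes: %d\n", max_lane_count(dpcd));
		return 0;
	}
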
@@ -189,27 +174,16 @@ intel_dp_link_clock(uint8_t link_bw)
return 162000;
}
-/*
- * The units on the numbers in the next two are... bizarre. Examples will
- * make it clearer; this one parallels an example in the eDP spec.
- *
- * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
- *
- * 270000 * 1 * 8 / 10 == 216000
- *
- * The actual data capacity of that configuration is 2.16Gbit/s, so the
- * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
- * or equivalently, kilopixels per second - so for 1680x1050R it'd be
- * 119000. At 18bpp that's 2142000 kilobits per second.
- *
- * Thus the strange-looking division by 10 in intel_dp_link_required, to
- * get the result in decakilobits instead of kilobits.
- */
-
+/* Rough bandwidth estimate; the units here are not rigorous DP link math */
static int
-intel_dp_link_required(int pixel_clock, int bpp)
+intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
{
- return (pixel_clock * bpp + 9) / 10;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (is_edp(intel_dp))
+ return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
+ else
+ return pixel_clock * 3;
}
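
The deleted comment block above can be checked numerically: one 2.7GHz lane carries 216000 "decakilobits" per second after 8b/10b coding, while a 1680x1050R mode needs 285600 at 24bpp but only 214200 at 18bpp, which is why the removed code could fall back to 6bpc. A small sketch reproducing those figures (all numbers come from the removed comment):

	#include <stdio.h>

	int main(void)
	{
		/* one 2.7GHz lane: 270000 kHz symbol clock, 8b/10b coding */
		int max_rate = 270000 * 1 * 8 / 10;   /* 216000 decakilobits/s */

		/* 1680x1050R: 119000 kHz pixel clock */
		int pixel_clock = 119000;

		/* removed formula: bpp-based, rounded up, in decakilobits */
		int need_24bpp = (pixel_clock * 24 + 9) / 10;
		int need_18bpp = (pixel_clock * 18 + 9) / 10;

		printf("capacity %d, need %d @24bpp, %d @18bpp\n",
		       max_rate, need_24bpp, need_18bpp);
		/* 216000 vs 285600/214200: only the 18bpp config fits one lane */
		return 0;
	}
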
static int
@@ -223,28 +197,25 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
- int max_rate, mode_rate;
- if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
- if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+ if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
+ if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+ if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
- mode_rate = intel_dp_link_required(mode->clock, 24);
- max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
-
- if (mode_rate > max_rate) {
- mode_rate = intel_dp_link_required(mode->clock, 18);
- if (mode_rate > max_rate)
- return MODE_CLOCK_HIGH;
- else
- mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
- }
+	/* Only refuse the mode on non-eDP outputs: some eDP panels are outside
+	   spec tolerances yet still work, so don't reject them on bandwidth */
+ if (!is_edp(intel_dp) &&
+ (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
+ > intel_dp_max_data_rate(max_link_clock, max_lanes)))
+ return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
@@ -305,38 +276,6 @@ intel_hrawclk(struct drm_device *dev)
}
}
-static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
-}
-
-static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
-}
-
-static void
-intel_dp_check_edp(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!is_edp(intel_dp))
- return;
- if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
- WARN(1, "eDP powered off while attempting aux channel communication.\n");
- DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
- I915_READ(PCH_PP_STATUS),
- I915_READ(PCH_PP_CONTROL));
- }
-}
-
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
@@ -351,9 +290,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
- int try, precharge = 5;
+ int try, precharge;
- intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
@@ -361,27 +299,24 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* Note that PCH attached eDP panels should use a 125MHz input
* clock divider.
*/
- if (is_cpu_edp(intel_dp)) {
- if (IS_GEN6(dev) || IS_GEN7(dev))
- aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
+ if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
+ if (IS_GEN6(dev))
+			aux_clock_divider = 200; /* SNB eDP input clock at 400MHz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
- aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
+		aux_clock_divider = 62; /* ILK input clock fixed at 125MHz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
- /* Try to wait for any previous AUX channel activity */
- for (try = 0; try < 3; try++) {
- status = I915_READ(ch_ctl);
- if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- break;
- msleep(1);
- }
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
- if (try == 3) {
- WARN(1, "dp_aux_ch not started status 0x%08x\n",
- I915_READ(ch_ctl));
+ if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+ DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+ I915_READ(ch_ctl));
return -EBUSY;
}
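
All of the divider choices above aim the AUX channel at roughly 2MHz: divide the input clock, in MHz, by two (with the PCH value restored to 62 here). A hedged sketch of the selection logic; the platform checks are reduced to plain booleans for illustration:

	#include <stdio.h>
	#include <stdbool.h>

	static int aux_clock_divider(bool cpu_edp, bool gen6, bool pch_split,
				     int hrawclk_mhz)
	{
		if (cpu_edp)
			return gen6 ? 200 : 225; /* 400MHz or 450MHz input / 2MHz */
		if (pch_split)
			return 62;               /* 125MHz PCH input clock */
		return hrawclk_mhz / 2;          /* raw clock elsewhere */
	}

	int main(void)
	{
		printf("SNB eDP: %d\n", aux_clock_divider(true, true, false, 0));
		printf("PCH port: %d\n", aux_clock_divider(false, false, true, 0));
		printf("G4x, 200MHz hrawclk: %d\n",
		       aux_clock_divider(false, false, false, 200));
		return 0;
	}
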
@@ -391,7 +326,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
for (i = 0; i < send_bytes; i += 4)
I915_WRITE(ch_data + i,
pack_aux(send + i, send_bytes - i));
-
+
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY |
@@ -408,17 +343,13 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
break;
udelay(100);
}
-
+
/* Clear done status and any errors */
I915_WRITE(ch_ctl,
status |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
-
- if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR))
- continue;
if (status & DP_AUX_CH_CTL_DONE)
break;
}
@@ -448,7 +379,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
if (recv_bytes > recv_size)
recv_bytes = recv_size;
-
+
for (i = 0; i < recv_bytes; i += 4)
unpack_aux(I915_READ(ch_data + i),
recv + i, recv_bytes - i);
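
The 4-byte strides in the send and receive loops rely on the pack_aux()/unpack_aux() helpers defined near the top of this file, which marshal bytes MSB-first through the 32-bit data registers. A self-contained reconstruction of that packing, offered as a sketch rather than the authoritative helpers:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t pack_aux(const uint8_t *src, int src_bytes)
	{
		uint32_t v = 0;

		if (src_bytes > 4)
			src_bytes = 4;
		for (int i = 0; i < src_bytes; i++)
			v |= (uint32_t)src[i] << ((3 - i) * 8); /* MSB first */
		return v;
	}

	static void unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
	{
		if (dst_bytes > 4)
			dst_bytes = 4;
		for (int i = 0; i < dst_bytes; i++)
			dst[i] = src >> ((3 - i) * 8);
	}

	int main(void)
	{
		uint8_t msg[3] = { 0x10, 0x02, 0xff }, out[3];
		uint32_t packed = pack_aux(msg, 3);

		unpack_aux(packed, out, 3);
		printf("packed 0x%08x, round-trip %02x %02x %02x\n",
		       packed, out[0], out[1], out[2]);
		return 0;
	}
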
@@ -466,7 +397,6 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
int msg_bytes;
uint8_t ack;
- intel_dp_check_edp(intel_dp);
if (send_bytes > 16)
return -1;
msg[0] = AUX_NATIVE_WRITE << 4;
@@ -509,7 +439,6 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
uint8_t ack;
int ret;
- intel_dp_check_edp(intel_dp);
msg[0] = AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
@@ -553,7 +482,6 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
int reply_bytes;
int ret;
- intel_dp_check_edp(intel_dp);
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[0] = AUX_I2C_READ << 4;
@@ -645,32 +573,24 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
return -EREMOTEIO;
}
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
{
- int ret;
-
DRM_DEBUG_KMS("i2c_init %s\n", name);
intel_dp->algo.running = false;
intel_dp->algo.address = 0;
intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
- memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
+	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
intel_dp->adapter.owner = THIS_MODULE;
intel_dp->adapter.class = I2C_CLASS_DDC;
- strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
- ironlake_edp_panel_vdd_on(intel_dp);
- ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
- return ret;
+ return i2c_dp_aux_add_bus(&intel_dp->adapter);
}
static bool
@@ -678,29 +598,29 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
- int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
- intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
+ if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
+ intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
/*
* the mode->clock is used to calculate the Data&Link M/N
* of the pipe. For the eDP the fixed clock should be used.
*/
- mode->clock = intel_dp->panel_fixed_mode->clock;
+ mode->clock = dev_priv->panel_fixed_mode->clock;
}
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(mode->clock, bpp)
+ if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
@@ -714,6 +634,19 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
+ if (is_edp(intel_dp)) {
+ /* okay we failed just pick the highest */
+ intel_dp->lane_count = max_lane_count;
+ intel_dp->link_bw = bws[max_clock];
+ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+ "count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+
+ return true;
+ }
+
return false;
}
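
The nested loop in intel_dp_mode_fixup() is a lowest-first search: 1, 2, then 4 lanes, each tried at 1.62GHz and then 2.7GHz, taking the first combination with enough capacity. A standalone sketch of that search; note the restored pixel_clock * 3 requirement mixes units with the 8b/10b capacity figure, which is what the rewritten comment above intel_dp_link_required alludes to. Sample numbers are illustrative:

	#include <stdio.h>

	static int max_data_rate(int link_clock, int lanes)
	{
		return link_clock * lanes * 8 / 10;   /* 8b/10b overhead */
	}

	int main(void)
	{
		int bws[2] = { 162000, 270000 };      /* 1.62GHz and 2.7GHz rates */
		int required = 3 * 119000;            /* non-eDP: pixel_clock * 3 */

		for (int lanes = 1; lanes <= 4; lanes <<= 1) {
			for (int clock = 0; clock < 2; clock++) {
				if (required <= max_data_rate(bws[clock], lanes)) {
					printf("picked %d lane(s) at %d\n",
					       lanes, bws[clock]);
					return 0;
				}
			}
		}
		printf("no config fits\n");
		return 0;
	}
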
@@ -759,7 +692,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int lane_count = 4;
+ int lane_count = 4, bpp = 24;
struct intel_dp_m_n m_n;
int pipe = intel_crtc->pipe;
@@ -773,11 +706,13 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
continue;
intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_dp->base.type == INTEL_OUTPUT_EDP)
- {
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = intel_dp->lane_count;
break;
+ } else if (is_edp(intel_dp)) {
+ lane_count = dev_priv->edp.lanes;
+ bpp = dev_priv->edp.bpp;
+ break;
}
}
@@ -786,7 +721,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
- intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
+ intel_dp_compute_m_n(bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
if (HAS_PCH_SPLIT(dev)) {
@@ -806,53 +741,27 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
}
}
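
intel_dp_compute_m_n() ultimately expresses the ratio of pixel-data bandwidth to link bandwidth as an M/N fraction for the pipe. A rough illustration of that underlying ratio; the real helper also reduces the fraction to fit the register fields, and the sample values here are invented:

	#include <stdio.h>

	int main(void)
	{
		int bpp = 24, lanes = 2;
		int pixel_clock = 119000;   /* kHz */
		int link_clock = 270000;    /* kHz symbol clock per lane */

		/* data M/N: bytes of pixel data per byte of link capacity */
		long m = (long)pixel_clock * bpp / 8;
		long n = (long)link_clock * lanes;

		printf("data M/N = %ld/%ld (~%.3f)\n", m, n, (double)m / n);
		return 0;
	}
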
-static void ironlake_edp_pll_on(struct drm_encoder *encoder);
-static void ironlake_edp_pll_off(struct drm_encoder *encoder);
-
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- /* Turn on the eDP PLL if needed */
- if (is_edp(intel_dp)) {
- if (!is_pch_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
- else
- ironlake_edp_pll_off(encoder);
- }
-
- /*
- * There are four kinds of DP registers:
- *
- * IBX PCH
- * SNB CPU
- * IVB CPU
- * CPT PCH
- *
- * IBX PCH and CPU are the same for almost everything,
- * except that the CPU DP PLL is configured in this
- * register
- *
- * CPT PCH is quite different, having many bits moved
- * to the TRANS_DP_CTL register instead. That
- * configuration happens (oddly) in ironlake_pch_enable
- */
+ intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ intel_dp->DP |= intel_dp->color_range;
- /* Preserve the BIOS-computed detected bit. This is
- * supposed to be read-only.
- */
- intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
- intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
- /* Handle DP bits in common between all three register formats */
-
- intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+ else
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
switch (intel_dp->lane_count) {
case 1:
@@ -865,297 +774,134 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= DP_PORT_WIDTH_4;
break;
}
- if (intel_dp->has_audio) {
- DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
- pipe_name(intel_crtc->pipe));
+ if (intel_dp->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- intel_write_eld(encoder, adjusted_mode);
- }
+
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
- intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
+
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
(intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
}
- /* Split out the IBX/CPU vs CPT settings */
-
- if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- intel_dp->DP |= DP_SYNC_HS_HIGH;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- intel_dp->DP |= DP_SYNC_VS_HIGH;
- intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
-
- if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
- intel_dp->DP |= DP_ENHANCED_FRAMING;
-
- intel_dp->DP |= intel_crtc->pipe << 29;
+ /* CPT DP's pipe select is decided in TRANS_DP_CTL */
+ if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
+ intel_dp->DP |= DP_PIPEB_SELECT;
+ if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
- } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
- intel_dp->DP |= intel_dp->color_range;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- intel_dp->DP |= DP_SYNC_HS_HIGH;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- intel_dp->DP |= DP_SYNC_VS_HIGH;
- intel_dp->DP |= DP_LINK_TRAIN_OFF;
-
- if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
- intel_dp->DP |= DP_ENHANCED_FRAMING;
-
- if (intel_crtc->pipe == 1)
- intel_dp->DP |= DP_PIPEB_SELECT;
-
- if (is_cpu_edp(intel_dp)) {
- /* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
- if (adjusted_mode->clock < 200000)
- intel_dp->DP |= DP_PLL_FREQ_160MHZ;
- else
- intel_dp->DP |= DP_PLL_FREQ_270MHZ;
- }
- } else {
- intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- }
-}
-
-#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
-#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
-
-#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
-#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
-
-#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
-#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
-
-static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
- u32 mask,
- u32 value)
-{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
- mask, value,
- I915_READ(PCH_PP_STATUS),
- I915_READ(PCH_PP_CONTROL));
-
- if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
- DRM_ERROR("Panel status timeout: status %08x control %08x\n",
- I915_READ(PCH_PP_STATUS),
- I915_READ(PCH_PP_CONTROL));
}
}
-static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
-{
- DRM_DEBUG_KMS("Wait for panel power on\n");
- ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
-}
-
-static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
-{
- DRM_DEBUG_KMS("Wait for panel power off time\n");
- ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
-}
-
-static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
-{
- DRM_DEBUG_KMS("Wait for panel power cycle\n");
- ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
-}
-
-
-/* Read the current pp_control value, unlocking the register if it
- * is locked
- */
-
-static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
-{
- u32 control = I915_READ(PCH_PP_CONTROL);
-
- control &= ~PANEL_UNLOCK_MASK;
- control |= PANEL_UNLOCK_REGS;
- return control;
-}
-
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
- if (!is_edp(intel_dp))
- return;
- DRM_DEBUG_KMS("Turn eDP VDD on\n");
-
- WARN(intel_dp->want_panel_vdd,
- "eDP VDD already requested on\n");
-
- intel_dp->want_panel_vdd = true;
-
- if (ironlake_edp_have_panel_vdd(intel_dp)) {
- DRM_DEBUG_KMS("eDP VDD already on\n");
- return;
- }
-
- if (!ironlake_edp_have_panel_power(intel_dp))
- ironlake_wait_panel_power_cycle(intel_dp);
+ /*
+ * If the panel wasn't on, make sure there's not a currently
+ * active PP sequence before enabling AUX VDD.
+ */
+ if (!(I915_READ(PCH_PP_STATUS) & PP_ON))
+ msleep(dev_priv->panel_t3);
- pp = ironlake_get_pp_control(dev_priv);
+ pp = I915_READ(PCH_PP_CONTROL);
pp |= EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
- DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
- I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
-
- /*
- * If the panel wasn't on, delay before accessing aux channel
- */
- if (!ironlake_edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("eDP was not running\n");
- msleep(intel_dp->panel_power_up_delay);
- }
}
-static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
+static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
- if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
- pp = ironlake_get_pp_control(dev_priv);
- pp &= ~EDP_FORCE_VDD;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
-
- /* Make sure sequencer is idle before allowing subsequent activity */
- DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
- I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
-
- msleep(intel_dp->panel_power_down_delay);
- }
-}
-
-static void ironlake_panel_vdd_work(struct work_struct *__work)
-{
- struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
- struct intel_dp, panel_vdd_work);
- struct drm_device *dev = intel_dp->base.base.dev;
-
- mutex_lock(&dev->mode_config.mutex);
- ironlake_panel_vdd_off_sync(intel_dp);
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
-{
- if (!is_edp(intel_dp))
- return;
-
- DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
- WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
-
- intel_dp->want_panel_vdd = false;
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
- if (sync) {
- ironlake_panel_vdd_off_sync(intel_dp);
- } else {
- /*
- * Queue the timer to fire a long
- * time from now (relative to the power down delay)
- * to keep the panel power up across a sequence of operations
- */
- schedule_delayed_work(&intel_dp->panel_vdd_work,
- msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
- }
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ msleep(dev_priv->panel_t12);
}
-static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+/* Returns true if the panel was already on when called */
+static bool ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp;
-
- if (!is_edp(intel_dp))
- return;
+ u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
- DRM_DEBUG_KMS("Turn eDP power on\n");
+ if (I915_READ(PCH_PP_STATUS) & PP_ON)
+ return true;
- if (ironlake_edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("eDP power already on\n");
- return;
- }
+ pp = I915_READ(PCH_PP_CONTROL);
- ironlake_wait_panel_power_cycle(intel_dp);
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
- pp = ironlake_get_pp_control(dev_priv);
- if (IS_GEN5(dev)) {
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- }
+ pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
- pp |= POWER_TARGET_ON;
- if (!IS_GEN5(dev))
- pp |= PANEL_POWER_RESET;
+ if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
+ 5000))
+ DRM_ERROR("panel on wait timed out: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS));
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
- ironlake_wait_panel_on(intel_dp);
-
- if (IS_GEN5(dev)) {
- pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- }
+ return false;
}
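
Both restored power paths follow the same shape: flip a bit in PCH_PP_CONTROL, then poll PCH_PP_STATUS until the sequencer goes idle, with the Ironlake reset-disable workaround wrapped around the whole sequence. A sketch of the polling core; the register read is stubbed out and the bit encodings are illustrative, not taken from i915_reg.h:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdbool.h>

	#define PP_ON                     (1u << 31)  /* illustrative encoding */
	#define PP_SEQUENCE_STATE_ON_IDLE 0x8         /* illustrative encoding */

	/* stand-in for I915_READ(PCH_PP_STATUS); hypothetical for this sketch */
	static uint32_t read_pp_status(void)
	{
		return PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
	}

	static bool wait_panel_on(int timeout_ms)
	{
		uint32_t mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;

		for (int elapsed = 0; elapsed < timeout_ms; elapsed += 10) {
			if ((read_pp_status() & mask) == mask)
				return true;
			/* msleep(10) in the real driver */
		}
		return false;
	}

	int main(void)
	{
		printf("panel on: %s\n", wait_panel_on(5000) ? "ok" : "timed out");
		return 0;
	}
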
-static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+static void ironlake_edp_panel_off(struct drm_device *dev)
{
- struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp;
+ u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
+ PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
- if (!is_edp(intel_dp))
- return;
-
- DRM_DEBUG_KMS("Turn eDP power off\n");
+ pp = I915_READ(PCH_PP_CONTROL);
- WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
- pp = ironlake_get_pp_control(dev_priv);
- pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
- ironlake_wait_panel_off(intel_dp);
+ if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
+ DRM_ERROR("panel off wait timed out: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS));
+
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
}
-static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+static void ironlake_edp_backlight_on(struct drm_device *dev)
{
- struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
- if (!is_edp(intel_dp))
- return;
-
DRM_DEBUG_KMS("\n");
/*
* If we enable the backlight right away following a panel power
@@ -1163,28 +909,21 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
* link. So delay a bit to make sure the image is solid before
* allowing it to appear.
*/
- msleep(intel_dp->backlight_on_delay);
- pp = ironlake_get_pp_control(dev_priv);
+ msleep(300);
+ pp = I915_READ(PCH_PP_CONTROL);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
}
-static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+static void ironlake_edp_backlight_off(struct drm_device *dev)
{
- struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
- if (!is_edp(intel_dp))
- return;
-
DRM_DEBUG_KMS("\n");
- pp = ironlake_get_pp_control(dev_priv);
+ pp = I915_READ(PCH_PP_CONTROL);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- msleep(intel_dp->backlight_off_delay);
}
static void ironlake_edp_pll_on(struct drm_encoder *encoder)
@@ -1247,39 +986,41 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- ironlake_edp_backlight_off(intel_dp);
- ironlake_edp_panel_off(intel_dp);
+ struct drm_device *dev = encoder->dev;
/* Wake up the sink first */
- ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
- intel_dp_link_down(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, false);
- /* Make sure the panel is off before trying to
- * change the mode
- */
+ if (is_edp(intel_dp)) {
+ ironlake_edp_backlight_off(dev);
+ ironlake_edp_panel_off(dev);
+ if (!is_pch_edp(intel_dp))
+ ironlake_edp_pll_on(encoder);
+ else
+ ironlake_edp_pll_off(encoder);
+ }
+ intel_dp_link_down(intel_dp);
}
static void intel_dp_commit(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
- ironlake_edp_panel_vdd_on(intel_dp);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ if (is_edp(intel_dp))
+ ironlake_edp_panel_vdd_on(intel_dp);
+
intel_dp_start_link_train(intel_dp);
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
- intel_dp_complete_link_train(intel_dp);
- ironlake_edp_backlight_on(intel_dp);
- intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
+ if (is_edp(intel_dp)) {
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp);
+ }
- if (HAS_PCH_CPT(dev))
- intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+ intel_dp_complete_link_train(intel_dp);
+
+ if (is_edp(intel_dp))
+ ironlake_edp_backlight_on(dev);
}
static void
@@ -1291,32 +1032,29 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
- ironlake_edp_backlight_off(intel_dp);
- ironlake_edp_panel_off(intel_dp);
-
- ironlake_edp_panel_vdd_on(intel_dp);
+ if (is_edp(intel_dp))
+ ironlake_edp_backlight_off(dev);
intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, false);
-
- if (is_cpu_edp(intel_dp))
+ if (is_edp(intel_dp))
+ ironlake_edp_panel_off(dev);
+ if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
ironlake_edp_pll_off(encoder);
} else {
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
-
- ironlake_edp_panel_vdd_on(intel_dp);
+ if (is_edp(intel_dp))
+ ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_start_link_train(intel_dp);
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
+ if (is_edp(intel_dp)) {
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp);
+ }
intel_dp_complete_link_train(intel_dp);
- } else
- ironlake_edp_panel_vdd_off(intel_dp, false);
- ironlake_edp_backlight_on(intel_dp);
+ }
+ if (is_edp(intel_dp))
+ ironlake_edp_backlight_on(dev);
}
- intel_dp->dpms_mode = mode;
}
/*
@@ -1349,11 +1087,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
* link status information
*/
static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_dp_get_link_status(struct intel_dp *intel_dp)
{
return intel_dp_aux_native_read_retry(intel_dp,
DP_LANE0_1_STATUS,
- link_status,
+ intel_dp->link_status,
DP_LINK_STATUS_SIZE);
}
@@ -1365,25 +1103,27 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
}
static uint8_t
-intel_get_adjust_request_voltage(uint8_t adjust_request[2],
+intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
- uint8_t l = adjust_request[lane>>1];
+ uint8_t l = intel_dp_link_status(link_status, i);
return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
+intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
- uint8_t l = adjust_request[lane>>1];
+ uint8_t l = intel_dp_link_status(link_status, i);
return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
@@ -1405,63 +1145,34 @@ static char *link_train_names[] = {
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
+#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
static uint8_t
-intel_dp_voltage_max(struct intel_dp *intel_dp)
+intel_dp_pre_emphasis_max(uint8_t voltage_swing)
{
- struct drm_device *dev = intel_dp->base.base.dev;
-
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
- return DP_TRAIN_VOLTAGE_SWING_800;
- else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
- return DP_TRAIN_VOLTAGE_SWING_1200;
- else
- return DP_TRAIN_VOLTAGE_SWING_800;
-}
-
-static uint8_t
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
-{
- struct drm_device *dev = intel_dp->base.base.dev;
-
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
- }
- } else {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
- }
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
}
}
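
Each DPCD ADJUST_REQUEST byte carries requests for two lanes: two bits of voltage swing and two bits of pre-emphasis per lane. A standalone sketch of the decode plus the source-side cap applied by intel_dp_pre_emphasis_max() above; the shift constants follow the DisplayPort register layout and the sample byte is invented:

	#include <stdint.h>
	#include <stdio.h>

	#define VOLTAGE_SHIFT(lane)  (((lane) & 1) ? 4 : 0)
	#define PREEMPH_SHIFT(lane)  (((lane) & 1) ? 6 : 2)

	int main(void)
	{
		/* one ADJUST_REQUEST byte covering lanes 0 and 1 */
		uint8_t adjust = 0x1b;   /* illustrative sink request */

		for (int lane = 0; lane < 2; lane++) {
			int v = (adjust >> VOLTAGE_SHIFT(lane)) & 3;
			int p = (adjust >> PREEMPH_SHIFT(lane)) & 3;

			/* cap as in intel_dp_pre_emphasis_max(): the higher
			 * the swing, the less extra pre-emphasis is allowed */
			int p_max = (v <= 1) ? 2 : (v == 2) ? 1 : 0;

			if (p > p_max)
				p = p_max;
			printf("lane %d: swing level %d, pre-emphasis level %d\n",
			       lane, v, p);
		}
		return 0;
	}
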
static void
-intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_get_adjust_train(struct intel_dp *intel_dp)
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
- uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
- uint8_t voltage_max;
- uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
- uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
+ uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
+ uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
if (this_v > v)
v = this_v;
@@ -1469,20 +1180,18 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
p = this_p;
}
- voltage_max = intel_dp_voltage_max(intel_dp);
- if (v >= voltage_max)
- v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+ if (v >= I830_DP_VOLTAGE_MAX)
+ v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
- preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
- if (p >= preemph_max)
- p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ if (p >= intel_dp_pre_emphasis_max(v))
+ p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}
static uint32_t
-intel_dp_signal_levels(uint8_t train_set)
+intel_dp_signal_levels(uint8_t train_set, int lane_count)
{
uint32_t signal_levels = 0;
@@ -1547,43 +1256,13 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
}
}
-/* Gen7's DP voltage swing and pre-emphasis control */
-static uint32_t
-intel_gen7_edp_signal_levels(uint8_t train_set)
-{
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
- switch (signal_levels) {
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
- return EDP_LINK_TRAIN_400MV_0DB_IVB;
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
- return EDP_LINK_TRAIN_400MV_6DB_IVB;
-
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
- return EDP_LINK_TRAIN_600MV_0DB_IVB;
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
-
- case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
- return EDP_LINK_TRAIN_800MV_0DB_IVB;
- case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
-
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
- "0x%x\n", signal_levels);
- return EDP_LINK_TRAIN_500MV_0DB_IVB;
- }
-}
-
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
int s = (lane & 1) * 4;
- uint8_t l = link_status[lane>>1];
+ uint8_t l = intel_dp_link_status(link_status, i);
return (l >> s) & 0xf;
}
@@ -1608,18 +1287,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_channel_eq_ok(struct intel_dp *intel_dp)
{
uint8_t lane_align;
uint8_t lane_status;
int lane;
- lane_align = intel_dp_link_status(link_status,
+ lane_align = intel_dp_link_status(intel_dp->link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
+ lane_status = intel_get_lane_status(intel_dp->link_status, lane);
if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
return false;
}
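
intel_channel_eq_ok() requires every active lane to report clock recovery, channel EQ and symbol lock, plus the shared interlane-align bit, before training counts as done. A minimal sketch of that check over a raw lane-status buffer; bit positions follow the lane-status layout used above:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdbool.h>

	#define LANE_CR_DONE       (1 << 0)
	#define LANE_EQ_DONE       (1 << 1)
	#define LANE_SYMBOL_LOCKED (1 << 2)
	#define CHANNEL_EQ_BITS    (LANE_CR_DONE | LANE_EQ_DONE | LANE_SYMBOL_LOCKED)

	static bool channel_eq_ok(const uint8_t *lane_status, int lane_count,
				  bool interlane_aligned)
	{
		if (!interlane_aligned)
			return false;
		for (int lane = 0; lane < lane_count; lane++) {
			/* two lanes per byte, low/high nibble */
			uint8_t s = (lane_status[lane >> 1] >> ((lane & 1) * 4)) & 0xf;

			if ((s & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
				return false;
		}
		return true;
	}

	int main(void)
	{
		uint8_t status[2] = { 0x77, 0x77 }; /* all four lanes fully trained */

		printf("eq ok: %d\n", channel_eq_ok(status, 4, true));
		return 0;
	}
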
@@ -1644,9 +1323,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET,
- intel_dp->train_set,
- intel_dp->lane_count);
- if (ret != intel_dp->lane_count)
+ intel_dp->train_set, 4);
+ if (ret != 4)
return false;
return true;
@@ -1662,20 +1340,14 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
int i;
uint8_t voltage;
bool clock_recovery = false;
- int voltage_tries, loop_tries;
+ int tries;
u32 reg;
uint32_t DP = intel_dp->DP;
- /*
- * On CPT we have to enable the port in training pattern 1, which
- * will happen below in intel_dp_set_link_train. Otherwise, enable
- * the port and wait for it to become active.
- */
- if (!HAS_PCH_CPT(dev)) {
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
+ /* Enable output, wait for it to become active */
+ I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ POSTING_READ(intel_dp->output_reg);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1683,53 +1355,40 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
-
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
- voltage_tries = 0;
- loop_tries = 0;
+ tries = 0;
clock_recovery = false;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
- uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
-
-
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
- signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
- } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_GEN6(dev) && is_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
- DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_1 |
- DP_LINK_SCRAMBLING_DISABLE))
+ DP_TRAINING_PATTERN_1))
break;
/* Set training pattern 1 */
udelay(100);
- if (!intel_dp_get_link_status(intel_dp, link_status)) {
- DRM_ERROR("failed to get link status\n");
+ if (!intel_dp_get_link_status(intel_dp))
break;
- }
- if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
- DRM_DEBUG_KMS("clock recovery OK\n");
+ if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
clock_recovery = true;
break;
}
@@ -1738,30 +1397,20 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
- if (i == intel_dp->lane_count) {
- ++loop_tries;
- if (loop_tries == 5) {
- DRM_DEBUG_KMS("too many full retries, give up\n");
- break;
- }
- memset(intel_dp->train_set, 0, 4);
- voltage_tries = 0;
- continue;
- }
+ if (i == intel_dp->lane_count)
+ break;
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
- ++voltage_tries;
- if (voltage_tries == 5) {
- DRM_DEBUG_KMS("too many voltage retries, give up\n");
+ ++tries;
+ if (tries == 5)
break;
- }
} else
- voltage_tries = 0;
+ tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status);
+ intel_get_adjust_train(intel_dp);
}
intel_dp->DP = DP;
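
The restored clock-recovery loop has three exits: success, every lane hitting maximum swing, or the sink requesting the same voltage five times running. A condensed sketch of that control flow with the AUX traffic and delays elided and a faked sink that converges after two swing bumps; everything about the fake sink is invented for illustration:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdbool.h>
	#include <string.h>

	#define MAX_SWING_REACHED (1 << 2)   /* illustrative flag */

	/* fake sink: happy once the swing reaches level 2 (illustrative) */
	static bool clock_recovery_ok(uint8_t swing) { return swing >= 2; }

	int main(void)
	{
		uint8_t train_set[4];
		uint8_t voltage = 0xff;
		int tries = 0;

		memset(train_set, 0, 4);
		for (;;) {
			/* "write pattern 1 + train_set, read link status" */
			if (clock_recovery_ok(train_set[0] & 3)) {
				printf("clock recovery ok at swing %d\n",
				       train_set[0] & 3);
				break;
			}
			if (train_set[0] & MAX_SWING_REACHED) {
				printf("max swing reached, giving up\n");
				break;
			}
			if ((train_set[0] & 3) == voltage && ++tries == 5) {
				printf("same voltage five times, giving up\n");
				break;
			}
			if ((train_set[0] & 3) != voltage)
				tries = 0;
			voltage = train_set[0] & 3;
			/* adopt the sink's adjust request: one more swing step */
			train_set[0] = (train_set[0] & 3) + 1;
			memset(train_set + 1, train_set[0], 3);
		}
		return 0;
	}
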
@@ -1784,7 +1433,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- uint8_t link_status[DP_LINK_STATUS_SIZE];
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
@@ -1792,40 +1440,36 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
- signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
- } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_GEN6(dev) && is_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_2 |
- DP_LINK_SCRAMBLING_DISABLE))
+ DP_TRAINING_PATTERN_2))
break;
udelay(400);
- if (!intel_dp_get_link_status(intel_dp, link_status))
+ if (!intel_dp_get_link_status(intel_dp))
break;
/* Make sure clock is still ok */
- if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
cr_tries++;
continue;
}
- if (intel_channel_eq_ok(intel_dp, link_status)) {
+ if (intel_channel_eq_ok(intel_dp)) {
channel_eq = true;
break;
}
@@ -1840,11 +1484,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
}
/* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status);
+ intel_get_adjust_train(intel_dp);
++tries;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
@@ -1874,7 +1518,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
udelay(100);
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
@@ -1885,12 +1529,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
- if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- DP |= DP_LINK_TRAIN_OFF_CPT;
- else
- DP |= DP_LINK_TRAIN_OFF;
- }
+ if (is_edp(intel_dp))
+ DP |= DP_LINK_TRAIN_OFF;
if (!HAS_PCH_CPT(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -1928,40 +1568,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DP &= ~DP_AUDIO_OUTPUT_ENABLE;
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
- msleep(intel_dp->panel_power_down_delay);
-}
-
-static bool
-intel_dp_get_dpcd(struct intel_dp *intel_dp)
-{
- if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
- sizeof(intel_dp->dpcd)) &&
- (intel_dp->dpcd[DP_DPCD_REV] != 0)) {
- return true;
- }
-
- return false;
-}
-
-static bool
-intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
-{
- int ret;
-
- ret = intel_dp_aux_native_read_retry(intel_dp,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector, 1);
- if (!ret)
- return false;
-
- return true;
-}
-
-static void
-intel_dp_handle_test_request(struct intel_dp *intel_dp)
-{
- /* NAK by default */
- intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
}
/*
@@ -1976,61 +1582,36 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
- u8 sink_irq_vector;
- u8 link_status[DP_LINK_STATUS_SIZE];
-
- if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
- return;
+ int ret;
if (!intel_dp->base.base.crtc)
return;
- /* Try to read receiver status if the link appears to be up */
- if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ if (!intel_dp_get_link_status(intel_dp)) {
intel_dp_link_down(intel_dp);
return;
}
- /* Now read the DPCD to see if it's actually running */
- if (!intel_dp_get_dpcd(intel_dp)) {
+ /* Try to read receiver status if the link appears to be up */
+ ret = intel_dp_aux_native_read(intel_dp,
+ 0x000, intel_dp->dpcd,
+				       sizeof(intel_dp->dpcd));
+ if (ret != sizeof(intel_dp->dpcd)) {
intel_dp_link_down(intel_dp);
return;
}
- /* Try to read the source of the interrupt */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
- /* Clear interrupt source */
- intel_dp_aux_native_write_1(intel_dp,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector);
-
- if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
- intel_dp_handle_test_request(intel_dp);
- if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
- DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
- }
-
- if (!intel_channel_eq_ok(intel_dp, link_status)) {
- DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
- drm_get_encoder_name(&intel_dp->base.base));
+ if (!intel_channel_eq_ok(intel_dp)) {
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
}
}
static enum drm_connector_status
-intel_dp_detect_dpcd(struct intel_dp *intel_dp)
-{
- if (intel_dp_get_dpcd(intel_dp))
- return connector_status_connected;
- return connector_status_disconnected;
-}
-
-static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
enum drm_connector_status status;
+ bool ret;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
@@ -2040,7 +1621,15 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
return status;
}
- return intel_dp_detect_dpcd(intel_dp);
+ status = connector_status_disconnected;
+ ret = intel_dp_aux_native_read_retry(intel_dp,
+ 0x000, intel_dp->dpcd,
+					     sizeof(intel_dp->dpcd));
+ if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0)
+ status = connector_status_connected;
+ DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
+ intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
+ return status;
}
static enum drm_connector_status
@@ -2048,6 +1637,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum drm_connector_status status;
uint32_t temp, bit;
switch (intel_dp->output_reg) {
@@ -2069,7 +1659,15 @@ g4x_dp_detect(struct intel_dp *intel_dp)
if ((temp & bit) == 0)
return connector_status_disconnected;
- return intel_dp_detect_dpcd(intel_dp);
+ status = connector_status_disconnected;
+ if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
+ {
+ if (intel_dp->dpcd[DP_DPCD_REV] != 0)
+ status = connector_status_connected;
+ }
+
+ return status;
}
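Both detect paths above now apply the same rule inline: a sink counts as connected only if the head of the DPCD reads back over AUX and DP_DPCD_REV (byte 0) is non-zero. A runnable sketch of that rule with a stubbed AUX read (the 4-byte cache mirrors the dpcd field of this era, but treat the size as an assumption):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for intel_dp_aux_native_read(): pretend the sink answered
 * with DPCD revision 1.1. */
static bool read_dpcd(uint8_t *buf, size_t len)
{
        memset(buf, 0, len);
        buf[0] = 0x11;          /* DP_DPCD_REV */
        return true;
}

static bool dp_sink_connected(void)
{
        uint8_t dpcd[4];        /* assumed cache size for the sketch */

        if (!read_dpcd(dpcd, sizeof(dpcd)))
                return false;   /* AUX transaction failed: link is down */
        return dpcd[0] != 0;    /* a revision of 0x00 means nothing answered */
}

int main(void)
{
        printf("connected: %d\n", dp_sink_connected());
        return 0;
}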
static struct edid *
@@ -2080,7 +1678,7 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
+ ironlake_edp_panel_vdd_off(intel_dp);
return edid;
}
@@ -2092,7 +1690,7 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_ddc_get_modes(connector, adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
+ ironlake_edp_panel_vdd_off(intel_dp);
return ret;
}
@@ -2117,17 +1715,11 @@ intel_dp_detect(struct drm_connector *connector, bool force)
status = ironlake_dp_detect(intel_dp);
else
status = g4x_dp_detect(intel_dp);
-
- DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
- intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
- intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
- intel_dp->dpcd[6], intel_dp->dpcd[7]);
-
if (status != connector_status_connected)
return status;
- if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
- intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
+ if (intel_dp->force_audio) {
+ intel_dp->has_audio = intel_dp->force_audio > 0;
} else {
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
@@ -2152,34 +1744,26 @@ static int intel_dp_get_modes(struct drm_connector *connector)
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
if (ret) {
- if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
+ if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
struct drm_display_mode *newmode;
list_for_each_entry(newmode, &connector->probed_modes,
head) {
- if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
- intel_dp->panel_fixed_mode =
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ dev_priv->panel_fixed_mode =
drm_mode_duplicate(dev, newmode);
break;
}
}
}
+
return ret;
}
/* if eDP has no EDID, try to use fixed panel mode from VBT */
if (is_edp(intel_dp)) {
- /* initialize panel mode from VBT if available for eDP */
- if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
- intel_dp->panel_fixed_mode =
- drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (intel_dp->panel_fixed_mode) {
- intel_dp->panel_fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
- }
- }
- if (intel_dp->panel_fixed_mode) {
+ if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+ mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
drm_mode_probed_add(connector, mode);
return 1;
}
@@ -2227,10 +1811,10 @@ intel_dp_set_property(struct drm_connector *connector,
intel_dp->force_audio = i;
- if (i == HDMI_AUDIO_AUTO)
+ if (i == 0)
has_audio = intel_dp_detect_audio(connector);
else
- has_audio = (i == HDMI_AUDIO_ON);
+ has_audio = i > 0;
if (has_audio == intel_dp->has_audio)
return 0;
@@ -2261,13 +1845,8 @@ done:
}
static void
-intel_dp_destroy(struct drm_connector *connector)
+intel_dp_destroy (struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
-
- if (intel_dpd_is_edp(dev))
- intel_panel_destroy_backlight(dev);
-
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -2279,10 +1858,6 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
- if (is_edp(intel_dp)) {
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- ironlake_panel_vdd_off_sync(intel_dp);
- }
kfree(intel_dp);
}
@@ -2322,7 +1897,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
/* Return which DP Port should be selected for Transcoder DP control */
int
-intel_trans_dp_port_sel(struct drm_crtc *crtc)
+intel_trans_dp_port_sel (struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -2335,8 +1910,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
continue;
intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_dp->base.type == INTEL_OUTPUT_EDP)
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
return intel_dp->output_reg;
}
@@ -2386,7 +1960,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
return;
intel_dp->output_reg = output_reg;
- intel_dp->dpms_mode = -1;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
@@ -2420,13 +1993,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (is_edp(intel_dp)) {
+ if (is_edp(intel_dp))
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
- ironlake_panel_vdd_work);
- }
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -2462,60 +2032,28 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
+
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
- bool ret;
- struct edp_power_seq cur, vbt;
- u32 pp_on, pp_off, pp_div;
+ int ret;
+ u32 pp_on, pp_div;
pp_on = I915_READ(PCH_PP_ON_DELAYS);
- pp_off = I915_READ(PCH_PP_OFF_DELAYS);
pp_div = I915_READ(PCH_PP_DIVISOR);
- /* Pull timing values out of registers */
- cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
-
- cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
-
- cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
-
- cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
-
- cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
-
- DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
-
- vbt = dev_priv->edp.pps;
-
- DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
-
-#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
-
- intel_dp->panel_power_up_delay = get_delay(t1_t3);
- intel_dp->backlight_on_delay = get_delay(t8);
- intel_dp->backlight_off_delay = get_delay(t9);
- intel_dp->panel_power_down_delay = get_delay(t10);
- intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
-
- DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
-
- DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+ /* Get T3 & T12 values (note: VESA not bspec terminology) */
+ dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16;
+ dev_priv->panel_t3 /= 10; /* t3 in 100us units */
+ dev_priv->panel_t12 = pp_div & 0xf;
+ dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
ironlake_edp_panel_vdd_on(intel_dp);
- ret = intel_dp_get_dpcd(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, false);
-
- if (ret) {
+ ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
+ intel_dp->dpcd,
+ sizeof(intel_dp->dpcd));
+ ironlake_edp_panel_vdd_off(intel_dp);
+ if (ret == sizeof(intel_dp->dpcd)) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake =
intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
@@ -2529,13 +2067,18 @@ intel_dp_init(struct drm_device *dev, int output_reg)
}
}
- intel_dp_i2c_init(intel_dp, intel_connector, name);
-
intel_encoder->hot_plug = intel_dp_hot_plug;
if (is_edp(intel_dp)) {
- dev_priv->int_edp_connector = connector;
- intel_panel_setup_backlight(dev);
+ /* initialize panel mode from VBT if available for eDP */
+ if (dev_priv->lfp_lvds_vbt_mode) {
+ dev_priv->panel_fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (dev_priv->panel_fixed_mode) {
+ dev_priv->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ }
+ }
}
intel_dp_add_properties(intel_dp, connector);
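For reference, the panel-delay decode added to intel_dp_init() above works in two unit systems: PCH_PP_ON_DELAYS bits 28:16 carry T3 in 100 us units and PCH_PP_DIVISOR bits 3:0 carry T12 in 100 ms units, and both conversions land in milliseconds. A worked example outside the kernel (the register values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pp_on  = 0x01f40000;   /* sample value: T3 field = 500 */
        uint32_t pp_div = 0x00000005;   /* sample value: T12 field = 5 */

        uint32_t t3  = ((pp_on & 0x1fff0000) >> 16) / 10;  /* 500 * 100us = 50 ms */
        uint32_t t12 = (pp_div & 0xf) * 100;               /* 5 * 100ms = 500 ms */

        printf("T3 = %u ms, T12 = %u ms\n", t3, t12);      /* T3 = 50 ms, T12 = 500 ms */
        return 0;
}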
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7c31daf..58b54ff 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,7 +26,6 @@
#define __INTEL_DRV_H__
#include <linux/i2c.h>
-#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
@@ -35,12 +34,12 @@
#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
int ret__ = 0; \
- while (!(COND)) { \
+ while (! (COND)) { \
if (time_after(jiffies, timeout__)) { \
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && drm_can_sleep()) msleep(W); \
+ if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
} \
ret__; \
})
@@ -48,6 +47,13 @@
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+#define MSLEEP(x) do { \
+ if (in_dbg_master()) \
+ mdelay(x); \
+ else \
+ msleep(x); \
+} while(0)
+
#define KHz(x) (1000*x)
#define MHz(x) KHz(1000*x)
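_wait_for() above polls COND for at most MS milliseconds and evaluates to 0 on success or -ETIMEDOUT, sleeping W ms per iteration unless that is forbidden (atomic context or an active kernel debugger); the new MSLEEP() likewise degrades to a busy-wait mdelay() under kgdb, where the scheduler must not run. A usage sketch in the driver's style (MY_STATUS_REG and MY_READY_BIT are hypothetical names, not real registers):

static int my_wait_ready(struct drm_i915_private *dev_priv)
{
        /* Poll up to 50 ms, sleeping ~1 ms between reads. */
        if (wait_for((I915_READ(MY_STATUS_REG) & MY_READY_BIT) != 0, 50)) {
                DRM_ERROR("MY_STATUS_REG never signalled ready\n");
                return -ETIMEDOUT;
        }
        return 0;       /* use wait_for_atomic() where sleeping is not allowed */
}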
@@ -104,7 +110,6 @@
/* drm_display_mode->private_flags */
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
-#define INTEL_MODE_DP_FORCE_6BPC (0x10)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
@@ -165,61 +170,17 @@ struct intel_crtc {
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
bool cursor_visible;
- unsigned int bpp;
-
- bool no_pll; /* tertiary pipe for IVB */
- bool use_pll_a;
-};
-
-struct intel_plane {
- struct drm_plane base;
- enum pipe pipe;
- struct drm_i915_gem_object *obj;
- bool primary_disabled;
- int max_downscale;
- u32 lut_r[1024], lut_g[1024], lut_b[1024];
- void (*update_plane)(struct drm_plane *plane,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h);
- void (*disable_plane)(struct drm_plane *plane);
- int (*update_colorkey)(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key);
- void (*get_colorkey)(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key);
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
-#define to_intel_plane(x) container_of(x, struct intel_plane, base)
-
-#define DIP_HEADER_SIZE 5
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
-#define DIP_TYPE_SPD 0x83
-#define DIP_VERSION_SPD 0x1
-#define DIP_LEN_SPD 25
-#define DIP_SPD_UNKNOWN 0
-#define DIP_SPD_DSTB 0x1
-#define DIP_SPD_DVDP 0x2
-#define DIP_SPD_DVHS 0x3
-#define DIP_SPD_HDDVR 0x4
-#define DIP_SPD_DVC 0x5
-#define DIP_SPD_DSC 0x6
-#define DIP_SPD_VCD 0x7
-#define DIP_SPD_GAME 0x8
-#define DIP_SPD_PC 0x9
-#define DIP_SPD_BD 0xa
-#define DIP_SPD_SCD 0xb
-
struct dip_infoframe {
uint8_t type; /* HB0 */
uint8_t ver; /* HB1 */
@@ -272,13 +233,6 @@ struct intel_unpin_work {
bool enable_stall_check;
};
-struct intel_fbc_work {
- struct delayed_work work;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
- int interval;
-};
-
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
@@ -299,9 +253,8 @@ void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
-extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
+extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
-extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -313,15 +266,14 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern u32 intel_panel_get_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_setup_backlight(struct drm_device *dev);
+extern void intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev);
extern void intel_panel_disable_backlight(struct drm_device *dev);
-extern void intel_panel_destroy_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_encoder_prepare(struct drm_encoder *encoder);
-extern void intel_encoder_commit(struct drm_encoder *encoder);
+extern void intel_encoder_prepare (struct drm_encoder *encoder);
+extern void intel_encoder_commit (struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
@@ -353,6 +305,9 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
struct intel_load_detect_pipe *old);
+extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
+extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
+extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
@@ -362,18 +317,16 @@ extern void intel_enable_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
-extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
-extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
- struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_mode_fb_cmd *mode_cmd,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
@@ -393,25 +346,5 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
-extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
- bool state);
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-
extern void intel_init_clock_gating(struct drm_device *dev);
-extern void intel_write_eld(struct drm_encoder *encoder,
- struct drm_display_mode *mode);
-extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
-
-/* For use by IVB LP watermark workaround in intel_sprite.c */
-extern void sandybridge_update_wm(struct drm_device *dev);
-extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
- uint32_t sprite_width,
- int pixel_size);
-
-extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 69bea56..8ac91b8 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -157,6 +157,7 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
C(vsync_end);
C(vtotal);
C(clock);
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
#undef C
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 19ecd78..d0ce34b 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd mode_cmd;
struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
int size, ret;
@@ -77,12 +77,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
- 8), 64);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
+ mode_cmd.depth = sizes->surface_depth;
- size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
obj = i915_gem_alloc_object(dev, size);
if (!obj) {
@@ -149,10 +148,14 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
// memset(info->screen_base, 0, size);
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
- /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+ info->pixmap.size = 64*1024;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
fb->width, fb->height,
@@ -276,14 +279,8 @@ void intel_fb_restore_mode(struct drm_device *dev)
{
int ret;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_plane *plane;
ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
if (ret)
DRM_DEBUG("failed to restore crtc mode\n");
-
- /* Be sure to shut off any planes that may be active */
- list_for_each_entry(plane, &config->plane_list, head)
- plane->funcs->disable_plane(plane);
}
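The legacy drm_mode_fb_cmd sizing restored in intelfb_create() above computes the pitch as the byte width rounded up to 64 and the GEM object size rounded up to a page. A worked example of that arithmetic (mode values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        uint32_t width = 1366, height = 768, bpp = 32;

        uint32_t pitch = ALIGN_UP(width * ((bpp + 7) / 8), 64);  /* 5464 -> 5504 */
        uint32_t size  = ALIGN_UP(pitch * height, 4096);         /* page-aligned */

        printf("pitch = %u bytes, size = %u bytes\n", pitch, size);
        return 0;
}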
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1239d79..918bac8 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -44,9 +44,7 @@ struct intel_hdmi {
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
- enum hdmi_force_audio force_audio;
- void (*write_infoframe)(struct drm_encoder *encoder,
- struct dip_infoframe *frame);
+ int force_audio;
};
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
@@ -60,69 +58,37 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
struct intel_hdmi, base);
}
-void intel_dip_infoframe_csum(struct dip_infoframe *frame)
+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
{
- uint8_t *data = (uint8_t *)frame;
+ uint8_t *data = (uint8_t *)avi_if;
uint8_t sum = 0;
unsigned i;
- frame->checksum = 0;
- frame->ecc = 0;
+ avi_if->checksum = 0;
+ avi_if->ecc = 0;
- for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
+ for (i = 0; i < sizeof(*avi_if); i++)
sum += data[i];
- frame->checksum = 0x100 - sum;
+ avi_if->checksum = 0x100 - sum;
}
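intel_dip_infoframe_csum() implements the standard DIP rule: with the checksum and ECC bytes zeroed first, the checksum byte is chosen so that all bytes of the frame sum to 0 mod 256. A runnable illustration (the checksum slot position is illustrative, not the exact struct layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t dip_checksum(const uint8_t *frame, size_t len)
{
        uint8_t sum = 0;
        size_t i;

        for (i = 0; i < len; i++)       /* checksum slot must already be 0 */
                sum += frame[i];
        return (uint8_t)(0x100 - sum);
}

int main(void)
{
        uint8_t avi[17] = { 0x82, 0x02, 0x0d };  /* type, version, length; body zeroed */

        avi[3] = dip_checksum(avi, sizeof(avi)); /* slot chosen for the sketch */
        printf("checksum = 0x%02x\n", avi[3]);   /* 0x6f: bytes now sum to 0 mod 256 */
        return 0;
}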
-static u32 intel_infoframe_index(struct dip_infoframe *frame)
-{
- u32 flags = 0;
-
- switch (frame->type) {
- case DIP_TYPE_AVI:
- flags |= VIDEO_DIP_SELECT_AVI;
- break;
- case DIP_TYPE_SPD:
- flags |= VIDEO_DIP_SELECT_SPD;
- break;
- default:
- DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
- break;
- }
-
- return flags;
-}
-
-static u32 intel_infoframe_flags(struct dip_infoframe *frame)
-{
- u32 flags = 0;
-
- switch (frame->type) {
- case DIP_TYPE_AVI:
- flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
- break;
- case DIP_TYPE_SPD:
- flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
- break;
- default:
- DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
- break;
- }
-
- return flags;
-}
-
-static void i9xx_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
{
- uint32_t *data = (uint32_t *)frame;
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+ uint32_t *data = (uint32_t *)&avi_if;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
- unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 port;
+ unsigned i;
+ if (!intel_hdmi->has_hdmi_sink)
+ return;
/* XXX first guess at handling video port, is this correct? */
if (intel_hdmi->sdvox_reg == SDVOB)
@@ -132,87 +98,18 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
else
return;
- flags = intel_infoframe_index(frame);
-
- val &= ~VIDEO_DIP_SELECT_MASK;
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
+ VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
- I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
-
- for (i = 0; i < len; i += 4) {
+ intel_dip_infoframe_csum(&avi_if);
+ for (i = 0; i < sizeof(avi_if); i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
- flags |= intel_infoframe_flags(frame);
-
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
-}
-
-static void ironlake_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
-{
- uint32_t *data = (uint32_t *)frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- unsigned i, len = DIP_HEADER_SIZE + frame->len;
- u32 flags, val = I915_READ(reg);
-
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- flags = intel_infoframe_index(frame);
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
-
- I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
-
- for (i = 0; i < len; i += 4) {
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
- data++;
- }
-
- flags |= intel_infoframe_flags(frame);
-
- I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
-}
-static void intel_set_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
-{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-
- if (!intel_hdmi->has_hdmi_sink)
- return;
-
- intel_dip_infoframe_csum(frame);
- intel_hdmi->write_infoframe(encoder, frame);
-}
-
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
-{
- struct dip_infoframe avi_if = {
- .type = DIP_TYPE_AVI,
- .ver = DIP_VERSION_AVI,
- .len = DIP_LEN_AVI,
- };
-
- intel_set_infoframe(encoder, &avi_if);
-}
-
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
-{
- struct dip_infoframe spd_if;
-
- memset(&spd_if, 0, sizeof(spd_if));
- spd_if.type = DIP_TYPE_SPD;
- spd_if.ver = DIP_VERSION_SPD;
- spd_if.len = DIP_LEN_SPD;
- strcpy(spd_if.body.spd.vn, "Intel");
- strcpy(spd_if.body.spd.pd, "Integrated gfx");
- spd_if.body.spd.sdi = DIP_SPD_PC;
-
- intel_set_infoframe(encoder, &spd_if);
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
+ VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
+ VIDEO_DIP_ENABLE_AVI);
}
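The restored single-path intel_hdmi_set_avi_infoframe() above follows a three-beat sequence: point VIDEO_DIP_CTL at the AVI buffer, stream the frame as 32-bit words through the auto-incrementing VIDEO_DIP_DATA window, then flip on the AVI enable bit. A sketch with the MMIO write stubbed to a printf (register offsets and bit values here are illustrative stand-ins, not authoritative i915_reg.h definitions):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VIDEO_DIP_CTL        0x61170u   /* illustrative constants */
#define VIDEO_DIP_DATA       0x61178u
#define VIDEO_DIP_ENABLE     (1u << 31)
#define VIDEO_DIP_ENABLE_AVI (1u << 21)
#define VIDEO_DIP_SELECT_AVI 0u
#define VIDEO_DIP_FREQ_VSYNC (1u << 16)
#define PORT_B               (1u << 29) /* hypothetical port select bit */

static void write_reg(uint32_t reg, uint32_t val)
{
        printf("mmio[0x%05x] <= 0x%08x\n", reg, val);   /* stands in for I915_WRITE */
}

static void write_avi_dip(uint32_t port, const uint32_t *words, size_t n)
{
        uint32_t ctl = VIDEO_DIP_ENABLE | port | VIDEO_DIP_SELECT_AVI |
                       VIDEO_DIP_FREQ_VSYNC;
        size_t i;

        write_reg(VIDEO_DIP_CTL, ctl);                  /* select the AVI buffer */
        for (i = 0; i < n; i++)
                write_reg(VIDEO_DIP_DATA, words[i]);    /* data port auto-increments */
        write_reg(VIDEO_DIP_CTL, ctl | VIDEO_DIP_ENABLE_AVI);  /* start transmission */
}

int main(void)
{
        uint32_t frame[5] = { 0x000d0282u };    /* type/ver/len packed little-endian */

        write_avi_dip(PORT_B, frame, 5);
        return 0;
}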
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -227,40 +124,32 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
- if (!HAS_PCH_SPLIT(dev))
- sdvox |= intel_hdmi->color_range;
+ sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
- if (intel_crtc->bpp > 24)
- sdvox |= COLOR_FORMAT_12bpc;
- else
- sdvox |= COLOR_FORMAT_8bpc;
-
/* Required on CPT */
if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT;
if (intel_hdmi->has_audio) {
- DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
- pipe_name(intel_crtc->pipe));
sdvox |= SDVO_AUDIO_ENABLE;
sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
- intel_write_eld(encoder, adjusted_mode);
}
- if (HAS_PCH_CPT(dev))
- sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
- else if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+ if (intel_crtc->pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+ sdvox |= PORT_TRANS_B_SEL_CPT;
+ else
+ sdvox |= SDVO_PIPE_B_SELECT;
+ }
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
intel_hdmi_set_avi_infoframe(encoder);
- intel_hdmi_set_spd_infoframe(encoder);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
@@ -339,9 +228,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
- intel_hdmi->has_hdmi_sink =
- drm_detect_hdmi_monitor(edid);
+ intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
connector->display_info.raw_edid = NULL;
@@ -349,9 +236,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
}
if (status == connector_status_connected) {
- if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
- intel_hdmi->has_audio =
- (intel_hdmi->force_audio == HDMI_AUDIO_ON);
+ if (intel_hdmi->force_audio)
+ intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
}
return status;
@@ -405,7 +291,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
return ret;
if (property == dev_priv->force_audio_property) {
- enum hdmi_force_audio i = val;
+ int i = val;
bool has_audio;
if (i == intel_hdmi->force_audio)
@@ -413,13 +299,13 @@ intel_hdmi_set_property(struct drm_connector *connector,
intel_hdmi->force_audio = i;
- if (i == HDMI_AUDIO_AUTO)
+ if (i == 0)
has_audio = intel_hdmi_detect_audio(connector);
else
- has_audio = (i == HDMI_AUDIO_ON);
+ has_audio = i > 0;
- if (i == HDMI_AUDIO_OFF_DVI)
- intel_hdmi->has_hdmi_sink = 0;
+ if (has_audio == intel_hdmi->has_audio)
+ return 0;
intel_hdmi->has_audio = has_audio;
goto done;
@@ -493,7 +379,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_hdmi *intel_hdmi;
- int i;
intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
if (!intel_hdmi)
@@ -517,9 +402,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_encoder->type = INTEL_OUTPUT_HDMI;
connector->polled = DRM_CONNECTOR_POLL_HPD;
- connector->interlace_allowed = 1;
+ connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
/* Set up the DDC bus. */
if (sdvox_reg == SDVOB) {
@@ -546,15 +431,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_hdmi->sdvox_reg = sdvox_reg;
- if (!HAS_PCH_SPLIT(dev)) {
- intel_hdmi->write_infoframe = i9xx_write_infoframe;
- I915_WRITE(VIDEO_DIP_CTL, 0);
- } else {
- intel_hdmi->write_infoframe = ironlake_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(TVIDEO_DIP_CTL(i), 0);
- }
-
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_hdmi_add_properties(intel_hdmi, connector);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 4f40b2b..d98cee6 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -36,7 +36,7 @@
/* Intel GPIO access functions */
-#define I2C_RISEFALL_TIME 10
+#define I2C_RISEFALL_TIME 20
static inline struct intel_gmbus *
to_intel_gmbus(struct i2c_adapter *i2c)
@@ -44,6 +44,13 @@ to_intel_gmbus(struct i2c_adapter *i2c)
return container_of(i2c, struct intel_gmbus, adapter);
}
+struct intel_gpio {
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ struct drm_i915_private *dev_priv;
+ u32 reg;
+};
+
void
intel_i2c_reset(struct drm_device *dev)
{
@@ -70,15 +77,15 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
I915_WRITE(DSPCLK_GATE_D, val);
}
-static u32 get_reserved(struct intel_gmbus *bus)
+static u32 get_reserved(struct intel_gpio *gpio)
{
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
struct drm_device *dev = dev_priv->dev;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ_NOTRACE(bus->gpio_reg) &
+ reserved = I915_READ_NOTRACE(gpio->reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@@ -87,29 +94,29 @@ static u32 get_reserved(struct intel_gmbus *bus)
static int get_clock(void *data)
{
- struct intel_gmbus *bus = data;
- struct drm_i915_private *dev_priv = bus->dev_priv;
- u32 reserved = get_reserved(bus);
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
- return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE_NOTRACE(gpio->reg, reserved);
+ return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
- struct intel_gmbus *bus = data;
- struct drm_i915_private *dev_priv = bus->dev_priv;
- u32 reserved = get_reserved(bus);
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
- return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE_NOTRACE(gpio->reg, reserved);
+ return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
- struct intel_gmbus *bus = data;
- struct drm_i915_private *dev_priv = bus->dev_priv;
- u32 reserved = get_reserved(bus);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
u32 clock_bits;
if (state_high)
@@ -118,15 +125,15 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
- POSTING_READ(bus->gpio_reg);
+ I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
+ POSTING_READ(gpio->reg);
}
static void set_data(void *data, int state_high)
{
- struct intel_gmbus *bus = data;
- struct drm_i915_private *dev_priv = bus->dev_priv;
- u32 reserved = get_reserved(bus);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
u32 data_bits;
if (state_high)
@@ -135,14 +142,13 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
- POSTING_READ(bus->gpio_reg);
+ I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
+ POSTING_READ(gpio->reg);
}
-static bool
-intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
+static struct i2c_adapter *
+intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
{
- struct drm_i915_private *dev_priv = bus->dev_priv;
static const int map_pin_to_reg[] = {
0,
GPIOB,
@@ -153,48 +159,65 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
0,
GPIOF,
};
- struct i2c_algo_bit_data *algo;
+ struct intel_gpio *gpio;
if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
- return false;
+ return NULL;
- algo = &bus->bit_algo;
+ gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+ if (gpio == NULL)
+ return NULL;
- bus->gpio_reg = map_pin_to_reg[pin];
+ gpio->reg = map_pin_to_reg[pin];
if (HAS_PCH_SPLIT(dev_priv->dev))
- bus->gpio_reg += PCH_GPIOA - GPIOA;
-
- bus->adapter.algo_data = algo;
- algo->setsda = set_data;
- algo->setscl = set_clock;
- algo->getsda = get_data;
- algo->getscl = get_clock;
- algo->udelay = I2C_RISEFALL_TIME;
- algo->timeout = usecs_to_jiffies(2200);
- algo->data = bus;
-
- return true;
+ gpio->reg += PCH_GPIOA - GPIOA;
+ gpio->dev_priv = dev_priv;
+
+ snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+ "i915 GPIO%c", "?BACDE?F"[pin]);
+ gpio->adapter.owner = THIS_MODULE;
+ gpio->adapter.algo_data = &gpio->algo;
+ gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+ gpio->algo.setsda = set_data;
+ gpio->algo.setscl = set_clock;
+ gpio->algo.getsda = get_data;
+ gpio->algo.getscl = get_clock;
+ gpio->algo.udelay = I2C_RISEFALL_TIME;
+ gpio->algo.timeout = usecs_to_jiffies(2200);
+ gpio->algo.data = gpio;
+
+ if (i2c_bit_add_bus(&gpio->adapter))
+ goto out_free;
+
+ return &gpio->adapter;
+
+out_free:
+ kfree(gpio);
+ return NULL;
}
static int
-intel_i2c_quirk_xfer(struct intel_gmbus *bus,
+intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
+ struct i2c_adapter *adapter,
struct i2c_msg *msgs,
int num)
{
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct intel_gpio *gpio = container_of(adapter,
+ struct intel_gpio,
+ adapter);
int ret;
intel_i2c_reset(dev_priv->dev);
intel_i2c_quirk_set(dev_priv, true);
- set_data(bus, 1);
- set_clock(bus, 1);
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
udelay(I2C_RISEFALL_TIME);
- ret = i2c_bit_algo.master_xfer(&bus->adapter, msgs, num);
+ ret = adapter->algo->master_xfer(adapter, msgs, num);
- set_data(bus, 1);
- set_clock(bus, 1);
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
intel_i2c_quirk_set(dev_priv, false);
return ret;
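intel_gpio_create() above is a textbook i2c-algo-bit registration: the driver supplies only four GPIO accessors plus timing, and i2c_bit_add_bus() installs the bit-banging master_xfer behind the adapter. A minimal kernel-context sketch of the same wiring (the my_* callbacks are hypothetical):

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/jiffies.h>
#include <linux/module.h>

struct my_bus {
        struct i2c_adapter adapter;
        struct i2c_algo_bit_data algo;
};

static void my_set_sda(void *data, int state);  /* drive low / release high */
static void my_set_scl(void *data, int state);
static int  my_get_sda(void *data);             /* sample the line */
static int  my_get_scl(void *data);

static int my_register(struct my_bus *bus, struct device *parent)
{
        snprintf(bus->adapter.name, sizeof(bus->adapter.name), "my bit-bang bus");
        bus->adapter.owner = THIS_MODULE;
        bus->adapter.algo_data = &bus->algo;    /* i2c-algo-bit looks here */
        bus->adapter.dev.parent = parent;

        bus->algo.setsda  = my_set_sda;
        bus->algo.setscl  = my_set_scl;
        bus->algo.getsda  = my_get_sda;
        bus->algo.getscl  = my_get_scl;
        bus->algo.udelay  = 20;                 /* SCL half-period, in us */
        bus->algo.timeout = usecs_to_jiffies(2200);
        bus->algo.data    = bus;                /* first argument to the callbacks */

        /* Fills in adapter.algo with the bit-banging master_xfer. */
        return i2c_bit_add_bus(&bus->adapter);
}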
@@ -208,15 +231,12 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
- int i, reg_offset, ret;
+ struct drm_i915_private *dev_priv = adapter->algo_data;
+ int i, reg_offset;
- mutex_lock(&dev_priv->gmbus_mutex);
-
- if (bus->force_bit) {
- ret = intel_i2c_quirk_xfer(bus, msgs, num);
- goto out;
- }
+ if (bus->force_bit)
+ return intel_i2c_quirk_xfer(dev_priv,
+ bus->force_bit, msgs, num);
reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
@@ -228,8 +248,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
if (msgs[i].flags & I2C_M_RD) {
I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT |
- (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
(len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
@@ -258,8 +277,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT |
- (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
(msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
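Each message in gmbus_xfer() above is kicked off by a single GMBUS1 write that packs the cycle type, byte count, 7-bit slave address, and direction into one word. A standalone illustration (bit positions follow i915_reg.h of this era, but treat the exact values as illustrative):

#include <stdint.h>
#include <stdio.h>

#define GMBUS_SW_RDY           (1u << 30)
#define GMBUS_CYCLE_WAIT       (1u << 25)
#define GMBUS_CYCLE_STOP       (4u << 25)
#define GMBUS_BYTE_COUNT_SHIFT 16
#define GMBUS_SLAVE_ADDR_SHIFT 1
#define GMBUS_SLAVE_READ       (1u << 0)

static uint32_t gmbus_read_cmd(uint32_t len, uint32_t addr, int last)
{
        return GMBUS_CYCLE_WAIT | (last ? GMBUS_CYCLE_STOP : 0) |
               (len << GMBUS_BYTE_COUNT_SHIFT) |
               (addr << GMBUS_SLAVE_ADDR_SHIFT) |
               GMBUS_SLAVE_READ | GMBUS_SW_RDY;
}

int main(void)
{
        /* 2-byte read from 7-bit address 0x50 (EDID), final message. */
        printf("GMBUS1 = 0x%08x\n", gmbus_read_cmd(2, 0x50, 1));  /* 0x4a0200a1 */
        return 0;
}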
@@ -298,15 +316,11 @@ clear_err:
I915_WRITE(GMBUS1 + reg_offset, 0);
done:
- /* Mark the GMBUS interface as disabled after waiting for idle.
- * We will re-enable it at the start of the next xfer,
- * till then let it sleep.
+ /* Mark the GMBUS interface as disabled. We will re-enable it at the
+ * start of the next xfer, till then let it sleep.
*/
- if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10))
- DRM_INFO("GMBUS timed out waiting for idle\n");
I915_WRITE(GMBUS0 + reg_offset, 0);
- ret = i;
- goto out;
+ return i;
timeout:
DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
@@ -314,21 +328,23 @@ timeout:
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
- if (!bus->has_gpio) {
- ret = -EIO;
- } else {
- bus->force_bit = true;
- ret = intel_i2c_quirk_xfer(bus, msgs, num);
- }
-out:
- mutex_unlock(&dev_priv->gmbus_mutex);
- return ret;
+ bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+ if (!bus->force_bit)
+ return -ENOMEM;
+
+ return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
}
static u32 gmbus_func(struct i2c_adapter *adapter)
{
- return i2c_bit_algo.functionality(adapter) &
- (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+
+ if (bus->force_bit)
+ bus->force_bit->algo->functionality(bus->force_bit);
+
+ return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
/* I2C_FUNC_10BIT_ADDR | */
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
@@ -358,13 +374,11 @@ int intel_setup_gmbus(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
- dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
+ dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
GFP_KERNEL);
if (dev_priv->gmbus == NULL)
return -ENOMEM;
- mutex_init(&dev_priv->gmbus_mutex);
-
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
@@ -376,7 +390,7 @@ int intel_setup_gmbus(struct drm_device *dev)
names[i]);
bus->adapter.dev.parent = &dev->pdev->dev;
- bus->dev_priv = dev_priv;
+ bus->adapter.algo_data = dev_priv;
bus->adapter.algo = &gmbus_algorithm;
ret = i2c_add_adapter(&bus->adapter);
@@ -386,11 +400,8 @@ int intel_setup_gmbus(struct drm_device *dev)
/* By default use a conservative clock rate */
bus->reg0 = i | GMBUS_RATE_100KHZ;
- bus->has_gpio = intel_gpio_setup(bus, i);
-
/* XXX force bit banging until GMBUS is fully debugged */
- if (bus->has_gpio && IS_GEN2(dev))
- bus->force_bit = true;
+ bus->force_bit = intel_gpio_create(dev_priv, i);
}
intel_i2c_reset(dev_priv->dev);
@@ -411,15 +422,32 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed;
+ /* speed:
+ * 0x0 = 100 KHz
+ * 0x1 = 50 KHz
+ * 0x2 = 400 KHz
+ * 0x3 = 1000 KHz
+ */
+ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
}
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- if (bus->has_gpio)
- bus->force_bit = force_bit;
+ if (force_bit) {
+ if (bus->force_bit == NULL) {
+ struct drm_i915_private *dev_priv = adapter->algo_data;
+ bus->force_bit = intel_gpio_create(dev_priv,
+ bus->reg0 & 0xff);
+ }
+ } else {
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ bus->force_bit = NULL;
+ }
+ }
}
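intel_gmbus_set_speed() above read-modify-writes bits 9:8 of the cached GMBUS0 value so that the pin-pair select in the low byte survives a rate change. A runnable illustration of that masking:

#include <stdint.h>
#include <stdio.h>

static uint32_t gmbus_set_speed(uint32_t reg0, uint32_t speed)
{
        return (reg0 & ~(0x3u << 8)) | (speed << 8);    /* speed field is bits 9:8 */
}

int main(void)
{
        uint32_t reg0 = 0x5;                    /* pin pair 5, rate field 0 (100 KHz) */

        reg0 = gmbus_set_speed(reg0, 0x2);      /* switch to 400 KHz */
        printf("reg0 = 0x%03x\n", reg0);        /* 0x205: pin select preserved */
        return 0;
}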
void intel_teardown_gmbus(struct drm_device *dev)
@@ -432,6 +460,10 @@ void intel_teardown_gmbus(struct drm_device *dev)
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ }
i2c_del_adapter(&bus->adapter);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b19a5cf..09881ac 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -72,16 +72,14 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
{
struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, lvds_reg, stat_reg;
+ u32 ctl_reg, lvds_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
lvds_reg = PCH_LVDS;
- stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
lvds_reg = LVDS;
- stat_reg = PP_STATUS;
}
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
@@ -96,16 +94,17 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
intel_lvds->pfit_control,
intel_lvds->pfit_pgm_ratios);
-
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
- intel_lvds->pfit_dirty = false;
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) {
+ DRM_ERROR("timed out waiting for panel to power off\n");
+ } else {
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
+ intel_lvds->pfit_dirty = false;
+ }
}
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
POSTING_READ(lvds_reg);
- if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
- DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(dev);
}
@@ -114,25 +113,24 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
{
struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, lvds_reg, stat_reg;
+ u32 ctl_reg, lvds_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
lvds_reg = PCH_LVDS;
- stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
lvds_reg = LVDS;
- stat_reg = PP_STATUS;
}
intel_panel_disable_backlight(dev);
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
- if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
if (intel_lvds->pfit_control) {
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
+
I915_WRITE(PFIT_CONTROL, 0);
intel_lvds->pfit_dirty = true;
}
@@ -400,21 +398,53 @@ out:
static void intel_lvds_prepare(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- /*
+ /* We try to do the minimum that is necessary in order to unlock
+ * the registers for mode setting.
+ *
+ * On Ironlake, this is quite simple as we just set the unlock key
+ * and ignore all subtleties. (This may cause some issues...)
+ *
* Prior to Ironlake, we must disable the pipe if we want to adjust
* the panel fitter. However at all other times we can just reset
* the registers regardless.
*/
- if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
- intel_lvds_disable(intel_lvds);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_PP_CONTROL,
+ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+ } else if (intel_lvds->pfit_dirty) {
+ I915_WRITE(PP_CONTROL,
+ (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
+ & ~POWER_TARGET_ON);
+ } else {
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+ }
}
static void intel_lvds_commit(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+ /* Undo any unlocking done in prepare to prevent accidental
+ * adjustment of the registers.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 val = I915_READ(PCH_PP_CONTROL);
+ if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
+ I915_WRITE(PCH_PP_CONTROL, val & 0x3);
+ } else {
+ u32 val = I915_READ(PP_CONTROL);
+ if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
+ I915_WRITE(PP_CONTROL, val & 0x3);
+ }
+
/* Always do a full power on as we do not know what state
* we were left in.
*/
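The prepare()/commit() pair above brackets mode setting with an unlock/relock of the panel-power control register: prepare() ORs in the unlock key, and commit() drops everything but the functional low bits once it sees the key. A toy model of that handshake (the key value matches the PANEL_UNLOCK_REGS definition of this era, but treat it as an assumption):

#include <stdint.h>
#include <stdio.h>

#define PANEL_UNLOCK_REGS 0xabcd0000u   /* assumed key value for the sketch */

int main(void)
{
        uint32_t pp_control = 0x1;                    /* locked, power target on */

        pp_control |= PANEL_UNLOCK_REGS;              /* prepare(): unlock */
        /* ... mode setting pokes the protected registers here ... */
        if ((pp_control & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
                pp_control &= 0x3;                    /* commit(): relock, keep bits 1:0 */

        printf("PP_CONTROL = 0x%08x\n", pp_control);  /* back to 0x00000001 */
        return 0;
}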
@@ -552,8 +582,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_panel_destroy_backlight(dev);
-
if (dev_priv->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
@@ -662,14 +690,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "Dell OptiPlex FX170",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
- },
- },
- {
- .callback = intel_no_lvds_dmi_callback,
.ident = "AOpen Mini PC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
@@ -716,14 +736,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
},
{
- .callback = intel_no_lvds_dmi_callback,
- .ident = "Clientron E830",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
- DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
- },
- },
- {
.callback = intel_no_lvds_dmi_callback,
.ident = "Asus EeeBox PC EB1007",
.matches = {
@@ -733,30 +745,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "Asus AT5NM10T-I",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
- },
- },
- {
- .callback = intel_no_lvds_dmi_callback,
- .ident = "Hewlett-Packard t5745",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_BOARD_NAME, "hp t5745"),
- },
- },
- {
- .callback = intel_no_lvds_dmi_callback,
- .ident = "Hewlett-Packard st5747",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
- },
- },
- {
- .callback = intel_no_lvds_dmi_callback,
.ident = "MSI Wind Box DC500",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
@@ -892,18 +880,6 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
return false;
}
-static bool intel_lvds_supported(struct drm_device *dev)
-{
- /* With the introduction of the PCH we gained a dedicated
- * LVDS presence pin, use it. */
- if (HAS_PCH_SPLIT(dev))
- return true;
-
- /* Otherwise LVDS was only attached to mobile products,
- * except for the inglorious 830gm */
- return IS_MOBILE(dev) && !IS_I830(dev);
-}
-
/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
@@ -925,9 +901,6 @@ bool intel_lvds_init(struct drm_device *dev)
int pipe;
u8 pin;
- if (!intel_lvds_supported(dev))
- return false;
-
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return false;
@@ -975,11 +948,9 @@ bool intel_lvds_init(struct drm_device *dev)
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
- if (HAS_PCH_SPLIT(dev))
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- else
- intel_encoder->crtc_mask = (1 << 1);
-
+ intel_encoder->crtc_mask = (1 << 1);
+ if (INTEL_INFO(dev)->gen >= 5)
+ intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1101,19 +1072,6 @@ out:
pwm = I915_READ(BLC_PWM_PCH_CTL1);
pwm |= PWM_PCH_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
- /*
- * Unlock registers and just
- * leave them unlocked
- */
- I915_WRITE(PCH_PP_CONTROL,
- I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
- } else {
- /*
- * Unlock registers and just
- * leave them unlocked
- */
- I915_WRITE(PP_CONTROL,
- I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
dev_priv->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
@@ -1123,9 +1081,6 @@ out:
/* keep the LVDS connector */
dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
-
- intel_panel_setup_backlight(dev);
-
return true;
failed:
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index d1928e7..3b26a3b 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -26,9 +26,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/fb.h>
-#include <drm/drm_edid.h>
#include "drmP.h"
-#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drv.h"
@@ -43,13 +41,13 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
u8 buf[2];
struct i2c_msg msgs[] = {
{
- .addr = DDC_ADDR,
+ .addr = 0x50,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = DDC_ADDR,
+ .addr = 0x50,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
@@ -76,7 +74,6 @@ int intel_ddc_get_modes(struct drm_connector *connector,
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -84,11 +81,10 @@ int intel_ddc_get_modes(struct drm_connector *connector,
return ret;
}
-static const struct drm_prop_enum_list force_audio_names[] = {
- { HDMI_AUDIO_OFF_DVI, "force-dvi" },
- { HDMI_AUDIO_OFF, "off" },
- { HDMI_AUDIO_AUTO, "auto" },
- { HDMI_AUDIO_ON, "on" },
+static const char *force_audio_names[] = {
+ "off",
+ "auto",
+ "on",
};
void
@@ -97,24 +93,27 @@ intel_attach_force_audio_property(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
+ int i;
prop = dev_priv->force_audio_property;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, 0,
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
"audio",
- force_audio_names,
ARRAY_SIZE(force_audio_names));
if (prop == NULL)
return;
+ for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
+ drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+
dev_priv->force_audio_property = prop;
}
drm_connector_attach_property(connector, prop, 0);
}
-static const struct drm_prop_enum_list broadcast_rgb_names[] = {
- { 0, "Full" },
- { 1, "Limited 16:235" },
+static const char *broadcast_rgb_names[] = {
+ "Full",
+ "Limited 16:235",
};
void
@@ -123,16 +122,19 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
+ int i;
prop = dev_priv->broadcast_rgb_property;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
"Broadcast RGB",
- broadcast_rgb_names,
ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
+ for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+ drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+
dev_priv->broadcast_rgb_property = prop;
}
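Both hunks above return to the older two-step enum-property API: drm_property_create() allocates the property, then drm_property_add_enum() registers each (value, name) pair. Note the "audio" property maps index i to value i-1 so that "off"/"auto"/"on" become -1/0/1. A kernel-context sketch of the same pattern (helper name is hypothetical):

/* Assumes "drmP.h" and the pre-3.2 property API used above. */
static const char *tristate_names[] = { "off", "auto", "on" };

static struct drm_property *attach_tristate(struct drm_device *dev,
                                            struct drm_connector *connector)
{
        struct drm_property *prop;
        int i;

        prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, "audio",
                                   ARRAY_SIZE(tristate_names));
        if (!prop)
                return NULL;

        for (i = 0; i < ARRAY_SIZE(tristate_names); i++)
                drm_property_add_enum(prop, i, i - 1, tristate_names[i]);

        drm_connector_attach_property(connector, prop, 0);  /* default: off */
        return prop;
}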
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index cffb007..e7a97b5 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -51,61 +51,61 @@
#define MBOX_ASLE (1<<2)
struct opregion_header {
- u8 signature[16];
- u32 size;
- u32 opregion_ver;
- u8 bios_ver[32];
- u8 vbios_ver[16];
- u8 driver_ver[16];
- u32 mboxes;
- u8 reserved[164];
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u8 reserved[164];
} __attribute__((packed));
/* OpRegion mailbox #1: public ACPI methods */
struct opregion_acpi {
- u32 drdy; /* driver readiness */
- u32 csts; /* notification status */
- u32 cevt; /* current event */
- u8 rsvd1[20];
- u32 didl[8]; /* supported display devices ID list */
- u32 cpdl[8]; /* currently presented display list */
- u32 cadl[8]; /* currently active display list */
- u32 nadl[8]; /* next active devices list */
- u32 aslp; /* ASL sleep time-out */
- u32 tidx; /* toggle table index */
- u32 chpd; /* current hotplug enable indicator */
- u32 clid; /* current lid state*/
- u32 cdck; /* current docking state */
- u32 sxsw; /* Sx state resume */
- u32 evts; /* ASL supported events */
- u32 cnot; /* current OS notification */
- u32 nrdy; /* driver status */
- u8 rsvd2[60];
+ u32 drdy; /* driver readiness */
+ u32 csts; /* notification status */
+ u32 cevt; /* current event */
+ u8 rsvd1[20];
+ u32 didl[8]; /* supported display devices ID list */
+ u32 cpdl[8]; /* currently presented display list */
+ u32 cadl[8]; /* currently active display list */
+ u32 nadl[8]; /* next active devices list */
+ u32 aslp; /* ASL sleep time-out */
+ u32 tidx; /* toggle table index */
+ u32 chpd; /* current hotplug enable indicator */
+ u32 clid; /* current lid state*/
+ u32 cdck; /* current docking state */
+ u32 sxsw; /* Sx state resume */
+ u32 evts; /* ASL supported events */
+ u32 cnot; /* current OS notification */
+ u32 nrdy; /* driver status */
+ u8 rsvd2[60];
} __attribute__((packed));
/* OpRegion mailbox #2: SWSCI */
struct opregion_swsci {
- u32 scic; /* SWSCI command|status|data */
- u32 parm; /* command parameters */
- u32 dslp; /* driver sleep time-out */
- u8 rsvd[244];
+ u32 scic; /* SWSCI command|status|data */
+ u32 parm; /* command parameters */
+ u32 dslp; /* driver sleep time-out */
+ u8 rsvd[244];
} __attribute__((packed));
/* OpRegion mailbox #3: ASLE */
struct opregion_asle {
- u32 ardy; /* driver readiness */
- u32 aslc; /* ASLE interrupt command */
- u32 tche; /* technology enabled indicator */
- u32 alsi; /* current ALS illuminance reading */
- u32 bclp; /* backlight brightness to set */
- u32 pfit; /* panel fitting state */
- u32 cblv; /* current brightness level */
- u16 bclm[20]; /* backlight level duty cycle mapping table */
- u32 cpfm; /* current panel fitting mode */
- u32 epfm; /* enabled panel fitting modes */
- u8 plut[74]; /* panel LUT and identifier */
- u32 pfmb; /* PWM freq and min brightness */
- u8 rsvd[102];
+ u32 ardy; /* driver readiness */
+ u32 aslc; /* ASLE interrupt command */
+ u32 tche; /* technology enabled indicator */
+ u32 alsi; /* current ALS illuminance reading */
+ u32 bclp; /* backlight brightness to set */
+ u32 pfit; /* panel fitting state */
+ u32 cblv; /* current brightness level */
+ u16 bclm[20]; /* backlight level duty cycle mapping table */
+ u32 cpfm; /* current panel fitting mode */
+ u32 epfm; /* enabled panel fitting modes */
+ u8 plut[74]; /* panel LUT and identifier */
+ u32 pfmb; /* PWM freq and min brightness */
+ u8 rsvd[102];
} __attribute__((packed));
/* ASLE irq request bits */
@@ -227,6 +227,7 @@ void intel_opregion_asle_intr(struct drm_device *dev)
asle->aslc = asle_stat;
}
+/* Only present on Ironlake+ */
void intel_opregion_gse_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -296,26 +297,19 @@ static int intel_opregion_video_event(struct notifier_block *nb,
/* The only video events relevant to opregion are 0x80. These indicate
either a docking event, lid switch or display switch request. In
Linux, these are handled by the dock, button and video drivers.
- */
+ We might want to fix the video driver to be opregion-aware in
+ future, but right now we just indicate to the firmware that the
+ request has been handled */
struct opregion_acpi *acpi;
- struct acpi_bus_event *event = data;
- int ret = NOTIFY_OK;
-
- if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
- return NOTIFY_DONE;
if (!system_opregion)
return NOTIFY_DONE;
acpi = system_opregion->acpi;
-
- if (event->type == 0x80 && !(acpi->cevt & 0x1))
- ret = NOTIFY_BAD;
-
acpi->csts = 0;
- return ret;
+ return NOTIFY_OK;
}
static struct notifier_block intel_opregion_notifier = {
@@ -361,7 +355,7 @@ static void intel_didl_outputs(struct drm_device *dev)
list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
if (i >= 8) {
- dev_printk(KERN_ERR, &dev->pdev->dev,
+ dev_printk (KERN_ERR, &dev->pdev->dev,
"More than 8 outputs detected\n");
return;
}
@@ -387,7 +381,7 @@ blind_set:
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
if (i >= 8) {
- dev_printk(KERN_ERR, &dev->pdev->dev,
+ dev_printk (KERN_ERR, &dev->pdev->dev,
"More than 8 outputs detected\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 5ba5e66..1fe7c07 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,6 +25,8 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
+
+#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -115,57 +117,57 @@
/* memory buffered overlay registers */
struct overlay_registers {
- u32 OBUF_0Y;
- u32 OBUF_1Y;
- u32 OBUF_0U;
- u32 OBUF_0V;
- u32 OBUF_1U;
- u32 OBUF_1V;
- u32 OSTRIDE;
- u32 YRGB_VPH;
- u32 UV_VPH;
- u32 HORZ_PH;
- u32 INIT_PHS;
- u32 DWINPOS;
- u32 DWINSZ;
- u32 SWIDTH;
- u32 SWIDTHSW;
- u32 SHEIGHT;
- u32 YRGBSCALE;
- u32 UVSCALE;
- u32 OCLRC0;
- u32 OCLRC1;
- u32 DCLRKV;
- u32 DCLRKM;
- u32 SCLRKVH;
- u32 SCLRKVL;
- u32 SCLRKEN;
- u32 OCONFIG;
- u32 OCMD;
- u32 RESERVED1; /* 0x6C */
- u32 OSTART_0Y;
- u32 OSTART_1Y;
- u32 OSTART_0U;
- u32 OSTART_0V;
- u32 OSTART_1U;
- u32 OSTART_1V;
- u32 OTILEOFF_0Y;
- u32 OTILEOFF_1Y;
- u32 OTILEOFF_0U;
- u32 OTILEOFF_0V;
- u32 OTILEOFF_1U;
- u32 OTILEOFF_1V;
- u32 FASTHSCALE; /* 0xA0 */
- u32 UVSCALEV; /* 0xA4 */
- u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
- u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
- u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
- u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
- u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
- u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
- u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
- u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
- u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+ u32 OBUF_0Y;
+ u32 OBUF_1Y;
+ u32 OBUF_0U;
+ u32 OBUF_0V;
+ u32 OBUF_1U;
+ u32 OBUF_1V;
+ u32 OSTRIDE;
+ u32 YRGB_VPH;
+ u32 UV_VPH;
+ u32 HORZ_PH;
+ u32 INIT_PHS;
+ u32 DWINPOS;
+ u32 DWINSZ;
+ u32 SWIDTH;
+ u32 SWIDTHSW;
+ u32 SHEIGHT;
+ u32 YRGBSCALE;
+ u32 UVSCALE;
+ u32 OCLRC0;
+ u32 OCLRC1;
+ u32 DCLRKV;
+ u32 DCLRKM;
+ u32 SCLRKVH;
+ u32 SCLRKVL;
+ u32 SCLRKEN;
+ u32 OCONFIG;
+ u32 OCMD;
+ u32 RESERVED1; /* 0x6C */
+ u32 OSTART_0Y;
+ u32 OSTART_1Y;
+ u32 OSTART_0U;
+ u32 OSTART_0V;
+ u32 OSTART_1U;
+ u32 OSTART_1V;
+ u32 OTILEOFF_0Y;
+ u32 OTILEOFF_1Y;
+ u32 OTILEOFF_0U;
+ u32 OTILEOFF_0V;
+ u32 OTILEOFF_1U;
+ u32 OTILEOFF_1V;
+ u32 FASTHSCALE; /* 0xA0 */
+ u32 UVSCALEV; /* 0xA4 */
+ u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+ u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+ u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+ u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+ u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+ u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+ u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+ u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
};
struct intel_overlay {
@@ -190,7 +192,7 @@ struct intel_overlay {
static struct overlay_registers *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
- drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
struct overlay_registers *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -225,8 +227,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
- true);
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
if (ret)
return ret;
@@ -262,8 +263,8 @@ i830_activate_pipe_a(struct drm_device *dev)
DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
mode = drm_mode_duplicate(dev, &vesa_640x480);
- drm_mode_set_crtcinfo(mode, 0);
- if (!drm_crtc_helper_set_mode(&crtc->base, mode,
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ if(!drm_crtc_helper_set_mode(&crtc->base, mode,
crtc->base.x, crtc->base.y,
crtc->base.fb))
return 0;
@@ -331,7 +332,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
bool load_polyphase_filter)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@@ -358,7 +359,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
}
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
- ADVANCE_LP_RING();
+ ADVANCE_LP_RING();
ret = i915_add_request(LP_RING(dev_priv), NULL, request);
if (ret) {
@@ -455,8 +456,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
- true);
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
if (ret)
return ret;
@@ -591,7 +591,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
ret = ((offset + width + mask) >> shift) - (offset >> shift);
if (!IS_GEN2(dev))
ret <<= 1;
- ret -= 1;
+ ret -=1;
return ret << 2;
}
@@ -781,10 +781,14 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
+ ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
if (ret != 0)
return ret;
+ ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
+ if (ret != 0)
+ goto out_unpin;
+
ret = i915_gem_object_put_fence(new_bo);
if (ret)
goto out_unpin;
@@ -825,7 +829,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->SWIDTHSW = calc_swidthsw(overlay->dev,
params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h;
- regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
+ regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
regs->OSTRIDE = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -925,7 +929,7 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
* line with the intel documentation for the i965
*/
if (INTEL_INFO(dev)->gen >= 4) {
- /* on i965 use the PGM reg to read out the autoscaler values */
+ /* on i965 use the PGM reg to read out the autoscaler values */
ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
} else {
if (pfit_control & VERT_AUTO_SCALE)
@@ -943,10 +947,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
{
struct drm_display_mode *mode = &overlay->crtc->base.mode;
- if (rec->dst_x < mode->hdisplay &&
- rec->dst_x + rec->dst_width <= mode->hdisplay &&
- rec->dst_y < mode->vdisplay &&
- rec->dst_y + rec->dst_height <= mode->vdisplay)
+ if (rec->dst_x < mode->crtc_hdisplay &&
+ rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
+ rec->dst_y < mode->crtc_vdisplay &&
+ rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
return 0;
else
return -EINVAL;
@@ -1106,7 +1110,7 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
}
int intel_overlay_put_image(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_intel_overlay_put_image *put_image_rec = data;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1309,10 +1313,10 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
}
int intel_overlay_attrs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_intel_overlay_attrs *attrs = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct overlay_registers *regs;
int ret;
@@ -1401,7 +1405,7 @@ out_unlock:
void intel_setup_overlay(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
struct overlay_registers *regs;
@@ -1429,24 +1433,24 @@ void intel_setup_overlay(struct drm_device *dev)
ret = i915_gem_attach_phys_object(dev, reg_bo,
I915_GEM_PHYS_OVERLAY_REGS,
PAGE_SIZE);
- if (ret) {
- DRM_ERROR("failed to attach phys overlay regs\n");
- goto out_free_bo;
- }
+ if (ret) {
+ DRM_ERROR("failed to attach phys overlay regs\n");
+ goto out_free_bo;
+ }
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
if (ret) {
- DRM_ERROR("failed to pin overlay register bo\n");
- goto out_free_bo;
- }
+ DRM_ERROR("failed to pin overlay register bo\n");
+ goto out_free_bo;
+ }
overlay->flip_addr = reg_bo->gtt_offset;
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
- DRM_ERROR("failed to move overlay register bo into the GTT\n");
- goto out_unpin_bo;
- }
+ DRM_ERROR("failed to move overlay register bo into the GTT\n");
+ goto out_unpin_bo;
+ }
}
/* init all values */
@@ -1533,7 +1537,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay = dev_priv->overlay;
struct intel_overlay_error_state *error;
struct overlay_registers __iomem *regs;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 230a141..f8aa821 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -48,7 +48,7 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
adjusted_mode->clock = fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, 0);
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
}
/* adjusted_mode has been preset to be the panel's fixed mode */
@@ -84,7 +84,7 @@ intel_pch_panel_fitting(struct drm_device *dev,
if (scaled_width > scaled_height) { /* pillar */
width = scaled_height / mode->vdisplay;
if (width & 1)
- width++;
+ width++;
x = (adjusted_mode->hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->vdisplay;
@@ -141,8 +141,8 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
dev_priv->saveBLC_PWM_CTL2 = val;
} else if (val == 0) {
I915_WRITE(BLC_PWM_PCH_CTL2,
- dev_priv->saveBLC_PWM_CTL2);
- val = dev_priv->saveBLC_PWM_CTL2;
+ dev_priv->saveBLC_PWM_CTL);
+ val = dev_priv->saveBLC_PWM_CTL;
}
} else {
val = I915_READ(BLC_PWM_CTL);
@@ -178,10 +178,13 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
} else {
- if (INTEL_INFO(dev)->gen < 4)
+ if (IS_PINEVIEW(dev)) {
max >>= 17;
- else
+ } else {
max >>= 16;
+ if (INTEL_INFO(dev)->gen < 4)
+ max &= ~1;
+ }
if (is_backlight_combination_mode(dev))
max *= 0xff;
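A worked pass through the extraction above, not part of the patch, taking a sample raw register value:

/* raw BLC_PWM_CTL = 0x5a2e0000
 *   Pineview:         max = raw >> 17       = 0x2d17
 *   other non-PCH:    max = raw >> 16       = 0x5a2e
 *                     gen < 4: max &= ~1    (drops an odd LSB)
 *   combination mode: max *= 0xff           (scaled by the LBPC byte)
 */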
@@ -200,12 +203,13 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (INTEL_INFO(dev)->gen < 4)
+ if (IS_PINEVIEW(dev))
val >>= 1;
- if (is_backlight_combination_mode(dev)) {
+ if (is_backlight_combination_mode(dev)){
u8 lbpc;
+ val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
@@ -232,7 +236,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
- if (is_backlight_combination_mode(dev)) {
+ if (is_backlight_combination_mode(dev)){
u32 max = intel_panel_get_max_backlight(dev);
u8 lbpc;
@@ -242,9 +246,11 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
}
tmp = I915_READ(BLC_PWM_CTL);
- if (INTEL_INFO(dev)->gen < 4)
+ if (IS_PINEVIEW(dev)) {
+ tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
level <<= 1;
- tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ } else
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
@@ -276,7 +282,7 @@ void intel_panel_enable_backlight(struct drm_device *dev)
intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
-static void intel_panel_init_backlight(struct drm_device *dev)
+void intel_panel_setup_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -308,74 +314,3 @@ intel_panel_detect(struct drm_device *dev)
return connector_status_unknown;
}
-
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-static int intel_panel_update_status(struct backlight_device *bd)
-{
- struct drm_device *dev = bl_get_data(bd);
- intel_panel_set_backlight(dev, bd->props.brightness);
- return 0;
-}
-
-static int intel_panel_get_brightness(struct backlight_device *bd)
-{
- struct drm_device *dev = bl_get_data(bd);
- struct drm_i915_private *dev_priv = dev->dev_private;
- return dev_priv->backlight_level;
-}
-
-static const struct backlight_ops intel_panel_bl_ops = {
- .update_status = intel_panel_update_status,
- .get_brightness = intel_panel_get_brightness,
-};
-
-int intel_panel_setup_backlight(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct backlight_properties props;
- struct drm_connector *connector;
-
- intel_panel_init_backlight(dev);
-
- if (dev_priv->int_lvds_connector)
- connector = dev_priv->int_lvds_connector;
- else if (dev_priv->int_edp_connector)
- connector = dev_priv->int_edp_connector;
- else
- return -ENODEV;
-
- props.type = BACKLIGHT_RAW;
- props.max_brightness = intel_panel_get_max_backlight(dev);
- dev_priv->backlight =
- backlight_device_register("intel_backlight",
- &connector->kdev, dev,
- &intel_panel_bl_ops, &props);
-
- if (IS_ERR(dev_priv->backlight)) {
- DRM_ERROR("Failed to register backlight: %ld\n",
- PTR_ERR(dev_priv->backlight));
- dev_priv->backlight = NULL;
- return -ENODEV;
- }
- dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev);
- return 0;
-}
-
-void intel_panel_destroy_backlight(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->backlight)
- backlight_device_unregister(dev_priv->backlight);
-}
-#else
-int intel_panel_setup_backlight(struct drm_device *dev)
-{
- intel_panel_init_backlight(dev);
- return 0;
-}
-
-void intel_panel_destroy_backlight(struct drm_device *dev)
-{
- return;
-}
-#endif
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d9777a2..3bd85f7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,16 +34,6 @@
#include "i915_trace.h"
#include "intel_drv.h"
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
- struct drm_i915_gem_object *obj;
- volatile u32 *cpu_page;
- u32 gtt_offset;
-};
-
static inline int ring_space(struct intel_ring_buffer *ring)
{
int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
@@ -52,6 +42,20 @@ static inline int ring_space(struct intel_ring_buffer *ring)
return space;
}
+static u32 i915_gem_get_seqno(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 seqno;
+
+ seqno = dev_priv->next_seqno;
+
+ /* reserve 0 for non-seqno */
+ if (++dev_priv->next_seqno == 0)
+ dev_priv->next_seqno = 1;
+
+ return seqno;
+}
+
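The reinstated i915_gem_get_seqno() hands out a device-global sequence number and skips zero on wrap, so a zero seqno can keep meaning "no request". A standalone check of the wrap behaviour, not part of the patch:

static u32 next_seqno = 0xffffffff;

static u32 get_seqno(void)
{
	u32 seqno = next_seqno;
	if (++next_seqno == 0)	/* reserve 0 for non-seqno */
		next_seqno = 1;
	return seqno;
}
/* yields ..., 0xfffffffe, 0xffffffff, 1, 2, ... and never 0 */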
static int
render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
@@ -119,118 +123,6 @@ render_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
-/**
- * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
- * implementing two workarounds on gen6. From section 1.4.7.1
- * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
- *
- * [DevSNB-C+{W/A}] Before any depth stall flush (including those
- * produced by non-pipelined state commands), software needs to first
- * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
- * 0.
- *
- * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
- * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
- *
- * And the workaround for these two requires this workaround first:
- *
- * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
- * BEFORE the pipe-control with a post-sync op and no write-cache
- * flushes.
- *
- * And this last workaround is tricky because of the requirements on
- * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
- * volume 2 part 1:
- *
- * "1 of the following must also be set:
- * - Render Target Cache Flush Enable ([12] of DW1)
- * - Depth Cache Flush Enable ([0] of DW1)
- * - Stall at Pixel Scoreboard ([1] of DW1)
- * - Depth Stall ([13] of DW1)
- * - Post-Sync Operation ([13] of DW1)
- * - Notify Enable ([8] of DW1)"
- *
- * The cache flushes require the workaround flush that triggered this
- * one, so we can't use it. Depth stall would trigger the same.
- * Post-sync nonzero is what triggered this second workaround, so we
- * can't use that one either. Notify enable is IRQs, which aren't
- * really our business. That leaves only stall at scoreboard.
- */
-static int
-intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
-{
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
- int ret;
-
-
- ret = intel_ring_begin(ring, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
- intel_ring_emit(ring, 0); /* low dword */
- intel_ring_emit(ring, 0); /* high dword */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- ret = intel_ring_begin(ring, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- return 0;
-}
-
-static int
-gen6_render_ring_flush(struct intel_ring_buffer *ring,
- u32 invalidate_domains, u32 flush_domains)
-{
- u32 flags = 0;
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
- int ret;
-
- /* Force SNB workarounds for PIPE_CONTROL flushes */
- intel_emit_post_sync_nonzero_flush(ring);
-
- /* Just flush everything. Experiments have shown that reducing the
- * number of bits based on the write domains has little performance
- * impact.
- */
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-
- ret = intel_ring_begin(ring, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0); /* lower dword */
- intel_ring_emit(ring, 0); /* upper dword */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- return 0;
-}
-
static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
@@ -290,7 +182,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_WRITE_START(ring, obj->gtt_offset);
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
- | RING_VALID);
+ | RING_REPORT_64K | RING_VALID);
/* If the head is still not zero, the ring is dead */
if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
@@ -317,6 +209,16 @@ static int init_ring_common(struct intel_ring_buffer *ring)
return 0;
}
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+ struct drm_i915_gem_object *obj;
+ volatile u32 *cpu_page;
+ u32 gtt_offset;
+};
+
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
@@ -337,8 +239,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
-
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ obj->cache_level = I915_CACHE_LLC;
ret = i915_gem_object_pin(obj, 4096, true);
if (ret)
@@ -388,24 +289,18 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen > 3) {
int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
- if (IS_GEN7(dev))
- I915_WRITE(GFX_MODE_GEN7,
- GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
- GFX_MODE_ENABLE(GFX_REPLAY_MODE));
}
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (IS_GEN5(dev)) {
ret = init_pipe_control(ring);
if (ret)
return ret;
}
- if (INTEL_INFO(dev)->gen >= 6) {
- I915_WRITE(INSTPM,
- INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
- }
-
return ret;
}
@@ -418,131 +313,83 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
}
static void
-update_mboxes(struct intel_ring_buffer *ring,
- u32 seqno,
- u32 mmio_offset)
+update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
- intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_REGISTER |
- MI_SEMAPHORE_UPDATE);
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int id;
+
+ /*
+ * cs -> 1 = vcs, 0 = bcs
+ * vcs -> 1 = bcs, 0 = cs,
+ * bcs -> 1 = cs, 0 = vcs.
+ */
+ id = ring - dev_priv->ring;
+ id += 2 - i;
+ id %= 3;
+
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_REGISTER |
+ MI_SEMAPHORE_UPDATE);
intel_ring_emit(ring, seqno);
- intel_ring_emit(ring, mmio_offset);
+ intel_ring_emit(ring,
+ RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}
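The index arithmetic above is worth tabulating. With the rings laid out RCS=0, VCS=1, BCS=2 in dev_priv->ring[], id = (self + 2 - i) % 3 selects the other two rings, matching the comment; the table below is illustration, not part of the patch:

/* self     i=0 -> id            i=1 -> id
 * RCS(0):  (0+2-0)%3 = 2 (BCS)  (0+2-1)%3 = 1 (VCS)
 * VCS(1):  (1+2-0)%3 = 0 (RCS)  (1+2-1)%3 = 2 (BCS)
 * BCS(2):  (2+2-0)%3 = 1 (VCS)  (2+2-1)%3 = 0 (RCS)
 */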
-/**
- * gen6_add_request - Update the semaphore mailbox registers
- *
- * @ring - ring that is adding a request
- * @seqno - return seqno stuck into the ring
- *
- * Update the mailbox registers in the *other* rings with the current seqno.
- * This acts like a signal in the canonical semaphore.
- */
static int
gen6_add_request(struct intel_ring_buffer *ring,
- u32 *seqno)
+ u32 *result)
{
- u32 mbox1_reg;
- u32 mbox2_reg;
+ u32 seqno;
int ret;
ret = intel_ring_begin(ring, 10);
if (ret)
return ret;
- mbox1_reg = ring->signal_mbox[0];
- mbox2_reg = ring->signal_mbox[1];
+ seqno = i915_gem_get_seqno(ring->dev);
+ update_semaphore(ring, 0, seqno);
+ update_semaphore(ring, 1, seqno);
- *seqno = i915_gem_next_request_seqno(ring);
-
- update_mboxes(ring, *seqno, mbox1_reg);
- update_mboxes(ring, *seqno, mbox2_reg);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, *seqno);
+ intel_ring_emit(ring, seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
+ *result = seqno;
return 0;
}
-/**
- * intel_ring_sync - sync the waiter to the signaller on seqno
- *
- * @waiter - ring that is waiting
- * @signaller - ring which has, or will signal
- * @seqno - seqno which the waiter will block on
- */
-static int
-intel_ring_sync(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- int ring,
+int
+intel_ring_sync(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
u32 seqno)
{
int ret;
- u32 dw1 = MI_SEMAPHORE_MBOX |
- MI_SEMAPHORE_COMPARE |
- MI_SEMAPHORE_REGISTER;
- ret = intel_ring_begin(waiter, 4);
+ ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
- intel_ring_emit(waiter, seqno);
- intel_ring_emit(waiter, 0);
- intel_ring_emit(waiter, MI_NOOP);
- intel_ring_advance(waiter);
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_REGISTER |
+ intel_ring_sync_index(ring, to) << 17 |
+ MI_SEMAPHORE_COMPARE);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
-/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
-int
-render_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- RCS,
- seqno);
-}
-
-/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
-int
-gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- VCS,
- seqno);
-}
-
-/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
-int
-gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- BCS,
- seqno);
-}
-
-
-
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
- intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
- PIPE_CONTROL_DEPTH_STALL); \
+ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL | 2); \
intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
intel_ring_emit(ring__, 0); \
intel_ring_emit(ring__, 0); \
@@ -552,7 +399,8 @@ static int
pc_render_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
- u32 seqno = i915_gem_next_request_seqno(ring);
+ struct drm_device *dev = ring->dev;
+ u32 seqno = i915_gem_get_seqno(dev);
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
@@ -569,9 +417,8 @@ pc_render_add_request(struct intel_ring_buffer *ring,
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WRITE_FLUSH |
- PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, 0);
@@ -586,10 +433,8 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WRITE_FLUSH |
- PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
@@ -604,7 +449,8 @@ static int
render_ring_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
- u32 seqno = i915_gem_next_request_seqno(ring);
+ struct drm_device *dev = ring->dev;
+ u32 seqno = i915_gem_get_seqno(dev);
int ret;
ret = intel_ring_begin(ring, 4);
@@ -622,19 +468,6 @@ render_ring_add_request(struct intel_ring_buffer *ring,
}
static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring)
-{
- struct drm_device *dev = ring->dev;
-
- /* Workaround to force correct ordering between irq and seqno writes on
- * ivb (and maybe also on snb) by reading from a CS register (like
- * ACTHD) before reading the status page. */
- if (IS_GEN7(dev))
- intel_ring_get_active_head(ring);
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
-}
-
-static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
@@ -730,13 +563,13 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
*/
if (IS_GEN7(dev)) {
switch (ring->id) {
- case RCS:
+ case RING_RENDER:
mmio = RENDER_HWS_PGA_GEN7;
break;
- case BCS:
+ case RING_BLT:
mmio = BLT_HWS_PGA_GEN7;
break;
- case VCS:
+ case RING_BSD:
mmio = BSD_HWS_PGA_GEN7;
break;
}
@@ -778,7 +611,7 @@ ring_add_request(struct intel_ring_buffer *ring,
if (ret)
return ret;
- seqno = i915_gem_next_request_seqno(ring);
+ seqno = i915_gem_get_seqno(ring->dev);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
@@ -799,11 +632,6 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
if (!dev->irq_enabled)
return false;
- /* It looks like we need to prevent the gt from suspending while waiting
- * for a notify irq, otherwise irqs seem to get lost on at least the
- * blt/bsd rings on ivb. */
- gen6_gt_force_wake_get(dev_priv);
-
spin_lock(&ring->irq_lock);
if (ring->irq_refcount++ == 0) {
ring->irq_mask &= ~rflag;
@@ -828,8 +656,6 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
ironlake_disable_irq(dev_priv, gflag);
}
spin_unlock(&ring->irq_lock);
-
- gen6_gt_force_wake_put(dev_priv);
}
static bool
@@ -953,8 +779,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
-
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ obj->cache_level = I915_CACHE_LLC;
ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) {
@@ -1111,105 +936,26 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
return 0;
}
-static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- bool was_interruptible;
- int ret;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long end;
+ u32 head;
- /* XXX As we have not yet audited all the paths to check that
- * they are ready for ERESTARTSYS from intel_ring_begin, do not
- * allow us to be interruptible by a signal.
+ /* If the reported head position has wrapped or hasn't advanced,
+ * fall back to the slow and accurate path.
*/
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- ret = i915_wait_request(ring, seqno, true);
-
- dev_priv->mm.interruptible = was_interruptible;
-
- return ret;
-}
-
-static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
-{
- struct drm_i915_gem_request *request;
- u32 seqno = 0;
- int ret;
-
- i915_gem_retire_requests_ring(ring);
-
- if (ring->last_retired_head != -1) {
- ring->head = ring->last_retired_head;
- ring->last_retired_head = -1;
+ head = intel_read_status_page(ring, 4);
+ if (head > ring->head) {
+ ring->head = head;
ring->space = ring_space(ring);
if (ring->space >= n)
return 0;
}
- list_for_each_entry(request, &ring->request_list, list) {
- int space;
-
- if (request->tail == -1)
- continue;
-
- space = request->tail - (ring->tail + 8);
- if (space < 0)
- space += ring->size;
- if (space >= n) {
- seqno = request->seqno;
- break;
- }
-
- /* Consume this request in case we need more space than
- * is available and so need to prevent a race between
- * updating last_retired_head and direct reads of
- * I915_RING_HEAD. It also provides a nice sanity check.
- */
- request->tail = -1;
- }
-
- if (seqno == 0)
- return -ENOSPC;
-
- ret = intel_ring_wait_seqno(ring, seqno);
- if (ret)
- return ret;
-
- if (WARN_ON(ring->last_retired_head == -1))
- return -ENOSPC;
-
- ring->head = ring->last_retired_head;
- ring->last_retired_head = -1;
- ring->space = ring_space(ring);
- if (WARN_ON(ring->space < n))
- return -ENOSPC;
-
- return 0;
-}
-
-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
-{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long end;
- int ret;
-
- ret = intel_ring_wait_request(ring, n);
- if (ret != -ENOSPC)
- return ret;
-
trace_i915_ring_wait_begin(ring);
- if (drm_core_check_feature(dev, DRIVER_GEM))
- /* With GEM the hangcheck timer should kick us out of the loop,
- * leaving it early runs the risk of corrupting GEM state (due
- * to running on almost untested codepaths). But on resume
- * timers don't work yet, so prevent a complete hang in that
- * case by choosing an insanely large timeout. */
- end = jiffies + 60 * HZ;
- else
- end = jiffies + 3 * HZ;
-
+ end = jiffies + 3 * HZ;
do {
ring->head = I915_READ_HEAD(ring);
ring->space = ring_space(ring);
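Both the auto-reported head fast path above and the MMIO polling loop feed the same free-space rule, ring_space() from the top of this file: space = head - (tail + 8), wrapped by the ring size when negative. Numerically, not part of the patch:

/* size 4096, head 256, tail 3840:
 *   256 - 3848 = -3592, +4096 -> 504 bytes free
 * size 4096, head 3840, tail 256:
 *   3840 - 264 = 3576 bytes free
 * The slack of 8 keeps a full ring from looking identical
 * to an empty one (head == tail).
 */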
@@ -1266,7 +1012,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
static const struct intel_ring_buffer render_ring = {
.name = "render ring",
- .id = RCS,
+ .id = RING_RENDER,
.mmio_base = RENDER_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_render_ring,
@@ -1277,19 +1023,14 @@ static const struct intel_ring_buffer render_ring = {
.irq_get = render_ring_get_irq,
.irq_put = render_ring_put_irq,
.dispatch_execbuffer = render_ring_dispatch_execbuffer,
- .cleanup = render_ring_cleanup,
- .sync_to = render_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
- MI_SEMAPHORE_SYNC_RV,
- MI_SEMAPHORE_SYNC_RB},
- .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
+ .cleanup = render_ring_cleanup,
};
/* ring buffer for bit-stream decoder */
static const struct intel_ring_buffer bsd_ring = {
.name = "bsd ring",
- .id = VCS,
+ .id = RING_BSD,
.mmio_base = BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_ring_common,
@@ -1306,23 +1047,23 @@ static const struct intel_ring_buffer bsd_ring = {
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
/* Every tail move must follow the sequence below */
- I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
- I915_WRITE(GEN6_BSD_RNCID, 0x0);
-
- if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
- GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
- 50))
- DRM_ERROR("timed out waiting for IDLE Indicator\n");
-
- I915_WRITE_TAIL(ring, value);
- I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
+ I915_WRITE(GEN6_BSD_RNCID, 0x0);
+
+ if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+ GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
+ 50))
+ DRM_ERROR("timed out waiting for IDLE Indicator\n");
+
+ I915_WRITE_TAIL(ring, value);
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}
static int gen6_ring_flush(struct intel_ring_buffer *ring,
@@ -1350,18 +1091,18 @@ static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
{
- int ret;
+ int ret;
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
- /* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
- return 0;
+ return 0;
}
static bool
@@ -1399,22 +1140,17 @@ gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
.name = "gen6 bsd ring",
- .id = VCS,
+ .id = RING_BSD,
.mmio_base = GEN6_BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_ring_common,
.write_tail = gen6_bsd_ring_write_tail,
.flush = gen6_ring_flush,
.add_request = gen6_add_request,
- .get_seqno = gen6_ring_get_seqno,
+ .get_seqno = ring_get_seqno,
.irq_get = gen6_bsd_ring_get_irq,
.irq_put = gen6_bsd_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .sync_to = gen6_bsd_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
- MI_SEMAPHORE_SYNC_INVALID,
- MI_SEMAPHORE_SYNC_VB},
- .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
};
/* Blitter support (SandyBridge+) */
@@ -1435,13 +1171,79 @@ blt_ring_put_irq(struct intel_ring_buffer *ring)
GEN6_BLITTER_USER_INTERRUPT);
}
+
+/* Workaround for some steppings of SNB:
+ * each time the BLT engine's ring tail is moved,
+ * the first command parsed from the ring
+ * should be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+ (IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+ return ring->private;
+}
+
+static int blt_ring_init(struct intel_ring_buffer *ring)
+{
+ if (NEED_BLT_WORKAROUND(ring->dev)) {
+ struct drm_i915_gem_object *obj;
+ u32 *ptr;
+ int ret;
+
+ obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = i915_gem_object_pin(obj, 4096, true);
+ if (ret) {
+ drm_gem_object_unreference(&obj->base);
+ return ret;
+ }
+
+ ptr = kmap(obj->pages[0]);
+ *ptr++ = MI_BATCH_BUFFER_END;
+ *ptr++ = MI_NOOP;
+ kunmap(obj->pages[0]);
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret) {
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ return ret;
+ }
+
+ ring->private = obj;
+ }
+
+ return init_ring_common(ring);
+}
+
+static int blt_ring_begin(struct intel_ring_buffer *ring,
+ int num_dwords)
+{
+ if (ring->private) {
+ int ret = intel_ring_begin(ring, num_dwords+2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START);
+ intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
+
+ return 0;
+ } else
+ return intel_ring_begin(ring, 4);
+}
+
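With the pieces above in place, every BLT submission on affected SNB steppings begins with a jump into the pinned scratch batch that blt_ring_init() terminated immediately. The resulting ring contents, sketched as illustration rather than taken from the patch:

/* after blt_ring_begin(ring, n):
 *   MI_BATCH_BUFFER_START
 *   <gtt_offset of workaround page>  -> { MI_BATCH_BUFFER_END, MI_NOOP }
 *   <the caller's n dwords, e.g. the flush below>
 * The dummy batch satisfies the parser's first-command rule and
 * execution falls through to the real commands.
 */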
static int blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
uint32_t cmd;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = blt_ring_begin(ring, 4);
if (ret)
return ret;
@@ -1456,24 +1258,30 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+ if (!ring->private)
+ return;
+
+ i915_gem_object_unpin(ring->private);
+ drm_gem_object_unreference(ring->private);
+ ring->private = NULL;
+}
+
static const struct intel_ring_buffer gen6_blt_ring = {
- .name = "blt ring",
- .id = BCS,
- .mmio_base = BLT_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = ring_write_tail,
- .flush = blt_ring_flush,
- .add_request = gen6_add_request,
- .get_seqno = gen6_ring_get_seqno,
- .irq_get = blt_ring_get_irq,
- .irq_put = blt_ring_put_irq,
- .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .sync_to = gen6_blt_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
- MI_SEMAPHORE_SYNC_BV,
- MI_SEMAPHORE_SYNC_INVALID},
- .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
+ .name = "blt ring",
+ .id = RING_BLT,
+ .mmio_base = BLT_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = blt_ring_init,
+ .write_tail = ring_write_tail,
+ .flush = blt_ring_flush,
+ .add_request = gen6_add_request,
+ .get_seqno = ring_get_seqno,
+ .irq_get = blt_ring_get_irq,
+ .irq_put = blt_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
+ .cleanup = blt_ring_cleanup,
};
int intel_init_render_ring_buffer(struct drm_device *dev)
@@ -1484,10 +1292,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
*ring = render_ring;
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
- ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_render_ring_get_irq;
ring->irq_put = gen6_render_ring_put_irq;
- ring->get_seqno = gen6_ring_get_seqno;
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
ring->get_seqno = pc_render_get_seqno;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index bc0365b..39ac2b6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,6 +1,13 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+enum {
+ RCS = 0x0,
+ VCS,
+ BCS,
+ I915_NUM_RINGS,
+};
+
struct intel_hw_status_page {
u32 __iomem *page_addr;
unsigned int gfx_addr;
@@ -29,11 +36,10 @@ struct intel_hw_status_page {
struct intel_ring_buffer {
const char *name;
enum intel_ring_id {
- RCS = 0x0,
- VCS,
- BCS,
+ RING_RENDER = 0x1,
+ RING_BSD = 0x2,
+ RING_BLT = 0x4,
} id;
-#define I915_NUM_RINGS 3
u32 mmio_base;
void __iomem *virtual_start;
struct drm_device *dev;
@@ -46,16 +52,6 @@ struct intel_ring_buffer {
int effective_size;
struct intel_hw_status_page status_page;
- /** We track the position of the requests in the ring buffer, and
- * when each is retired we increment last_retired_head as the GPU
- * must have finished processing the request and so we know we
- * can advance the ringbuffer up to that position.
- *
- * last_retired_head is set to -1 after the value is consumed so
- * we can detect new retirements.
- */
- u32 last_retired_head;
-
spinlock_t irq_lock;
u32 irq_refcount;
u32 irq_mask;
@@ -79,12 +75,7 @@ struct intel_ring_buffer {
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
- int (*sync_to)(struct intel_ring_buffer *ring,
- struct intel_ring_buffer *to,
- u32 seqno);
- u32 semaphore_register[3]; /*our mbox written by others */
- u32 signal_mbox[2]; /* mboxes this ring signals to */
/**
* List of objects currently involved in rendering from the
* ringbuffer.
@@ -123,12 +114,6 @@ struct intel_ring_buffer {
void *private;
};
-static inline unsigned
-intel_ring_flag(struct intel_ring_buffer *ring)
-{
- return 1 << ring->id;
-}
-
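This helper's removal matches the enum revert earlier in this file's diff: once id is again a one-hot mask, no index-to-flag conversion is needed. The two schemes relate as follows; illustration only, not part of the patch:

/* index scheme (removed):   RCS=0, VCS=1, BCS=2
 *   flag = 1 << ring->id;   -> 0x1, 0x2, 0x4
 * mask scheme (restored):   RING_RENDER=0x1, RING_BSD=0x2, RING_BLT=0x4
 *   flag = ring->id;        -> already a bit mask
 */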
static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
struct intel_ring_buffer *other)
@@ -195,6 +180,9 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
void intel_ring_advance(struct intel_ring_buffer *ring);
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_sync(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -203,11 +191,6 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
-static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
-{
- return ring->tail;
-}
-
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0de20ee..06b51ff 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -43,13 +43,12 @@
#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
- SDVO_TV_MASK)
+ SDVO_TV_MASK)
#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
-#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
static const char *tv_format_names[] = {
@@ -93,11 +92,6 @@ struct intel_sdvo {
*/
uint16_t attached_output;
- /*
- * Hotplug activation bits for this device
- */
- uint8_t hotplug_active[2];
-
/**
* This is used to select the color range of RBG outputs in HDMI mode.
* It is only valid when using TMDS encoding and 8 bit per color mode.
@@ -147,7 +141,7 @@ struct intel_sdvo_connector {
/* Mark the type of connector */
uint16_t output_flag;
- enum hdmi_force_audio force_audio;
+ int force_audio;
/* This contains all current supported TV format */
u8 tv_format_supported[TV_FORMAT_NUM];
@@ -289,117 +283,117 @@ static const struct _sdvo_cmd_name {
u8 cmd;
const char *name;
} sdvo_cmd_names[] = {
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
-
- /* Add the op code for SDVO enhancements */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
-
- /* HDMI op code */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+ /* Add the op code for SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+ /* HDMI op code */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
@@ -981,6 +975,7 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
@@ -1118,12 +1113,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
-
- if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
- sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
- else
- sdvox |= TRANSCODER(intel_crtc->pipe);
-
+ if (intel_crtc->pipe == 1)
+ sdvox |= SDVO_PIPE_B_SELECT;
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1250,26 +1241,81 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
return true;
}
-static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
+/* No use! */
+#if 0
+struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
+{
+ struct drm_connector *connector = NULL;
+ struct intel_sdvo *iout = NULL;
+ struct intel_sdvo *sdvo;
+
+ /* find the sdvo connector */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ iout = to_intel_sdvo(connector);
+
+ if (iout->type != INTEL_OUTPUT_SDVO)
+ continue;
+
+ sdvo = iout->dev_priv;
+
+ if (sdvo->sdvo_reg == SDVOB && sdvoB)
+ return connector;
+
+ if (sdvo->sdvo_reg == SDVOC && !sdvoB)
+ return connector;
+
+ }
+
+ return NULL;
+}
+
+int intel_sdvo_supports_hotplug(struct drm_connector *connector)
{
u8 response[2];
+ u8 status;
+ struct intel_sdvo *intel_sdvo;
+ DRM_DEBUG_KMS("\n");
+
+ if (!connector)
+ return 0;
+
+ intel_sdvo = to_intel_sdvo(connector);
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
&response, 2) && response[0];
}
-static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
+void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
{
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u8 response[2];
+ u8 status;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, &response, 2);
+
+ if (on) {
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_sdvo, &response, 2);
+
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ } else {
+ response[0] = 0;
+ response[1] = 0;
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ }
+
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, &response, 2);
}
+#endif
static bool
intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
{
/* Is there more than one type of output? */
- return hweight16(intel_sdvo->caps.output_flags) > 1;
+ int caps = intel_sdvo->caps.output_flags & 0xf;
+ return (caps & -caps) != caps;
}
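
The test above leans on the two's-complement identity that v & -v isolates the lowest set bit of v, so the result equals v exactly when at most one bit is set. A minimal, self-contained sketch of the same "more than one output type" check, independent of the driver structures (the helper name is illustrative):

#include <stdio.h>

/* Mirrors the multifunc test: v & -v keeps only the lowest set bit,
 * so it differs from v precisely when two or more bits are set. */
static int more_than_one_bit(unsigned int v)
{
        return (v & -v) != v;
}

int main(void)
{
        printf("%d\n", more_than_one_bit(0x1)); /* 0: a single output type */
        printf("%d\n", more_than_one_bit(0x5)); /* 1: two output types set */
        printf("%d\n", more_than_one_bit(0x0)); /* 0: no outputs at all */
        return 0;
}
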
static struct edid *
@@ -1290,7 +1336,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
}
enum drm_connector_status
-intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
enum drm_connector_status status;
@@ -1343,25 +1389,13 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
if (status == connector_status_connected) {
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
- intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
+ if (intel_sdvo_connector->force_audio)
+ intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
}
return status;
}
-static bool
-intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
- struct edid *edid)
-{
- bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool connector_is_digital = !!IS_DIGITAL(sdvo);
-
- DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
- connector_is_digital, monitor_is_digital);
- return connector_is_digital == monitor_is_digital;
-}
-
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
@@ -1397,7 +1431,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
else if (IS_TMDS(intel_sdvo_connector))
- ret = intel_sdvo_tmds_sink_detect(connector);
+ ret = intel_sdvo_hdmi_sink_detect(connector);
else {
struct edid *edid;
@@ -1406,12 +1440,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (edid == NULL)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
- edid))
- ret = connector_status_connected;
- else
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
ret = connector_status_disconnected;
-
+ else
+ ret = connector_status_connected;
connector->display_info.raw_edid = NULL;
kfree(edid);
} else
@@ -1452,8 +1484,11 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
- edid)) {
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
+
+ if (connector_is_digital == monitor_is_digital) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
@@ -1719,10 +1754,10 @@ intel_sdvo_set_property(struct drm_connector *connector,
intel_sdvo_connector->force_audio = i;
- if (i == HDMI_AUDIO_AUTO)
+ if (i == 0)
has_audio = intel_sdvo_detect_hdmi_audio(connector);
else
- has_audio = (i == HDMI_AUDIO_ON);
+ has_audio = i > 0;
if (has_audio == intel_sdvo->has_hdmi_audio)
return 0;
@@ -1945,7 +1980,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
struct intel_sdvo *sdvo, u32 reg)
{
struct sdvo_device_mapping *mapping;
- u8 pin;
+ u8 pin, speed;
if (IS_SDVOB(reg))
mapping = &dev_priv->sdvo_mappings[0];
@@ -1953,16 +1988,18 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
mapping = &dev_priv->sdvo_mappings[1];
pin = GMBUS_PORT_DPB;
- if (mapping->initialized)
+ speed = GMBUS_RATE_1MHZ >> 8;
+ if (mapping->initialized) {
pin = mapping->i2c_pin;
+ speed = mapping->i2c_speed;
+ }
if (pin < GMBUS_NUM_PORTS) {
sdvo->i2c = &dev_priv->gmbus[pin].adapter;
- intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
+ intel_gmbus_set_speed(sdvo->i2c, speed);
intel_gmbus_force_bit(sdvo->i2c, true);
- } else {
+ } else
sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
- }
}
static bool
@@ -2020,7 +2057,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
drm_connector_helper_add(&connector->base.base,
&intel_sdvo_connector_helper_funcs);
- connector->base.base.interlace_allowed = 1;
+ connector->base.base.interlace_allowed = 0;
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -2043,7 +2080,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
@@ -2061,17 +2097,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
- if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- intel_sdvo->hotplug_active[0] |= 1 << device;
- /* Some SDVO devices have one-shot hotplug interrupts.
- * Ensure that they get re-enabled when an interrupt happens.
- */
- intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
- intel_sdvo_enable_hotplug(intel_encoder);
- }
- else
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@@ -2253,7 +2279,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
bytes[0], bytes[1]);
return false;
}
- intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
return true;
}
@@ -2312,15 +2338,17 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->max_##name = data_value[0]; \
intel_sdvo_connector->cur_##name = response; \
intel_sdvo_connector->name = \
- drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
if (!intel_sdvo_connector->name) return false; \
+ intel_sdvo_connector->name->values[0] = 0; \
+ intel_sdvo_connector->name->values[1] = data_value[0]; \
drm_connector_attach_property(connector, \
intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
data_value[0], data_value[1], response); \
} \
-} while (0)
+} while(0)
static bool
intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
@@ -2347,19 +2375,25 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->left_margin = data_value[0] - response;
intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
intel_sdvo_connector->left =
- drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "left_margin", 2);
if (!intel_sdvo_connector->left)
return false;
+ intel_sdvo_connector->left->values[0] = 0;
+ intel_sdvo_connector->left->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->left,
intel_sdvo_connector->left_margin);
intel_sdvo_connector->right =
- drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "right_margin", 2);
if (!intel_sdvo_connector->right)
return false;
+ intel_sdvo_connector->right->values[0] = 0;
+ intel_sdvo_connector->right->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->right,
intel_sdvo_connector->right_margin);
@@ -2383,21 +2417,25 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->top_margin = data_value[0] - response;
intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
intel_sdvo_connector->top =
- drm_property_create_range(dev, 0,
- "top_margin", 0, data_value[0]);
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "top_margin", 2);
if (!intel_sdvo_connector->top)
return false;
+ intel_sdvo_connector->top->values[0] = 0;
+ intel_sdvo_connector->top->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->top,
intel_sdvo_connector->top_margin);
intel_sdvo_connector->bottom =
- drm_property_create_range(dev, 0,
- "bottom_margin", 0, data_value[0]);
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "bottom_margin", 2);
if (!intel_sdvo_connector->bottom)
return false;
+ intel_sdvo_connector->bottom->values[0] = 0;
+ intel_sdvo_connector->bottom->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin);
@@ -2426,10 +2464,12 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->max_dot_crawl = 1;
intel_sdvo_connector->cur_dot_crawl = response & 0x1;
intel_sdvo_connector->dot_crawl =
- drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
if (!intel_sdvo_connector->dot_crawl)
return false;
+ intel_sdvo_connector->dot_crawl->values[0] = 0;
+ intel_sdvo_connector->dot_crawl->values[1] = 1;
drm_connector_attach_property(connector,
intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl);
@@ -2475,7 +2515,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
if (IS_TV(intel_sdvo_connector))
return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
- else if (IS_LVDS(intel_sdvo_connector))
+ else if(IS_LVDS(intel_sdvo_connector))
return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
else
return true;
@@ -2564,14 +2604,6 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
goto err;
- /* Set up hotplug command - note paranoia about contents of reply.
- * We assume that the hardware is in a sane state, and only touch
- * the bits we think we understand.
- */
- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
- &intel_sdvo->hotplug_active, 2);
- intel_sdvo->hotplug_active[0] &= ~0x3;
-
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 770bdd6..50bebc3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2006-2007 Intel Corporation
+ * Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -46,19 +46,19 @@
#define SDVO_OUTPUT_LAST (14)
struct intel_sdvo_caps {
- u8 vendor_id;
- u8 device_id;
- u8 device_rev_id;
- u8 sdvo_version_major;
- u8 sdvo_version_minor;
- unsigned int sdvo_inputs_mask:2;
- unsigned int smooth_scaling:1;
- unsigned int sharp_scaling:1;
- unsigned int up_scaling:1;
- unsigned int down_scaling:1;
- unsigned int stall_support:1;
- unsigned int pad:1;
- u16 output_flags;
+ u8 vendor_id;
+ u8 device_id;
+ u8 device_rev_id;
+ u8 sdvo_version_major;
+ u8 sdvo_version_minor;
+ unsigned int sdvo_inputs_mask:2;
+ unsigned int smooth_scaling:1;
+ unsigned int sharp_scaling:1;
+ unsigned int up_scaling:1;
+ unsigned int down_scaling:1;
+ unsigned int stall_support:1;
+ unsigned int pad:1;
+ u16 output_flags;
} __attribute__((packed));
/* Note: SDVO detailed timing flags match EDID misc flags. */
@@ -68,46 +68,46 @@ struct intel_sdvo_caps {
/** This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
- struct {
- u16 clock; /**< pixel clock, in 10kHz units */
- u8 h_active; /**< lower 8 bits (pixels) */
- u8 h_blank; /**< lower 8 bits (pixels) */
- u8 h_high; /**< upper 4 bits each h_active, h_blank */
- u8 v_active; /**< lower 8 bits (lines) */
- u8 v_blank; /**< lower 8 bits (lines) */
- u8 v_high; /**< upper 4 bits each v_active, v_blank */
- } part1;
-
- struct {
- u8 h_sync_off; /**< lower 8 bits, from hblank start */
- u8 h_sync_width; /**< lower 8 bits (pixels) */
- /** lower 4 bits each vsync offset, vsync width */
- u8 v_sync_off_width;
- /**
- * 2 high bits of hsync offset, 2 high bits of hsync width,
- * bits 4-5 of vsync offset, and 2 high bits of vsync width.
- */
- u8 sync_off_width_high;
- u8 dtd_flags;
- u8 sdvo_flags;
- /** bits 6-7 of vsync offset at bits 6-7 */
- u8 v_sync_off_high;
- u8 reserved;
- } part2;
+ struct {
+ u16 clock; /**< pixel clock, in 10kHz units */
+ u8 h_active; /**< lower 8 bits (pixels) */
+ u8 h_blank; /**< lower 8 bits (pixels) */
+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
+ u8 v_active; /**< lower 8 bits (lines) */
+ u8 v_blank; /**< lower 8 bits (lines) */
+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ } part1;
+
+ struct {
+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
+ u8 h_sync_width; /**< lower 8 bits (pixels) */
+ /** lower 4 bits each vsync offset, vsync width */
+ u8 v_sync_off_width;
+ /**
+ * 2 high bits of hsync offset, 2 high bits of hsync width,
+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+ */
+ u8 sync_off_width_high;
+ u8 dtd_flags;
+ u8 sdvo_flags;
+ /** bits 6-7 of vsync offset at bits 6-7 */
+ u8 v_sync_off_high;
+ u8 reserved;
+ } part2;
} __attribute__((packed));
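
As the field comments note, each horizontal and vertical timing value is 12 bits stored as a low byte plus a shared high-nibble byte, following the EDID DTD layout (active in the upper nibble, blanking in the lower). A small sketch of that packing, assuming the EDID convention; the helper is hypothetical, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Split a 12-bit active/blank pair the way the DTD stores it:
 * low 8 bits each, with the two high nibbles sharing one byte. */
static void dtd_pack_h(uint16_t active, uint16_t blank,
                       uint8_t *h_active, uint8_t *h_blank, uint8_t *h_high)
{
        *h_active = active & 0xff;
        *h_blank  = blank & 0xff;
        *h_high   = (uint8_t)(((active >> 8) & 0xf) << 4 | ((blank >> 8) & 0xf));
}

int main(void)
{
        uint8_t a, b, hi;

        dtd_pack_h(1920, 280, &a, &b, &hi);
        /* 1920 = 0x780, 280 = 0x118 -> a = 0x80, b = 0x18, hi = 0x71 */
        printf("h_active=0x%02x h_blank=0x%02x h_high=0x%02x\n", a, b, hi);
        return 0;
}
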
struct intel_sdvo_pixel_clock_range {
- u16 min; /**< pixel clock, in 10kHz units */
- u16 max; /**< pixel clock, in 10kHz units */
+ u16 min; /**< pixel clock, in 10kHz units */
+ u16 max; /**< pixel clock, in 10kHz units */
} __attribute__((packed));
struct intel_sdvo_preferred_input_timing_args {
- u16 clock;
- u16 width;
- u16 height;
- u8 interlace:1;
- u8 scaled:1;
- u8 pad:6;
+ u16 clock;
+ u16 width;
+ u16 height;
+ u8 interlace:1;
+ u8 scaled:1;
+ u8 pad:6;
} __attribute__((packed));
/* I2C registers for SDVO */
@@ -159,9 +159,9 @@ struct intel_sdvo_preferred_input_timing_args {
*/
#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
struct intel_sdvo_get_trained_inputs_response {
- unsigned int input0_trained:1;
- unsigned int input1_trained:1;
- unsigned int pad:6;
+ unsigned int input0_trained:1;
+ unsigned int input1_trained:1;
+ unsigned int pad:6;
} __attribute__((packed));
/** Returns a struct intel_sdvo_output_flags of active outputs. */
@@ -182,7 +182,7 @@ struct intel_sdvo_get_trained_inputs_response {
*/
#define SDVO_CMD_GET_IN_OUT_MAP 0x06
struct intel_sdvo_in_out_map {
- u16 in0, in1;
+ u16 in0, in1;
};
/**
@@ -215,10 +215,10 @@ struct intel_sdvo_in_out_map {
#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
struct intel_sdvo_get_interrupt_event_source_response {
- u16 interrupt_status;
- unsigned int ambient_light_interrupt:1;
- unsigned int hdmi_audio_encrypt_change:1;
- unsigned int pad:6;
+ u16 interrupt_status;
+ unsigned int ambient_light_interrupt:1;
+ unsigned int hdmi_audio_encrypt_change:1;
+ unsigned int pad:6;
} __attribute__((packed));
/**
@@ -230,8 +230,8 @@ struct intel_sdvo_get_interrupt_event_source_response {
*/
#define SDVO_CMD_SET_TARGET_INPUT 0x10
struct intel_sdvo_set_target_input_args {
- unsigned int target_1:1;
- unsigned int pad:7;
+ unsigned int target_1:1;
+ unsigned int pad:7;
} __attribute__((packed));
/**
@@ -319,57 +319,57 @@ struct intel_sdvo_set_target_input_args {
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
/** 6 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
- unsigned int ntsc_m:1;
- unsigned int ntsc_j:1;
- unsigned int ntsc_443:1;
- unsigned int pal_b:1;
- unsigned int pal_d:1;
- unsigned int pal_g:1;
- unsigned int pal_h:1;
- unsigned int pal_i:1;
-
- unsigned int pal_m:1;
- unsigned int pal_n:1;
- unsigned int pal_nc:1;
- unsigned int pal_60:1;
- unsigned int secam_b:1;
- unsigned int secam_d:1;
- unsigned int secam_g:1;
- unsigned int secam_k:1;
-
- unsigned int secam_k1:1;
- unsigned int secam_l:1;
- unsigned int secam_60:1;
- unsigned int hdtv_std_smpte_240m_1080i_59:1;
- unsigned int hdtv_std_smpte_240m_1080i_60:1;
- unsigned int hdtv_std_smpte_260m_1080i_59:1;
- unsigned int hdtv_std_smpte_260m_1080i_60:1;
- unsigned int hdtv_std_smpte_274m_1080i_50:1;
-
- unsigned int hdtv_std_smpte_274m_1080i_59:1;
- unsigned int hdtv_std_smpte_274m_1080i_60:1;
- unsigned int hdtv_std_smpte_274m_1080p_23:1;
- unsigned int hdtv_std_smpte_274m_1080p_24:1;
- unsigned int hdtv_std_smpte_274m_1080p_25:1;
- unsigned int hdtv_std_smpte_274m_1080p_29:1;
- unsigned int hdtv_std_smpte_274m_1080p_30:1;
- unsigned int hdtv_std_smpte_274m_1080p_50:1;
-
- unsigned int hdtv_std_smpte_274m_1080p_59:1;
- unsigned int hdtv_std_smpte_274m_1080p_60:1;
- unsigned int hdtv_std_smpte_295m_1080i_50:1;
- unsigned int hdtv_std_smpte_295m_1080p_50:1;
- unsigned int hdtv_std_smpte_296m_720p_59:1;
- unsigned int hdtv_std_smpte_296m_720p_60:1;
- unsigned int hdtv_std_smpte_296m_720p_50:1;
- unsigned int hdtv_std_smpte_293m_480p_59:1;
-
- unsigned int hdtv_std_smpte_170m_480i_59:1;
- unsigned int hdtv_std_iturbt601_576i_50:1;
- unsigned int hdtv_std_iturbt601_576p_50:1;
- unsigned int hdtv_std_eia_7702a_480i_60:1;
- unsigned int hdtv_std_eia_7702a_480p_60:1;
- unsigned int pad:3;
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:3;
} __attribute__((packed));
#define SDVO_CMD_GET_TV_FORMAT 0x28
@@ -379,53 +379,53 @@ struct intel_sdvo_tv_format {
/** Returns the resolutions that can be used with the given TV format */
#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
struct intel_sdvo_sdtv_resolution_request {
- unsigned int ntsc_m:1;
- unsigned int ntsc_j:1;
- unsigned int ntsc_443:1;
- unsigned int pal_b:1;
- unsigned int pal_d:1;
- unsigned int pal_g:1;
- unsigned int pal_h:1;
- unsigned int pal_i:1;
-
- unsigned int pal_m:1;
- unsigned int pal_n:1;
- unsigned int pal_nc:1;
- unsigned int pal_60:1;
- unsigned int secam_b:1;
- unsigned int secam_d:1;
- unsigned int secam_g:1;
- unsigned int secam_k:1;
-
- unsigned int secam_k1:1;
- unsigned int secam_l:1;
- unsigned int secam_60:1;
- unsigned int pad:5;
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int pad:5;
} __attribute__((packed));
struct intel_sdvo_sdtv_resolution_reply {
- unsigned int res_320x200:1;
- unsigned int res_320x240:1;
- unsigned int res_400x300:1;
- unsigned int res_640x350:1;
- unsigned int res_640x400:1;
- unsigned int res_640x480:1;
- unsigned int res_704x480:1;
- unsigned int res_704x576:1;
-
- unsigned int res_720x350:1;
- unsigned int res_720x400:1;
- unsigned int res_720x480:1;
- unsigned int res_720x540:1;
- unsigned int res_720x576:1;
- unsigned int res_768x576:1;
- unsigned int res_800x600:1;
- unsigned int res_832x624:1;
-
- unsigned int res_920x766:1;
- unsigned int res_1024x768:1;
- unsigned int res_1280x1024:1;
- unsigned int pad:5;
+ unsigned int res_320x200:1;
+ unsigned int res_320x240:1;
+ unsigned int res_400x300:1;
+ unsigned int res_640x350:1;
+ unsigned int res_640x400:1;
+ unsigned int res_640x480:1;
+ unsigned int res_704x480:1;
+ unsigned int res_704x576:1;
+
+ unsigned int res_720x350:1;
+ unsigned int res_720x400:1;
+ unsigned int res_720x480:1;
+ unsigned int res_720x540:1;
+ unsigned int res_720x576:1;
+ unsigned int res_768x576:1;
+ unsigned int res_800x600:1;
+ unsigned int res_832x624:1;
+
+ unsigned int res_920x766:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x1024:1;
+ unsigned int pad:5;
} __attribute__((packed));
/* Get supported resolution with square pixel aspect ratio that can be
@@ -433,90 +433,90 @@ struct intel_sdvo_sdtv_resolution_reply {
#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
struct intel_sdvo_hdtv_resolution_request {
- unsigned int hdtv_std_smpte_240m_1080i_59:1;
- unsigned int hdtv_std_smpte_240m_1080i_60:1;
- unsigned int hdtv_std_smpte_260m_1080i_59:1;
- unsigned int hdtv_std_smpte_260m_1080i_60:1;
- unsigned int hdtv_std_smpte_274m_1080i_50:1;
- unsigned int hdtv_std_smpte_274m_1080i_59:1;
- unsigned int hdtv_std_smpte_274m_1080i_60:1;
- unsigned int hdtv_std_smpte_274m_1080p_23:1;
-
- unsigned int hdtv_std_smpte_274m_1080p_24:1;
- unsigned int hdtv_std_smpte_274m_1080p_25:1;
- unsigned int hdtv_std_smpte_274m_1080p_29:1;
- unsigned int hdtv_std_smpte_274m_1080p_30:1;
- unsigned int hdtv_std_smpte_274m_1080p_50:1;
- unsigned int hdtv_std_smpte_274m_1080p_59:1;
- unsigned int hdtv_std_smpte_274m_1080p_60:1;
- unsigned int hdtv_std_smpte_295m_1080i_50:1;
-
- unsigned int hdtv_std_smpte_295m_1080p_50:1;
- unsigned int hdtv_std_smpte_296m_720p_59:1;
- unsigned int hdtv_std_smpte_296m_720p_60:1;
- unsigned int hdtv_std_smpte_296m_720p_50:1;
- unsigned int hdtv_std_smpte_293m_480p_59:1;
- unsigned int hdtv_std_smpte_170m_480i_59:1;
- unsigned int hdtv_std_iturbt601_576i_50:1;
- unsigned int hdtv_std_iturbt601_576p_50:1;
-
- unsigned int hdtv_std_eia_7702a_480i_60:1;
- unsigned int hdtv_std_eia_7702a_480p_60:1;
- unsigned int pad:6;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:6;
} __attribute__((packed));
struct intel_sdvo_hdtv_resolution_reply {
- unsigned int res_640x480:1;
- unsigned int res_800x600:1;
- unsigned int res_1024x768:1;
- unsigned int res_1280x960:1;
- unsigned int res_1400x1050:1;
- unsigned int res_1600x1200:1;
- unsigned int res_1920x1440:1;
- unsigned int res_2048x1536:1;
-
- unsigned int res_2560x1920:1;
- unsigned int res_3200x2400:1;
- unsigned int res_3840x2880:1;
- unsigned int pad1:5;
-
- unsigned int res_848x480:1;
- unsigned int res_1064x600:1;
- unsigned int res_1280x720:1;
- unsigned int res_1360x768:1;
- unsigned int res_1704x960:1;
- unsigned int res_1864x1050:1;
- unsigned int res_1920x1080:1;
- unsigned int res_2128x1200:1;
-
- unsigned int res_2560x1400:1;
- unsigned int res_2728x1536:1;
- unsigned int res_3408x1920:1;
- unsigned int res_4264x2400:1;
- unsigned int res_5120x2880:1;
- unsigned int pad2:3;
-
- unsigned int res_768x480:1;
- unsigned int res_960x600:1;
- unsigned int res_1152x720:1;
- unsigned int res_1124x768:1;
- unsigned int res_1536x960:1;
- unsigned int res_1680x1050:1;
- unsigned int res_1728x1080:1;
- unsigned int res_1920x1200:1;
-
- unsigned int res_2304x1440:1;
- unsigned int res_2456x1536:1;
- unsigned int res_3072x1920:1;
- unsigned int res_3840x2400:1;
- unsigned int res_4608x2880:1;
- unsigned int pad3:3;
-
- unsigned int res_1280x1024:1;
- unsigned int pad4:7;
-
- unsigned int res_1280x768:1;
- unsigned int pad5:7;
+ unsigned int res_640x480:1;
+ unsigned int res_800x600:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x960:1;
+ unsigned int res_1400x1050:1;
+ unsigned int res_1600x1200:1;
+ unsigned int res_1920x1440:1;
+ unsigned int res_2048x1536:1;
+
+ unsigned int res_2560x1920:1;
+ unsigned int res_3200x2400:1;
+ unsigned int res_3840x2880:1;
+ unsigned int pad1:5;
+
+ unsigned int res_848x480:1;
+ unsigned int res_1064x600:1;
+ unsigned int res_1280x720:1;
+ unsigned int res_1360x768:1;
+ unsigned int res_1704x960:1;
+ unsigned int res_1864x1050:1;
+ unsigned int res_1920x1080:1;
+ unsigned int res_2128x1200:1;
+
+ unsigned int res_2560x1400:1;
+ unsigned int res_2728x1536:1;
+ unsigned int res_3408x1920:1;
+ unsigned int res_4264x2400:1;
+ unsigned int res_5120x2880:1;
+ unsigned int pad2:3;
+
+ unsigned int res_768x480:1;
+ unsigned int res_960x600:1;
+ unsigned int res_1152x720:1;
+ unsigned int res_1124x768:1;
+ unsigned int res_1536x960:1;
+ unsigned int res_1680x1050:1;
+ unsigned int res_1728x1080:1;
+ unsigned int res_1920x1200:1;
+
+ unsigned int res_2304x1440:1;
+ unsigned int res_2456x1536:1;
+ unsigned int res_3072x1920:1;
+ unsigned int res_3840x2400:1;
+ unsigned int res_4608x2880:1;
+ unsigned int pad3:3;
+
+ unsigned int res_1280x1024:1;
+ unsigned int pad4:7;
+
+ unsigned int res_1280x768:1;
+ unsigned int pad5:7;
} __attribute__((packed));
/* Get supported power state returns info for encoder and monitor, rely on
@@ -544,25 +544,25 @@ struct intel_sdvo_hdtv_resolution_reply {
* The high fields are bits 8:9 of the 10-bit values.
*/
struct sdvo_panel_power_sequencing {
- u8 t0;
- u8 t1;
- u8 t2;
- u8 t3;
- u8 t4;
-
- unsigned int t0_high:2;
- unsigned int t1_high:2;
- unsigned int t2_high:2;
- unsigned int t3_high:2;
-
- unsigned int t4_high:2;
- unsigned int pad:6;
+ u8 t0;
+ u8 t1;
+ u8 t2;
+ u8 t3;
+ u8 t4;
+
+ unsigned int t0_high:2;
+ unsigned int t1_high:2;
+ unsigned int t2_high:2;
+ unsigned int t3_high:2;
+
+ unsigned int t4_high:2;
+ unsigned int pad:6;
} __attribute__((packed));
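
Reading one of these timings back means recombining the low byte with its 2-bit high field, since t0..t4 are 10-bit values split across the structure, as the comment above describes. A minimal sketch (the helper name is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 10-bit panel-power timing: the u8 holds bits 7:0 and the
 * 2-bit bitfield holds bits 9:8 of the full value. */
static unsigned int pps_combine(uint8_t lo, unsigned int high2)
{
        return (unsigned int)lo | ((high2 & 0x3) << 8);
}

int main(void)
{
        /* lo = 0x34 with high bits 0x2 gives 0x234 = 564 */
        printf("t0 = %u\n", pps_combine(0x34, 0x2));
        return 0;
}
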
#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
struct sdvo_max_backlight_reply {
- u8 max_value;
- u8 default_value;
+ u8 max_value;
+ u8 default_value;
} __attribute__((packed));
#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
@@ -570,16 +570,16 @@ struct sdvo_max_backlight_reply {
#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
struct sdvo_get_ambient_light_reply {
- u16 trip_low;
- u16 trip_high;
- u16 value;
+ u16 trip_low;
+ u16 trip_high;
+ u16 value;
} __attribute__((packed));
#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
struct sdvo_set_ambient_light_reply {
- u16 trip_low;
- u16 trip_high;
- unsigned int enable:1;
- unsigned int pad:7;
+ u16 trip_low;
+ u16 trip_high;
+ unsigned int enable:1;
+ unsigned int pad:7;
} __attribute__((packed));
/* Set display power state */
@@ -591,23 +591,23 @@ struct sdvo_set_ambient_light_reply {
#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
struct intel_sdvo_enhancements_reply {
- unsigned int flicker_filter:1;
- unsigned int flicker_filter_adaptive:1;
- unsigned int flicker_filter_2d:1;
- unsigned int saturation:1;
- unsigned int hue:1;
- unsigned int brightness:1;
- unsigned int contrast:1;
- unsigned int overscan_h:1;
-
- unsigned int overscan_v:1;
- unsigned int hpos:1;
- unsigned int vpos:1;
- unsigned int sharpness:1;
- unsigned int dot_crawl:1;
- unsigned int dither:1;
- unsigned int tv_chroma_filter:1;
- unsigned int tv_luma_filter:1;
+ unsigned int flicker_filter:1;
+ unsigned int flicker_filter_adaptive:1;
+ unsigned int flicker_filter_2d:1;
+ unsigned int saturation:1;
+ unsigned int hue:1;
+ unsigned int brightness:1;
+ unsigned int contrast:1;
+ unsigned int overscan_h:1;
+
+ unsigned int overscan_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
+ unsigned int sharpness:1;
+ unsigned int dot_crawl:1;
+ unsigned int dither:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
} __attribute__((packed));
/* Picture enhancement limits below are dependent on the current TV format,
@@ -628,8 +628,8 @@ struct intel_sdvo_enhancements_reply {
#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
struct intel_sdvo_enhancement_limits_reply {
- u16 max_value;
- u16 default_value;
+ u16 max_value;
+ u16 default_value;
} __attribute__((packed));
#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
@@ -670,8 +670,8 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
struct intel_sdvo_enhancements_arg {
- u16 value;
-} __attribute__((packed));
+ u16 value;
+}__attribute__((packed));
#define SDVO_CMD_GET_DOT_CRAWL 0x70
#define SDVO_CMD_SET_DOT_CRAWL 0x71
@@ -724,7 +724,7 @@ struct intel_sdvo_enhancements_arg {
#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
#define SDVO_NEED_TO_STALL (1 << 7)
-struct intel_sdvo_encode {
- u8 dvi_rev;
- u8 hdmi_rev;
+struct intel_sdvo_encode{
+ u8 dvi_rev;
+ u8 hdmi_rev;
} __attribute__ ((packed));
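
All of the reply and argument structures in this header carry __attribute__((packed)) because they are overlaid byte-for-byte on fixed-length SDVO command payloads; without it the compiler may insert alignment padding. A quick standalone illustration of what the attribute changes (struct names are illustrative):

#include <stdio.h>

struct reply_padded {
        unsigned short value;
        unsigned char flags;
};                              /* typically padded to 4 bytes */

struct reply_packed {
        unsigned short value;
        unsigned char flags;
} __attribute__((packed));      /* stays at 3 bytes, matching the wire format */

int main(void)
{
        printf("padded=%zu packed=%zu\n",
               sizeof(struct reply_padded), sizeof(struct reply_packed));
        return 0;
}
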
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
deleted file mode 100644
index 98444ab..0000000
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ /dev/null
@@ -1,664 +0,0 @@
-/*
- * Copyright © 2011 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- * Jesse Barnes <jbarnes@virtuousgeek.org>
- *
- * New plane/sprite handling.
- *
- * The older chips had a separate interface for programming plane related
- * registers; newer ones are much simpler and we can use the new DRM plane
- * support.
- */
-#include "drmP.h"
-#include "drm_crtc.h"
-#include "drm_fourcc.h"
-#include "intel_drv.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-static void
-ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- int pipe = intel_plane->pipe;
- u32 sprctl, sprscale = 0;
- int pixel_size;
-
- sprctl = I915_READ(SPRCTL(pipe));
-
- /* Mask out pixel format bits in case we change it */
- sprctl &= ~SPRITE_PIXFORMAT_MASK;
- sprctl &= ~SPRITE_RGB_ORDER_RGBX;
- sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
-
- switch (fb->pixel_format) {
- case DRM_FORMAT_XBGR8888:
- sprctl |= SPRITE_FORMAT_RGBX888;
- pixel_size = 4;
- break;
- case DRM_FORMAT_XRGB8888:
- sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
- pixel_size = 4;
- break;
- case DRM_FORMAT_YUYV:
- sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
- pixel_size = 2;
- break;
- case DRM_FORMAT_YVYU:
- sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
- pixel_size = 2;
- break;
- case DRM_FORMAT_UYVY:
- sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
- pixel_size = 2;
- break;
- case DRM_FORMAT_VYUY:
- sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
- pixel_size = 2;
- break;
- default:
- DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
- sprctl |= DVS_FORMAT_RGBX888;
- pixel_size = 4;
- break;
- }
-
- if (obj->tiling_mode != I915_TILING_NONE)
- sprctl |= SPRITE_TILED;
-
- /* must disable */
- sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- sprctl |= SPRITE_ENABLE;
- sprctl |= SPRITE_DEST_KEY;
-
- /* Sizes are 0 based */
- src_w--;
- src_h--;
- crtc_w--;
- crtc_h--;
-
- intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
-
- /*
- * IVB workaround: must disable low power watermarks for at least
- * one frame before enabling scaling. LP watermarks can be re-enabled
- * when scaling is disabled.
- */
- if (crtc_w != src_w || crtc_h != src_h) {
- dev_priv->sprite_scaling_enabled = true;
- sandybridge_update_wm(dev);
- intel_wait_for_vblank(dev, pipe);
- sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- } else {
- dev_priv->sprite_scaling_enabled = false;
- /* potentially re-enable LP watermarks */
- sandybridge_update_wm(dev);
- }
-
- I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
- I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
- if (obj->tiling_mode != I915_TILING_NONE) {
- I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
- } else {
- unsigned long offset;
-
- offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- I915_WRITE(SPRLINOFF(pipe), offset);
- }
- I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE(SPRSCALE(pipe), sprscale);
- I915_WRITE(SPRCTL(pipe), sprctl);
- I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
- POSTING_READ(SPRSURF(pipe));
-}
-
-static void
-ivb_disable_plane(struct drm_plane *plane)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- int pipe = intel_plane->pipe;
-
- I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
- /* Can't leave the scaler enabled... */
- I915_WRITE(SPRSCALE(pipe), 0);
- /* Activate double buffered register update */
- I915_WRITE(SPRSURF(pipe), 0);
- POSTING_READ(SPRSURF(pipe));
-}
-
-static int
-ivb_update_colorkey(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 sprctl;
- int ret = 0;
-
- intel_plane = to_intel_plane(plane);
-
- I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
- I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
- I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
-
- sprctl = I915_READ(SPRCTL(intel_plane->pipe));
- sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- sprctl |= SPRITE_DEST_KEY;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- sprctl |= SPRITE_SOURCE_KEY;
- I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
-
- POSTING_READ(SPRKEYMSK(intel_plane->pipe));
-
- return ret;
-}
-
-static void
-ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 sprctl;
-
- intel_plane = to_intel_plane(plane);
-
- key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
- key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
- key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
- key->flags = 0;
-
- sprctl = I915_READ(SPRCTL(intel_plane->pipe));
-
- if (sprctl & SPRITE_DEST_KEY)
- key->flags = I915_SET_COLORKEY_DESTINATION;
- else if (sprctl & SPRITE_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
-}
-
-static void
-snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- int pipe = intel_plane->pipe, pixel_size;
- u32 dvscntr, dvsscale = 0;
-
- dvscntr = I915_READ(DVSCNTR(pipe));
-
- /* Mask out pixel format bits in case we change it */
- dvscntr &= ~DVS_PIXFORMAT_MASK;
- dvscntr &= ~DVS_RGB_ORDER_RGBX;
- dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
-
- switch (fb->pixel_format) {
- case DRM_FORMAT_XBGR8888:
- dvscntr |= DVS_FORMAT_RGBX888;
- pixel_size = 4;
- break;
- case DRM_FORMAT_XRGB8888:
- dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_RGBX;
- pixel_size = 4;
- break;
- case DRM_FORMAT_YUYV:
- dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
- pixel_size = 2;
- break;
- case DRM_FORMAT_YVYU:
- dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
- pixel_size = 2;
- break;
- case DRM_FORMAT_UYVY:
- dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
- pixel_size = 2;
- break;
- case DRM_FORMAT_VYUY:
- dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
- pixel_size = 2;
- break;
- default:
- DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
- dvscntr |= DVS_FORMAT_RGBX888;
- pixel_size = 4;
- break;
- }
-
- if (obj->tiling_mode != I915_TILING_NONE)
- dvscntr |= DVS_TILED;
-
- /* must disable */
- dvscntr |= DVS_TRICKLE_FEED_DISABLE;
- dvscntr |= DVS_ENABLE;
-
- /* Sizes are 0 based */
- src_w--;
- src_h--;
- crtc_w--;
- crtc_h--;
-
- intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
-
- if (crtc_w != src_w || crtc_h != src_h)
- dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
-
- I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
- I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- if (obj->tiling_mode != I915_TILING_NONE) {
- I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
- } else {
- unsigned long offset;
-
- offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- I915_WRITE(DVSLINOFF(pipe), offset);
- }
- I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE(DVSSCALE(pipe), dvsscale);
- I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
- POSTING_READ(DVSSURF(pipe));
-}
-
-static void
-snb_disable_plane(struct drm_plane *plane)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- int pipe = intel_plane->pipe;
-
- I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
- /* Disable the scaler */
- I915_WRITE(DVSSCALE(pipe), 0);
- /* Flush double buffered register updates */
- I915_WRITE(DVSSURF(pipe), 0);
- POSTING_READ(DVSSURF(pipe));
-}
-
-static void
-intel_enable_primary(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int reg = DSPCNTR(intel_crtc->plane);
-
- I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
-}
-
-static void
-intel_disable_primary(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int reg = DSPCNTR(intel_crtc->plane);
-
- I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
-}
-
-static int
-snb_update_colorkey(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 dvscntr;
- int ret = 0;
-
- intel_plane = to_intel_plane(plane);
-
- I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
- I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
- I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
-
- dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
- dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- dvscntr |= DVS_DEST_KEY;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- dvscntr |= DVS_SOURCE_KEY;
- I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
-
- POSTING_READ(DVSKEYMSK(intel_plane->pipe));
-
- return ret;
-}
-
-static void
-snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 dvscntr;
-
- intel_plane = to_intel_plane(plane);
-
- key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
- key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
- key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
- key->flags = 0;
-
- dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
-
- if (dvscntr & DVS_DEST_KEY)
- key->flags = I915_SET_COLORKEY_DESTINATION;
- else if (dvscntr & DVS_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
-}
-
-static int
-intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj, *old_obj;
- int pipe = intel_plane->pipe;
- int ret = 0;
- int x = src_x >> 16, y = src_y >> 16;
- int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
- bool disable_primary = false;
-
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
- old_obj = intel_plane->obj;
-
- /* Pipe must be running... */
- if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
- return -EINVAL;
-
- if (crtc_x >= primary_w || crtc_y >= primary_h)
- return -EINVAL;
-
- /* Don't modify another pipe's plane */
- if (intel_plane->pipe != intel_crtc->pipe)
- return -EINVAL;
-
- /*
- * Clamp the width & height into the visible area. Note we don't
- * try to scale the source if part of the visible region is offscreen.
- * The caller must handle that by adjusting source offset and size.
- */
- if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
- crtc_w += crtc_x;
- crtc_x = 0;
- }
- if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
- goto out;
- if ((crtc_x + crtc_w) > primary_w)
- crtc_w = primary_w - crtc_x;
-
- if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
- crtc_h += crtc_y;
- crtc_y = 0;
- }
- if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
- goto out;
- if (crtc_y + crtc_h > primary_h)
- crtc_h = primary_h - crtc_y;
-
- if (!crtc_w || !crtc_h) /* Again, nothing to display */
- goto out;
-
- /*
- * We can take a larger source and scale it down, but
- * only so much... 16x is the max on SNB.
- */
- if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
- return -EINVAL;
-
- /*
- * If the sprite is completely covering the primary plane,
- * we can disable the primary and save power.
- */
- if ((crtc_x == 0) && (crtc_y == 0) &&
- (crtc_w == primary_w) && (crtc_h == primary_h))
- disable_primary = true;
-
- mutex_lock(&dev->struct_mutex);
-
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
- if (ret)
- goto out_unlock;
-
- intel_plane->obj = obj;
-
- /*
- * Be sure to re-enable the primary before the sprite is no longer
- * covering it fully.
- */
- if (!disable_primary && intel_plane->primary_disabled) {
- intel_enable_primary(crtc);
- intel_plane->primary_disabled = false;
- }
-
- intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
- crtc_w, crtc_h, x, y, src_w, src_h);
-
- if (disable_primary) {
- intel_disable_primary(crtc);
- intel_plane->primary_disabled = true;
- }
-
- /* Unpin old obj after new one is active to avoid ugliness */
- if (old_obj) {
- /*
- * It's fairly common to simply update the position of
- * an existing object. In that case, we don't need to
- * wait for vblank to avoid ugliness, we only need to
- * do the pin & ref bookkeeping.
- */
- if (old_obj != obj) {
- mutex_unlock(&dev->struct_mutex);
- intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
- mutex_lock(&dev->struct_mutex);
- }
- intel_unpin_fb_obj(old_obj);
- }
-
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
-out:
- return ret;
-}
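
The clamp-and-downscale logic above rejects updates whose area ratio exceeds max_downscale (16 on SNB, 2 on IVB, per intel_plane_init below). A standalone sketch of that area-ratio check with a worked example:

#include <stdio.h>

/* Same area-ratio test as intel_update_plane: the update is allowed
 * only when (src_w * src_h) / (crtc_w * crtc_h) <= max_downscale. */
static int downscale_ok(unsigned int src_w, unsigned int src_h,
                        unsigned int crtc_w, unsigned int crtc_h,
                        unsigned int max_downscale)
{
        return (src_w * src_h) / (crtc_w * crtc_h) <= max_downscale;
}

int main(void)
{
        /* 3840x2160 -> 960x540 is exactly a 16x area reduction: OK on SNB... */
        printf("snb: %d\n", downscale_ok(3840, 2160, 960, 540, 16));
        /* ...but rejected on IVB, where only a 2x reduction is allowed. */
        printf("ivb: %d\n", downscale_ok(3840, 2160, 960, 540, 2));
        return 0;
}
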
-
-static int
-intel_disable_plane(struct drm_plane *plane)
-{
- struct drm_device *dev = plane->dev;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- int ret = 0;
-
- if (intel_plane->primary_disabled) {
- intel_enable_primary(plane->crtc);
- intel_plane->primary_disabled = false;
- }
-
- intel_plane->disable_plane(plane);
-
- if (!intel_plane->obj)
- goto out;
-
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(intel_plane->obj);
- intel_plane->obj = NULL;
- mutex_unlock(&dev->struct_mutex);
-out:
-
- return ret;
-}
-
-static void intel_destroy_plane(struct drm_plane *plane)
-{
- struct intel_plane *intel_plane = to_intel_plane(plane);
- intel_disable_plane(plane);
- drm_plane_cleanup(plane);
- kfree(intel_plane);
-}
-
-int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_intel_sprite_colorkey *set = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mode_object *obj;
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
- int ret = 0;
-
- if (!dev_priv)
- return -EINVAL;
-
- /* Make sure we don't try to enable both src & dest simultaneously */
- if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
- return -EINVAL;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
- if (!obj) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- plane = obj_to_plane(obj);
- intel_plane = to_intel_plane(plane);
- ret = intel_plane->update_colorkey(plane, set);
-
-out_unlock:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_intel_sprite_colorkey *get = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mode_object *obj;
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
- int ret = 0;
-
- if (!dev_priv)
- return -EINVAL;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
- if (!obj) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- plane = obj_to_plane(obj);
- intel_plane = to_intel_plane(plane);
- intel_plane->get_colorkey(plane, get);
-
-out_unlock:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
-static const struct drm_plane_funcs intel_plane_funcs = {
- .update_plane = intel_update_plane,
- .disable_plane = intel_disable_plane,
- .destroy = intel_destroy_plane,
-};
-
-static uint32_t snb_plane_formats[] = {
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
-};
-
-int
-intel_plane_init(struct drm_device *dev, enum pipe pipe)
-{
- struct intel_plane *intel_plane;
- unsigned long possible_crtcs;
- int ret;
-
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
- return -ENODEV;
-
- intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
- if (!intel_plane)
- return -ENOMEM;
-
- if (IS_GEN6(dev)) {
- intel_plane->max_downscale = 16;
- intel_plane->update_plane = snb_update_plane;
- intel_plane->disable_plane = snb_disable_plane;
- intel_plane->update_colorkey = snb_update_colorkey;
- intel_plane->get_colorkey = snb_get_colorkey;
- } else if (IS_GEN7(dev)) {
- intel_plane->max_downscale = 2;
- intel_plane->update_plane = ivb_update_plane;
- intel_plane->disable_plane = ivb_disable_plane;
- intel_plane->update_colorkey = ivb_update_colorkey;
- intel_plane->get_colorkey = ivb_get_colorkey;
- }
-
- intel_plane->pipe = pipe;
- possible_crtcs = (1 << pipe);
- ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs, snb_plane_formats,
- ARRAY_SIZE(snb_plane_formats), false);
- if (ret)
- kfree(intel_plane);
-
- return ret;
-}
-
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 5f332e0..2136e6b 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -194,10 +194,10 @@ static const u32 filter_table[] = {
*
* if (f >= 1) {
* exp = 0x7;
- * mant = 1 << 8;
+ * mant = 1 << 8;
* } else {
* for (exp = 0; exp < 3 && f < 0.5; exp++)
- * f *= 2.0;
+ * f *= 2.0;
* mant = (f * (1 << 9) + 0.5);
* if (mant >= (1 << 9))
* mant = (1 << 9) - 1;
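
The pseudocode above normalises a coefficient into [0.5, 1) and rounds it to a 9-bit mantissa; below is a direct, compilable transcription. How exp and mant are then packed into the register word is not shown in this hunk, so that step is omitted:

#include <stdio.h>

/* Transcription of the comment's encoding: f >= 1 saturates, smaller
 * values get up to three doublings, then round to a 9-bit mantissa. */
static void tv_filter_encode(double f, unsigned int *exp, unsigned int *mant)
{
        if (f >= 1.0) {
                *exp = 0x7;
                *mant = 1 << 8;
        } else {
                for (*exp = 0; *exp < 3 && f < 0.5; (*exp)++)
                        f *= 2.0;
                *mant = (unsigned int)(f * (1 << 9) + 0.5);
                if (*mant >= (1 << 9))
                        *mant = (1 << 9) - 1;
        }
}

int main(void)
{
        unsigned int exp, mant;

        tv_filter_encode(0.125, &exp, &mant); /* -> exp=2, mant=0x100 */
        printf("exp=%u mant=0x%x\n", exp, mant);
        return 0;
}
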
@@ -430,7 +430,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -472,7 +472,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -515,7 +515,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -558,7 +558,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -602,14 +602,14 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 6, .vsync_start_f2 = 7,
.vsync_len = 6,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 18,
.vi_end_f1 = 24, .vi_end_f2 = 25,
.nbr_end = 286,
.burst_ena = true,
- .hburst_start = 73, .hburst_len = 34,
+ .hburst_start = 73, .hburst_len = 34,
.vburst_start_f1 = 8, .vburst_end_f1 = 285,
.vburst_start_f2 = 8, .vburst_end_f2 = 286,
.vburst_start_f3 = 9, .vburst_end_f3 = 286,
@@ -646,7 +646,7 @@ static const struct tv_mode tv_modes[] = {
.vsync_start_f1 = 5, .vsync_start_f2 = 6,
.vsync_len = 5,
- .veq_ena = true, .veq_start_f1 = 0,
+ .veq_ena = true, .veq_start_f1 = 0,
.veq_start_f2 = 1, .veq_len = 15,
.vi_end_f1 = 24, .vi_end_f2 = 25,
@@ -674,6 +674,78 @@ static const struct tv_mode tv_modes[] = {
.filter_table = filter_table,
},
{
+ .name = "480p@59.94Hz",
+ .clock = 107520,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 122,
+ .hblank_start = 842, .htotal = 857,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+ .vsync_len = 12,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 479,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "480p@60Hz",
+ .clock = 107520,
+ .refresh = 60000,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 122,
+ .hblank_start = 842, .htotal = 856,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+ .vsync_len = 12,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 479,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "576p",
+ .clock = 107520,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 139,
+ .hblank_start = 859, .htotal = 863,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 48, .vi_end_f2 = 48,
+ .nbr_end = 575,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
.name = "720p@60Hz",
.clock = 148800,
.refresh = 60000,
@@ -683,7 +755,31 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 80, .hblank_end = 300,
.hblank_start = 1580, .htotal = 1649,
- .progressive = true, .trilevel_sync = true,
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 29, .vi_end_f2 = 29,
+ .nbr_end = 719,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "720p@59.94Hz",
+ .clock = 148800,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 80, .hblank_end = 300,
+ .hblank_start = 1580, .htotal = 1651,
+
+ .progressive = true, .trilevel_sync = true,
.vsync_start_f1 = 10, .vsync_start_f2 = 10,
.vsync_len = 10,
@@ -707,7 +803,7 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 80, .hblank_end = 300,
.hblank_start = 1580, .htotal = 1979,
- .progressive = true, .trilevel_sync = true,
+ .progressive = true, .trilevel_sync = true,
.vsync_start_f1 = 10, .vsync_start_f2 = 10,
.vsync_len = 10,
@@ -732,12 +828,12 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 88, .hblank_end = 235,
.hblank_start = 2155, .htotal = 2639,
- .progressive = false, .trilevel_sync = true,
+ .progressive = false, .trilevel_sync = true,
.vsync_start_f1 = 4, .vsync_start_f2 = 5,
.vsync_len = 10,
- .veq_ena = true, .veq_start_f1 = 4,
+ .veq_ena = true, .veq_start_f1 = 4,
.veq_start_f2 = 4, .veq_len = 10,
@@ -758,12 +854,12 @@ static const struct tv_mode tv_modes[] = {
.hsync_end = 88, .hblank_end = 235,
.hblank_start = 2155, .htotal = 2199,
- .progressive = false, .trilevel_sync = true,
+ .progressive = false, .trilevel_sync = true,
.vsync_start_f1 = 4, .vsync_start_f2 = 5,
.vsync_len = 10,
- .veq_ena = true, .veq_start_f1 = 4,
+ .veq_ena = true, .veq_start_f1 = 4,
.veq_start_f2 = 4, .veq_len = 10,
@@ -774,6 +870,32 @@ static const struct tv_mode tv_modes[] = {
.filter_table = filter_table,
},
+ {
+ .name = "1080i@59.94Hz",
+ .clock = 148800,
+ .refresh = 29970,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2201,
+
+ .progressive = false, .trilevel_sync = true,
+
+ .vsync_start_f1 = 4, .vsync_start_f2 = 5,
+ .vsync_len = 10,
+
+ .veq_ena = true, .veq_start_f1 = 4,
+ .veq_start_f2 = 4, .veq_len = 10,
+
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .nbr_end = 539,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
};
static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
@@ -794,7 +916,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- switch (mode) {
+ switch(mode) {
case DRM_MODE_DPMS_ON:
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
break;
@@ -811,7 +933,7 @@ intel_tv_mode_lookup(const char *tv_format)
{
int i;
- for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) {
+ for (i = 0; i < sizeof(tv_modes) / sizeof (tv_modes[0]); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
if (!strcmp(tv_format, tv_mode->name))
@@ -1006,7 +1128,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (color_conversion) {
I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
color_conversion->gy);
- I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
+ I915_WRITE(TV_CSC_Y2,(color_conversion->by << 16) |
color_conversion->ay);
I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
color_conversion->gu);
@@ -1110,12 +1232,10 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
-intel_tv_detect_type(struct intel_tv *intel_tv,
+intel_tv_detect_type (struct intel_tv *intel_tv,
struct drm_connector *connector)
{
struct drm_encoder *encoder = &intel_tv->base.base;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -1138,10 +1258,6 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Poll for TV detection */
tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- if (intel_crtc->pipe == 1)
- tv_ctl |= TV_ENC_PIPEB_SELECT;
- else
- tv_ctl &= ~TV_ENC_PIPEB_SELECT;
tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |
@@ -1161,26 +1277,26 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
to_intel_crtc(intel_tv->base.base.crtc)->pipe);
type = -1;
- tv_dac = I915_READ(TV_DAC);
- DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
- /*
- * A B C
- * 0 1 1 Composite
- * 1 0 X svideo
- * 0 0 0 Component
- */
- if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- DRM_DEBUG_KMS("Detected Composite TV connection\n");
- type = DRM_MODE_CONNECTOR_Composite;
- } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- DRM_DEBUG_KMS("Detected S-Video TV connection\n");
- type = DRM_MODE_CONNECTOR_SVIDEO;
- } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- DRM_DEBUG_KMS("Detected Component TV connection\n");
- type = DRM_MODE_CONNECTOR_Component;
- } else {
- DRM_DEBUG_KMS("Unrecognised TV connection\n");
- type = -1;
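+	/* Give the DAC up to 20ms to flag a state change before sampling the sense bits */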
+ if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
+ DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+ /*
+ * A B C
+ * 0 1 1 Composite
+ * 1 0 X svideo
+ * 0 0 0 Component
+ */
+ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+ DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ type = DRM_MODE_CONNECTOR_Composite;
+ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ type = DRM_MODE_CONNECTOR_SVIDEO;
+ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+ DRM_DEBUG_KMS("Detected Component TV connection\n");
+ type = DRM_MODE_CONNECTOR_Component;
+ } else {
+ DRM_DEBUG_KMS("Unrecognised TV connection\n");
+ }
}
I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
@@ -1245,7 +1361,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
int type;
mode = reported_modes[0];
- drm_mode_set_crtcinfo(&mode, 0);
+ drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
type = intel_tv_detect_type(intel_tv, connector);
@@ -1369,7 +1485,7 @@ intel_tv_get_modes(struct drm_connector *connector)
}
static void
-intel_tv_destroy(struct drm_connector *connector)
+intel_tv_destroy (struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 507aa3d..5ccb65de 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -403,8 +403,6 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
dev_priv->chipset = flags;
- pci_set_master(dev->pdev);
-
dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index f9a925d..42d3187 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -29,8 +29,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include <linux/module.h>
-
#include "drmP.h"
#include "drm.h"
#include "mga_drm.h"
@@ -44,20 +42,6 @@ static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
};
-static const struct file_operations mga_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mga_compat_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
@@ -78,7 +62,20 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
- .fops = &mga_driver_fops,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mga_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+ },
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
index 722a91b..f172bd5 100644
--- a/drivers/gpu/drm/mga/mga_warp.c
+++ b/drivers/gpu/drm/mga/mga_warp.c
@@ -30,7 +30,6 @@
#include <linux/firmware.h>
#include <linux/ihex.h>
#include <linux/platform_device.h>
-#include <linux/module.h>
#include "drmP.h"
#include "drm.h"
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1a2ad7e..0583677 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -9,33 +9,28 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
+ nouveau_dp.o nouveau_ramht.o \
nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
- nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
+ nouveau_mm.o nouveau_vm.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
- nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
- nv50_fb.o nvc0_fb.o \
+ nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o nvc0_graph.o \
nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
- nv84_crypt.o nv98_crypt.o \
+ nv84_crypt.o \
nva3_copy.o nvc0_copy.o \
- nv31_mpeg.o nv50_mpeg.o \
- nv84_bsp.o \
- nv84_vp.o \
- nv98_ppp.o \
+ nv40_mpeg.o nv50_mpeg.o \
nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
- nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
- nv04_crtc.o nv04_display.o nv04_cursor.o \
nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
nv50_cursor.o nv50_display.o \
- nvd0_display.o \
+ nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
+ nv04_crtc.o nv04_display.o nv04_cursor.o \
nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
nv10_gpio.o nv50_gpio.o \
nv50_calc.o \
- nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
+ nv04_pm.o nv50_pm.o nva3_pm.o \
nv50_vram.o nvc0_vram.o \
nv50_vm.o nvc0_vm.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 7814a76..525744d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -18,6 +18,12 @@
#include <linux/vga_switcheroo.h>
+#define NOUVEAU_DSM_SUPPORTED 0x00
+#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
+
+#define NOUVEAU_DSM_ACTIVE 0x01
+#define NOUVEAU_DSM_ACTIVE_QUERY 0x00
+
#define NOUVEAU_DSM_LED 0x02
#define NOUVEAU_DSM_LED_STATE 0x00
#define NOUVEAU_DSM_LED_OFF 0x10
@@ -29,9 +35,6 @@
#define NOUVEAU_DSM_POWER_SPEED 0x01
#define NOUVEAU_DSM_POWER_STAMINA 0x02
-#define NOUVEAU_DSM_OPTIMUS_FN 0x1A
-#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001
-
static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
@@ -58,8 +61,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
struct acpi_object_list input;
union acpi_object params[4];
union acpi_object *obj;
- int i, err;
- char args_buff[4];
+ int err;
input.count = 4;
input.pointer = params;
@@ -71,11 +73,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = func;
params[3].type = ACPI_TYPE_BUFFER;
- params[3].buffer.length = 4;
- /* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */
- for (i = 0; i < 4; i++)
- args_buff[i] = (arg >> i * 8) & 0xFF;
- params[3].buffer.pointer = args_buff;
+ params[3].buffer.length = 0;
err = acpi_evaluate_object(handle, "_DSM", &input, &output);
if (err) {
@@ -150,23 +148,6 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
return 0;
}
-/* Returns 1 if a DSM function is usable and 0 otherwise */
-static int nouveau_test_dsm(acpi_handle test_handle,
- int (*dsm_func)(acpi_handle, int, int, uint32_t *),
- int sfnc)
-{
- u32 result = 0;
-
- /* Function 0 returns a Buffer containing available functions. The args
- * parameter is ignored for function 0, so just put 0 in it */
- if (dsm_func(test_handle, 0, 0, &result))
- return 0;
-
- /* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If
- * the n-th bit is enabled, function n is supported */
- return result & 1 && result & (1 << sfnc);
-}
-
static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
{
mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
@@ -187,10 +168,6 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
{
- /* perhaps the _DSM functions are mutually exclusive, but prepare for
- * the future */
- if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
- return 0;
if (id == VGA_SWITCHEROO_IGD)
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
else
@@ -203,11 +180,6 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
if (id == VGA_SWITCHEROO_IGD)
return 0;
- /* Optimus laptops have the card already disabled in
- * nouveau_switcheroo_set_state */
- if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
- return 0;
-
return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
}
@@ -240,7 +212,8 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle, nvidia_handle;
acpi_status status;
- int retval = 0;
+ int ret, retval = 0;
+ uint32_t result;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
@@ -251,11 +224,13 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
return false;
}
- if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
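+	/* _DSM function 0 lists the supported sub-functions; a successful
+	 * call is taken here as evidence of mux-switching support */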
+ ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
+ NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
+ if (ret == 0)
retval |= NOUVEAU_DSM_HAS_MUX;
- if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
- NOUVEAU_DSM_OPTIMUS_FN))
+ ret = nouveau_optimus_dsm(dhandle, 0, 0, &result);
+ if (ret == 0)
retval |= NOUVEAU_DSM_HAS_OPT;
if (retval)
@@ -294,22 +269,15 @@ static bool nouveau_dsm_detect(void)
}
if (vga_count == 2 && has_dsm && guid_valid) {
- acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
- &buffer);
+ acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
- acpi_method_name);
+ acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
ret = true;
}
- if (has_optimus == 1) {
- acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
- &buffer);
- printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
- acpi_method_name);
+ if (has_optimus == 1)
nouveau_dsm_priv.optimus_detected = true;
- ret = true;
- }
return ret;
}
@@ -325,17 +293,6 @@ void nouveau_register_dsm_handler(void)
vga_switcheroo_register_handler(&nouveau_dsm_handler);
}
-/* Must be called for Optimus models before the card can be turned off */
-void nouveau_switcheroo_optimus_dsm(void)
-{
- u32 result = 0;
- if (!nouveau_dsm_priv.optimus_detected)
- return;
-
- nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN,
- NOUVEAU_DSM_OPTIMUS_ARGS, &result);
-}
-
void nouveau_unregister_dsm_handler(void)
{
vga_switcheroo_unregister_handler();
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index fa22b28..00a55df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -37,10 +37,8 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_reg.h"
-#include "nouveau_encoder.h"
-static int
-nv40_get_intensity(struct backlight_device *bd)
+static int nv40_get_intensity(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)
@@ -49,8 +47,7 @@ nv40_get_intensity(struct backlight_device *bd)
return val;
}
-static int
-nv40_set_intensity(struct backlight_device *bd)
+static int nv40_set_intensity(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
int val = bd->props.brightness;
@@ -68,8 +65,30 @@ static const struct backlight_ops nv40_bl_ops = {
.update_status = nv40_set_intensity,
};
-static int
-nv40_backlight_init(struct drm_connector *connector)
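+/* NV50: brightness is read and written directly via the SOR backlight register */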
+static int nv50_get_intensity(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+
+ return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT);
+}
+
+static int nv50_set_intensity(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ int val = bd->props.brightness;
+
+ nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT,
+ val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE);
+ return 0;
+}
+
+static const struct backlight_ops nv50_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = nv50_get_intensity,
+ .update_status = nv50_set_intensity,
+};
+
+static int nouveau_nv40_backlight_init(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -94,129 +113,34 @@ nv40_backlight_init(struct drm_connector *connector)
return 0;
}
-static int
-nv50_get_intensity(struct backlight_device *bd)
-{
- struct nouveau_encoder *nv_encoder = bl_get_data(bd);
- struct drm_device *dev = nv_encoder->base.base.dev;
- int or = nv_encoder->or;
- u32 div = 1025;
- u32 val;
-
- val = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or));
- val &= NV50_PDISP_SOR_PWM_CTL_VAL;
- return ((val * 100) + (div / 2)) / div;
-}
-
-static int
-nv50_set_intensity(struct backlight_device *bd)
-{
- struct nouveau_encoder *nv_encoder = bl_get_data(bd);
- struct drm_device *dev = nv_encoder->base.base.dev;
- int or = nv_encoder->or;
- u32 div = 1025;
- u32 val = (bd->props.brightness * div) / 100;
-
- nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or),
- NV50_PDISP_SOR_PWM_CTL_NEW | val);
- return 0;
-}
-
-static const struct backlight_ops nv50_bl_ops = {
- .options = BL_CORE_SUSPENDRESUME,
- .get_brightness = nv50_get_intensity,
- .update_status = nv50_set_intensity,
-};
-
-static int
-nva3_get_intensity(struct backlight_device *bd)
-{
- struct nouveau_encoder *nv_encoder = bl_get_data(bd);
- struct drm_device *dev = nv_encoder->base.base.dev;
- int or = nv_encoder->or;
- u32 div, val;
-
- div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or));
- val = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or));
- val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
- if (div && div >= val)
- return ((val * 100) + (div / 2)) / div;
-
- return 100;
-}
-
-static int
-nva3_set_intensity(struct backlight_device *bd)
-{
- struct nouveau_encoder *nv_encoder = bl_get_data(bd);
- struct drm_device *dev = nv_encoder->base.base.dev;
- int or = nv_encoder->or;
- u32 div, val;
-
- div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or));
- val = (bd->props.brightness * div) / 100;
- if (div) {
- nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or), val |
- NV50_PDISP_SOR_PWM_CTL_NEW |
- NVA3_PDISP_SOR_PWM_CTL_UNK);
- return 0;
- }
-
- return -EINVAL;
-}
-
-static const struct backlight_ops nva3_bl_ops = {
- .options = BL_CORE_SUSPENDRESUME,
- .get_brightness = nva3_get_intensity,
- .update_status = nva3_set_intensity,
-};
-
-static int
-nv50_backlight_init(struct drm_connector *connector)
+static int nouveau_nv50_backlight_init(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_encoder *nv_encoder;
struct backlight_properties props;
struct backlight_device *bd;
- const struct backlight_ops *ops;
-
- nv_encoder = find_encoder(connector, OUTPUT_LVDS);
- if (!nv_encoder) {
- nv_encoder = find_encoder(connector, OUTPUT_DP);
- if (!nv_encoder)
- return -ENODEV;
- }
- if (!nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
+ if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
return 0;
- if (dev_priv->chipset <= 0xa0 ||
- dev_priv->chipset == 0xaa ||
- dev_priv->chipset == 0xac)
- ops = &nv50_bl_ops;
- else
- ops = &nva3_bl_ops;
-
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
- props.max_brightness = 100;
- bd = backlight_device_register("nv_backlight", &connector->kdev,
- nv_encoder, ops, &props);
+ props.max_brightness = 1025;
+ bd = backlight_device_register("nv_backlight", &connector->kdev, dev,
+ &nv50_bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
dev_priv->backlight = bd;
- bd->props.brightness = bd->ops->get_brightness(bd);
+ bd->props.brightness = nv50_get_intensity(bd);
backlight_update_status(bd);
return 0;
}
-int
-nouveau_backlight_init(struct drm_device *dev)
+int nouveau_backlight_init(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
#ifdef CONFIG_ACPI
if (acpi_video_backlight_support()) {
@@ -226,28 +150,21 @@ nouveau_backlight_init(struct drm_device *dev)
}
#endif
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
- connector->connector_type != DRM_MODE_CONNECTOR_eDP)
- continue;
-
- switch (dev_priv->card_type) {
- case NV_40:
- return nv40_backlight_init(connector);
- case NV_50:
- return nv50_backlight_init(connector);
- default:
- break;
- }
+ switch (dev_priv->card_type) {
+ case NV_40:
+ return nouveau_nv40_backlight_init(connector);
+ case NV_50:
+ return nouveau_nv50_backlight_init(connector);
+ default:
+ break;
}
-
return 0;
}
-void
-nouveau_backlight_exit(struct drm_device *dev)
+void nouveau_backlight_exit(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (dev_priv->backlight) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 8dbeeea..729d5fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -27,7 +27,6 @@
#include "nouveau_drv.h"
#include "nouveau_hw.h"
#include "nouveau_encoder.h"
-#include "nouveau_gpio.h"
#include <linux/io-mapping.h>
@@ -35,6 +34,9 @@
#define NV_CIO_CRE_44_HEADA 0x0
#define NV_CIO_CRE_44_HEADB 0x3
#define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */
+#define LEGACY_I2C_CRT 0x80
+#define LEGACY_I2C_PANEL 0x81
+#define LEGACY_I2C_TV 0x82
#define EDID1_LEN 128
@@ -65,232 +67,194 @@ static bool nv_cksum(const uint8_t *data, unsigned int length)
}
static int
-score_vbios(struct nvbios *bios, const bool writeable)
+score_vbios(struct drm_device *dev, const uint8_t *data, const bool writeable)
{
- if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) {
- NV_TRACEWARN(bios->dev, "... BIOS signature not found\n");
+ if (!(data[0] == 0x55 && data[1] == 0xAA)) {
+ NV_TRACEWARN(dev, "... BIOS signature not found\n");
return 0;
}
- if (nv_cksum(bios->data, bios->data[2] * 512)) {
- NV_TRACEWARN(bios->dev, "... BIOS checksum invalid\n");
+ if (nv_cksum(data, data[2] * 512)) {
+ NV_TRACEWARN(dev, "... BIOS checksum invalid\n");
/* if a ro image is somewhat bad, it's probably all rubbish */
return writeable ? 2 : 1;
- }
+ } else
+ NV_TRACE(dev, "... appears to be valid\n");
- NV_TRACE(bios->dev, "... appears to be valid\n");
return 3;
}
-static void
-bios_shadow_prom(struct nvbios *bios)
+static void load_vbios_prom(struct drm_device *dev, uint8_t *data)
{
- struct drm_device *dev = bios->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 pcireg, access;
- u16 pcir;
+ uint32_t pci_nv_20, save_pci_nv_20;
+ int pcir_ptr;
int i;
- /* enable access to rom */
if (dev_priv->card_type >= NV_50)
- pcireg = 0x088050;
+ pci_nv_20 = 0x88050;
else
- pcireg = NV_PBUS_PCI_NV_20;
- access = nv_mask(dev, pcireg, 0x00000001, 0x00000000);
+ pci_nv_20 = NV_PBUS_PCI_NV_20;
- /* bail if no rom signature, with a workaround for a PROM reading
- * issue on some chipsets. the first read after a period of
- * inactivity returns the wrong result, so retry the first header
- * byte a few times before giving up as a workaround
- */
- i = 16;
- do {
- if (nv_rd08(dev, NV_PROM_OFFSET + 0) == 0x55)
- break;
- } while (i--);
+ /* enable ROM access */
+ save_pci_nv_20 = nvReadMC(dev, pci_nv_20);
+ nvWriteMC(dev, pci_nv_20,
+ save_pci_nv_20 & ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
- if (!i || nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
+ /* bail if no rom signature */
+ if (nv_rd08(dev, NV_PROM_OFFSET) != 0x55 ||
+ nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
goto out;
/* additional check (see note below) - read PCI record header */
- pcir = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
- nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
- if (nv_rd08(dev, NV_PROM_OFFSET + pcir + 0) != 'P' ||
- nv_rd08(dev, NV_PROM_OFFSET + pcir + 1) != 'C' ||
- nv_rd08(dev, NV_PROM_OFFSET + pcir + 2) != 'I' ||
- nv_rd08(dev, NV_PROM_OFFSET + pcir + 3) != 'R')
+ pcir_ptr = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
+ nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
+ if (nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr) != 'P' ||
+ nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 1) != 'C' ||
+ nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 2) != 'I' ||
+ nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 3) != 'R')
goto out;
- /* read entire bios image to system memory */
- bios->length = nv_rd08(dev, NV_PROM_OFFSET + 2) * 512;
- bios->data = kmalloc(bios->length, GFP_KERNEL);
- if (bios->data) {
- for (i = 0; i < bios->length; i++)
- bios->data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
- }
+ /* on some 6600GT/6800LE prom reads are messed up. nvclock alleges a
+	 * good read may be obtained by waiting or re-reading (cargocult: 5x)
+ * each byte. we'll hope pramin has something usable instead
+ */
+ for (i = 0; i < NV_PROM_SIZE; i++)
+ data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
out:
- /* disable access to rom */
- nv_wr32(dev, pcireg, access);
+ /* disable ROM access */
+ nvWriteMC(dev, pci_nv_20,
+ save_pci_nv_20 | NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
}
-static void
-bios_shadow_pramin(struct nvbios *bios)
+static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
{
- struct drm_device *dev = bios->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 bar0 = 0;
+ uint32_t old_bar0_pramin = 0;
int i;
if (dev_priv->card_type >= NV_50) {
- u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8;
- if (!addr) {
- addr = (u64)nv_rd32(dev, 0x001700) << 16;
- addr += 0xf0000;
- }
+ uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8;
+
+ if (!vbios_vram)
+ vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000;
- bar0 = nv_mask(dev, 0x001700, 0xffffffff, addr >> 16);
+ old_bar0_pramin = nv_rd32(dev, 0x1700);
+ nv_wr32(dev, 0x1700, vbios_vram >> 16);
}
/* bail if no rom signature */
- if (nv_rd08(dev, NV_PRAMIN_OFFSET + 0) != 0x55 ||
+ if (nv_rd08(dev, NV_PRAMIN_OFFSET) != 0x55 ||
nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
goto out;
- bios->length = nv_rd08(dev, NV_PRAMIN_OFFSET + 2) * 512;
- bios->data = kmalloc(bios->length, GFP_KERNEL);
- if (bios->data) {
- for (i = 0; i < bios->length; i++)
- bios->data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
- }
+ for (i = 0; i < NV_PROM_SIZE; i++)
+ data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
out:
if (dev_priv->card_type >= NV_50)
- nv_wr32(dev, 0x001700, bar0);
+ nv_wr32(dev, 0x1700, old_bar0_pramin);
}
-static void
-bios_shadow_pci(struct nvbios *bios)
-{
- struct pci_dev *pdev = bios->dev->pdev;
- size_t length;
-
- if (!pci_enable_rom(pdev)) {
- void __iomem *rom = pci_map_rom(pdev, &length);
- if (rom) {
- bios->data = kmalloc(length, GFP_KERNEL);
- if (bios->data) {
- memcpy_fromio(bios->data, rom, length);
- bios->length = length;
- }
- pci_unmap_rom(pdev, rom);
- }
-
- pci_disable_rom(pdev);
- }
-}
-
-static void
-bios_shadow_acpi(struct nvbios *bios)
+static void load_vbios_pci(struct drm_device *dev, uint8_t *data)
{
- struct pci_dev *pdev = bios->dev->pdev;
- int ptr, len, ret;
- u8 data[3];
+ void __iomem *rom = NULL;
+ size_t rom_len;
+ int ret;
- if (!nouveau_acpi_rom_supported(pdev))
+ ret = pci_enable_rom(dev->pdev);
+ if (ret)
return;
- ret = nouveau_acpi_get_bios_chunk(data, 0, sizeof(data));
- if (ret != sizeof(data))
- return;
+ rom = pci_map_rom(dev->pdev, &rom_len);
+ if (!rom)
+ goto out;
+ memcpy_fromio(data, rom, rom_len);
+ pci_unmap_rom(dev->pdev, rom);
- bios->length = min(data[2] * 512, 65536);
- bios->data = kmalloc(bios->length, GFP_KERNEL);
- if (!bios->data)
- return;
+out:
+ pci_disable_rom(dev->pdev);
+}
- len = bios->length;
- ptr = 0;
- while (len) {
- int size = (len > ROM_BIOS_PAGE) ? ROM_BIOS_PAGE : len;
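+/* Pull the ROM image through ACPI in ROM_BIOS_PAGE-sized chunks (up to 64KiB) */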
+static void load_vbios_acpi(struct drm_device *dev, uint8_t *data)
+{
+ int i;
+ int ret;
+ int size = 64 * 1024;
- ret = nouveau_acpi_get_bios_chunk(bios->data, ptr, size);
- if (ret != size) {
- kfree(bios->data);
- bios->data = NULL;
- return;
- }
+ if (!nouveau_acpi_rom_supported(dev->pdev))
+ return;
- len -= size;
- ptr += size;
+ for (i = 0; i < (size / ROM_BIOS_PAGE); i++) {
+ ret = nouveau_acpi_get_bios_chunk(data,
+ (i * ROM_BIOS_PAGE),
+ ROM_BIOS_PAGE);
+ if (ret <= 0)
+ break;
}
+ return;
}
struct methods {
const char desc[8];
- void (*shadow)(struct nvbios *);
+ void (*loadbios)(struct drm_device *, uint8_t *);
const bool rw;
- int score;
- u32 size;
- u8 *data;
};
-static bool
-bios_shadow(struct drm_device *dev)
-{
- struct methods shadow_methods[] = {
- { "PRAMIN", bios_shadow_pramin, true, 0, 0, NULL },
- { "PROM", bios_shadow_prom, false, 0, 0, NULL },
- { "ACPI", bios_shadow_acpi, true, 0, 0, NULL },
- { "PCIROM", bios_shadow_pci, true, 0, 0, NULL },
- {}
- };
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- struct methods *mthd, *best;
+static struct methods shadow_methods[] = {
+ { "PRAMIN", load_vbios_pramin, true },
+ { "PROM", load_vbios_prom, false },
+ { "PCIROM", load_vbios_pci, true },
+ { "ACPI", load_vbios_acpi, true },
+};
+#define NUM_SHADOW_METHODS ARRAY_SIZE(shadow_methods)
+
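+/* Try each shadow method in turn: accept the first image scoring a perfect 3,
+ * otherwise fall back to re-loading the best-scoring image found */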
+static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
+{
+ struct methods *methods = shadow_methods;
+ int testscore = 3;
+ int scores[NUM_SHADOW_METHODS], i;
if (nouveau_vbios) {
- mthd = shadow_methods;
- do {
- if (strcasecmp(nouveau_vbios, mthd->desc))
- continue;
- NV_INFO(dev, "VBIOS source: %s\n", mthd->desc);
+ for (i = 0; i < NUM_SHADOW_METHODS; i++)
+ if (!strcasecmp(nouveau_vbios, methods[i].desc))
+ break;
- mthd->shadow(bios);
- mthd->score = score_vbios(bios, mthd->rw);
- if (mthd->score)
+ if (i < NUM_SHADOW_METHODS) {
+ NV_INFO(dev, "Attempting to use BIOS image from %s\n",
+ methods[i].desc);
+
+ methods[i].loadbios(dev, data);
+ if (score_vbios(dev, data, methods[i].rw))
return true;
- } while ((++mthd)->shadow);
+ }
NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
}
- mthd = shadow_methods;
- do {
- NV_TRACE(dev, "Checking %s for VBIOS\n", mthd->desc);
- mthd->shadow(bios);
- mthd->score = score_vbios(bios, mthd->rw);
- mthd->size = bios->length;
- mthd->data = bios->data;
- } while (mthd->score != 3 && (++mthd)->shadow);
-
- mthd = shadow_methods;
- best = mthd;
- do {
- if (mthd->score > best->score) {
- kfree(best->data);
- best = mthd;
- }
- } while ((++mthd)->shadow);
+ for (i = 0; i < NUM_SHADOW_METHODS; i++) {
+ NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
+ methods[i].desc);
+ data[0] = data[1] = 0; /* avoid reuse of previous image */
+ methods[i].loadbios(dev, data);
+ scores[i] = score_vbios(dev, data, methods[i].rw);
+ if (scores[i] == testscore)
+ return true;
+ }
- if (best->score) {
- NV_TRACE(dev, "Using VBIOS from %s\n", best->desc);
- bios->length = best->size;
- bios->data = best->data;
- return true;
+ while (--testscore > 0) {
+ for (i = 0; i < NUM_SHADOW_METHODS; i++) {
+ if (scores[i] == testscore) {
+ NV_TRACE(dev, "Using BIOS image from %s\n",
+ methods[i].desc);
+ methods[i].loadbios(dev, data);
+ return true;
+ }
+ }
}
- NV_ERROR(dev, "No valid VBIOS image found\n");
+ NV_ERROR(dev, "No valid BIOS image found\n");
return false;
}
@@ -331,11 +295,6 @@ munge_reg(struct nvbios *bios, uint32_t reg)
if (dev_priv->card_type < NV_50)
return reg;
- if (reg & 0x80000000) {
- BUG_ON(bios->display.crtc < 0);
- reg += bios->display.crtc * 0x800;
- }
-
if (reg & 0x40000000) {
BUG_ON(!dcbent);
@@ -344,7 +303,7 @@ munge_reg(struct nvbios *bios, uint32_t reg)
reg += 0x00000080;
}
- reg &= ~0xe0000000;
+ reg &= ~0x60000000;
return reg;
}
@@ -675,9 +634,10 @@ static int
nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t reg0 = nv_rd32(dev, reg + 0);
+ uint32_t reg1 = nv_rd32(dev, reg + 4);
struct nouveau_pll_vals pll;
struct pll_lims pll_limits;
- u32 ctrl, mask, coef;
int ret;
ret = get_pll_limits(dev, reg, &pll_limits);
@@ -688,20 +648,15 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
if (!clk)
return -ERANGE;
- coef = pll.N1 << 8 | pll.M1;
- ctrl = pll.log2P << 16;
- mask = 0x00070000;
- if (reg == 0x004008) {
- mask |= 0x01f80000;
- ctrl |= (pll_limits.log2p_bias << 19);
- ctrl |= (pll.log2P << 22);
- }
+ reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
+ reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
- if (!dev_priv->vbios.execute)
- return 0;
+ if (dev_priv->vbios.execute) {
+ still_alive();
+ nv_wr32(dev, reg + 4, reg1);
+ nv_wr32(dev, reg + 0, reg0);
+ }
- nv_mask(dev, reg + 0, mask, ctrl);
- nv_wr32(dev, reg + 4, coef);
return 0;
}
@@ -758,19 +713,115 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
return dcb_entry;
}
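+/* Decode one DCB I2C table entry into *i2c, allowing for the layout
+ * differences between pre-1.4, 3.0 and 4.0 tables */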
+static int
+read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
+{
+ uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
+ int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
+ int recordoffset = 0, rdofs = 1, wrofs = 0;
+ uint8_t port_type = 0;
+
+ if (!i2ctable)
+ return -EINVAL;
+
+ if (dcb_version >= 0x30) {
+ if (i2ctable[0] != dcb_version) /* necessary? */
+ NV_WARN(dev,
+ "DCB I2C table version mismatch (%02X vs %02X)\n",
+ i2ctable[0], dcb_version);
+ dcb_i2c_ver = i2ctable[0];
+ headerlen = i2ctable[1];
+ if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
+ i2c_entries = i2ctable[2];
+ else
+ NV_WARN(dev,
+ "DCB I2C table has more entries than indexable "
+ "(%d entries, max %d)\n", i2ctable[2],
+ DCB_MAX_NUM_I2C_ENTRIES);
+ entry_len = i2ctable[3];
+ /* [4] is i2c_default_indices, read in parse_dcb_table() */
+ }
+ /*
+ * It's your own fault if you call this function on a DCB 1.1 BIOS --
+ * the test below is for DCB 1.2
+ */
+ if (dcb_version < 0x14) {
+ recordoffset = 2;
+ rdofs = 0;
+ wrofs = 1;
+ }
+
+ if (index == 0xf)
+ return 0;
+ if (index >= i2c_entries) {
+ NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
+ index, i2ctable[2]);
+ return -ENOENT;
+ }
+ if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
+ NV_ERROR(dev, "DCB I2C entry invalid\n");
+ return -EINVAL;
+ }
+
+ if (dcb_i2c_ver >= 0x30) {
+ port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
+
+ /*
+ * Fixup for chips using same address offset for read and
+ * write.
+ */
+ if (port_type == 4) /* seen on C51 */
+ rdofs = wrofs = 1;
+ if (port_type >= 5) /* G80+ */
+ rdofs = wrofs = 0;
+ }
+
+ if (dcb_i2c_ver >= 0x40) {
+ if (port_type != 5 && port_type != 6)
+ NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
+
+ i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
+ }
+
+ i2c->port_type = port_type;
+ i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
+ i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
+
+ return 0;
+}
+
static struct nouveau_i2c_chan *
init_i2c_device_find(struct drm_device *dev, int i2c_index)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
+
if (i2c_index == 0xff) {
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_table *dcb = &dev_priv->vbios.dcb;
/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
- int idx = dcb_entry_idx_from_crtchead(dev);
+ int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
+ int default_indices = dcb->i2c_default_indices;
- i2c_index = NV_I2C_DEFAULT(0);
if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
- i2c_index = NV_I2C_DEFAULT(1);
+ shift = 4;
+
+ i2c_index = (default_indices >> shift) & 0xf;
}
+ if (i2c_index == 0x80) /* g80+ */
+ i2c_index = dcb->i2c_default_indices & 0xf;
+ else
+ if (i2c_index == 0x81)
+ i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
+
+ if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) {
+ NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
+ return NULL;
+ }
+
+ /* Make sure i2c table entry has been parsed, it may not
+ * have been if this is a bus not referenced by a DCB encoder
+ */
+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+ i2c_index, &dcb->i2c[i2c_index]);
return nouveau_i2c_find(dev, i2c_index);
}
@@ -1122,29 +1173,36 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*
*/
+ struct bit_displayport_encoder_table *dpe = NULL;
struct dcb_entry *dcb = bios->display.output;
struct drm_device *dev = bios->dev;
uint8_t cond = bios->data[offset + 1];
- uint8_t *table, *entry;
+ int dummy;
BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
if (!iexec->execute)
return 3;
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (!table)
+ dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
+ if (!dpe) {
+ NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
return 3;
+ }
switch (cond) {
case 0:
- entry = dcb_conn(dev, dcb->connector);
- if (!entry || entry[0] != DCB_CONNECTOR_eDP)
+ {
+ struct dcb_connector_table_entry *ent =
+ &bios->dcb.connector.entry[dcb->connector];
+
+ if (ent->type != DCB_CONNECTOR_eDP)
iexec->execute = false;
+ }
break;
case 1:
case 2:
- if (!(entry[5] & cond))
+ if (!(dpe->unknown & cond))
iexec->execute = false;
break;
case 5:
@@ -3174,8 +3232,49 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* each GPIO according to various values listed in each entry
*/
- if (iexec->execute && bios->execute)
- nouveau_gpio_reset(bios->dev);
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
+ int i;
+
+ if (dev_priv->card_type < NV_50) {
+ NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
+ return 1;
+ }
+
+ if (!iexec->execute)
+ return 1;
+
+ for (i = 0; i < bios->dcb.gpio.entries; i++) {
+ struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
+ uint32_t r, s, v;
+
+ BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
+
+ BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
+ offset, gpio->tag, gpio->state_default);
+ if (bios->execute)
+ pgpio->set(bios->dev, gpio->tag, gpio->state_default);
+
+ /* The NVIDIA binary driver doesn't appear to actually do
+ * any of this, my VBIOS does however.
+ */
+ /* Not a clue, needs de-magicing */
+ r = nv50_gpio_ctl[gpio->line >> 4];
+ s = (gpio->line & 0x0f);
+ v = bios_rd32(bios, r) & ~(0x00010001 << s);
+ switch ((gpio->entry & 0x06000000) >> 25) {
+ case 1:
+ v |= (0x00000001 << s);
+ break;
+ case 2:
+ v |= (0x00010000 << s);
+ break;
+ default:
+ break;
+ }
+ bios_wr32(bios, r, v);
+ }
return 1;
}
@@ -3637,10 +3736,6 @@ parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
int count = 0, i, ret;
uint8_t id;
- /* catch NULL script pointers */
- if (offset == 0)
- return 0;
-
/*
* Loop until INIT_DONE causes us to break out of the loop
* (or until offset > bios length just in case... )
@@ -4272,6 +4367,18 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
break;
}
+ /* Dell Latitude D620 reports a too-high value for the dual-link
+ * transition freq, causing us to program the panel incorrectly.
+ *
+ * It doesn't appear the VBIOS actually uses its transition freq
+ * (90000kHz), instead it uses the "Number of LVDS channels" field
+ * out of the panel ID structure (http://www.spwg.org/).
+ *
+ * For the moment, a quirk will do :)
+ */
+ if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
+ bios->fp.duallink_transition_clk = 80000;
+
/* set dual_link flag for EDID case */
if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
@@ -4281,37 +4388,86 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
return 0;
}
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-bool
-bios_encoder_match(struct dcb_entry *dcb, u32 hash)
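+/* Scan record_nr records of record_len bytes for an output table whose
+ * header matches this DCB entry's type, location, OR mask and, optionally, link */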
+static uint8_t *
+bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
+ uint16_t record, int record_len, int record_nr,
+ bool match_link)
{
- if ((hash & 0x000000f0) != (dcb->location << 4))
- return false;
- if ((hash & 0x0000000f) != dcb->type)
- return false;
- if (!(hash & (dcb->or << 16)))
- return false;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ uint32_t entry;
+ uint16_t table;
+ int i, v;
- switch (dcb->type) {
+ switch (dcbent->type) {
case OUTPUT_TMDS:
case OUTPUT_LVDS:
case OUTPUT_DP:
- if (hash & 0x00c00000) {
- if (!(hash & (dcb->sorconf.link << 22)))
- return false;
- }
+ break;
default:
- return true;
+ match_link = false;
+ break;
}
+
+ for (i = 0; i < record_nr; i++, record += record_len) {
+ table = ROM16(bios->data[record]);
+ if (!table)
+ continue;
+ entry = ROM32(bios->data[table]);
+
+ if (match_link) {
+ v = (entry & 0x00c00000) >> 22;
+ if (!(v & dcbent->sorconf.link))
+ continue;
+ }
+
+ v = (entry & 0x000f0000) >> 16;
+ if (!(v & dcbent->or))
+ continue;
+
+ v = (entry & 0x000000f0) >> 4;
+ if (v != dcbent->location)
+ continue;
+
+ v = (entry & 0x0000000f);
+ if (v != dcbent->type)
+ continue;
+
+ return &bios->data[table];
+ }
+
+ return NULL;
+}
+
+void *
+nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
+ int *length)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ uint8_t *table;
+
+ if (!bios->display.dp_table_ptr) {
+ NV_ERROR(dev, "No pointer to DisplayPort table\n");
+ return NULL;
+ }
+ table = &bios->data[bios->display.dp_table_ptr];
+
+ if (table[0] != 0x20 && table[0] != 0x21) {
+ NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
+ table[0]);
+ return NULL;
+ }
+
+ *length = table[4];
+ return bios_output_config_match(dev, dcbent,
+ bios->display.dp_table_ptr + table[1],
+ table[2], table[3], table[0] >= 0x21);
}
int
-nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
- struct dcb_entry *dcbent, int crtc)
+nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
+ uint32_t sub, int pxclk)
{
/*
* The display script table is located by the BIT 'U' table.
@@ -4341,7 +4497,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
uint8_t *table = &bios->data[bios->display.script_table_ptr];
uint8_t *otable = NULL;
uint16_t script;
- int i;
+ int i = 0;
if (!bios->display.script_table_ptr) {
NV_ERROR(dev, "No pointer to output script table\n");
@@ -4393,33 +4549,30 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
dcbent->type, dcbent->location, dcbent->or);
- for (i = 0; i < table[3]; i++) {
- otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
- if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
- break;
- }
-
+ otable = bios_output_config_match(dev, dcbent, table[1] +
+ bios->display.script_table_ptr,
+ table[2], table[3], table[0] >= 0x21);
if (!otable) {
NV_DEBUG_KMS(dev, "failed to match any output table\n");
return 1;
}
- if (pclk < -2 || pclk > 0) {
+ if (pxclk < -2 || pxclk > 0) {
/* Try to find matching script table entry */
for (i = 0; i < otable[5]; i++) {
- if (ROM16(otable[table[4] + i*6]) == type)
+ if (ROM16(otable[table[4] + i*6]) == sub)
break;
}
if (i == otable[5]) {
NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
"using first\n",
- type, dcbent->type, dcbent->or);
+ sub, dcbent->type, dcbent->or);
i = 0;
}
}
- if (pclk == 0) {
+ if (pxclk == 0) {
script = ROM16(otable[6]);
if (!script) {
NV_DEBUG_KMS(dev, "output script 0 not found\n");
@@ -4427,9 +4580,9 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
}
NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
- if (pclk == -1) {
+ if (pxclk == -1) {
script = ROM16(otable[8]);
if (!script) {
NV_DEBUG_KMS(dev, "output script 1 not found\n");
@@ -4437,9 +4590,9 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
}
NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
- if (pclk == -2) {
+ if (pxclk == -2) {
if (table[4] >= 12)
script = ROM16(otable[10]);
else
@@ -4450,31 +4603,31 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
}
NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
- if (pclk > 0) {
+ if (pxclk > 0) {
script = ROM16(otable[table[4] + i*6 + 2]);
if (script)
- script = clkcmptable(bios, script, pclk);
+ script = clkcmptable(bios, script, pxclk);
if (!script) {
NV_DEBUG_KMS(dev, "clock script 0 not found\n");
return 1;
}
NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
- if (pclk < 0) {
+ if (pxclk < 0) {
script = ROM16(otable[table[4] + i*6 + 4]);
if (script)
- script = clkcmptable(bios, script, -pclk);
+ script = clkcmptable(bios, script, -pxclk);
if (!script) {
NV_DEBUG_KMS(dev, "clock script 1 not found\n");
return 1;
}
NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
+ nouveau_bios_run_init_table(dev, script, dcbent);
}
return 0;
@@ -4572,7 +4725,7 @@ static struct pll_mapping nv84_pll_mapping[] = {
{ PLL_CORE , 0x004028 },
{ PLL_SHADER, 0x004020 },
{ PLL_MEMORY, 0x004008 },
- { PLL_VDEC , 0x004030 },
+ { PLL_UNK05 , 0x004030 },
{ PLL_UNK41 , 0x00e818 },
{ PLL_VPLL0 , 0x614100 },
{ PLL_VPLL1 , 0x614900 },
@@ -5033,7 +5186,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
load_table_ptr = ROM16(bios->data[bitentry->offset]);
if (load_table_ptr == 0x0) {
- NV_DEBUG(dev, "Pointer to BIT loadval table invalid\n");
+ NV_ERROR(dev, "Pointer to BIT loadval table invalid\n");
return -EINVAL;
}
@@ -5324,6 +5477,14 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
return 0;
}
+static int
+parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+ struct bit_entry *bitentry)
+{
+ bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]);
+ return 0;
+}
+
struct bit_table {
const char id;
int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -5338,9 +5499,6 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
struct nvbios *bios = &dev_priv->vbios;
u8 entries, *entry;
- if (bios->type != NVBIOS_BIT)
- return -ENODEV;
-
entries = bios->data[bios->offset + 10];
entry = &bios->data[bios->offset + 12];
while (entries--) {
@@ -5349,7 +5507,7 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
bit->version = entry[1];
bit->length = ROM16(entry[2]);
bit->offset = ROM16(entry[4]);
- bit->data = ROMPTR(dev, entry[4]);
+ bit->data = ROMPTR(bios, entry[4]);
return 0;
}
@@ -5400,6 +5558,7 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport));
return 0;
}
@@ -5454,6 +5613,10 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
uint16_t legacy_scripts_offset, legacy_i2c_offset;
/* load needed defaults in case we can't parse this info */
+ bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
+ bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
+ bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
+ bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
bios->digital_min_front_porch = 0x4b;
bios->fmaxvco = 256000;
bios->fminvco = 128000;
@@ -5561,6 +5724,14 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
+ if (bios->data[legacy_i2c_offset + 4])
+ bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
+ if (bios->data[legacy_i2c_offset + 5])
+ bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
+ if (bios->data[legacy_i2c_offset + 6])
+ bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
+ if (bios->data[legacy_i2c_offset + 7])
+ bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
if (bmplength > 74) {
bios->fmaxvco = ROM32(bmp[67]);
@@ -5611,128 +5782,274 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
return 0;
}
-void *
-dcb_table(struct drm_device *dev)
+static struct dcb_gpio_entry *
+new_gpio_entry(struct nvbios *bios)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u8 *dcb = NULL;
+ struct drm_device *dev = bios->dev;
+ struct dcb_gpio_table *gpio = &bios->dcb.gpio;
- if (dev_priv->card_type > NV_04)
- dcb = ROMPTR(dev, dev_priv->vbios.data[0x36]);
- if (!dcb) {
- NV_WARNONCE(dev, "No DCB data found in VBIOS\n");
+ if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
+ NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
return NULL;
}
- if (dcb[0] >= 0x41) {
- NV_WARNONCE(dev, "DCB version 0x%02x unknown\n", dcb[0]);
- return NULL;
- } else
- if (dcb[0] >= 0x30) {
- if (ROM32(dcb[6]) == 0x4edcbdcb)
- return dcb;
- } else
- if (dcb[0] >= 0x20) {
- if (ROM32(dcb[4]) == 0x4edcbdcb)
- return dcb;
- } else
- if (dcb[0] >= 0x15) {
- if (!memcmp(&dcb[-7], "DEV_REC", 7))
- return dcb;
- } else {
- /*
- * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
- * always has the same single (crt) entry, even when tv-out
- * present, so the conclusion is this version cannot really
- * be used.
- *
- * v1.2 tables (some NV6/10, and NV15+) normally have the
- * same 5 entries, which are not specific to the card and so
- * no use.
- *
- * v1.2 does have an I2C table that read_dcb_i2c_table can
- * handle, but cards exist (nv11 in #14821) with a bad i2c
- * table pointer, so use the indices parsed in
- * parse_bmp_structure.
- *
- * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
- */
- NV_WARNONCE(dev, "No useful DCB data in VBIOS\n");
- return NULL;
+ return &gpio->entry[gpio->entries++];
+}
+
+struct dcb_gpio_entry *
+nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ int i;
+
+ for (i = 0; i < bios->dcb.gpio.entries; i++) {
+ if (bios->dcb.gpio.entry[i].tag != tag)
+ continue;
+
+ return &bios->dcb.gpio.entry[i];
}
- NV_WARNONCE(dev, "DCB header validation failed\n");
return NULL;
}
-void *
-dcb_outp(struct drm_device *dev, u8 idx)
+static void
+parse_dcb_gpio_table(struct nvbios *bios)
{
- u8 *dcb = dcb_table(dev);
- if (dcb && dcb[0] >= 0x30) {
- if (idx < dcb[2])
- return dcb + dcb[1] + (idx * dcb[3]);
+ struct drm_device *dev = bios->dev;
+ struct dcb_gpio_entry *e;
+ u8 headerlen, entries, recordlen;
+ u8 *dcb, *gpio = NULL, *entry;
+ int i;
+
+ dcb = ROMPTR(bios, bios->data[0x36]);
+ if (dcb[0] >= 0x30) {
+ gpio = ROMPTR(bios, dcb[10]);
+ if (!gpio)
+ goto no_table;
+
+ headerlen = gpio[1];
+ entries = gpio[2];
+ recordlen = gpio[3];
} else
- if (dcb && dcb[0] >= 0x20) {
- u8 *i2c = ROMPTR(dev, dcb[2]);
- u8 *ent = dcb + 8 + (idx * 8);
- if (i2c && ent < i2c)
- return ent;
+ if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
+ gpio = ROMPTR(bios, dcb[-15]);
+ if (!gpio)
+ goto no_table;
+
+ headerlen = 3;
+ entries = gpio[2];
+ recordlen = gpio[1];
} else
- if (dcb && dcb[0] >= 0x15) {
- u8 *i2c = ROMPTR(dev, dcb[2]);
- u8 *ent = dcb + 4 + (idx * 10);
- if (i2c && ent < i2c)
- return ent;
+ if (dcb[0] >= 0x22) {
+ /* No GPIO table present, parse the TVDAC GPIO data. */
+ uint8_t *tvdac_gpio = &dcb[-5];
+
+ if (tvdac_gpio[0] & 1) {
+ e = new_gpio_entry(bios);
+ e->tag = DCB_GPIO_TVDAC0;
+ e->line = tvdac_gpio[1] >> 4;
+ e->invert = tvdac_gpio[0] & 2;
+ }
+
+ goto no_table;
+ } else {
+ NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
+ goto no_table;
}
- return NULL;
+ entry = gpio + headerlen;
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ e = new_gpio_entry(bios);
+ if (!e)
+ break;
+
+ if (gpio[0] < 0x40) {
+ e->entry = ROM16(entry[0]);
+ e->tag = (e->entry & 0x07e0) >> 5;
+ if (e->tag == 0x3f) {
+ bios->dcb.gpio.entries--;
+ continue;
+ }
+
+ e->line = (e->entry & 0x001f);
+ e->invert = ((e->entry & 0xf800) >> 11) != 4;
+ } else {
+ e->entry = ROM32(entry[0]);
+ e->tag = (e->entry & 0x0000ff00) >> 8;
+ if (e->tag == 0xff) {
+ bios->dcb.gpio.entries--;
+ continue;
+ }
+
+ e->line = (e->entry & 0x0000001f) >> 0;
+ e->state_default = (e->entry & 0x01000000) >> 24;
+ e->state[0] = (e->entry & 0x18000000) >> 27;
+ e->state[1] = (e->entry & 0x60000000) >> 29;
+ }
+ }
+
+no_table:
+ /* Apple iMac G4 NV18 */
+ if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+ e = new_gpio_entry(bios);
+ if (e) {
+ e->tag = DCB_GPIO_TVDAC0;
+ e->line = 4;
+ }
+ }
}
-int
-dcb_outp_foreach(struct drm_device *dev, void *data,
- int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
-{
- int ret, idx = -1;
- u8 *outp = NULL;
- while ((outp = dcb_outp(dev, ++idx))) {
- if (ROM32(outp[0]) == 0x00000000)
- break; /* seen on an NV11 with DCB v1.5 */
- if (ROM32(outp[0]) == 0xffffffff)
- break; /* seen on an NV17 with DCB v2.0 */
-
- if ((outp[0] & 0x0f) == OUTPUT_UNUSED)
- continue;
- if ((outp[0] & 0x0f) == OUTPUT_EOL)
- break;
+struct dcb_connector_table_entry *
+nouveau_bios_connector_entry(struct drm_device *dev, int index)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct dcb_connector_table_entry *cte;
+
+ if (index >= bios->dcb.connector.entries)
+ return NULL;
- ret = exec(dev, data, idx, outp);
- if (ret)
- return ret;
+ cte = &bios->dcb.connector.entry[index];
+ if (cte->type == 0xff)
+ return NULL;
+
+ return cte;
+}
+
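+/* Infer a connector type from the encoder types wired to this connector index */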
+static enum dcb_connector_type
+divine_connector_type(struct nvbios *bios, int index)
+{
+ struct dcb_table *dcb = &bios->dcb;
+ unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
+ int i;
+
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].connector == index)
+ encoders |= (1 << dcb->entry[i].type);
}
- return 0;
+ if (encoders & (1 << OUTPUT_DP)) {
+ if (encoders & (1 << OUTPUT_TMDS))
+ type = DCB_CONNECTOR_DP;
+ else
+ type = DCB_CONNECTOR_eDP;
+ } else
+ if (encoders & (1 << OUTPUT_TMDS)) {
+ if (encoders & (1 << OUTPUT_ANALOG))
+ type = DCB_CONNECTOR_DVI_I;
+ else
+ type = DCB_CONNECTOR_DVI_D;
+ } else
+ if (encoders & (1 << OUTPUT_ANALOG)) {
+ type = DCB_CONNECTOR_VGA;
+ } else
+ if (encoders & (1 << OUTPUT_LVDS)) {
+ type = DCB_CONNECTOR_LVDS;
+ } else
+ if (encoders & (1 << OUTPUT_TV)) {
+ type = DCB_CONNECTOR_TV_0;
+ }
+
+ return type;
}
-u8 *
-dcb_conntab(struct drm_device *dev)
+static void
+apply_dcb_connector_quirks(struct nvbios *bios, int idx)
{
- u8 *dcb = dcb_table(dev);
- if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
- u8 *conntab = ROMPTR(dev, dcb[0x14]);
- if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
- return conntab;
+ struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
+ struct drm_device *dev = bios->dev;
+
+ /* Gigabyte NX85T */
+ if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
+ if (cte->type == DCB_CONNECTOR_HDMI_1)
+ cte->type = DCB_CONNECTOR_DVI_I;
}
- return NULL;
}
-u8 *
-dcb_conn(struct drm_device *dev, u8 idx)
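+/* Maps the hotplug bit index of a connector entry to a GPIO tag (0xff = none) */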
+static const u8 hpd_gpio[16] = {
+ 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+};
+
+static void
+parse_dcb_connector_table(struct nvbios *bios)
{
- u8 *conntab = dcb_conntab(dev);
- if (conntab && idx < conntab[2])
- return conntab + conntab[1] + (idx * conntab[3]);
- return NULL;
+ struct drm_device *dev = bios->dev;
+ struct dcb_connector_table *ct = &bios->dcb.connector;
+ struct dcb_connector_table_entry *cte;
+ uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
+ uint8_t *entry;
+ int i;
+
+ if (!bios->dcb.connector_table_ptr) {
+ NV_DEBUG_KMS(dev, "No DCB connector table present\n");
+ return;
+ }
+
+ NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
+ conntab[0], conntab[1], conntab[2], conntab[3]);
+ if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
+ (conntab[3] != 2 && conntab[3] != 4)) {
+ NV_ERROR(dev, " Unknown! Please report.\n");
+ return;
+ }
+
+ ct->entries = conntab[2];
+
+ entry = conntab + conntab[1];
+ cte = &ct->entry[0];
+ for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
+ cte->index = i;
+ if (conntab[3] == 2)
+ cte->entry = ROM16(entry[0]);
+ else
+ cte->entry = ROM32(entry[0]);
+
+ cte->type = (cte->entry & 0x000000ff) >> 0;
+ cte->index2 = (cte->entry & 0x00000f00) >> 8;
+
+ cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
+ cte->gpio_tag = hpd_gpio[cte->gpio_tag];
+
+ if (cte->type == 0xff)
+ continue;
+
+ apply_dcb_connector_quirks(bios, i);
+
+ NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
+ i, cte->entry, cte->type, cte->index, cte->gpio_tag);
+
+ /* check for known types, fallback to guessing the type
+ * from attached encoders if we hit an unknown.
+ */
+ switch (cte->type) {
+ case DCB_CONNECTOR_VGA:
+ case DCB_CONNECTOR_TV_0:
+ case DCB_CONNECTOR_TV_1:
+ case DCB_CONNECTOR_TV_3:
+ case DCB_CONNECTOR_DVI_I:
+ case DCB_CONNECTOR_DVI_D:
+ case DCB_CONNECTOR_LVDS:
+ case DCB_CONNECTOR_LVDS_SPWG:
+ case DCB_CONNECTOR_DP:
+ case DCB_CONNECTOR_eDP:
+ case DCB_CONNECTOR_HDMI_0:
+ case DCB_CONNECTOR_HDMI_1:
+ break;
+ default:
+ cte->type = divine_connector_type(bios, cte->index);
+ NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
+ break;
+ }
+
+ if (nouveau_override_conntype) {
+ int type = divine_connector_type(bios, cte->index);
+ if (type != cte->type)
+ NV_WARN(dev, " -> type 0x%02x\n", cte->type);
+ }
+
+ }
}
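/*
 * For illustration only (not part of the patch): decoding one hypothetical
 * 32-bit connector table entry exactly as the loop above does.  With
 * entry = 0x00001130 the type field is 0x30, index2 is 1, and bit 12 of
 * the hotplug mask is set, so ffs() returns 1 and hpd_gpio[] yields 0x07.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	static const unsigned char hpd[16] = {
		0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
	};
	unsigned int entry = 0x00001130;	/* made-up example entry */
	unsigned int type = (entry & 0x000000ff) >> 0;
	unsigned int idx2 = (entry & 0x00000f00) >> 8;
	unsigned int tag = hpd[ffs((entry & 0x07033000) >> 12)];

	printf("type 0x%02x idx2 %u hpd tag 0x%02x\n", type, idx2, tag);
	return 0;
}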
static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
@@ -5765,7 +6082,8 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->type = conn & 0xf;
entry->i2c_index = (conn >> 4) & 0xf;
entry->heads = (conn >> 8) & 0xf;
- entry->connector = (conn >> 12) & 0xf;
+ if (dcb->version >= 0x40)
+ entry->connector = (conn >> 12) & 0xf;
entry->bus = (conn >> 16) & 0xf;
entry->location = (conn >> 20) & 0x3;
entry->or = (conn >> 24) & 0xf;
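/* For illustration: with a hypothetical conn = 0x02014312 the fields above
 * decode to type 0x2 (TMDS), i2c_index 0x1, heads 0x3, connector 0x4 (read
 * only on DCB 4.0+ after this change), bus 0x1, location 0 and or 0x2. */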
@@ -5831,14 +6149,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
}
case OUTPUT_DP:
entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
- switch ((conf & 0x00e00000) >> 21) {
- case 0:
- entry->dpconf.link_bw = 162000;
- break;
- default:
- entry->dpconf.link_bw = 270000;
- break;
- }
+ entry->dpconf.link_bw = (conf & 0x00e00000) >> 21;
switch ((conf & 0x0f000000) >> 24) {
case 0xf:
entry->dpconf.link_nr = 4;
@@ -5937,6 +6248,25 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
return true;
}
+static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
+ uint32_t conn, uint32_t conf)
+{
+ struct dcb_entry *entry = new_dcb_entry(dcb);
+ bool ret;
+
+ if (dcb->version >= 0x20)
+ ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
+ else
+ ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
+ if (!ret)
+ return ret;
+
+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+ entry->i2c_index, &dcb->i2c[entry->i2c_index]);
+
+ return true;
+}
+
static
void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
{
@@ -6047,37 +6377,6 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
}
}
- /* Some other twisted XFX board (rhbz#694914)
- *
- * The DVI/VGA encoder combo that's supposed to represent the
- * DVI-I connector actually points at two different ones, and
- * the HDMI connector ends up paired with the VGA instead.
- *
- * The connector table is missing anything for VGA at all, pointing it
- * at an invalid conntab entry 2, so we figure it out ourselves.
- */
- if (nv_match_device(dev, 0x0615, 0x1682, 0x2605)) {
- if (idx == 0) {
- *conn = 0x02002300; /* VGA, connector 2 */
- *conf = 0x00000028;
- } else
- if (idx == 1) {
- *conn = 0x01010312; /* DVI, connector 0 */
- *conf = 0x00020030;
- } else
- if (idx == 2) {
- *conn = 0x04020310; /* VGA, connector 0 */
- *conf = 0x00000028;
- } else
- if (idx == 3) {
- *conn = 0x02021322; /* HDMI, connector 1 */
- *conf = 0x00020010;
- } else {
- *conn = 0x0000000e; /* EOL */
- *conf = 0x00000000;
- }
- }
-
return true;
}
@@ -6097,118 +6396,154 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#endif
/* Make up some sane defaults */
- fabricate_dcb_output(dcb, OUTPUT_ANALOG,
- bios->legacy.i2c_indices.crt, 1, 1);
+ fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
- fabricate_dcb_output(dcb, OUTPUT_TV,
- bios->legacy.i2c_indices.tv,
+ fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
all_heads, 0);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
- fabricate_dcb_output(dcb, OUTPUT_TMDS,
- bios->legacy.i2c_indices.panel,
+ fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
all_heads, 1);
}
static int
-parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_table *dcb = &dev_priv->vbios.dcb;
- u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
- u32 conn = ROM32(outp[0]);
- bool ret;
-
- if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
- struct dcb_entry *entry = new_dcb_entry(dcb);
-
- NV_TRACEWARN(dev, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
+ struct dcb_table *dcb = &bios->dcb;
+ uint16_t dcbptr = 0, i2ctabptr = 0;
+ uint8_t *dcbtable;
+ uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
+ bool configblock = true;
+ int recordlength = 8, confofs = 4;
+ int i;
- if (dcb->version >= 0x20)
- ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
- else
- ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
- if (!ret)
- return 1; /* stop parsing */
+ /* get the offset from 0x36 */
+ if (dev_priv->card_type > NV_04) {
+ dcbptr = ROM16(bios->data[0x36]);
+ if (dcbptr == 0x0000)
+ NV_WARN(dev, "No output data (DCB) found in BIOS\n");
+ }
- /* Ignore the I2C index for on-chip TV-out, as there
- * are cards with bogus values (nv31m in bug 23212),
- * and it's otherwise useless.
- */
- if (entry->type == OUTPUT_TV &&
- entry->location == DCB_LOC_ON_CHIP)
- entry->i2c_index = 0x0f;
+ /* this situation likely means a really old card, pre DCB */
+ if (dcbptr == 0x0) {
+ fabricate_dcb_encoder_table(dev, bios);
+ return 0;
}
- return 0;
-}
+ dcbtable = &bios->data[dcbptr];
-static void
-dcb_fake_connectors(struct nvbios *bios)
-{
- struct dcb_table *dcbt = &bios->dcb;
- u8 map[16] = { };
- int i, idx = 0;
+ /* get DCB version */
+ dcb->version = dcbtable[0];
+ NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
+ dcb->version >> 4, dcb->version & 0xf);
- /* heuristic: if we ever get a non-zero connector field, assume
- * that all the indices are valid and we don't need fake them.
- */
- for (i = 0; i < dcbt->entries; i++) {
- if (dcbt->entry[i].connector)
- return;
- }
+ if (dcb->version >= 0x20) { /* NV17+ */
+ uint32_t sig;
- /* no useful connector info available, we need to make it up
- * ourselves. the rule here is: anything on the same i2c bus
- * is considered to be on the same connector. any output
- * without an associated i2c bus is assigned its own unique
- * connector index.
- */
- for (i = 0; i < dcbt->entries; i++) {
- u8 i2c = dcbt->entry[i].i2c_index;
- if (i2c == 0x0f) {
- dcbt->entry[i].connector = idx++;
+ if (dcb->version >= 0x30) { /* NV40+ */
+ headerlen = dcbtable[1];
+ entries = dcbtable[2];
+ recordlength = dcbtable[3];
+ i2ctabptr = ROM16(dcbtable[4]);
+ sig = ROM32(dcbtable[6]);
+ dcb->gpio_table_ptr = ROM16(dcbtable[10]);
+ dcb->connector_table_ptr = ROM16(dcbtable[20]);
} else {
- if (!map[i2c])
- map[i2c] = ++idx;
- dcbt->entry[i].connector = map[i2c] - 1;
+ i2ctabptr = ROM16(dcbtable[2]);
+ sig = ROM32(dcbtable[4]);
+ headerlen = 8;
}
- }
- /* if we created more than one connector, destroy the connector
- * table - just in case it has random, rather than stub, entries.
- */
- if (i > 1) {
- u8 *conntab = dcb_conntab(bios->dev);
- if (conntab)
- conntab[0] = 0x00;
- }
-}
+ if (sig != 0x4edcbdcb) {
+ NV_ERROR(dev, "Bad Display Configuration Block "
+ "signature (%08X)\n", sig);
+ return -EINVAL;
+ }
+ } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
+ char sig[8] = { 0 };
-static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
-{
- struct dcb_table *dcb = &bios->dcb;
- u8 *dcbt, *conn;
- int idx;
-
- dcbt = dcb_table(dev);
- if (!dcbt) {
- /* handle pre-DCB boards */
- if (bios->type == NVBIOS_BMP) {
- fabricate_dcb_encoder_table(dev, bios);
- return 0;
+ strncpy(sig, (char *)&dcbtable[-7], 7);
+ i2ctabptr = ROM16(dcbtable[2]);
+ recordlength = 10;
+ confofs = 6;
+
+ if (strcmp(sig, "DEV_REC")) {
+ NV_ERROR(dev, "Bad Display Configuration Block "
+ "signature (%s)\n", sig);
+ return -EINVAL;
}
+ } else {
+ /*
+ * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
+ * has the same single (crt) entry, even when tv-out is present, so
+ * the conclusion is this version cannot really be used.
+ * v1.2 tables (some NV6/10, and NV15+) normally have the same
+ * 5 entries, which are not specific to the card and so of no use.
+ * v1.2 does have an I2C table that read_dcb_i2c_table can
+ * handle, but cards exist (nv11 in #14821) with a bad i2c table
+ * pointer, so use the indices parsed in parse_bmp_structure.
+ * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+ */
+ NV_TRACEWARN(dev, "No useful information in BIOS output table; "
+ "adding all possible outputs\n");
+ fabricate_dcb_encoder_table(dev, bios);
+ return 0;
+ }
- return -EINVAL;
+ if (!i2ctabptr)
+ NV_WARN(dev, "No pointer to DCB I2C port table\n");
+ else {
+ dcb->i2c_table = &bios->data[i2ctabptr];
+ if (dcb->version >= 0x30)
+ dcb->i2c_default_indices = dcb->i2c_table[4];
+
+ /*
+ * Parse the "management" I2C bus, used for hardware
+ * monitoring and some external TMDS transmitters.
+ */
+ if (dcb->version >= 0x22) {
+ int idx = (dcb->version >= 0x40 ?
+ dcb->i2c_default_indices & 0xf :
+ 2);
+
+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+ idx, &dcb->i2c[idx]);
+ }
}
- NV_TRACE(dev, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
+ if (entries > DCB_MAX_NUM_ENTRIES)
+ entries = DCB_MAX_NUM_ENTRIES;
+
+ for (i = 0; i < entries; i++) {
+ uint32_t connection, config = 0;
- dcb->version = dcbt[0];
- dcb_outp_foreach(dev, NULL, parse_dcb_entry);
+ connection = ROM32(dcbtable[headerlen + recordlength * i]);
+ if (configblock)
+ config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
+
+ /* seen on an NV11 with DCB v1.5 */
+ if (connection == 0x00000000)
+ break;
+
+ /* seen on an NV17 with DCB v2.0 */
+ if (connection == 0xffffffff)
+ break;
+
+ if ((connection & 0x0000000f) == 0x0000000f)
+ continue;
+
+ if (!apply_dcb_encoder_quirks(dev, i, &connection, &config))
+ continue;
+
+ NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
+ dcb->entries, connection, config);
+
+ if (!parse_dcb_entry(dev, dcb, connection, config))
+ break;
+ }
/*
* apart for v2.1+ not being known for requiring merging, this
@@ -6220,19 +6555,77 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
if (!dcb->entries)
return -ENXIO;
- /* dump connector table entries to log, if any exist */
- idx = -1;
- while ((conn = dcb_conn(dev, ++idx))) {
- if (conn[0] != 0xff) {
- NV_TRACE(dev, "DCB conn %02d: ", idx);
- if (dcb_conntab(dev)[3] < 4)
- printk("%04x\n", ROM16(conn[0]));
- else
- printk("%08x\n", ROM32(conn[0]));
+ parse_dcb_gpio_table(bios);
+ parse_dcb_connector_table(bios);
+ return 0;
+}
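/* For illustration: the parsing loop above reads entry i's connection word
 * at dcbtable[headerlen + recordlength * i] and, when present, its config
 * word confofs bytes further on.  On a DCB 1.5 board (headerlen 4,
 * recordlength 10, confofs 6) entry 2 therefore starts at byte 24, with
 * its config word at byte 30. */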
+
+static void
+fixup_legacy_connector(struct nvbios *bios)
+{
+ struct dcb_table *dcb = &bios->dcb;
+ int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
+
+ /*
+ * DCB 3.0 also has the table in most cases, but there are some cards
+ * where the table is filled with stub entries, and the DCB entry
+ * indices are all 0. We don't need the connector indices on pre-G80
+ * chips (yet?) so limit the use to DCB 4.0 and above.
+ */
+ if (dcb->version >= 0x40)
+ return;
+
+ dcb->connector.entries = 0;
+
+ /*
+ * No known connector info before v3.0, so make it up. The rule here
+ * is: anything on the same i2c bus is considered to be on the same
+ * connector. Any output without an associated i2c bus is assigned
+ * its own unique connector index.
+ */
+ for (i = 0; i < dcb->entries; i++) {
+ /*
+ * Ignore the I2C index for on-chip TV-out, as there
+ * are cards with bogus values (nv31m in bug 23212),
+ * and it's otherwise useless.
+ */
+ if (dcb->entry[i].type == OUTPUT_TV &&
+ dcb->entry[i].location == DCB_LOC_ON_CHIP)
+ dcb->entry[i].i2c_index = 0xf;
+ i2c = dcb->entry[i].i2c_index;
+
+ if (i2c_conn[i2c]) {
+ dcb->entry[i].connector = i2c_conn[i2c] - 1;
+ continue;
}
+
+ dcb->entry[i].connector = dcb->connector.entries++;
+ if (i2c != 0xf)
+ i2c_conn[i2c] = dcb->connector.entries;
+ }
+
+ /* Fake the connector table as well as just connector indices */
+ for (i = 0; i < dcb->connector.entries; i++) {
+ dcb->connector.entry[i].index = i;
+ dcb->connector.entry[i].type = divine_connector_type(bios, i);
+ dcb->connector.entry[i].gpio_tag = 0xff;
+ }
+}
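/* For illustration: four outputs with i2c_index values {0, 0, 1, 0xf}
 * leave the loop above with connector indices {0, 0, 1, 2} - the two
 * port-0 outputs share one connector, and the bus-less output (0xf)
 * gets a unique index of its own. */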
+
+static void
+fixup_legacy_i2c(struct nvbios *bios)
+{
+ struct dcb_table *dcb = &bios->dcb;
+ int i;
+
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
+ dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
+ if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
+ dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
+ if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
+ dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
}
- dcb_fake_connectors(bios);
- return 0;
}
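/* For illustration: fabricate_dcb_encoder_table() stamps entries with the
 * LEGACY_I2C_* placeholder indices; the pass above swaps each placeholder
 * for the real port number parsed out of the BMP structure, so later i2c
 * lookups see ordinary DCB port indices. */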
static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
@@ -6338,7 +6731,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
void
nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
- struct dcb_entry *dcbent, int crtc)
+ struct dcb_entry *dcbent)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;
@@ -6346,22 +6739,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
spin_lock_bh(&bios->lock);
bios->display.output = dcbent;
- bios->display.crtc = crtc;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
spin_unlock_bh(&bios->lock);
}
-void
-nouveau_bios_init_exec(struct drm_device *dev, uint16_t table)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- struct init_exec iexec = { true, false };
-
- parse_init_table(bios, table, &iexec);
-}
-
static bool NVInitVBIOS(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -6371,7 +6753,11 @@ static bool NVInitVBIOS(struct drm_device *dev)
spin_lock_init(&bios->lock);
bios->dev = dev;
- return bios_shadow(dev);
+ if (!NVShadowVBIOS(dev, bios->data))
+ return false;
+
+ bios->length = NV_PROM_SIZE;
+ return true;
}
static int nouveau_parse_vbios_struct(struct drm_device *dev)
@@ -6439,14 +6825,28 @@ nouveau_run_vbios_init(struct drm_device *dev)
if (dev_priv->card_type >= NV_50) {
for (i = 0; i < bios->dcb.entries; i++) {
- nouveau_bios_run_display_table(dev, 0, 0,
- &bios->dcb.entry[i], -1);
+ nouveau_bios_run_display_table(dev,
+ &bios->dcb.entry[i],
+ 0, 0);
}
}
return ret;
}
+static void
+nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct dcb_i2c_entry *entry;
+ int i;
+
+ entry = &bios->dcb.i2c[0];
+ for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
+ nouveau_i2c_fini(dev, entry);
+}
+
static bool
nouveau_bios_posted(struct drm_device *dev)
{
@@ -6483,18 +6883,13 @@ nouveau_bios_init(struct drm_device *dev)
if (ret)
return ret;
- ret = nouveau_i2c_init(dev);
- if (ret)
- return ret;
-
- ret = nouveau_mxm_init(dev);
- if (ret)
- return ret;
-
ret = parse_dcb_table(dev, bios);
if (ret)
return ret;
+ fixup_legacy_i2c(bios);
+ fixup_legacy_connector(bios);
+
if (!bios->major_version) /* we don't run version 0 bios */
return 0;
@@ -6531,10 +6926,5 @@ nouveau_bios_init(struct drm_device *dev)
void
nouveau_bios_takedown(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- nouveau_mxm_fini(dev);
- nouveau_i2c_fini(dev);
-
- kfree(dev_priv->vbios.data);
+ nouveau_bios_i2c_devices_takedown(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 1f3233d..050c314 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,14 +34,9 @@
#define DCB_LOC_ON_CHIP 0
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
-#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
-#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
-#define ROMPTR(d,x) ({ \
- struct drm_nouveau_private *dev_priv = (d)->dev_private; \
- ROM16(x) ? &dev_priv->vbios.data[ROM16(x)] : NULL; \
-})
+#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
+#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
+#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
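/*
 * For illustration: ROM16/ROM32 are unaligned little-endian loads from the
 * shadowed VBIOS image.  A strictly portable equivalent (a hypothetical
 * helper, not something the driver provides) would assemble the value byte
 * by byte instead of type-punning:
 */
#include <stdint.h>

static inline uint32_t rom32_bytewise(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}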
struct bit_entry {
uint8_t id;
@@ -53,13 +48,30 @@ struct bit_entry {
int bit_table(struct drm_device *, u8 id, struct bit_entry *);
+struct dcb_i2c_entry {
+ uint32_t entry;
+ uint8_t port_type;
+ uint8_t read, write;
+ struct nouveau_i2c_chan *chan;
+};
+
enum dcb_gpio_tag {
- DCB_GPIO_PANEL_POWER = 0x01,
- DCB_GPIO_TVDAC0 = 0x0c,
+ DCB_GPIO_TVDAC0 = 0xc,
DCB_GPIO_TVDAC1 = 0x2d,
- DCB_GPIO_PWM_FAN = 0x09,
- DCB_GPIO_FAN_SENSE = 0x3d,
- DCB_GPIO_UNUSED = 0xff
+};
+
+struct dcb_gpio_entry {
+ enum dcb_gpio_tag tag;
+ int line;
+ bool invert;
+ uint32_t entry;
+ uint8_t state_default;
+ uint8_t state[2];
+};
+
+struct dcb_gpio_table {
+ int entries;
+ struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
};
enum dcb_connector_type {
@@ -75,11 +87,23 @@ enum dcb_connector_type {
DCB_CONNECTOR_eDP = 0x47,
DCB_CONNECTOR_HDMI_0 = 0x60,
DCB_CONNECTOR_HDMI_1 = 0x61,
- DCB_CONNECTOR_DMS59_DP0 = 0x64,
- DCB_CONNECTOR_DMS59_DP1 = 0x65,
DCB_CONNECTOR_NONE = 0xff
};
+struct dcb_connector_table_entry {
+ uint8_t index;
+ uint32_t entry;
+ enum dcb_connector_type type;
+ uint8_t index2;
+ uint8_t gpio_tag;
+ void *drm;
+};
+
+struct dcb_connector_table {
+ int entries;
+ struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
+};
+
enum dcb_type {
OUTPUT_ANALOG = 0,
OUTPUT_TV = 1,
@@ -87,7 +111,6 @@ enum dcb_type {
OUTPUT_LVDS = 3,
OUTPUT_DP = 6,
OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
- OUTPUT_UNUSED = 15,
OUTPUT_ANY = -1
};
@@ -132,8 +155,18 @@ struct dcb_entry {
struct dcb_table {
uint8_t version;
+
int entries;
struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
+
+ uint8_t *i2c_table;
+ uint8_t i2c_default_indices;
+ struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
+
+ uint16_t gpio_table_ptr;
+ struct dcb_gpio_table gpio;
+ uint16_t connector_table_ptr;
+ struct dcb_connector_table connector;
};
enum nouveau_or {
@@ -162,7 +195,7 @@ enum pll_types {
PLL_SHADER = 0x02,
PLL_UNK03 = 0x03,
PLL_MEMORY = 0x04,
- PLL_VDEC = 0x05,
+ PLL_UNK05 = 0x05,
PLL_UNK40 = 0x40,
PLL_UNK41 = 0x41,
PLL_UNK42 = 0x42,
@@ -211,8 +244,6 @@ struct nvbios {
NVBIOS_BIT
} type;
uint16_t offset;
- uint32_t length;
- uint8_t *data;
uint8_t chip_version;
@@ -223,6 +254,8 @@ struct nvbios {
spinlock_t lock;
+ uint8_t data[NV_PROM_SIZE];
+ unsigned int length;
bool execute;
uint8_t major_version;
@@ -256,8 +289,8 @@ struct nvbios {
struct {
struct dcb_entry *output;
- int crtc;
uint16_t script_table_ptr;
+ uint16_t dp_table_ptr;
} display;
struct {
@@ -300,11 +333,4 @@ struct nvbios {
} legacy;
};
-void *dcb_table(struct drm_device *);
-void *dcb_outp(struct drm_device *, u8 idx);
-int dcb_outp_foreach(struct drm_device *, void *data,
- int (*)(struct drm_device *, void *, int idx, u8 *outp));
-u8 *dcb_conntab(struct drm_device *);
-u8 *dcb_conn(struct drm_device *, u8 idx);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b1d919f..5fb98de 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,7 +28,6 @@
*/
#include "drmP.h"
-#include "ttm/ttm_page_alloc.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
@@ -50,12 +49,16 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
DRM_ERROR("bo %p still attached to GEM object\n", bo);
nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+ if (nvbo->vma.node) {
+ nouveau_vm_unmap(&nvbo->vma);
+ nouveau_vm_put(&nvbo->vma);
+ }
kfree(nvbo);
}
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
- int *align, int *size)
+ int *align, int *size, int *page_shift)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
@@ -79,55 +82,67 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
}
}
} else {
- *size = roundup(*size, (1 << nvbo->page_shift));
- *align = max((1 << nvbo->page_shift), *align);
+ if (likely(dev_priv->chan_vm)) {
+ if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
+ *page_shift = dev_priv->chan_vm->lpg_shift;
+ else
+ *page_shift = dev_priv->chan_vm->spg_shift;
+ } else {
+ *page_shift = 12;
+ }
+
+ *size = roundup(*size, (1 << *page_shift));
+ *align = max((1 << *page_shift), *align);
}
*size = roundup(*size, PAGE_SIZE);
}
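/* For illustration: with a large-page shift of 16 (64 KiB pages) the
 * branch above rounds a requested size of 150000 bytes up to 196608
 * (3 * 64 KiB) and raises the alignment to at least 65536; the final
 * roundup() keeps the result PAGE_SIZE-aligned in every case. */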
int
-nouveau_bo_new(struct drm_device *dev, int size, int align,
- uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
- struct nouveau_bo **pnvbo)
+nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
+ int size, int align, uint32_t flags, uint32_t tile_mode,
+ uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
- size_t acc_size;
- int ret;
+ int ret = 0, page_shift = 0;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
return -ENOMEM;
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
- INIT_LIST_HEAD(&nvbo->vma_list);
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nvbo->page_shift = 12;
- if (dev_priv->bar1_vm) {
- if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
- nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
+ nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
+ align >>= PAGE_SHIFT;
+
+ if (dev_priv->chan_vm) {
+ ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
+ NV_MEM_ACCESS_RW, &nvbo->vma);
+ if (ret) {
+ kfree(nvbo);
+ return ret;
+ }
}
- nouveau_bo_fixup_align(nvbo, flags, &align, &size);
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
- acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
- sizeof(struct nouveau_bo));
-
+ nvbo->channel = chan;
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
- ttm_bo_type_device, &nvbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
- nouveau_bo_del_ttm);
+ ttm_bo_type_device, &nvbo->placement, align, 0,
+ false, NULL, size, nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
+ nvbo->channel = NULL;
+ if (nvbo->vma.node)
+ nvbo->bo.offset = nvbo->vma.offset;
*pnvbo = nvbo;
return 0;
}
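/* Note the ordering above: on chipsets with a per-channel VM the virtual
 * address range is reserved with nouveau_vm_get() before ttm_bo_init()
 * runs, so bo.offset can be pointed at the vma immediately after the
 * buffer is created (and again after each validate). */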
@@ -153,7 +168,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
if (dev_priv->card_type == NV_10 &&
nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
- nvbo->bo.mem.num_pages < vram_pages / 4) {
+ nvbo->bo.mem.num_pages < vram_pages / 2) {
/*
* Make sure that the color and depth buffers are handled
* by independent memory controller units. Up to a 9x
@@ -297,6 +312,8 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
if (ret)
return ret;
+ if (nvbo->vma.node)
+ nvbo->bo.offset = nvbo->vma.offset;
return 0;
}
@@ -348,10 +365,8 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
*mem = val;
}
-static struct ttm_tt *
-nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+static struct ttm_backend *
+nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
@@ -359,13 +374,11 @@ nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
case NOUVEAU_GART_AGP:
- return ttm_agp_tt_create(bdev, dev->agp->bridge,
- size, page_flags, dummy_read_page);
+ return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
case NOUVEAU_GART_PDMA:
case NOUVEAU_GART_HW:
- return nouveau_sgdma_create_ttm(bdev, size, page_flags,
- dummy_read_page);
+ return nouveau_sgdma_init_ttm(dev);
default:
NV_ERROR(dev, "Unknown GART type %d\n",
dev_priv->gart_info.type);
@@ -427,6 +440,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
+ man->gpu_offset = dev_priv->gart_info.aper_base;
break;
default:
NV_ERROR(dev, "Unknown GART type: %d\n",
@@ -487,12 +501,19 @@ static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *node = old_mem->mm_node;
- u64 src_offset = node->vma[0].offset;
- u64 dst_offset = node->vma[1].offset;
+ struct nouveau_mem *old_node = old_mem->mm_node;
+ struct nouveau_mem *new_node = new_mem->mm_node;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
u32 page_count = new_mem->num_pages;
+ u64 src_offset, dst_offset;
int ret;
+ src_offset = old_node->tmp_vma.offset;
+ if (new_node->tmp_vma.node)
+ dst_offset = new_node->tmp_vma.offset;
+ else
+ dst_offset = nvbo->vma.offset;
+
page_count = new_mem->num_pages;
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -526,13 +547,19 @@ static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *node = old_mem->mm_node;
+ struct nouveau_mem *old_node = old_mem->mm_node;
+ struct nouveau_mem *new_node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 length = (new_mem->num_pages << PAGE_SHIFT);
- u64 src_offset = node->vma[0].offset;
- u64 dst_offset = node->vma[1].offset;
+ u64 src_offset, dst_offset;
int ret;
+ src_offset = old_node->tmp_vma.offset;
+ if (new_node->tmp_vma.node)
+ dst_offset = new_node->tmp_vma.offset;
+ else
+ dst_offset = nvbo->vma.offset;
+
while (length) {
u32 amount, stride, height;
@@ -668,26 +695,6 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
}
static int
-nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
- struct ttm_mem_reg *mem, struct nouveau_vma *vma)
-{
- struct nouveau_mem *node = mem->mm_node;
- int ret;
-
- ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
- node->page_shift, NV_MEM_ACCESS_RO, vma);
- if (ret)
- return ret;
-
- if (mem->mem_type == TTM_PL_VRAM)
- nouveau_vm_map(vma, node);
- else
- nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
-
- return 0;
-}
-
-static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
@@ -704,20 +711,31 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
}
- /* create temporary vmas for the transfer and attach them to the
- * old nouveau_mem node, these will get cleaned up after ttm has
- * destroyed the ttm_mem_reg
+ /* create temporary vma for old memory; this will get cleaned
+ * up after ttm destroys the ttm_mem_reg
*/
if (dev_priv->card_type >= NV_50) {
struct nouveau_mem *node = old_mem->mm_node;
+ if (!node->tmp_vma.node) {
+ u32 page_shift = nvbo->vma.node->type;
+ if (old_mem->mem_type == TTM_PL_TT)
+ page_shift = nvbo->vma.vm->spg_shift;
+
+ ret = nouveau_vm_get(chan->vm,
+ old_mem->num_pages << PAGE_SHIFT,
+ page_shift, NV_MEM_ACCESS_RO,
+ &node->tmp_vma);
+ if (ret)
+ goto out;
+ }
- ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
- if (ret)
- goto out;
-
- ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
- if (ret)
- goto out;
+ if (old_mem->mem_type == TTM_PL_VRAM)
+ nouveau_vm_map(&node->tmp_vma, node);
+ else {
+ nouveau_vm_map_sg(&node->tmp_vma, 0,
+ old_mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+ }
}
if (dev_priv->card_type < NV_50)
@@ -744,6 +762,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
struct ttm_mem_reg tmp_mem;
@@ -763,7 +782,23 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_mem *node = tmp_mem.mm_node;
+ struct nouveau_vma *vma = &nvbo->vma;
+ if (vma->node->type != vma->vm->spg_shift)
+ vma = &node->tmp_vma;
+ nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
+ node, node->pages);
+ }
+
ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ nouveau_vm_unmap(&nvbo->vma);
+ }
+
if (ret)
goto out;
@@ -809,25 +844,30 @@ out:
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_vma *vma;
+ struct nouveau_vma *vma = &nvbo->vma;
+ struct nouveau_vm *vm = vma->vm;
- /* ttm can now (stupidly) pass the driver bos it didn't create... */
- if (bo->destroy != nouveau_bo_del_ttm)
+ if (dev_priv->card_type < NV_50)
return;
- list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
- nouveau_vm_map(vma, new_mem->mm_node);
- } else
- if (new_mem && new_mem->mem_type == TTM_PL_TT &&
- nvbo->page_shift == vma->vm->spg_shift) {
- nouveau_vm_map_sg(vma, 0, new_mem->
- num_pages << PAGE_SHIFT,
- new_mem->mm_node);
- } else {
+ switch (new_mem->mem_type) {
+ case TTM_PL_VRAM:
+ nouveau_vm_map(vma, node);
+ break;
+ case TTM_PL_TT:
+ if (vma->node->type != vm->spg_shift) {
nouveau_vm_unmap(vma);
+ vma = &node->tmp_vma;
}
+ nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+ break;
+ default:
+ nouveau_vm_unmap(&nvbo->vma);
+ break;
}
}
@@ -967,7 +1007,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
break;
}
- if (dev_priv->card_type >= NV_C0)
+ if (dev_priv->card_type == NV_C0)
page_shift = node->page_shift;
else
page_shift = 12;
@@ -1055,94 +1095,8 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
nouveau_fence_unref(&old_fence);
}
-static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm)
-{
- struct ttm_dma_tt *ttm_dma = (void *)ttm;
- struct drm_nouveau_private *dev_priv;
- struct drm_device *dev;
- unsigned i;
- int r;
-
- if (ttm->state != tt_unpopulated)
- return 0;
-
- dev_priv = nouveau_bdev(ttm->bdev);
- dev = dev_priv->dev;
-
-#if __OS_HAS_AGP
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
- return ttm_agp_tt_populate(ttm);
- }
-#endif
-
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- return ttm_dma_populate((void *)ttm, dev->dev);
- }
-#endif
-
- r = ttm_pool_populate(ttm);
- if (r) {
- return r;
- }
-
- for (i = 0; i < ttm->num_pages; i++) {
- ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
- while (--i) {
- pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- ttm_dma->dma_address[i] = 0;
- }
- ttm_pool_unpopulate(ttm);
- return -EFAULT;
- }
- }
- return 0;
-}
-
-static void
-nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
-{
- struct ttm_dma_tt *ttm_dma = (void *)ttm;
- struct drm_nouveau_private *dev_priv;
- struct drm_device *dev;
- unsigned i;
-
- dev_priv = nouveau_bdev(ttm->bdev);
- dev = dev_priv->dev;
-
-#if __OS_HAS_AGP
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
- ttm_agp_tt_unpopulate(ttm);
- return;
- }
-#endif
-
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- ttm_dma_unpopulate((void *)ttm, dev->dev);
- return;
- }
-#endif
-
- for (i = 0; i < ttm->num_pages; i++) {
- if (ttm_dma->dma_address[i]) {
- pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
-
- ttm_pool_unpopulate(ttm);
-}
-
struct ttm_bo_driver nouveau_bo_driver = {
- .ttm_tt_create = &nouveau_ttm_tt_create,
- .ttm_tt_populate = &nouveau_ttm_tt_populate,
- .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
+ .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
@@ -1159,54 +1113,3 @@ struct ttm_bo_driver nouveau_bo_driver = {
.io_mem_free = &nouveau_ttm_io_mem_free,
};
-struct nouveau_vma *
-nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
-{
- struct nouveau_vma *vma;
- list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (vma->vm == vm)
- return vma;
- }
-
- return NULL;
-}
-
-int
-nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
- struct nouveau_vma *vma)
-{
- const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- struct nouveau_mem *node = nvbo->bo.mem.mm_node;
- int ret;
-
- ret = nouveau_vm_get(vm, size, nvbo->page_shift,
- NV_MEM_ACCESS_RW, vma);
- if (ret)
- return ret;
-
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
- nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
- else
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- nouveau_vm_map_sg(vma, 0, size, node);
-
- list_add_tail(&vma->head, &nvbo->vma_list);
- vma->refcount = 1;
- return 0;
-}
-
-void
-nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
-{
- if (vma->node) {
- if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
- spin_lock(&nvbo->bo.bdev->fence_lock);
- ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.bdev->fence_lock);
- nouveau_vm_unmap(vma);
- }
-
- nouveau_vm_put(vma);
- list_del(&vma->head);
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a018def..d31d355 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -27,63 +27,40 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include "nouveau_ramht.h"
static int
-nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
+nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
- u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int ret;
-
- /* allocate buffer object */
- ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
- if (ret)
- goto out;
-
- ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
- if (ret)
- goto out;
-
- ret = nouveau_bo_map(chan->pushbuf_bo);
- if (ret)
- goto out;
+ struct nouveau_bo *pb = chan->pushbuf_bo;
+ struct nouveau_gpuobj *pushbuf = NULL;
+ int ret = 0;
- /* create DMA object covering the entire memtype where the push
- * buffer resides, userspace can submit its own push buffers from
- * anywhere within the same memtype.
- */
- chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
if (dev_priv->card_type >= NV_50) {
- ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
- &chan->pushbuf_vma);
- if (ret)
- goto out;
-
if (dev_priv->card_type < NV_C0) {
ret = nouveau_gpuobj_dma_new(chan,
NV_CLASS_DMA_IN_MEMORY, 0,
(1ULL << 40),
NV_MEM_ACCESS_RO,
NV_MEM_TARGET_VM,
- &chan->pushbuf);
+ &pushbuf);
}
- chan->pushbuf_base = chan->pushbuf_vma.offset;
+ chan->pushbuf_base = pb->bo.offset;
} else
- if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
+ if (pb->bo.mem.mem_type == TTM_PL_TT) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->gart_info.aper_size,
NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_GART,
- &chan->pushbuf);
+ NV_MEM_TARGET_GART, &pushbuf);
+ chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_VRAM,
- &chan->pushbuf);
+ NV_MEM_TARGET_VRAM, &pushbuf);
+ chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else {
/* NV04 cmdbuf hack, from original ddx.. not sure of its
* exact reason for existing :) PCI access to cmdbuf in
@@ -93,22 +70,47 @@ nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
pci_resource_start(dev->pdev, 1),
dev_priv->fb_available_size,
NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_PCI,
- &chan->pushbuf);
+ NV_MEM_TARGET_PCI, &pushbuf);
+ chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+ }
+
+ nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
+ nouveau_gpuobj_ref(NULL, &pushbuf);
+ return ret;
+}
+
+static struct nouveau_bo *
+nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
+{
+ struct nouveau_bo *pushbuf = NULL;
+ int location, ret;
+
+ if (nouveau_vram_pushbuf)
+ location = TTM_PL_FLAG_VRAM;
+ else
+ location = TTM_PL_FLAG_TT;
+
+ ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
+ if (ret) {
+ NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
+ return NULL;
}
-out:
+ ret = nouveau_bo_pin(pushbuf, location);
if (ret) {
- NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
- nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
- nouveau_gpuobj_ref(NULL, &chan->pushbuf);
- if (chan->pushbuf_bo) {
- nouveau_bo_unmap(chan->pushbuf_bo);
- nouveau_bo_ref(NULL, &chan->pushbuf_bo);
- }
+ NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
+ nouveau_bo_ref(NULL, &pushbuf);
+ return NULL;
}
- return 0;
+ ret = nouveau_bo_map(pushbuf);
+ if (ret) {
+ nouveau_bo_unpin(pushbuf);
+ nouveau_bo_ref(NULL, &pushbuf);
+ return NULL;
+ }
+
+ return pushbuf;
}
/* allocates and initializes a fifo for user space consumption */
@@ -119,7 +121,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_channel *chan;
unsigned long flags;
int ret;
@@ -160,14 +161,19 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
- /* setup channel's memory and vm */
- ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
- if (ret) {
- NV_ERROR(dev, "gpuobj %d\n", ret);
+ /* Allocate DMA push buffer */
+ chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
+ if (!chan->pushbuf_bo) {
+ ret = -ENOMEM;
+ NV_ERROR(dev, "pushbuf %d\n", ret);
nouveau_channel_put(&chan);
return ret;
}
+ nouveau_dma_pre_init(chan);
+ chan->user_put = 0x40;
+ chan->user_get = 0x44;
+
/* Allocate space for per-channel fixed notifier memory */
ret = nouveau_notifier_init_channel(chan);
if (ret) {
@@ -176,19 +182,21 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return ret;
}
- /* Allocate DMA push buffer */
- ret = nouveau_channel_pushbuf_init(chan);
+ /* Setup channel's default objects */
+ ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
if (ret) {
- NV_ERROR(dev, "pushbuf %d\n", ret);
+ NV_ERROR(dev, "gpuobj %d\n", ret);
nouveau_channel_put(&chan);
return ret;
}
- nouveau_dma_pre_init(chan);
- chan->user_put = 0x40;
- chan->user_get = 0x44;
- if (dev_priv->card_type >= NV_50)
- chan->user_get_hi = 0x60;
+ /* Create a dma object for the push buffer */
+ ret = nouveau_channel_pushbuf_ctxdma_init(chan);
+ if (ret) {
+ NV_ERROR(dev, "pbctxdma %d\n", ret);
+ nouveau_channel_put(&chan);
+ return ret;
+ }
/* disable the fifo caches */
pfifo->reassign(dev, false);
@@ -213,11 +221,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
nouveau_debugfs_channel_init(chan);
NV_DEBUG(dev, "channel %d initialised\n", chan->id);
- if (fpriv) {
- spin_lock(&fpriv->lock);
- list_add(&chan->list, &fpriv->channels);
- spin_unlock(&fpriv->lock);
- }
*chan_ret = chan;
return 0;
}
@@ -234,23 +237,29 @@ nouveau_channel_get_unlocked(struct nouveau_channel *ref)
}
struct nouveau_channel *
-nouveau_channel_get(struct drm_file *file_priv, int id)
+nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
{
- struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
+ unsigned long flags;
- spin_lock(&fpriv->lock);
- list_for_each_entry(chan, &fpriv->channels, list) {
- if (chan->id == id) {
- chan = nouveau_channel_get_unlocked(chan);
- spin_unlock(&fpriv->lock);
- mutex_lock(&chan->mutex);
- return chan;
- }
+ if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
+ return ERR_PTR(-EINVAL);
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+ if (unlikely(!chan))
+ return ERR_PTR(-EINVAL);
+
+ if (unlikely(file_priv && chan->file_priv != file_priv)) {
+ nouveau_channel_put_unlocked(&chan);
+ return ERR_PTR(-EINVAL);
}
- spin_unlock(&fpriv->lock);
- return ERR_PTR(-EINVAL);
+ mutex_lock(&chan->mutex);
+ return chan;
}
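/* For illustration: a nouveau_channel_get(dev, file_priv, 3) call
 * bounds-checks the id, takes a reference under channels.lock via
 * nouveau_channel_get_unlocked(), rejects channels owned by another
 * file_priv, and returns with chan->mutex held; the caller releases
 * both with nouveau_channel_put(). */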
void
@@ -304,14 +313,12 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
/* destroy any resources the channel owned */
nouveau_gpuobj_ref(NULL, &chan->pushbuf);
if (chan->pushbuf_bo) {
- nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
nouveau_bo_unmap(chan->pushbuf_bo);
nouveau_bo_unpin(chan->pushbuf_bo);
nouveau_bo_ref(NULL, &chan->pushbuf_bo);
}
- nouveau_ramht_ref(NULL, &chan->ramht, chan);
- nouveau_notifier_takedown_channel(chan);
nouveau_gpuobj_channel_takedown(chan);
+ nouveau_notifier_takedown_channel(chan);
nouveau_channel_ref(NULL, pchan);
}
@@ -377,11 +384,10 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
for (i = 0; i < engine->fifo.channels; i++) {
- chan = nouveau_channel_get(file_priv, i);
+ chan = nouveau_channel_get(dev, file_priv, i);
if (IS_ERR(chan))
continue;
- list_del(&chan->list);
atomic_dec(&chan->users);
nouveau_channel_put(&chan);
}
@@ -414,17 +420,13 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
return ret;
init->channel = chan->id;
- if (nouveau_vram_pushbuf == 0) {
- if (chan->dma.ib_max)
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
- NOUVEAU_GEM_DOMAIN_GART;
- else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
- else
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
- } else {
+ if (chan->dma.ib_max)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
- }
+ else
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
if (dev_priv->card_type < NV_C0) {
init->subchan[0].handle = NvM2MF;
@@ -458,11 +460,10 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
struct drm_nouveau_channel_free *req = data;
struct nouveau_channel *chan;
- chan = nouveau_channel_get(file_priv, req->channel);
+ chan = nouveau_channel_get(dev, file_priv, req->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
- list_del(&chan->list);
atomic_dec(&chan->users);
nouveau_channel_put(&chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 9f9d50d..1595d0b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -35,13 +35,12 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_connector.h"
-#include "nouveau_gpio.h"
#include "nouveau_hw.h"
static void nouveau_connector_hotplug(void *, int);
-struct nouveau_encoder *
-find_encoder(struct drm_connector *connector, int type)
+static struct nouveau_encoder *
+find_encoder_by_type(struct drm_connector *connector, int type)
{
struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder;
@@ -79,11 +78,29 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
return NULL;
}
+/* TODO: This could use improvement, and learn to handle the fixed
+ * BIOS tables etc. It's fine currently, for its only user.
+ */
+int
+nouveau_connector_bpp(struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+
+ if (nv_connector->edid && nv_connector->edid->revision >= 4) {
+ u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
+ if (bpc > 4)
+ return bpc;
+ }
+
+ return 18;
+}
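/* For illustration: an EDID 1.4 block advertising 8 bits per colour sets
 * input bits 6:4 to 010, so ((0x20 >> 3) + 4) returns 8 here.  An
 * undefined depth field (000) yields 4, which fails the > 4 test, and
 * pre-1.4 EDIDs skip the branch entirely; both fall back to 18 (6bpc). */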
+
static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct drm_nouveau_private *dev_priv;
+ struct nouveau_gpio_engine *pgpio;
struct drm_device *dev;
if (!nv_connector)
@@ -93,11 +110,16 @@ nouveau_connector_destroy(struct drm_connector *connector)
dev_priv = dev->dev_private;
NV_DEBUG_KMS(dev, "\n");
- if (nv_connector->hpd != DCB_GPIO_UNUSED) {
- nouveau_gpio_isr_del(dev, 0, nv_connector->hpd, 0xff,
- nouveau_connector_hotplug, connector);
+ pgpio = &dev_priv->engine.gpio;
+ if (pgpio->irq_unregister) {
+ pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
+ nouveau_connector_hotplug, connector);
}
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ nouveau_backlight_exit(connector);
+
kfree(nv_connector->edid);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -148,8 +170,8 @@ nouveau_connector_of_detect(struct drm_connector *connector)
struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
if (!dn ||
- !((nv_encoder = find_encoder(connector, OUTPUT_TMDS)) ||
- (nv_encoder = find_encoder(connector, OUTPUT_ANALOG))))
+ !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) ||
+ (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG))))
return NULL;
for_each_child_of_node(dn, cn) {
@@ -180,10 +202,6 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
return;
nv_connector->detected_encoder = nv_encoder;
- if (dev_priv->card_type >= NV_50) {
- connector->interlace_allowed = true;
- connector->doublescan_allowed = true;
- } else
if (nv_encoder->dcb->type == OUTPUT_LVDS ||
nv_encoder->dcb->type == OUTPUT_TMDS) {
connector->doublescan_allowed = false;
@@ -200,7 +218,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->interlace_allowed = true;
}
- if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
drm_connector_property_set_value(connector,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -215,7 +233,6 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
- struct nouveau_encoder *nv_partner;
struct nouveau_i2c_chan *i2c;
int type;
@@ -249,22 +266,19 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
* same i2c channel so the value returned from ddc_detect
* isn't necessarily correct.
*/
- nv_partner = NULL;
- if (nv_encoder->dcb->type == OUTPUT_TMDS)
- nv_partner = find_encoder(connector, OUTPUT_ANALOG);
- if (nv_encoder->dcb->type == OUTPUT_ANALOG)
- nv_partner = find_encoder(connector, OUTPUT_TMDS);
-
- if (nv_partner && ((nv_encoder->dcb->type == OUTPUT_ANALOG &&
- nv_partner->dcb->type == OUTPUT_TMDS) ||
- (nv_encoder->dcb->type == OUTPUT_TMDS &&
- nv_partner->dcb->type == OUTPUT_ANALOG))) {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
type = OUTPUT_TMDS;
else
type = OUTPUT_ANALOG;
- nv_encoder = find_encoder(connector, type);
+ nv_encoder = find_encoder_by_type(connector, type);
+ if (!nv_encoder) {
+ NV_ERROR(dev, "Detected %d encoder on %s, "
+ "but no object!\n", type,
+ drm_get_connector_name(connector));
+ return connector_status_disconnected;
+ }
}
nouveau_connector_set_encoder(connector, nv_encoder);
@@ -278,9 +292,9 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
}
detect_analog:
- nv_encoder = find_encoder(connector, OUTPUT_ANALOG);
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
if (!nv_encoder && !nouveau_tv_disable)
- nv_encoder = find_encoder(connector, OUTPUT_TV);
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
if (nv_encoder && force) {
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_encoder_helper_funcs *helper =
@@ -313,7 +327,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
- nv_encoder = find_encoder(connector, OUTPUT_LVDS);
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
if (!nv_encoder)
return connector_status_disconnected;
@@ -383,7 +397,7 @@ nouveau_connector_force(struct drm_connector *connector)
struct nouveau_encoder *nv_encoder;
int type;
- if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
if (connector->force == DRM_FORCE_ON_DIGITAL)
type = OUTPUT_TMDS;
else
@@ -391,7 +405,7 @@ nouveau_connector_force(struct drm_connector *connector)
} else
type = OUTPUT_ANY;
- nv_encoder = find_encoder(connector, type);
+ nv_encoder = find_encoder_by_type(connector, type);
if (!nv_encoder) {
NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
drm_get_connector_name(connector));
@@ -406,21 +420,15 @@ static int
nouveau_connector_set_property(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
- struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
- struct nouveau_display_engine *disp = &dev_priv->engine.display;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_device *dev = connector->dev;
- struct nouveau_crtc *nv_crtc;
int ret;
- nv_crtc = NULL;
- if (connector->encoder && connector->encoder->crtc)
- nv_crtc = nouveau_crtc(connector->encoder->crtc);
-
/* Scaling mode */
if (property == dev->mode_config.scaling_mode_property) {
+ struct nouveau_crtc *nv_crtc = NULL;
bool modeset = false;
switch (value) {
@@ -446,6 +454,8 @@ nouveau_connector_set_property(struct drm_connector *connector,
modeset = true;
nv_connector->scaling_mode = value;
+ if (connector->encoder && connector->encoder->crtc)
+ nv_crtc = nouveau_crtc(connector->encoder->crtc);
if (!nv_crtc)
return 0;
@@ -457,7 +467,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
if (!ret)
return -EINVAL;
} else {
- ret = nv_crtc->set_scale(nv_crtc, true);
+ ret = nv_crtc->set_scale(nv_crtc, value, true);
if (ret)
return ret;
}
@@ -465,71 +475,23 @@ nouveau_connector_set_property(struct drm_connector *connector,
return 0;
}
- /* Underscan */
- if (property == disp->underscan_property) {
- if (nv_connector->underscan != value) {
- nv_connector->underscan = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
-
- if (property == disp->underscan_hborder_property) {
- if (nv_connector->underscan_hborder != value) {
- nv_connector->underscan_hborder = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
-
- if (property == disp->underscan_vborder_property) {
- if (nv_connector->underscan_vborder != value) {
- nv_connector->underscan_vborder = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
-
/* Dithering */
- if (property == disp->dithering_mode) {
- nv_connector->dithering_mode = value;
- if (!nv_crtc || !nv_crtc->set_dither)
- return 0;
+ if (property == dev->mode_config.dithering_mode_property) {
+ struct nouveau_crtc *nv_crtc = NULL;
- return nv_crtc->set_dither(nv_crtc, true);
- }
+ if (value == DRM_MODE_DITHERING_ON)
+ nv_connector->use_dithering = true;
+ else
+ nv_connector->use_dithering = false;
+
+ if (connector->encoder && connector->encoder->crtc)
+ nv_crtc = nouveau_crtc(connector->encoder->crtc);
- if (property == disp->dithering_depth) {
- nv_connector->dithering_depth = value;
if (!nv_crtc || !nv_crtc->set_dither)
return 0;
- return nv_crtc->set_dither(nv_crtc, true);
- }
-
- if (nv_crtc && nv_crtc->set_color_vibrance) {
- /* Hue */
- if (property == disp->vibrant_hue_property) {
- nv_crtc->vibrant_hue = value - 90;
- return nv_crtc->set_color_vibrance(nv_crtc, true);
- }
- /* Saturation */
- if (property == disp->color_vibrance_property) {
- nv_crtc->color_vibrance = value - 100;
- return nv_crtc->set_color_vibrance(nv_crtc, true);
- }
+ return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
+ true);
}
if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
@@ -640,46 +602,6 @@ nouveau_connector_scaler_modes_add(struct drm_connector *connector)
return modes;
}
-static void
-nouveau_connector_detect_depth(struct drm_connector *connector)
-{
- struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
- struct nouveau_connector *nv_connector = nouveau_connector(connector);
- struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
- struct nvbios *bios = &dev_priv->vbios;
- struct drm_display_mode *mode = nv_connector->native_mode;
- bool duallink;
-
- /* if the edid is feeling nice enough to provide this info, use it */
- if (nv_connector->edid && connector->display_info.bpc)
- return;
-
- /* if not, we're out of options unless we're LVDS, default to 6bpc */
- connector->display_info.bpc = 6;
- if (nv_encoder->dcb->type != OUTPUT_LVDS)
- return;
-
- /* LVDS: panel straps */
- if (bios->fp_no_ddc) {
- if (bios->fp.if_is_24bit)
- connector->display_info.bpc = 8;
- return;
- }
-
- /* LVDS: DDC panel, need to first determine the number of links to
- * know which if_is_24bit flag to check...
- */
- if (nv_connector->edid &&
- nv_connector->type == DCB_CONNECTOR_LVDS_SPWG)
- duallink = ((u8 *)nv_connector->edid)[121] == 2;
- else
- duallink = mode->clock >= bios->fp.duallink_transition_clk;
-
- if ((!duallink && (bios->fp.strapless_is_24bit & 1)) ||
- ( duallink && (bios->fp.strapless_is_24bit & 2)))
- connector->display_info.bpc = 8;
-}
-
static int
nouveau_connector_get_modes(struct drm_connector *connector)
{
@@ -709,12 +631,6 @@ nouveau_connector_get_modes(struct drm_connector *connector)
nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
}
- /* Determine display colour depth for everything except LVDS now,
- * DP requires this before mode_valid() is called.
- */
- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
- nouveau_connector_detect_depth(connector);
-
/* Find the native mode if this is a digital panel, if we didn't
* find any modes through DDC previously add the native mode to
* the list of modes.
@@ -730,19 +646,12 @@ nouveau_connector_get_modes(struct drm_connector *connector)
ret = 1;
}
- /* Determine LVDS colour depth, must happen after determining
- * "native" mode as some VBIOS tables require us to use the
- * pixel clock as part of the lookup...
- */
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
- nouveau_connector_detect_depth(connector);
-
if (nv_encoder->dcb->type == OUTPUT_TV)
ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
- if (nv_connector->type == DCB_CONNECTOR_LVDS ||
- nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
- nv_connector->type == DCB_CONNECTOR_eDP)
+ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS ||
+ nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG ||
+ nv_connector->dcb->type == DCB_CONNECTOR_eDP)
ret += nouveau_connector_scaler_modes_add(connector);
return ret;
@@ -799,9 +708,12 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
case OUTPUT_TV:
return get_slave_funcs(encoder)->mode_valid(encoder, mode);
case OUTPUT_DP:
- max_clock = nv_encoder->dp.link_nr;
- max_clock *= nv_encoder->dp.link_bw;
- clock = clock * (connector->display_info.bpc * 3) / 10;
+ if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
+ max_clock = nv_encoder->dp.link_nr * 270000;
+ else
+ max_clock = nv_encoder->dp.link_nr * 162000;
+
+ clock = clock * nouveau_connector_bpp(connector) / 8;
break;
default:
BUG_ON(1);
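/* For illustration of the DP case above: a 4-lane link at DP_LINK_BW_2_7
 * gives max_clock = 4 * 270000 = 1080000; with the 18bpp fallback from
 * nouveau_connector_bpp(), a 162000 kHz mode scales to
 * 162000 * 18 / 8 = 364500, comfortably within that budget. */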
@@ -859,195 +771,103 @@ nouveau_connector_funcs_lvds = {
.force = nouveau_connector_force
};
-static int
-drm_conntype_from_dcb(enum dcb_connector_type dcb)
-{
- switch (dcb) {
- case DCB_CONNECTOR_VGA : return DRM_MODE_CONNECTOR_VGA;
- case DCB_CONNECTOR_TV_0 :
- case DCB_CONNECTOR_TV_1 :
- case DCB_CONNECTOR_TV_3 : return DRM_MODE_CONNECTOR_TV;
- case DCB_CONNECTOR_DVI_I : return DRM_MODE_CONNECTOR_DVII;
- case DCB_CONNECTOR_DVI_D : return DRM_MODE_CONNECTOR_DVID;
- case DCB_CONNECTOR_LVDS :
- case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
- case DCB_CONNECTOR_DMS59_DP0:
- case DCB_CONNECTOR_DMS59_DP1:
- case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort;
- case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
- case DCB_CONNECTOR_HDMI_0 :
- case DCB_CONNECTOR_HDMI_1 : return DRM_MODE_CONNECTOR_HDMIA;
- default:
- break;
- }
-
- return DRM_MODE_CONNECTOR_Unknown;
-}
-
struct drm_connector *
nouveau_connector_create(struct drm_device *dev, int index)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_display_engine *disp = &dev_priv->engine.display;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nouveau_connector *nv_connector = NULL;
+ struct dcb_connector_table_entry *dcb = NULL;
struct drm_connector *connector;
int type, ret = 0;
- bool dummy;
NV_DEBUG_KMS(dev, "\n");
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- nv_connector = nouveau_connector(connector);
- if (nv_connector->index == index)
- return connector;
+ if (index >= dev_priv->vbios.dcb.connector.entries)
+ return ERR_PTR(-EINVAL);
+
+ dcb = &dev_priv->vbios.dcb.connector.entry[index];
+ if (dcb->drm)
+ return dcb->drm;
+
+ switch (dcb->type) {
+ case DCB_CONNECTOR_VGA:
+ type = DRM_MODE_CONNECTOR_VGA;
+ break;
+ case DCB_CONNECTOR_TV_0:
+ case DCB_CONNECTOR_TV_1:
+ case DCB_CONNECTOR_TV_3:
+ type = DRM_MODE_CONNECTOR_TV;
+ break;
+ case DCB_CONNECTOR_DVI_I:
+ type = DRM_MODE_CONNECTOR_DVII;
+ break;
+ case DCB_CONNECTOR_DVI_D:
+ type = DRM_MODE_CONNECTOR_DVID;
+ break;
+ case DCB_CONNECTOR_HDMI_0:
+ case DCB_CONNECTOR_HDMI_1:
+ type = DRM_MODE_CONNECTOR_HDMIA;
+ break;
+ case DCB_CONNECTOR_LVDS:
+ case DCB_CONNECTOR_LVDS_SPWG:
+ type = DRM_MODE_CONNECTOR_LVDS;
+ funcs = &nouveau_connector_funcs_lvds;
+ break;
+ case DCB_CONNECTOR_DP:
+ type = DRM_MODE_CONNECTOR_DisplayPort;
+ break;
+ case DCB_CONNECTOR_eDP:
+ type = DRM_MODE_CONNECTOR_eDP;
+ break;
+ default:
+ NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
+ return ERR_PTR(-EINVAL);
}
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
return ERR_PTR(-ENOMEM);
-
+ nv_connector->dcb = dcb;
connector = &nv_connector->base;
- nv_connector->index = index;
-
- /* attempt to parse vbios connector type and hotplug gpio */
- nv_connector->dcb = dcb_conn(dev, index);
- if (nv_connector->dcb) {
- static const u8 hpd[16] = {
- 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
- };
-
- u32 entry = ROM16(nv_connector->dcb[0]);
- if (dcb_conntab(dev)[3] >= 4)
- entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
-
- nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
- nv_connector->hpd = hpd[nv_connector->hpd];
-
- nv_connector->type = nv_connector->dcb[0];
- if (drm_conntype_from_dcb(nv_connector->type) ==
- DRM_MODE_CONNECTOR_Unknown) {
- NV_WARN(dev, "unknown connector type %02x\n",
- nv_connector->type);
- nv_connector->type = DCB_CONNECTOR_NONE;
- }
- /* Gigabyte NX85T */
- if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
- if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
- nv_connector->type = DCB_CONNECTOR_DVI_I;
- }
-
- /* Gigabyte GV-NX86T512H */
- if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
- if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
- nv_connector->type = DCB_CONNECTOR_DVI_I;
- }
- } else {
- nv_connector->type = DCB_CONNECTOR_NONE;
- nv_connector->hpd = DCB_GPIO_UNUSED;
- }
+ /* defaults, will get overridden in detect() */
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
- /* no vbios data, or an unknown dcb connector type - attempt to
- * figure out something suitable ourselves
- */
- if (nv_connector->type == DCB_CONNECTOR_NONE) {
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_table *dcbt = &dev_priv->vbios.dcb;
- u32 encoders = 0;
- int i;
-
- for (i = 0; i < dcbt->entries; i++) {
- if (dcbt->entry[i].connector == nv_connector->index)
- encoders |= (1 << dcbt->entry[i].type);
- }
+ drm_connector_init(dev, connector, funcs, type);
+ drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
- if (encoders & (1 << OUTPUT_DP)) {
- if (encoders & (1 << OUTPUT_TMDS))
- nv_connector->type = DCB_CONNECTOR_DP;
- else
- nv_connector->type = DCB_CONNECTOR_eDP;
- } else
- if (encoders & (1 << OUTPUT_TMDS)) {
- if (encoders & (1 << OUTPUT_ANALOG))
- nv_connector->type = DCB_CONNECTOR_DVI_I;
- else
- nv_connector->type = DCB_CONNECTOR_DVI_D;
- } else
- if (encoders & (1 << OUTPUT_ANALOG)) {
- nv_connector->type = DCB_CONNECTOR_VGA;
- } else
- if (encoders & (1 << OUTPUT_LVDS)) {
- nv_connector->type = DCB_CONNECTOR_LVDS;
- } else
- if (encoders & (1 << OUTPUT_TV)) {
- nv_connector->type = DCB_CONNECTOR_TV_0;
- }
- }
+ /* Check if we need dithering enabled */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ bool dummy, is_24bit = false;
- type = drm_conntype_from_dcb(nv_connector->type);
- if (type == DRM_MODE_CONNECTOR_LVDS) {
- ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
+ ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit);
if (ret) {
- NV_ERROR(dev, "Error parsing LVDS table, disabling\n");
- kfree(nv_connector);
- return ERR_PTR(ret);
+ NV_ERROR(dev, "Error parsing LVDS table, disabling "
+ "LVDS\n");
+ goto fail;
}
- funcs = &nouveau_connector_funcs_lvds;
- } else {
- funcs = &nouveau_connector_funcs;
+ nv_connector->use_dithering = !is_24bit;
}
- /* defaults, will get overridden in detect() */
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
-
- drm_connector_init(dev, connector, funcs, type);
- drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
-
/* Init DVI-I specific properties */
- if (nv_connector->type == DCB_CONNECTOR_DVI_I)
+ if (dcb->type == DCB_CONNECTOR_DVI_I) {
+ drm_mode_create_dvi_i_properties(dev);
drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
-
- /* Add overscan compensation options to digital outputs */
- if (disp->underscan_property &&
- (nv_connector->type == DCB_CONNECTOR_DVI_D ||
- nv_connector->type == DCB_CONNECTOR_DVI_I ||
- nv_connector->type == DCB_CONNECTOR_HDMI_0 ||
- nv_connector->type == DCB_CONNECTOR_HDMI_1 ||
- nv_connector->type == DCB_CONNECTOR_DP ||
- nv_connector->type == DCB_CONNECTOR_DMS59_DP0 ||
- nv_connector->type == DCB_CONNECTOR_DMS59_DP1)) {
- drm_connector_attach_property(connector,
- disp->underscan_property,
- UNDERSCAN_OFF);
- drm_connector_attach_property(connector,
- disp->underscan_hborder_property,
- 0);
- drm_connector_attach_property(connector,
- disp->underscan_vborder_property,
- 0);
+ drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
}
- /* Add hue and saturation options */
- if (disp->vibrant_hue_property)
- drm_connector_attach_property(connector,
- disp->vibrant_hue_property,
- 90);
- if (disp->color_vibrance_property)
- drm_connector_attach_property(connector,
- disp->color_vibrance_property,
- 150);
-
- switch (nv_connector->type) {
+ switch (dcb->type) {
case DCB_CONNECTOR_VGA:
if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
}
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/* fall-through */
case DCB_CONNECTOR_TV_0:
case DCB_CONNECTOR_TV_1:
@@ -1060,32 +880,39 @@ nouveau_connector_create(struct drm_device *dev, int index)
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
- if (disp->dithering_mode) {
- nv_connector->dithering_mode = DITHERING_MODE_AUTO;
- drm_connector_attach_property(connector,
- disp->dithering_mode,
- nv_connector->dithering_mode);
- }
- if (disp->dithering_depth) {
- nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
- drm_connector_attach_property(connector,
- disp->dithering_depth,
- nv_connector->dithering_depth);
+ drm_connector_attach_property(connector,
+ dev->mode_config.dithering_mode_property,
+ nv_connector->use_dithering ?
+ DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) {
+ if (dev_priv->card_type >= NV_50)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
}
break;
}
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- if (nv_connector->hpd != DCB_GPIO_UNUSED) {
- ret = nouveau_gpio_isr_add(dev, 0, nv_connector->hpd, 0xff,
- nouveau_connector_hotplug,
- connector);
- if (ret == 0)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
+ if (pgpio->irq_register) {
+ pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
+ nouveau_connector_hotplug, connector);
}
drm_sysfs_connector_add(connector);
- return connector;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ nouveau_backlight_init(connector);
+
+ dcb->drm = connector;
+ return dcb->drm;
+
+fail:
+ drm_connector_cleanup(connector);
+ kfree(connector);
+ return ERR_PTR(ret);
+
}
static void
@@ -1094,13 +921,22 @@ nouveau_connector_hotplug(void *data, int plugged)
struct drm_connector *connector = data;
struct drm_device *dev = connector->dev;
- NV_DEBUG(dev, "%splugged %s\n", plugged ? "" : "un",
- drm_get_connector_name(connector));
+ NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
+ drm_get_connector_name(connector));
- if (plugged)
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- else
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ if (connector->encoder && connector->encoder->crtc &&
+ connector->encoder->crtc->enabled) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
+ struct drm_encoder_helper_funcs *helper =
+ connector->encoder->helper_private;
+
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ if (plugged)
+ helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
+ else
+ helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+ }
+ }
drm_helper_hpd_irq_event(dev);
}
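The rewritten handler only forces DPMS for a DP encoder that is actively driving an enabled CRTC; every other output just gets the hotplug uevent. Reduced to its skeleton (stub types, illustrative only):

#include <stdbool.h>

enum { OUTPUT_DP = 6 };   /* value illustrative */

struct enc { int type; bool crtc_enabled; };
struct conn { struct enc *encoder; };

void enc_dpms(struct enc *e, bool on);  /* drm_encoder_helper_funcs.dpms */
void hpd_event(void);                   /* drm_helper_hpd_irq_event() */

static void hotplug(struct conn *c, bool plugged)
{
	if (c->encoder && c->encoder->crtc_enabled &&
	    c->encoder->type == OUTPUT_DP)
		enc_dpms(c->encoder, plugged);

	hpd_event();   /* always notify userspace */
}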
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index e485702..711b1e9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -30,43 +30,13 @@
#include "drm_edid.h"
#include "nouveau_i2c.h"
-enum nouveau_underscan_type {
- UNDERSCAN_OFF,
- UNDERSCAN_ON,
- UNDERSCAN_AUTO,
-};
-
-/* the enum values specifically defined here match nv50/nvd0 hw values, and
- * the code relies on this
- */
-enum nouveau_dithering_mode {
- DITHERING_MODE_OFF = 0x00,
- DITHERING_MODE_ON = 0x01,
- DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
- DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
- DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
- DITHERING_MODE_AUTO
-};
-
-enum nouveau_dithering_depth {
- DITHERING_DEPTH_6BPC = 0x00,
- DITHERING_DEPTH_8BPC = 0x02,
- DITHERING_DEPTH_AUTO
-};
-
struct nouveau_connector {
struct drm_connector base;
- enum dcb_connector_type type;
- u8 index;
- u8 *dcb;
- u8 hpd;
- int dithering_mode;
- int dithering_depth;
+ struct dcb_connector_table_entry *dcb;
+
int scaling_mode;
- enum nouveau_underscan_type underscan;
- u32 underscan_hborder;
- u32 underscan_vborder;
+ bool use_dithering;
struct nouveau_encoder *detected_encoder;
struct edid *edid;
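The slimmed-down struct still embeds drm_connector as its first member, so the usual container_of-style accessor keeps working unchanged. The generic pattern, with stub types for illustration:

#include <stddef.h>

struct drm_connector { int id; };
struct nouveau_connector_s { struct drm_connector base; int scaling_mode; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct nouveau_connector_s *to_nouveau(struct drm_connector *c)
{
	return container_of(c, struct nouveau_connector_s, base);
}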
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index e6d0d1e..cb1ce2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -32,11 +32,11 @@ struct nouveau_crtc {
int index;
+ struct drm_display_mode *mode;
+
uint32_t dpms_saved_fp_control;
uint32_t fp_users;
int saturation;
- int color_vibrance;
- int vibrant_hue;
int sharpness;
int last_dpms;
@@ -67,9 +67,8 @@ struct nouveau_crtc {
int depth;
} lut;
- int (*set_dither)(struct nouveau_crtc *crtc, bool update);
- int (*set_scale)(struct nouveau_crtc *crtc, bool update);
- int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update);
+ int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
+ int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
};
static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
@@ -83,13 +82,14 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
}
int nv50_crtc_create(struct drm_device *dev, int index);
+int nv50_cursor_init(struct nouveau_crtc *);
+void nv50_cursor_fini(struct nouveau_crtc *);
int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
uint32_t buffer_handle, uint32_t width,
uint32_t height);
int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
int nv04_cursor_init(struct nouveau_crtc *);
-int nv50_cursor_init(struct nouveau_crtc *);
struct nouveau_connector *
nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
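The hook changes above move state into the call: set_dither() now takes the desired on/off and set_scale() the scaler mode, rather than the callee re-reading connector properties. A caller under these signatures might look like this (stub types, sketch only):

#include <stdbool.h>

struct crtc;
struct crtc_ops {
	int (*set_dither)(struct crtc *, bool on, bool update);
	int (*set_scale)(struct crtc *, int mode, bool update);
};

static int apply(struct crtc *c, const struct crtc_ops *ops,
		 bool dither, int scale_mode)
{
	int ret = ops->set_dither(c, dither, false); /* defer HW commit */
	if (ret)
		return ret;
	return ops->set_scale(c, scale_mode, true);  /* commit both */
}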
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index fa2ec49..8e15923 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -44,7 +44,7 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
seq_printf(m, "channel id : %d\n", chan->id);
seq_printf(m, "cpu fifo state:\n");
- seq_printf(m, " base: 0x%10llx\n", chan->pushbuf_base);
+ seq_printf(m, " base: 0x%08x\n", chan->pushbuf_base);
seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
@@ -178,7 +178,6 @@ static struct drm_info_list nouveau_debugfs_list[] = {
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
- { "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index c01ae78..eb514ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -32,8 +32,6 @@
#include "nouveau_hw.h"
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
-#include "nouveau_connector.h"
-#include "nouveau_gpio.h"
#include "nv50_display.h"
static void
@@ -66,7 +64,7 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
int
nouveau_framebuffer_init(struct drm_device *dev,
struct nouveau_framebuffer *nv_fb,
- struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_mode_fb_cmd *mode_cmd,
struct nouveau_bo *nvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -107,16 +105,13 @@ nouveau_framebuffer_init(struct drm_device *dev,
if (dev_priv->chipset == 0x50)
nv_fb->r_format |= (tile_flags << 8);
- if (!tile_flags) {
- if (dev_priv->card_type < NV_D0)
- nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
- else
- nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
- } else {
+ if (!tile_flags)
+ nv_fb->r_pitch = 0x00100000 | fb->pitch;
+ else {
u32 mode = nvbo->tile_mode;
if (dev_priv->card_type >= NV_C0)
mode >>= 4;
- nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
+ nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
}
}
@@ -126,13 +121,13 @@ nouveau_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd2 *mode_cmd)
+ struct drm_mode_fb_cmd *mode_cmd)
{
struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
int ret;
- gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (!gem)
return ERR_PTR(-ENOENT);
@@ -149,207 +144,11 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
return &nouveau_fb->base;
}
-static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
.output_poll_changed = nouveau_fbcon_output_poll_changed,
};
-
-struct nouveau_drm_prop_enum_list {
- u8 gen_mask;
- int type;
- char *name;
-};
-
-static struct nouveau_drm_prop_enum_list underscan[] = {
- { 6, UNDERSCAN_AUTO, "auto" },
- { 6, UNDERSCAN_OFF, "off" },
- { 6, UNDERSCAN_ON, "on" },
- {}
-};
-
-static struct nouveau_drm_prop_enum_list dither_mode[] = {
- { 7, DITHERING_MODE_AUTO, "auto" },
- { 7, DITHERING_MODE_OFF, "off" },
- { 1, DITHERING_MODE_ON, "on" },
- { 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
- { 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
- { 4, DITHERING_MODE_TEMPORAL, "temporal" },
- {}
-};
-
-static struct nouveau_drm_prop_enum_list dither_depth[] = {
- { 6, DITHERING_DEPTH_AUTO, "auto" },
- { 6, DITHERING_DEPTH_6BPC, "6 bpc" },
- { 6, DITHERING_DEPTH_8BPC, "8 bpc" },
- {}
-};
-
-#define PROP_ENUM(p,gen,n,list) do { \
- struct nouveau_drm_prop_enum_list *l = (list); \
- int c = 0; \
- while (l->gen_mask) { \
- if (l->gen_mask & (1 << (gen))) \
- c++; \
- l++; \
- } \
- if (c) { \
- p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \
- l = (list); \
- c = 0; \
- while (p && l->gen_mask) { \
- if (l->gen_mask & (1 << (gen))) { \
- drm_property_add_enum(p, c, l->type, l->name); \
- c++; \
- } \
- l++; \
- } \
- } \
-} while(0)
-
-int
-nouveau_display_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_display_engine *disp = &dev_priv->engine.display;
- struct drm_connector *connector;
- int ret;
-
- ret = disp->init(dev);
- if (ret)
- return ret;
-
- /* power on internal panel if it's not already. the init tables of
- * some vbios default this to off for some reason, causing the
- * panel to not work after resume
- */
- if (nouveau_gpio_func_get(dev, DCB_GPIO_PANEL_POWER) == 0) {
- nouveau_gpio_func_set(dev, DCB_GPIO_PANEL_POWER, true);
- msleep(300);
- }
-
- /* enable polling for external displays */
- drm_kms_helper_poll_enable(dev);
-
- /* enable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct nouveau_connector *conn = nouveau_connector(connector);
- nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, true);
- }
-
- return ret;
-}
-
-void
-nouveau_display_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_display_engine *disp = &dev_priv->engine.display;
- struct drm_connector *connector;
-
- /* disable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct nouveau_connector *conn = nouveau_connector(connector);
- nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, false);
- }
-
- drm_kms_helper_poll_disable(dev);
- disp->fini(dev);
-}
-
-int
-nouveau_display_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_display_engine *disp = &dev_priv->engine.display;
- int ret, gen;
-
- drm_mode_config_init(dev);
- drm_mode_create_scaling_mode_property(dev);
- drm_mode_create_dvi_i_properties(dev);
-
- if (dev_priv->card_type < NV_50)
- gen = 0;
- else
- if (dev_priv->card_type < NV_D0)
- gen = 1;
- else
- gen = 2;
-
- PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
- PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
- PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
-
- disp->underscan_hborder_property =
- drm_property_create_range(dev, 0, "underscan hborder", 0, 128);
-
- disp->underscan_vborder_property =
- drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
-
- if (gen == 1) {
- disp->vibrant_hue_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "vibrant hue", 2);
- disp->vibrant_hue_property->values[0] = 0;
- disp->vibrant_hue_property->values[1] = 180; /* -90..+90 */
-
- disp->color_vibrance_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "color vibrance", 2);
- disp->color_vibrance_property->values[0] = 0;
- disp->color_vibrance_property->values[1] = 200; /* -100..+100 */
- }
-
- dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
-
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
- if (dev_priv->card_type < NV_10) {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
- } else
- if (dev_priv->card_type < NV_50) {
- dev->mode_config.max_width = 4096;
- dev->mode_config.max_height = 4096;
- } else {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- }
-
- dev->mode_config.preferred_depth = 24;
- dev->mode_config.prefer_shadow = 1;
-
- drm_kms_helper_poll_init(dev);
- drm_kms_helper_poll_disable(dev);
-
- ret = disp->create(dev);
- if (ret)
- return ret;
-
- if (dev->mode_config.num_crtc) {
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-void
-nouveau_display_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_display_engine *disp = &dev_priv->engine.display;
-
- drm_vblank_cleanup(dev);
-
- disp->destroy(dev);
-
- drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
-}
-
int
nouveau_vblank_enable(struct drm_device *dev, int crtc)
{
@@ -492,7 +291,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
{ { }, event, nouveau_crtc(crtc)->index,
- fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
+ fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
new_bo->bo.offset };
/* Choose the channel the flip will be handled in */
@@ -503,10 +302,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (dev_priv->card_type >= NV_50) {
- if (dev_priv->card_type >= NV_D0)
- ret = nvd0_display_flip_next(crtc, fb, chan, 0);
- else
- ret = nv50_display_flip_next(crtc, fb, chan);
+ ret = nv50_display_flip_next(crtc, fb, chan);
if (ret) {
nouveau_channel_put(&chan);
goto fail_unreserve;
@@ -570,48 +366,3 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
}
-
-int
-nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct nouveau_bo *bo;
- int ret;
-
- args->pitch = roundup(args->width * (args->bpp / 8), 256);
- args->size = args->pitch * args->height;
- args->size = roundup(args->size, PAGE_SIZE);
-
- ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
- if (ret)
- return ret;
-
- ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
- drm_gem_object_unreference_unlocked(bo->gem);
- return ret;
-}
-
-int
-nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-
-int
-nouveau_display_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *poffset)
-{
- struct drm_gem_object *gem;
-
- gem = drm_gem_object_lookup(dev, file_priv, handle);
- if (gem) {
- struct nouveau_bo *bo = gem->driver_private;
- *poffset = bo->bo.addr_space_offset;
- drm_gem_object_unreference_unlocked(gem);
- return 0;
- }
-
- return -ENOENT;
-}
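The dumb-buffer helper being dropped sized allocations as pitch rounded up to 256 bytes and total size rounded up to a page. The arithmetic, as plain C:

#include <stdint.h>

#define ROUNDUP(x, a) (((x) + (a) - 1) / (a) * (a))

static uint64_t dumb_size(uint32_t width, uint32_t height, uint32_t bpp,
			  uint32_t page_size, uint32_t *pitch)
{
	*pitch = ROUNDUP(width * (bpp / 8), 256);            /* bytes/row */
	return ROUNDUP((uint64_t)*pitch * height, page_size);
}

For 1920x1080 at 32 bpp this gives a 7680-byte pitch and an 8294400-byte object, which happens to be page-aligned already.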
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 4c2e4e5..568caed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -134,13 +134,11 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
* -EBUSY if timeout exceeded
*/
static inline int
-READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
+READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
{
- uint64_t val;
+ uint32_t val;
val = nvchan_rd32(chan, chan->user_get);
- if (chan->user_get_hi)
- val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing, this is
* to avoid misdetecting a GPU lockup if the GPU happens to
@@ -169,13 +167,8 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
int delta, int length)
{
struct nouveau_bo *pb = chan->pushbuf_bo;
- struct nouveau_vma *vma;
+ uint64_t offset = bo->bo.offset + delta;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
- u64 offset;
-
- vma = nouveau_bo_vma_find(bo, chan->vm);
- BUG_ON(!vma);
- offset = vma->offset + delta;
BUG_ON(chan->dma.ib_free < 1);
nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
@@ -220,8 +213,8 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
- uint64_t prev_get = 0;
- int ret, cnt = 0;
+ uint32_t cnt = 0, prev_get = 0;
+ int ret;
ret = nv50_dma_push_wait(chan, slots + 1);
if (unlikely(ret))
@@ -263,8 +256,8 @@ nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
- uint64_t prev_get = 0;
- int cnt = 0, get;
+ uint32_t prev_get = 0, cnt = 0;
+ int get;
if (chan->dma.ib_max)
return nv50_dma_wait(chan, slots, size);
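READ_GET reverts from a 64-bit to a 32-bit ring GET pointer here; the newer code widened it by folding in a second register. The combination, with hypothetical read stubs:

#include <stdint.h>

uint32_t rd32_lo(void);   /* chan->user_get */
uint32_t rd32_hi(void);   /* chan->user_get_hi, if the channel has one */

static uint64_t read_get64(int has_hi)
{
	uint64_t val = rd32_lo();

	if (has_hi)
		val |= (uint64_t)rd32_hi() << 32;
	return val;
}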
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 302b2f7..7beb82a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -28,525 +28,557 @@
#include "nouveau_i2c.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_gpio.h"
-
-/******************************************************************************
- * aux channel util functions
- *****************************************************************************/
-#define AUX_DBG(fmt, args...) do { \
- if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_AUXCH) { \
- NV_PRINTK(KERN_DEBUG, dev, "AUXCH(%d): " fmt, ch, ##args); \
- } \
-} while (0)
-#define AUX_ERR(fmt, args...) NV_ERROR(dev, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct drm_device *dev, int ch)
+
+static int
+auxch_rd(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
{
- nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_i2c_chan *auxch;
+ int ret;
+
+ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!auxch)
+ return -ENODEV;
+
+ ret = nouveau_dp_auxch(auxch, 9, address, buf, size);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int
-auxch_init(struct drm_device *dev, int ch)
+auxch_wr(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
{
- const u32 unksel = 1; /* nfi which to use, or if it matters.. */
- const u32 ureq = unksel ? 0x00100000 : 0x00200000;
- const u32 urep = unksel ? 0x01000000 : 0x02000000;
- u32 ctrl, timeout;
-
- /* wait up to 1ms for any previous transaction to be done... */
- timeout = 1000;
- do {
- ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("begin idle timeout 0x%08x", ctrl);
- return -EBUSY;
- }
- } while (ctrl & 0x03010000);
-
- /* set some magic, and wait up to 1ms for it to appear */
- nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
- timeout = 1000;
- do {
- ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("magic wait 0x%08x\n", ctrl);
- auxch_fini(dev, ch);
- return -EBUSY;
- }
- } while ((ctrl & 0x03000000) != urep);
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_i2c_chan *auxch;
+ int ret;
- return 0;
+ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!auxch)
+ return -ENODEV;
+
+ ret = nouveau_dp_auxch(auxch, 8, address, buf, size);
+ return ret;
}
static int
-auxch_tx(struct drm_device *dev, int ch, u8 type, u32 addr, u8 *data, u8 size)
+nouveau_dp_lane_count_set(struct drm_encoder *encoder, uint8_t cmd)
{
- u32 ctrl, stat, timeout, retries;
- u32 xbuf[4] = {};
- int ret, i;
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t tmp;
+ int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
+
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= ~(NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED |
+ NV50_SOR_DP_CTRL_LANE_MASK);
+ tmp |= ((1 << (cmd & DP_LANE_COUNT_MASK)) - 1) << 16;
+ if (cmd & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ tmp |= NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED;
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
+
+ return auxch_wr(encoder, DP_LANE_COUNT_SET, &cmd, 1);
+}
- AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
+static int
+nouveau_dp_link_bw_set(struct drm_encoder *encoder, uint8_t cmd)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t tmp;
+ int reg = 0x614300 + (nv_encoder->or * 0x800);
+
+ tmp = nv_rd32(dev, reg);
+ tmp &= 0xfff3ffff;
+ if (cmd == DP_LINK_BW_2_7)
+ tmp |= 0x00040000;
+ nv_wr32(dev, reg, tmp);
+
+ return auxch_wr(encoder, DP_LINK_BW_SET, &cmd, 1);
+}
+
+static int
+nouveau_dp_link_train_set(struct drm_encoder *encoder, int pattern)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t tmp;
+ uint8_t cmd;
+ int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
+ int ret;
+
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= ~NV50_SOR_DP_CTRL_TRAINING_PATTERN;
+ tmp |= (pattern << 24);
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
- ret = auxch_init(dev, ch);
+ ret = auxch_rd(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
if (ret)
- goto out;
+ return ret;
+ cmd &= ~DP_TRAINING_PATTERN_MASK;
+ cmd |= (pattern & DP_TRAINING_PATTERN_MASK);
+ return auxch_wr(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
+}
- stat = nv_rd32(dev, 0x00e4e8 + (ch * 0x50));
- if (!(stat & 0x10000000)) {
- AUX_DBG("sink not detected\n");
- ret = -ENXIO;
- goto out;
- }
+static int
+nouveau_dp_max_voltage_swing(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct bit_displayport_encoder_table_entry *dpse;
+ struct bit_displayport_encoder_table *dpe;
+ int i, dpe_headerlen, max_vs = 0;
- if (!(type & 1)) {
- memcpy(xbuf, data, size);
- for (i = 0; i < 16; i += 4) {
- AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
- nv_wr32(dev, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
- }
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe)
+ return false;
+ dpse = (void *)((char *)dpe + dpe_headerlen);
+
+ for (i = 0; i < dpe_headerlen; i++, dpse++) {
+ if (dpse->vs_level > max_vs)
+ max_vs = dpse->vs_level;
}
- ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
- ctrl &= ~0x0001f0ff;
- ctrl |= type << 12;
- ctrl |= size - 1;
- nv_wr32(dev, 0x00e4e0 + (ch * 0x50), addr);
-
- /* retry transaction a number of times on failure... */
- ret = -EREMOTEIO;
- for (retries = 0; retries < 32; retries++) {
- /* reset, and delay a while if this is a retry */
- nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
- nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
- if (retries)
- udelay(400);
-
- /* transaction request, wait up to 1ms for it to complete */
- nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
-
- timeout = 1000;
- do {
- ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("tx req timeout 0x%08x\n", ctrl);
- goto out;
- }
- } while (ctrl & 0x00010000);
+ return max_vs;
+}
- /* read status, and check if transaction completed ok */
- stat = nv_mask(dev, 0x00e4e8 + (ch * 0x50), 0, 0);
- if (!(stat & 0x000f0f00)) {
- ret = 0;
- break;
- }
+static int
+nouveau_dp_max_pre_emphasis(struct drm_encoder *encoder, int vs)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct bit_displayport_encoder_table_entry *dpse;
+ struct bit_displayport_encoder_table *dpe;
+ int i, dpe_headerlen, max_pre = 0;
- AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
- }
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe)
+ return false;
+ dpse = (void *)((char *)dpe + dpe_headerlen);
- if (type & 1) {
- for (i = 0; i < 16; i += 4) {
- xbuf[i / 4] = nv_rd32(dev, 0x00e4d0 + (ch * 0x50) + i);
- AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
- }
- memcpy(data, xbuf, size);
+ for (i = 0; i < dpe_headerlen; i++, dpse++) {
+ if (dpse->vs_level != vs)
+ continue;
+
+ if (dpse->pre_level > max_pre)
+ max_pre = dpse->pre_level;
}
-out:
- auxch_fini(dev, ch);
- return ret;
+ return max_pre;
}
-u8 *
-nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
+static bool
+nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
{
- struct bit_entry d;
- u8 *table;
- int i;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct bit_displayport_encoder_table *dpe;
+ int ret, i, dpe_headerlen, vs = 0, pre = 0;
+ uint8_t request[2];
- if (bit_table(dev, 'd', &d)) {
- NV_ERROR(dev, "BIT 'd' table not found\n");
- return NULL;
- }
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe)
+ return false;
- if (d.version != 1) {
- NV_ERROR(dev, "BIT 'd' table version %d unknown\n", d.version);
- return NULL;
- }
+ ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
+ if (ret)
+ return false;
+
+ NV_DEBUG_KMS(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
- table = ROMPTR(dev, d.data[0]);
- if (!table) {
- NV_ERROR(dev, "displayport table pointer invalid\n");
- return NULL;
+ /* Keep all lanes at the same level.. */
+ for (i = 0; i < nv_encoder->dp.link_nr; i++) {
+ int lane_req = (request[i >> 1] >> ((i & 1) << 2)) & 0xf;
+ int lane_vs = lane_req & 3;
+ int lane_pre = (lane_req >> 2) & 3;
+
+ if (lane_vs > vs)
+ vs = lane_vs;
+ if (lane_pre > pre)
+ pre = lane_pre;
}
- switch (table[0]) {
- case 0x20:
- case 0x21:
- case 0x30:
- break;
- default:
- NV_ERROR(dev, "displayport table 0x%02x unknown\n", table[0]);
- return NULL;
+ if (vs >= nouveau_dp_max_voltage_swing(encoder)) {
+ vs = nouveau_dp_max_voltage_swing(encoder);
+ vs |= 4;
}
- for (i = 0; i < table[3]; i++) {
- *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
- if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
- return table;
+ if (pre >= nouveau_dp_max_pre_emphasis(encoder, vs & 3)) {
+ pre = nouveau_dp_max_pre_emphasis(encoder, vs & 3);
+ pre |= 4;
}
- NV_ERROR(dev, "displayport encoder table not found\n");
- return NULL;
-}
+ /* Update the configuration for all lanes.. */
+ for (i = 0; i < nv_encoder->dp.link_nr; i++)
+ config[i] = (pre << 3) | vs;
-/******************************************************************************
- * link training
- *****************************************************************************/
-struct dp_state {
- struct dp_train_func *func;
- struct dcb_entry *dcb;
- int auxch;
- int crtc;
- u8 *dpcd;
- int link_nr;
- u32 link_bw;
- u8 stat[6];
- u8 conf[4];
-};
+ return true;
+}
-static void
-dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
+static bool
+nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
{
- u8 sink[2];
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct bit_displayport_encoder_table_entry *dpse;
+ struct bit_displayport_encoder_table *dpe;
+ int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
+ int dpe_headerlen, ret, i;
- NV_DEBUG_KMS(dev, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+ NV_DEBUG_KMS(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ config[0], config[1], config[2], config[3]);
- /* set desired link configuration on the source */
- dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
- dp->dpcd[2] & DP_ENHANCED_FRAME_CAP);
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe)
+ return false;
+ dpse = (void *)((char *)dpe + dpe_headerlen);
- /* inform the sink of the new configuration */
- sink[0] = dp->link_bw / 27000;
- sink[1] = dp->link_nr;
- if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
- sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ for (i = 0; i < dpe->record_nr; i++, dpse++) {
+ if (dpse->vs_level == (config[0] & 3) &&
+ dpse->pre_level == ((config[0] >> 3) & 3))
+ break;
+ }
+ BUG_ON(i == dpe->record_nr);
+
+ for (i = 0; i < nv_encoder->dp.link_nr; i++) {
+ const int shift[4] = { 16, 8, 0, 24 };
+ uint32_t mask = 0xff << shift[i];
+ uint32_t reg0, reg1, reg2;
+
+ reg0 = nv_rd32(dev, NV50_SOR_DP_UNK118(or, link)) & ~mask;
+ reg0 |= (dpse->reg0 << shift[i]);
+ reg1 = nv_rd32(dev, NV50_SOR_DP_UNK120(or, link)) & ~mask;
+ reg1 |= (dpse->reg1 << shift[i]);
+ reg2 = nv_rd32(dev, NV50_SOR_DP_UNK130(or, link)) & 0xffff00ff;
+ reg2 |= (dpse->reg2 << 8);
+ nv_wr32(dev, NV50_SOR_DP_UNK118(or, link), reg0);
+ nv_wr32(dev, NV50_SOR_DP_UNK120(or, link), reg1);
+ nv_wr32(dev, NV50_SOR_DP_UNK130(or, link), reg2);
+ }
+
+ ret = auxch_wr(encoder, DP_TRAINING_LANE0_SET, config, 4);
+ if (ret)
+ return false;
- auxch_tx(dev, dp->auxch, 8, DP_LINK_BW_SET, sink, 2);
+ return true;
}
-static void
-dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
+bool
+nouveau_dp_link_train(struct drm_encoder *encoder)
{
- u8 sink_tp;
-
- NV_DEBUG_KMS(dev, "training pattern %d\n", pattern);
-
- dp->func->train_set(dev, dp->dcb, pattern);
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct bit_displayport_encoder_table *dpe;
+ int dpe_headerlen;
+ uint8_t config[4], status[3];
+ bool cr_done, cr_max_vs, eq_done, hpd_state;
+ int ret = 0, i, tries, voltage;
- auxch_tx(dev, dp->auxch, 9, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
- sink_tp &= ~DP_TRAINING_PATTERN_MASK;
- sink_tp |= pattern;
- auxch_tx(dev, dp->auxch, 8, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
-}
+ NV_DEBUG_KMS(dev, "link training!!\n");
-static int
-dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
-{
- int i;
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!nv_connector)
+ return false;
- for (i = 0; i < dp->link_nr; i++) {
- u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
- u8 lpre = (lane & 0x0c) >> 2;
- u8 lvsw = (lane & 0x03) >> 0;
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe) {
+ NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
+ return false;
+ }
- dp->conf[i] = (lpre << 3) | lvsw;
- if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200)
- dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED;
- if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5)
- dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ /* disable hotplug detect, this flips around on some panels during
+ * link training.
+ */
+ hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
- NV_DEBUG_KMS(dev, "config lane %d %02x\n", i, dp->conf[i]);
- dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
+ if (dpe->script0) {
+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
+ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
+ nv_encoder->dcb);
}
- return auxch_tx(dev, dp->auxch, 8, DP_TRAINING_LANE0_SET, dp->conf, 4);
-}
+train:
+ cr_done = eq_done = false;
-static int
-dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay)
-{
- int ret;
+ /* set link configuration */
+ NV_DEBUG_KMS(dev, "\tbegin train: bw %d, lanes %d\n",
+ nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);
+
+ ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
+ if (ret)
+ return false;
- udelay(delay);
+ config[0] = nv_encoder->dp.link_nr;
+ if (nv_encoder->dp.dpcd_version >= 0x11 &&
+ nv_encoder->dp.enhanced_frame)
+ config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- ret = auxch_tx(dev, dp->auxch, 9, DP_LANE0_1_STATUS, dp->stat, 6);
+ ret = nouveau_dp_lane_count_set(encoder, config[0]);
if (ret)
- return ret;
+ return false;
- NV_DEBUG_KMS(dev, "status %02x %02x %02x %02x %02x %02x\n",
- dp->stat[0], dp->stat[1], dp->stat[2], dp->stat[3],
- dp->stat[4], dp->stat[5]);
- return 0;
-}
+ /* clock recovery */
+ NV_DEBUG_KMS(dev, "\tbegin cr\n");
+ ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
+ if (ret)
+ goto stop;
-static int
-dp_link_train_cr(struct drm_device *dev, struct dp_state *dp)
-{
- bool cr_done = false, abort = false;
- int voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- int tries = 0, i;
+ tries = 0;
+ voltage = -1;
+ memset(config, 0x00, sizeof(config));
+ for (;;) {
+ if (!nouveau_dp_link_train_commit(encoder, config))
+ break;
- dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_1);
+ udelay(100);
- do {
- if (dp_link_train_commit(dev, dp) ||
- dp_link_train_update(dev, dp, 100))
+ ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
+ if (ret)
break;
+ NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
+ status[0], status[1]);
cr_done = true;
- for (i = 0; i < dp->link_nr; i++) {
- u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+ cr_max_vs = false;
+ for (i = 0; i < nv_encoder->dp.link_nr; i++) {
+ int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
+
if (!(lane & DP_LANE_CR_DONE)) {
cr_done = false;
- if (dp->conf[i] & DP_TRAIN_MAX_SWING_REACHED)
- abort = true;
+ if (config[i] & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED)
+ cr_max_vs = true;
break;
}
}
- if ((dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
- voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ if ((config[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
+ voltage = config[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
tries = 0;
}
- } while (!cr_done && !abort && ++tries < 5);
- return cr_done ? 0 : -1;
-}
+ if (cr_done || cr_max_vs || (++tries == 5))
+ break;
-static int
-dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
-{
- bool eq_done, cr_done = true;
- int tries = 0, i;
+ if (!nouveau_dp_link_train_adjust(encoder, config))
+ break;
+ }
+
+ if (!cr_done)
+ goto stop;
+
+ /* channel equalisation */
+ NV_DEBUG_KMS(dev, "\tbegin eq\n");
+ ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
+ if (ret)
+ goto stop;
- dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_2);
+ for (tries = 0; tries <= 5; tries++) {
+ udelay(400);
- do {
- if (dp_link_train_update(dev, dp, 400))
+ ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
+ if (ret)
break;
+ NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
+ status[0], status[1]);
- eq_done = !!(dp->stat[2] & DP_INTERLANE_ALIGN_DONE);
- for (i = 0; i < dp->link_nr && eq_done; i++) {
- u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
- if (!(lane & DP_LANE_CR_DONE))
+ eq_done = true;
+ if (!(status[2] & DP_INTERLANE_ALIGN_DONE))
+ eq_done = false;
+
+ for (i = 0; eq_done && i < nv_encoder->dp.link_nr; i++) {
+ int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
+
+ if (!(lane & DP_LANE_CR_DONE)) {
cr_done = false;
+ break;
+ }
+
if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
- !(lane & DP_LANE_SYMBOL_LOCKED))
+ !(lane & DP_LANE_SYMBOL_LOCKED)) {
eq_done = false;
+ break;
+ }
}
- if (dp_link_train_commit(dev, dp))
+ if (eq_done || !cr_done)
break;
- } while (!eq_done && cr_done && ++tries <= 5);
-
- return eq_done ? 0 : -1;
-}
-static void
-dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable)
-{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30) {
- if (enable) script = ROM16(entry[12]);
- else script = ROM16(entry[14]);
- }
+ if (!nouveau_dp_link_train_adjust(encoder, config) ||
+ !nouveau_dp_link_train_commit(encoder, config))
+ break;
}
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
-}
+stop:
+ /* end link training */
+ ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_DISABLE);
+ if (ret)
+ return false;
-static void
-dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
-{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30)
- script = ROM16(entry[6]);
+ /* retry at a lower setting, if possible */
+ if (!ret && !(eq_done && cr_done)) {
+ NV_DEBUG_KMS(dev, "\twe failed\n");
+ if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
+ NV_DEBUG_KMS(dev, "retry link training at low rate\n");
+ nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
+ goto train;
+ }
}
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
-}
-
-static void
-dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
-{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30)
- script = ROM16(entry[8]);
+ if (dpe->script1) {
+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
+ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
+ nv_encoder->dcb);
}
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+ /* re-enable hotplug detect */
+ pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);
+
+ return eq_done;
}
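The retry policy above is: one full clock-recovery plus equalisation pass at the current rate, then fall back from 2.7 to 1.62 Gbps and train once more. Its shape, with a stub for the per-rate pass:

#include <stdbool.h>

enum { BW_1_62 = 0x06, BW_2_7 = 0x0a };   /* DP_LINK_BW_* values */

bool train_once(int bw);   /* stub: one cr + eq attempt */

static bool train_with_fallback(void)
{
	int bw = BW_2_7;

	for (;;) {
		if (train_once(bw))
			return true;
		if (bw == BW_1_62)
			return false;   /* nothing lower to try */
		bw = BW_1_62;           /* the "goto train" retry above */
	}
}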
bool
-nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
- struct dp_train_func *func)
+nouveau_dp_detect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector =
- nouveau_encoder_connector_get(nv_encoder);
struct drm_device *dev = encoder->dev;
- struct nouveau_i2c_chan *auxch;
- const u32 bw_list[] = { 270000, 162000, 0 };
- const u32 *link_bw = bw_list;
- struct dp_state dp;
+ uint8_t dpcd[4];
+ int ret;
- auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
- if (!auxch)
+ ret = auxch_rd(encoder, 0x0000, dpcd, 4);
+ if (ret)
return false;
- dp.func = func;
- dp.dcb = nv_encoder->dcb;
- dp.crtc = nv_crtc->index;
- dp.auxch = auxch->drive;
- dp.dpcd = nv_encoder->dp.dpcd;
-
- /* adjust required bandwidth for 8B/10B coding overhead */
- datarate = (datarate / 8) * 10;
-
- /* some sinks toggle hotplug in response to some of the actions
- * we take during link training (DP_SET_POWER is one), we need
- * to ignore them for the moment to avoid races.
- */
- nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false);
-
- /* enable down-spreading, if possible */
- dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
-
- /* execute pre-train script from vbios */
- dp_link_train_init(dev, &dp);
-
- /* start off at highest link rate supported by encoder and display */
- while (*link_bw > nv_encoder->dp.link_bw)
- link_bw++;
-
- while (link_bw[0]) {
- /* find minimum required lane count at this link rate */
- dp.link_nr = nv_encoder->dp.link_nr;
- while ((dp.link_nr >> 1) * link_bw[0] > datarate)
- dp.link_nr >>= 1;
-
- /* drop link rate to minimum with this lane count */
- while ((link_bw[1] * dp.link_nr) > datarate)
- link_bw++;
- dp.link_bw = link_bw[0];
+ NV_DEBUG_KMS(dev, "encoder: link_bw %d, link_nr %d\n"
+ "display: link_bw %d, link_nr %d version 0x%02x\n",
+ nv_encoder->dcb->dpconf.link_bw,
+ nv_encoder->dcb->dpconf.link_nr,
+ dpcd[1], dpcd[2] & 0x0f, dpcd[0]);
- /* program selected link configuration */
- dp_set_link_config(dev, &dp);
+ nv_encoder->dp.dpcd_version = dpcd[0];
- /* attempt to train the link at this configuration */
- memset(dp.stat, 0x00, sizeof(dp.stat));
- if (!dp_link_train_cr(dev, &dp) &&
- !dp_link_train_eq(dev, &dp))
- break;
-
- /* retry at lower rate */
- link_bw++;
- }
+ nv_encoder->dp.link_bw = dpcd[1];
+ if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62 &&
+ !nv_encoder->dcb->dpconf.link_bw)
+ nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
- /* finish link training */
- dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE);
+ nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
+ if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
+ nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
- /* execute post-train script from vbios */
- dp_link_train_fini(dev, &dp);
+ nv_encoder->dp.enhanced_frame = (dpcd[2] & DP_ENHANCED_FRAME_CAP);
- /* re-enable hotplug detect */
- nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true);
return true;
}
-void
-nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
- struct dp_train_func *func)
+int
+nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
+ uint8_t *data, int data_nr)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_i2c_chan *auxch;
- u8 status;
-
- auxch = nouveau_i2c_find(encoder->dev, nv_encoder->dcb->i2c_index);
- if (!auxch)
- return;
+ struct drm_device *dev = auxch->dev;
+ uint32_t tmp, ctrl, stat = 0, data32[4] = {};
+ int ret = 0, i, index = auxch->rd;
+
+ NV_DEBUG_KMS(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
+
+ tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+ nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
+ tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+ if (!(tmp & 0x01000000)) {
+ NV_ERROR(dev, "expected bit 24 == 1, got 0x%08x\n", tmp);
+ ret = -EIO;
+ goto out;
+ }
- if (mode == DRM_MODE_DPMS_ON)
- status = DP_SET_POWER_D0;
- else
- status = DP_SET_POWER_D3;
+ for (i = 0; i < 3; i++) {
+ tmp = nv_rd32(dev, NV50_AUXCH_STAT(auxch->rd));
+ if (tmp & NV50_AUXCH_STAT_STATE_READY)
+ break;
+ udelay(100);
+ }
- nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
+ if (i == 3) {
+ ret = -EBUSY;
+ goto out;
+ }
- if (mode == DRM_MODE_DPMS_ON)
- nouveau_dp_link_train(encoder, datarate, func);
-}
+ if (!(cmd & 1)) {
+ memcpy(data32, data, data_nr);
+ for (i = 0; i < 4; i++) {
+ NV_DEBUG_KMS(dev, "wr %d: 0x%08x\n", i, data32[i]);
+ nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
+ }
+ }
-bool
-nouveau_dp_detect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_i2c_chan *auxch;
- u8 *dpcd = nv_encoder->dp.dpcd;
- int ret;
+ nv_wr32(dev, NV50_AUXCH_ADDR(index), addr);
+ ctrl = nv_rd32(dev, NV50_AUXCH_CTRL(index));
+ ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN);
+ ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
+ ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);
+
+ for (i = 0; i < 16; i++) {
+ nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
+ nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
+ nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
+ if (!nv_wait(dev, NV50_AUXCH_CTRL(index),
+ 0x00010000, 0x00000000)) {
+ NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
+ nv_rd32(dev, NV50_AUXCH_CTRL(index)));
+ ret = -EBUSY;
+ goto out;
+ }
- auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
- if (!auxch)
- return false;
+ udelay(400);
- ret = auxch_tx(dev, auxch->drive, 9, DP_DPCD_REV, dpcd, 8);
- if (ret)
- return false;
+ stat = nv_rd32(dev, NV50_AUXCH_STAT(index));
+ if ((stat & NV50_AUXCH_STAT_REPLY_AUX) !=
+ NV50_AUXCH_STAT_REPLY_AUX_DEFER)
+ break;
+ }
- nv_encoder->dp.link_bw = 27000 * dpcd[1];
- nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
+ if (i == 16) {
+ NV_ERROR(dev, "auxch DEFER too many times, bailing\n");
+ ret = -EREMOTEIO;
+ goto out;
+ }
- NV_DEBUG_KMS(dev, "display: %dx%d dpcd 0x%02x\n",
- nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]);
- NV_DEBUG_KMS(dev, "encoder: %dx%d\n",
- nv_encoder->dcb->dpconf.link_nr,
- nv_encoder->dcb->dpconf.link_bw);
+ if (cmd & 1) {
+ if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
+ ret = -EREMOTEIO;
+ goto out;
+ }
- if (nv_encoder->dcb->dpconf.link_nr < nv_encoder->dp.link_nr)
- nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
- if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw)
- nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw;
+ for (i = 0; i < 4; i++) {
+ data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
+ NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
+ }
+ memcpy(data, data32, data_nr);
+ }
- NV_DEBUG_KMS(dev, "maximum: %dx%d\n",
- nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
+out:
+ tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+ nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp & ~0x00100000);
+ tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+ if (tmp & 0x01000000) {
+ NV_ERROR(dev, "expected bit 24 == 0, got 0x%08x\n", tmp);
+ ret = -EIO;
+ }
- return true;
-}
+ udelay(400);
-int
-nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
- uint8_t *data, int data_nr)
-{
- return auxch_tx(auxch->dev, auxch->drive, cmd, addr, data, data_nr);
+ return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY);
}
static int
nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adap;
+ struct drm_device *dev = auxch->dev;
struct i2c_msg *msg = msgs;
int ret, mcnt = num;
@@ -570,6 +602,19 @@ nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (ret < 0)
return ret;
+ switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
+ case NV50_AUXCH_STAT_REPLY_I2C_ACK:
+ break;
+ case NV50_AUXCH_STAT_REPLY_I2C_NACK:
+ return -EREMOTEIO;
+ case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
+ udelay(100);
+ continue;
+ default:
+ NV_ERROR(dev, "bad auxch reply: 0x%08x\n", ret);
+ return -EREMOTEIO;
+ }
+
ptr += cnt;
remaining -= cnt;
}
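The reply handling added to the I2C-over-AUX path retries on DEFER and aborts on NACK; the hard cap on DEFERs (16 tries) lives inside nouveau_dp_auxch() itself. The skeleton, with a stub transfer function:

enum reply { ACK, NACK, DEFER };

enum reply aux_xfer(int addr, unsigned char *buf, int len);  /* stub */
void wait_us(int us);                                        /* stub */

static int i2c_over_aux(int addr, unsigned char *buf, int len)
{
	for (;;) {
		switch (aux_xfer(addr, buf, len)) {
		case ACK:
			return 0;
		case NACK:
			return -1;      /* -EREMOTEIO in the driver */
		case DEFER:
			wait_us(100);   /* sink busy, retry the chunk */
			break;
		}
	}
}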
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 4f2030b..02c6f37 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -23,7 +23,6 @@
*/
#include <linux/console.h>
-#include <linux/module.h>
#include "drmP.h"
#include "drm.h"
@@ -42,7 +41,7 @@ int nouveau_agpmode = -1;
module_param_named(agpmode, nouveau_agpmode, int, 0400);
MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
-int nouveau_modeset = -1;
+static int nouveau_modeset = -1; /* kms */
module_param_named(modeset, nouveau_modeset, int, 0400);
MODULE_PARM_DESC(vbios, "Override default VBIOS location");
@@ -57,10 +56,6 @@ MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
int nouveau_vram_notify = 0;
module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
-MODULE_PARM_DESC(vram_type, "Override detected VRAM type");
-char *nouveau_vram_type;
-module_param_named(vram_type, nouveau_vram_type, charp, 0400);
-
MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
int nouveau_duallink = 1;
module_param_named(duallink, nouveau_duallink, int, 0400);
@@ -78,7 +73,7 @@ int nouveau_ignorelid = 0;
module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
MODULE_PARM_DESC(noaccel, "Disable all acceleration");
-int nouveau_noaccel = -1;
+int nouveau_noaccel = 0;
module_param_named(noaccel, nouveau_noaccel, int, 0400);
MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
@@ -93,7 +88,7 @@ MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
int nouveau_override_conntype = 0;
module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
-MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
+MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n");
int nouveau_tv_disable = 0;
module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
@@ -108,30 +103,22 @@ module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
"\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
"\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
- "\t\t0x100 vgaattr, 0x200 EVO (G80+)");
+ "\t\t0x100 vgaattr, 0x200 EVO (G80+). ");
int nouveau_reg_debug;
module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
-MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
+MODULE_PARM_DESC(perflvl, "Performance level (default: boot)\n");
char *nouveau_perflvl;
module_param_named(perflvl, nouveau_perflvl, charp, 0400);
-MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
+MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
int nouveau_perflvl_wr;
module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
-MODULE_PARM_DESC(msi, "Enable MSI (default: off)");
+MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
int nouveau_msi;
module_param_named(msi, nouveau_msi, int, 0400);
-MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)");
-int nouveau_ctxfw;
-module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
-
-MODULE_PARM_DESC(mxmdcb, "Santise DCB table according to MXM-SIS");
-int nouveau_mxmdcb = 1;
-module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
-
int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -186,11 +173,8 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- NV_INFO(dev, "Disabling display...\n");
- nouveau_display_fini(dev);
-
- NV_INFO(dev, "Disabling fbcon...\n");
- nouveau_fbcon_set_suspend(dev, 1);
+ NV_INFO(dev, "Disabling fbcon acceleration...\n");
+ nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -226,13 +210,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pfifo->unload_context(dev);
for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
- if (!dev_priv->eng[e])
- continue;
-
- ret = dev_priv->eng[e]->fini(dev, e, true);
- if (ret) {
- NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
- goto out_abort;
+ if (dev_priv->eng[e]) {
+ ret = dev_priv->eng[e]->fini(dev, e);
+ if (ret)
+ goto out_abort;
}
}
@@ -257,6 +238,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pci_set_power_state(pdev, PCI_D3hot);
}
+ console_lock();
+ nouveau_fbcon_set_suspend(dev, 1);
+ console_unlock();
+ nouveau_fbcon_restore_accel(dev);
return 0;
out_abort:
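The four lines added above restore the 3.0 calling convention in which the console lock is taken by the suspend/resume paths rather than inside nouveau_fbcon_set_suspend() itself (compare the nouveau_fbcon.c hunk near the end of this patch). A hedged sketch of the caller-side pattern:

#include <linux/console.h>

static void example_fbcon_suspend(struct drm_device *dev, int state)
{
	/* fb_set_suspend() must run under the console lock; after this
	 * patch the caller, not the fbcon helper, provides it. */
	console_lock();
	nouveau_fbcon_set_suspend(dev, state);
	console_unlock();
}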
@@ -282,6 +267,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ nouveau_fbcon_save_disable_accel(dev);
+
NV_INFO(dev, "We're back, enabling device...\n");
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
@@ -301,6 +288,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (ret)
return ret;
+ nouveau_pm_resume(dev);
+
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
ret = nouveau_mem_init_agp(dev);
if (ret) {
@@ -340,8 +329,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
}
- nouveau_pm_resume(dev);
-
NV_INFO(dev, "Restoring mode...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -363,10 +350,16 @@ nouveau_pci_resume(struct pci_dev *pdev)
NV_ERROR(dev, "Could not pin/map cursor.\n");
}
- nouveau_fbcon_set_suspend(dev, 0);
- nouveau_fbcon_zfill_all(dev);
+ engine->display.init(dev);
- nouveau_display_init(dev);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
+
+ nv_crtc->cursor.set_offset(nv_crtc, offset);
+ nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+ nv_crtc->cursor_saved_y);
+ }
/* Force CLUT to get re-loaded during modeset */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -375,35 +368,18 @@ nouveau_pci_resume(struct pci_dev *pdev)
nv_crtc->lut.depth = 0;
}
- drm_helper_resume_force_mode(dev);
+ console_lock();
+ nouveau_fbcon_set_suspend(dev, 0);
+ console_unlock();
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 offset = nv_crtc->cursor.nvbo->bo.offset;
+ nouveau_fbcon_zfill_all(dev);
- nv_crtc->cursor.set_offset(nv_crtc, offset);
- nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
- nv_crtc->cursor_saved_y);
- }
+ drm_helper_resume_force_mode(dev);
+ nouveau_fbcon_restore_accel(dev);
return 0;
}
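Worth noting in the resume path restored above: the cursor offset is recomputed from the backing object's memory node (bo.mem.start << PAGE_SHIFT) rather than from the removed per-VM bo.offset, and the CLUT depth is zeroed so the forced modeset reloads the palette. A condensed sketch of that ordering, using only calls present in the hunk:

list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;

	nv_crtc->cursor.set_offset(nv_crtc, offset);	/* restore HW cursor */
	nv_crtc->lut.depth = 0;				/* force CLUT reload */
}
drm_helper_resume_force_mode(dev);			/* replays the saved mode */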
-static const struct file_operations nouveau_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = nouveau_ttm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#if defined(CONFIG_COMPAT)
- .compat_ioctl = nouveau_compat_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
@@ -413,9 +389,7 @@ static struct drm_driver driver = {
.firstopen = nouveau_firstopen,
.lastclose = nouveau_lastclose,
.unload = nouveau_unload,
- .open = nouveau_open,
.preclose = nouveau_preclose,
- .postclose = nouveau_postclose,
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
.debugfs_init = nouveau_debugfs_init,
.debugfs_cleanup = nouveau_debugfs_takedown,
@@ -429,15 +403,23 @@ static struct drm_driver driver = {
.disable_vblank = nouveau_vblank_disable,
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
- .fops = &nouveau_driver_fops,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = nouveau_ttm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = nouveau_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+ },
+
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
- .gem_open_object = nouveau_gem_object_open,
- .gem_close_object = nouveau_gem_object_close,
-
- .dumb_create = nouveau_display_dumb_create,
- .dumb_map_offset = nouveau_display_dumb_map_offset,
- .dumb_destroy = nouveau_display_dumb_destroy,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
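The fops change above follows from the older DRM core this tree targets: there struct drm_driver embeds the file_operations table directly, so the standalone const table is folded back in. For contrast, a sketch of the newer style being removed (names hypothetical; this form compiles only against DRM cores where .fops is a pointer):

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.llseek		= noop_llseek,
};

static struct drm_driver example_driver = {
	.fops = &example_fops,	/* pointer, not an embedded struct */
};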
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index a184ba3..9c56331 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -46,17 +46,9 @@
#include "ttm/ttm_module.h"
struct nouveau_fpriv {
- spinlock_t lock;
- struct list_head channels;
- struct nouveau_vm *vm;
+ struct ttm_object_file *tfile;
};
-static inline struct nouveau_fpriv *
-nouveau_fpriv(struct drm_file *file_priv)
-{
- return file_priv ? file_priv->driver_priv : NULL;
-}
-
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
#include "nouveau_drm.h"
@@ -77,7 +69,7 @@ struct nouveau_mem {
struct drm_device *dev;
struct nouveau_vma bar_vma;
- struct nouveau_vma vma[2];
+ struct nouveau_vma tmp_vma;
u8 page_shift;
struct drm_mm_node *tag;
@@ -115,8 +107,7 @@ struct nouveau_bo {
struct nouveau_channel *channel;
- struct list_head vma_list;
- unsigned page_shift;
+ struct nouveau_vma vma;
uint32_t tile_mode;
uint32_t tile_flags;
@@ -163,9 +154,6 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_COPY0 3
#define NVOBJ_ENGINE_COPY1 4
#define NVOBJ_ENGINE_MPEG 5
-#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
-#define NVOBJ_ENGINE_BSP 6
-#define NVOBJ_ENGINE_VP 7
#define NVOBJ_ENGINE_DISPLAY 15
#define NVOBJ_ENGINE_NR 16
@@ -188,10 +176,9 @@ struct nouveau_gpuobj {
uint32_t flags;
u32 size;
- u32 pinst; /* PRAMIN BAR offset */
- u32 cinst; /* Channel offset */
- u64 vinst; /* VRAM address */
- u64 linst; /* VM address */
+ u32 pinst;
+ u32 cinst;
+ u64 vinst;
uint32_t engine;
uint32_t class;
@@ -214,7 +201,6 @@ enum nouveau_channel_mutex_class {
struct nouveau_channel {
struct drm_device *dev;
- struct list_head list;
int id;
/* references to the channel data structure */
@@ -232,7 +218,6 @@ struct nouveau_channel {
/* mapping of the regs controlling the fifo */
void __iomem *user;
uint32_t user_get;
- uint32_t user_get_hi;
uint32_t user_put;
/* Fencing */
@@ -243,18 +228,15 @@ struct nouveau_channel {
uint32_t sequence;
uint32_t sequence_ack;
atomic_t last_sequence_irq;
- struct nouveau_vma vma;
} fence;
/* DMA push buffer */
struct nouveau_gpuobj *pushbuf;
struct nouveau_bo *pushbuf_bo;
- struct nouveau_vma pushbuf_vma;
- uint64_t pushbuf_base;
+ uint32_t pushbuf_base;
/* Notifier memory */
struct nouveau_bo *notifier_bo;
- struct nouveau_vma notifier_vma;
struct drm_mm notifier_heap;
/* PFIFO context */
@@ -296,7 +278,6 @@ struct nouveau_channel {
uint32_t sw_subchannel[8];
- struct nouveau_vma dispc_vma[2];
struct {
struct nouveau_gpuobj *vblsem;
uint32_t vblsem_head;
@@ -316,7 +297,7 @@ struct nouveau_channel {
struct nouveau_exec_engine {
void (*destroy)(struct drm_device *, int engine);
int (*init)(struct drm_device *, int engine);
- int (*fini)(struct drm_device *, int engine, bool suspend);
+ int (*fini)(struct drm_device *, int engine);
int (*context_new)(struct nouveau_channel *, int engine);
void (*context_del)(struct nouveau_channel *, int engine);
int (*object_new)(struct nouveau_channel *, int engine,
@@ -333,8 +314,7 @@ struct nouveau_instmem_engine {
int (*suspend)(struct drm_device *dev);
void (*resume)(struct drm_device *dev);
- int (*get)(struct nouveau_gpuobj *, struct nouveau_channel *,
- u32 size, u32 align);
+ int (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
void (*put)(struct nouveau_gpuobj *);
int (*map)(struct nouveau_gpuobj *);
void (*unmap)(struct nouveau_gpuobj *);
@@ -397,145 +377,77 @@ struct nouveau_display_engine {
int (*early_init)(struct drm_device *);
void (*late_takedown)(struct drm_device *);
int (*create)(struct drm_device *);
- void (*destroy)(struct drm_device *);
int (*init)(struct drm_device *);
- void (*fini)(struct drm_device *);
-
- struct drm_property *dithering_mode;
- struct drm_property *dithering_depth;
- struct drm_property *underscan_property;
- struct drm_property *underscan_hborder_property;
- struct drm_property *underscan_vborder_property;
- /* not really hue and saturation: */
- struct drm_property *vibrant_hue_property;
- struct drm_property *color_vibrance_property;
+ void (*destroy)(struct drm_device *);
};
struct nouveau_gpio_engine {
- spinlock_t lock;
- struct list_head isr;
- int (*init)(struct drm_device *);
- void (*fini)(struct drm_device *);
- int (*drive)(struct drm_device *, int line, int dir, int out);
- int (*sense)(struct drm_device *, int line);
- void (*irq_enable)(struct drm_device *, int line, bool);
+ void *priv;
+
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+
+ int (*get)(struct drm_device *, enum dcb_gpio_tag);
+ int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
+
+ int (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+ void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+ bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
};
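The reverted nouveau_gpio_engine above is a plain vtable keyed by DCB GPIO tag instead of raw line numbers. A hedged usage sketch (tag value illustrative), dispatching through the engine struct the way the removed line-based helpers at the end of this patch did:

static int example_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;

	/* e.g. tag == DCB_GPIO_TVDAC0 to sense the TV load-detect pin */
	return pgpio->get ? pgpio->get(dev, tag) : -ENODEV;
}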
struct nouveau_pm_voltage_level {
- u32 voltage; /* microvolts */
- u8 vid;
+ u8 voltage;
+ u8 vid;
};
struct nouveau_pm_voltage {
bool supported;
- u8 version;
u8 vid_mask;
struct nouveau_pm_voltage_level *level;
int nr_level;
};
-/* Exclusive upper limits */
-#define NV_MEM_CL_DDR2_MAX 8
-#define NV_MEM_WR_DDR2_MAX 9
-#define NV_MEM_CL_DDR3_MAX 17
-#define NV_MEM_WR_DDR3_MAX 17
-#define NV_MEM_CL_GDDR3_MAX 16
-#define NV_MEM_WR_GDDR3_MAX 18
-#define NV_MEM_CL_GDDR5_MAX 21
-#define NV_MEM_WR_GDDR5_MAX 20
-
struct nouveau_pm_memtiming {
int id;
-
- u32 reg[9];
- u32 mr[4];
-
- u8 tCWL;
-
- u8 odt;
- u8 drive_strength;
-};
-
-struct nouveau_pm_tbl_header {
- u8 version;
- u8 header_len;
- u8 entry_cnt;
- u8 entry_len;
-};
-
-struct nouveau_pm_tbl_entry {
- u8 tWR;
- u8 tWTR;
- u8 tCL;
- u8 tRC;
- u8 empty_4;
- u8 tRFC; /* Byte 5 */
- u8 empty_6;
- u8 tRAS; /* Byte 7 */
- u8 empty_8;
- u8 tRP; /* Byte 9 */
- u8 tRCDRD;
- u8 tRCDWR;
- u8 tRRD;
- u8 tUNK_13;
- u8 RAM_FT1; /* 14, a bitmask of random RAM features */
- u8 empty_15;
- u8 tUNK_16;
- u8 empty_17;
- u8 tUNK_18;
- u8 tCWL;
- u8 tUNK_20, tUNK_21;
-};
-
-struct nouveau_pm_profile;
-struct nouveau_pm_profile_func {
- void (*destroy)(struct nouveau_pm_profile *);
- void (*init)(struct nouveau_pm_profile *);
- void (*fini)(struct nouveau_pm_profile *);
- struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
-};
-
-struct nouveau_pm_profile {
- const struct nouveau_pm_profile_func *func;
- struct list_head head;
- char name[8];
+ u32 reg_100220;
+ u32 reg_100224;
+ u32 reg_100228;
+ u32 reg_10022c;
+ u32 reg_100230;
+ u32 reg_100234;
+ u32 reg_100238;
+ u32 reg_10023c;
+ u32 reg_100240;
};
#define NOUVEAU_PM_MAX_LEVEL 8
struct nouveau_pm_level {
- struct nouveau_pm_profile profile;
struct device_attribute dev_attr;
char name[32];
int id;
- struct nouveau_pm_memtiming timing;
- u32 memory;
- u16 memscript;
-
u32 core;
+ u32 memory;
u32 shader;
- u32 rop;
- u32 copy;
- u32 daemon;
- u32 vdec;
- u32 dom6;
- u32 unka0; /* nva3:nvc0 */
- u32 hub01; /* nvc0- */
- u32 hub06; /* nvc0- */
- u32 hub07; /* nvc0- */
-
- u32 volt_min; /* microvolts */
- u32 volt_max;
- u8 fanspeed;
+ u32 unk05;
+ u32 unk0a;
+
+ u8 voltage;
+ u8 fanspeed;
+
+ u16 memscript;
+ struct nouveau_pm_memtiming *timing;
};
struct nouveau_pm_temp_sensor_constants {
u16 offset_constant;
s16 offset_mult;
- s16 offset_div;
- s16 slope_mult;
- s16 slope_div;
+ u16 offset_div;
+ u16 slope_mult;
+ u16 slope_div;
};
struct nouveau_pm_threshold_temp {
@@ -544,26 +456,19 @@ struct nouveau_pm_threshold_temp {
s16 fan_boost;
};
-struct nouveau_pm_fan {
- u32 percent;
- u32 min_duty;
- u32 max_duty;
- u32 pwm_freq;
- u32 pwm_divisor;
+struct nouveau_pm_memtimings {
+ bool supported;
+ struct nouveau_pm_memtiming *timing;
+ int nr_timing;
};
struct nouveau_pm_engine {
struct nouveau_pm_voltage voltage;
struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
int nr_perflvl;
+ struct nouveau_pm_memtimings memtimings;
struct nouveau_pm_temp_sensor_constants sensor_constants;
struct nouveau_pm_threshold_temp threshold_temp;
- struct nouveau_pm_fan fan;
-
- struct nouveau_pm_profile *profile_ac;
- struct nouveau_pm_profile *profile_dc;
- struct nouveau_pm_profile *profile;
- struct list_head profiles;
struct nouveau_pm_level boot;
struct nouveau_pm_level *cur;
@@ -571,22 +476,19 @@ struct nouveau_pm_engine {
struct device *hwmon;
struct notifier_block acpi_nb;
- int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
- void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
- int (*clocks_set)(struct drm_device *, void *);
-
+ int (*clock_get)(struct drm_device *, u32 id);
+ void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
+ u32 id, int khz);
+ void (*clock_set)(struct drm_device *, void *);
int (*voltage_get)(struct drm_device *);
int (*voltage_set)(struct drm_device *, int voltage);
- int (*pwm_get)(struct drm_device *, int line, u32*, u32*);
- int (*pwm_set)(struct drm_device *, int line, u32, u32);
+ int (*fanspeed_get)(struct drm_device *);
+ int (*fanspeed_set)(struct drm_device *, int fanspeed);
int (*temp_get)(struct drm_device *);
};
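The power-management vtable reverts from the batched clocks_get/clocks_pre/clocks_set interface to per-clock hooks. A hedged sketch of the restored two-phase flow, assuming PLL_CORE from enum pll_types and eliding error handling:

void *state;

state = pm->clock_pre(dev, perflvl, PLL_CORE, perflvl->core);
if (!IS_ERR(state) && state)
	pm->clock_set(dev, state);	/* commit the precomputed clock */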
struct nouveau_vram_engine {
- struct nouveau_mm mm;
-
int (*init)(struct drm_device *);
- void (*takedown)(struct drm_device *dev);
int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
u32 type, struct nouveau_mem **);
void (*put)(struct drm_device *, struct nouveau_mem **);
@@ -695,26 +597,24 @@ struct nv04_mode_state {
};
enum nouveau_card_type {
- NV_04 = 0x04,
+ NV_04 = 0x00,
NV_10 = 0x10,
NV_20 = 0x20,
NV_30 = 0x30,
NV_40 = 0x40,
NV_50 = 0x50,
NV_C0 = 0xc0,
- NV_D0 = 0xd0,
};
struct drm_nouveau_private {
struct drm_device *dev;
- bool noaccel;
/* the card type, takes NV_* as values */
enum nouveau_card_type card_type;
/* exact chipset, derived from NV_PMC_BOOT_0 */
int chipset;
+ int stepping;
int flags;
- u32 crystal;
void __iomem *mmio;
@@ -798,23 +698,11 @@ struct drm_nouveau_private {
} tile;
/* VRAM/fb configuration */
- enum {
- NV_MEM_TYPE_UNKNOWN = 0,
- NV_MEM_TYPE_STOLEN,
- NV_MEM_TYPE_SGRAM,
- NV_MEM_TYPE_SDRAM,
- NV_MEM_TYPE_DDR1,
- NV_MEM_TYPE_DDR2,
- NV_MEM_TYPE_DDR3,
- NV_MEM_TYPE_GDDR2,
- NV_MEM_TYPE_GDDR3,
- NV_MEM_TYPE_GDDR4,
- NV_MEM_TYPE_GDDR5
- } vram_type;
uint64_t vram_size;
uint64_t vram_sys_base;
- bool vram_rank_B;
+ u32 vram_rblock_size;
+ uint64_t fb_phys;
uint64_t fb_available_size;
uint64_t fb_mappable_pages;
uint64_t fb_aper_free;
@@ -828,8 +716,6 @@ struct drm_nouveau_private {
struct nouveau_vm *chan_vm;
struct nvbios vbios;
- u8 *mxms;
- struct list_head i2c_ports;
struct nv04_mode_state mode_reg;
struct nv04_mode_state saved_reg;
@@ -879,14 +765,12 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
}
/* nouveau_drv.c */
-extern int nouveau_modeset;
extern int nouveau_agpmode;
extern int nouveau_duallink;
extern int nouveau_uscript_lvds;
extern int nouveau_uscript_tmds;
extern int nouveau_vram_pushbuf;
extern int nouveau_vram_notify;
-extern char *nouveau_vram_type;
extern int nouveau_fbpercrtc;
extern int nouveau_tv_disable;
extern char *nouveau_tv_norm;
@@ -900,16 +784,12 @@ extern int nouveau_override_conntype;
extern char *nouveau_perflvl;
extern int nouveau_perflvl_wr;
extern int nouveau_msi;
-extern int nouveau_ctxfw;
-extern int nouveau_mxmdcb;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
/* nouveau_state.c */
-extern int nouveau_open(struct drm_device *, struct drm_file *);
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
-extern void nouveau_postclose(struct drm_device *, struct drm_file *);
extern int nouveau_load(struct drm_device *, unsigned long flags);
extern int nouveau_firstopen(struct drm_device *);
extern void nouveau_lastclose(struct drm_device *);
@@ -922,8 +802,6 @@ extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
uint32_t reg, uint32_t mask, uint32_t val);
-extern bool nouveau_wait_cb(struct drm_device *, u64 timeout,
- bool (*cond)(void *), void *);
extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
@@ -935,12 +813,8 @@ extern void nouveau_mem_gart_fini(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern int nouveau_mem_reset_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
+extern int nouveau_mem_detect(struct drm_device *);
extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
-extern int nouveau_mem_timing_calc(struct drm_device *, u32 freq,
- struct nouveau_pm_memtiming *);
-extern void nouveau_mem_timing_read(struct drm_device *,
- struct nouveau_pm_memtiming *);
-extern int nouveau_mem_vbios_type(struct drm_device *);
extern struct nouveau_tile_reg *nv10_mem_set_tiling(
struct drm_device *dev, uint32_t addr, uint32_t size,
uint32_t pitch, uint32_t flags);
@@ -973,7 +847,7 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
extern struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *);
extern struct nouveau_channel *
-nouveau_channel_get(struct drm_file *, int id);
+nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
extern void nouveau_channel_put(struct nouveau_channel **);
extern void nouveau_channel_ref(struct nouveau_channel *chan,
@@ -1056,10 +930,7 @@ extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *);
extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
uint32_t offset);
-extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
- unsigned long size,
- uint32_t page_flags,
- struct page *dummy_read_page);
+extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
/* nouveau_debugfs.c */
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
@@ -1100,14 +971,12 @@ extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
#if defined(CONFIG_ACPI)
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
-void nouveau_switcheroo_optimus_dsm(void);
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
int nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
-static inline void nouveau_switcheroo_optimus_dsm(void) {}
static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return -EINVAL; }
@@ -1115,15 +984,15 @@ static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector
/* nouveau_backlight.c */
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
-extern int nouveau_backlight_init(struct drm_device *);
-extern void nouveau_backlight_exit(struct drm_device *);
+extern int nouveau_backlight_init(struct drm_connector *);
+extern void nouveau_backlight_exit(struct drm_connector *);
#else
-static inline int nouveau_backlight_init(struct drm_device *dev)
+static inline int nouveau_backlight_init(struct drm_connector *dev)
{
return 0;
}
-static inline void nouveau_backlight_exit(struct drm_device *dev) { }
+static inline void nouveau_backlight_exit(struct drm_connector *dev) { }
#endif
/* nouveau_bios.c */
@@ -1131,15 +1000,19 @@ extern int nouveau_bios_init(struct drm_device *);
extern void nouveau_bios_takedown(struct drm_device *dev);
extern int nouveau_run_vbios_init(struct drm_device *);
extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
- struct dcb_entry *, int crtc);
-extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
+ struct dcb_entry *);
+extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
+ enum dcb_gpio_tag);
extern struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *, int index);
extern u32 get_pll_register(struct drm_device *, enum pll_types);
extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
struct pll_lims *);
-extern int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
- struct dcb_entry *, int crtc);
+extern int nouveau_bios_run_display_table(struct drm_device *,
+ struct dcb_entry *,
+ uint32_t script, int pxclk);
+extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *,
+ int *length);
extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -1148,28 +1021,23 @@ extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
int head, int pxclk);
extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
enum LVDS_script, int pxclk);
-bool bios_encoder_match(struct dcb_entry *, u32 hash);
-
-/* nouveau_mxm.c */
-int nouveau_mxm_init(struct drm_device *dev);
-void nouveau_mxm_fini(struct drm_device *dev);
/* nouveau_ttm.c */
int nouveau_ttm_global_init(struct drm_nouveau_private *);
void nouveau_ttm_global_release(struct drm_nouveau_private *);
int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
-/* nouveau_hdmi.c */
-void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+/* nouveau_dp.c */
+int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
+ uint8_t *data, int data_nr);
+bool nouveau_dp_detect(struct drm_encoder *);
+bool nouveau_dp_link_train(struct drm_encoder *);
/* nv04_fb.c */
-extern int nv04_fb_vram_init(struct drm_device *);
extern int nv04_fb_init(struct drm_device *);
extern void nv04_fb_takedown(struct drm_device *);
/* nv10_fb.c */
-extern int nv10_fb_vram_init(struct drm_device *dev);
-extern int nv1a_fb_vram_init(struct drm_device *dev);
extern int nv10_fb_init(struct drm_device *);
extern void nv10_fb_takedown(struct drm_device *);
extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
@@ -1178,16 +1046,6 @@ extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
-/* nv20_fb.c */
-extern int nv20_fb_vram_init(struct drm_device *dev);
-extern int nv20_fb_init(struct drm_device *);
-extern void nv20_fb_takedown(struct drm_device *);
-extern void nv20_fb_init_tile_region(struct drm_device *dev, int i,
- uint32_t addr, uint32_t size,
- uint32_t pitch, uint32_t flags);
-extern void nv20_fb_set_tile_region(struct drm_device *dev, int i);
-extern void nv20_fb_free_tile_region(struct drm_device *dev, int i);
-
/* nv30_fb.c */
extern int nv30_fb_init(struct drm_device *);
extern void nv30_fb_takedown(struct drm_device *);
@@ -1197,7 +1055,6 @@ extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
/* nv40_fb.c */
-extern int nv40_fb_vram_init(struct drm_device *dev);
extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
@@ -1263,6 +1120,7 @@ extern int nvc0_fifo_unload_context(struct drm_device *);
/* nv04_graph.c */
extern int nv04_graph_create(struct drm_device *);
+extern void nv04_graph_fifo_access(struct drm_device *, bool);
extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data);
@@ -1294,39 +1152,24 @@ extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
/* nv84_crypt.c */
extern int nv84_crypt_create(struct drm_device *);
-/* nv98_crypt.c */
-extern int nv98_crypt_create(struct drm_device *dev);
-
/* nva3_copy.c */
extern int nva3_copy_create(struct drm_device *dev);
/* nvc0_copy.c */
extern int nvc0_copy_create(struct drm_device *dev, int engine);
-/* nv31_mpeg.c */
-extern int nv31_mpeg_create(struct drm_device *dev);
+/* nv40_mpeg.c */
+extern int nv40_mpeg_create(struct drm_device *dev);
/* nv50_mpeg.c */
extern int nv50_mpeg_create(struct drm_device *dev);
-/* nv84_bsp.c */
-/* nv98_bsp.c */
-extern int nv84_bsp_create(struct drm_device *dev);
-
-/* nv84_vp.c */
-/* nv98_vp.c */
-extern int nv84_vp_create(struct drm_device *dev);
-
-/* nv98_ppp.c */
-extern int nv98_ppp_create(struct drm_device *dev);
-
/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
extern int nv04_instmem_suspend(struct drm_device *);
extern void nv04_instmem_resume(struct drm_device *);
-extern int nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
- u32 size, u32 align);
+extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
extern void nv04_instmem_put(struct nouveau_gpuobj *);
extern int nv04_instmem_map(struct nouveau_gpuobj *);
extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
@@ -1337,8 +1180,7 @@ extern int nv50_instmem_init(struct drm_device *);
extern void nv50_instmem_takedown(struct drm_device *);
extern int nv50_instmem_suspend(struct drm_device *);
extern void nv50_instmem_resume(struct drm_device *);
-extern int nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
- u32 size, u32 align);
+extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
extern void nv50_instmem_put(struct nouveau_gpuobj *);
extern int nv50_instmem_map(struct nouveau_gpuobj *);
extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
@@ -1397,28 +1239,18 @@ extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
extern int nv04_display_early_init(struct drm_device *);
extern void nv04_display_late_takedown(struct drm_device *);
extern int nv04_display_create(struct drm_device *);
-extern void nv04_display_destroy(struct drm_device *);
extern int nv04_display_init(struct drm_device *);
-extern void nv04_display_fini(struct drm_device *);
-
-/* nvd0_display.c */
-extern int nvd0_display_create(struct drm_device *);
-extern void nvd0_display_destroy(struct drm_device *);
-extern int nvd0_display_init(struct drm_device *);
-extern void nvd0_display_fini(struct drm_device *);
-struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int crtc);
-void nvd0_display_flip_stop(struct drm_crtc *);
-int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
- struct nouveau_channel *, u32 swap_interval);
+extern void nv04_display_destroy(struct drm_device *);
/* nv04_crtc.c */
extern int nv04_crtc_create(struct drm_device *, int index);
/* nouveau_bo.c */
extern struct ttm_bo_driver nouveau_bo_driver;
-extern int nouveau_bo_new(struct drm_device *, int size, int align,
- uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, struct nouveau_bo **);
+extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
+ int size, int align, uint32_t flags,
+ uint32_t tile_mode, uint32_t tile_flags,
+ struct nouveau_bo **);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
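nouveau_bo_new() regains the channel parameter of the 3.0 API; callers pass a real channel or NULL, as the fence-heap allocation later in this patch does:

ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
		     0, 0, &dev_priv->fence.bo);
if (ret)
	return ret;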
@@ -1433,12 +1265,6 @@ extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_reserve, bool no_wait_gpu);
-extern struct nouveau_vma *
-nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
-extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
- struct nouveau_vma *);
-extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
-
/* nouveau_fence.c */
struct nouveau_fence;
extern int nouveau_fence_init(struct drm_device *);
@@ -1484,14 +1310,12 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
}
/* nouveau_gem.c */
-extern int nouveau_gem_new(struct drm_device *, int size, int align,
- uint32_t domain, uint32_t tile_mode,
- uint32_t tile_flags, struct nouveau_bo **);
+extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
+ int size, int align, uint32_t domain,
+ uint32_t tile_mode, uint32_t tile_flags,
+ struct nouveau_bo **);
extern int nouveau_gem_object_new(struct drm_gem_object *);
extern void nouveau_gem_object_del(struct drm_gem_object *);
-extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
-extern void nouveau_gem_object_close(struct drm_gem_object *,
- struct drm_file *);
extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
struct drm_file *);
extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
@@ -1504,40 +1328,29 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
/* nouveau_display.c */
-int nouveau_display_create(struct drm_device *dev);
-void nouveau_display_destroy(struct drm_device *dev);
-int nouveau_display_init(struct drm_device *dev);
-void nouveau_display_fini(struct drm_device *dev);
int nouveau_vblank_enable(struct drm_device *dev, int crtc);
void nouveau_vblank_disable(struct drm_device *dev, int crtc);
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
int nouveau_finish_page_flip(struct nouveau_channel *,
struct nouveau_page_flip_state *);
-int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
- struct drm_mode_create_dumb *args);
-int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
- uint32_t handle, uint64_t *offset);
-int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
- uint32_t handle);
/* nv10_gpio.c */
-int nv10_gpio_init(struct drm_device *dev);
-void nv10_gpio_fini(struct drm_device *dev);
-int nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out);
-int nv10_gpio_sense(struct drm_device *dev, int line);
-void nv10_gpio_irq_enable(struct drm_device *, int line, bool on);
+int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
/* nv50_gpio.c */
int nv50_gpio_init(struct drm_device *dev);
void nv50_gpio_fini(struct drm_device *dev);
-int nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out);
-int nv50_gpio_sense(struct drm_device *dev, int line);
-void nv50_gpio_irq_enable(struct drm_device *, int line, bool on);
-int nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out);
-int nvd0_gpio_sense(struct drm_device *dev, int line);
-
-/* nv50_calc.c */
+int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
+
+/* nv50_calc. */
+/* nv50_calc.c */
int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
int *N1, int *M1, int *N2, int *M2, int *P);
int nva3_calc_pll(struct drm_device *, struct pll_lims *,
@@ -1605,8 +1418,6 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
#define nv_wait_ne(dev, reg, mask, val) \
nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
-#define nv_wait_cb(dev, func, data) \
- nouveau_wait_cb(dev, 2000000000ULL, (func), (data))
/* PRAMIN access */
static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
@@ -1660,13 +1471,6 @@ extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
-#define NV_WARNONCE(d, fmt, arg...) do { \
- static int _warned = 0; \
- if (!_warned) { \
- NV_WARN(d, fmt, ##arg); \
- _warned = 1; \
- } \
-} while(0)
/* nouveau_reg_debug bitmask */
enum {
@@ -1680,7 +1484,6 @@ enum {
NOUVEAU_REG_DEBUG_RMVIO = 0x80,
NOUVEAU_REG_DEBUG_VGAATTR = 0x100,
NOUVEAU_REG_DEBUG_EVO = 0x200,
- NOUVEAU_REG_DEBUG_AUXCH = 0x400
};
#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
@@ -1754,7 +1557,6 @@ nv44_graph_class(struct drm_device *dev)
#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
#define NV_MEM_ACCESS_SYS 4
#define NV_MEM_ACCESS_VM 8
-#define NV_MEM_ACCESS_NOSNOOP 16
#define NV_MEM_TARGET_VRAM 0
#define NV_MEM_TARGET_PCI 1
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 3dc14a3..ae69b61 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -32,14 +32,6 @@
#define NV_DPMS_CLEARED 0x80
-struct dp_train_func {
- void (*link_set)(struct drm_device *, struct dcb_entry *, int crtc,
- int nr, u32 bw, bool enhframe);
- void (*train_set)(struct drm_device *, struct dcb_entry *, u8 pattern);
- void (*train_adj)(struct drm_device *, struct dcb_entry *,
- u8 lane, u8 swing, u8 preem);
-};
-
struct nouveau_encoder {
struct drm_encoder_slave base;
@@ -57,17 +49,17 @@ struct nouveau_encoder {
union {
struct {
- u8 dpcd[8];
+ int mc_unknown;
+ uint32_t unk0;
+ uint32_t unk1;
+ int dpcd_version;
int link_nr;
int link_bw;
- u32 datarate;
+ bool enhanced_frame;
} dp;
};
};
-struct nouveau_encoder *
-find_encoder(struct drm_connector *connector, int type);
-
static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
{
struct drm_encoder_slave *slave = to_encoder_slave(enc);
@@ -86,19 +78,26 @@ get_slave_funcs(struct drm_encoder *enc)
return to_encoder_slave(enc)->slave_funcs;
}
-/* nouveau_dp.c */
-int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
- uint8_t *data, int data_nr);
-bool nouveau_dp_detect(struct drm_encoder *);
-void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
- struct dp_train_func *);
-u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **);
-
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
int nv50_sor_create(struct drm_connector *, struct dcb_entry *);
-void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
int nv50_dac_create(struct drm_connector *, struct dcb_entry *);
+struct bit_displayport_encoder_table {
+ uint32_t match;
+ uint8_t record_nr;
+ uint8_t unknown;
+ uint16_t script0;
+ uint16_t script1;
+ uint16_t unknown_table;
+} __attribute__ ((packed));
+
+struct bit_displayport_encoder_table_entry {
+ uint8_t vs_level;
+ uint8_t pre_level;
+ uint8_t reg0;
+ uint8_t reg1;
+ uint8_t reg2;
+} __attribute__ ((packed));
#endif /* __NOUVEAU_ENCODER_H__ */
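The two DisplayPort tables reintroduced above are parsed straight out of the VBIOS image, which is why they are declared packed: each record is overlaid onto the ROM bytes with no compiler padding. A hedged sketch of that overlay (pointer arithmetic illustrative; "table" is a hypothetical pointer into the BIOS image, and the real parser locates entries via the table's length fields):

const struct bit_displayport_encoder_table *dpe =
	(const struct bit_displayport_encoder_table *)table;
const struct bit_displayport_encoder_table_entry *dpse =
	(const void *)(dpe + 1);	/* assumed: entries follow the header */

if (dpe->record_nr)
	/* dpse[0].vs_level / dpse[0].pre_level feed link training */;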
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index f3fb649..a3a88ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -30,7 +30,6 @@
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
- struct nouveau_vma vma;
u32 r_dma;
u32 r_format;
u32 r_pitch;
@@ -42,6 +41,8 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
return container_of(fb, struct nouveau_framebuffer, base);
}
+extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
+
int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
- struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo);
+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 6fd2211..ea71f78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -36,7 +36,6 @@
#include <linux/init.h>
#include <linux/screen_info.h>
#include <linux/vga_switcheroo.h>
-#include <linux/console.h>
#include "drmP.h"
#include "drm.h"
@@ -280,9 +279,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
struct fb_info *info;
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
- struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd mode_cmd;
struct pci_dev *pdev = dev->pdev;
struct device *device = &pdev->dev;
int size, ret;
@@ -290,17 +288,16 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
- mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
+ mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
+ mode_cmd.depth = sizes->surface_depth;
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
- ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, &nvbo);
+ ret = nouveau_gem_new(dev, dev_priv->channel, size, 0,
+ NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(dev, "failed to allocate framebuffer\n");
goto out;
@@ -321,15 +318,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
goto out;
}
- chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
- if (chan && dev_priv->card_type >= NV_50) {
- ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
- if (ret) {
- NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
- chan = NULL;
- }
- }
-
mutex_lock(&dev->struct_mutex);
info = framebuffer_alloc(0, device);
@@ -371,7 +359,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
info->screen_size = size;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
/* Set aperture base/size for vesafb takeover */
@@ -381,7 +369,11 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
goto out_unref;
}
- /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+ info->pixmap.size = 64*1024;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
mutex_unlock(&dev->struct_mutex);
@@ -456,7 +448,6 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
if (nouveau_fb->nvbo) {
nouveau_bo_unmap(nouveau_fb->nvbo);
- nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
}
@@ -485,7 +476,6 @@ int nouveau_fbcon_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fbdev *nfbdev;
- int preferred_bpp;
int ret;
nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
@@ -504,15 +494,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
-
- if (dev_priv->vram_size <= 32 * 1024 * 1024)
- preferred_bpp = 8;
- else if (dev_priv->vram_size <= 64 * 1024 * 1024)
- preferred_bpp = 16;
- else
- preferred_bpp = 32;
-
- drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp);
+ drm_fb_helper_initial_config(&nfbdev->helper, 32);
return 0;
}
@@ -545,13 +527,7 @@ void nouveau_fbcon_restore_accel(struct drm_device *dev)
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- console_lock();
- if (state == 0)
- nouveau_fbcon_save_disable_accel(dev);
fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
- if (state == 1)
- nouveau_fbcon_restore_accel(dev);
- console_unlock();
}
void nouveau_fbcon_zfill_all(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 2f6daae..56f06b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -336,7 +336,6 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
- u64 offset = chan->fence.vma.offset + sema->mem->start;
int ret;
if (dev_priv->chipset < 0x84) {
@@ -346,10 +345,13 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
OUT_RING (chan, NvSema);
- OUT_RING (chan, offset);
+ OUT_RING (chan, sema->mem->start);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
@@ -362,6 +364,9 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
OUT_RING (chan, 1);
OUT_RING (chan, 1); /* ACQUIRE_EQ */
} else {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
@@ -389,7 +394,6 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
- u64 offset = chan->fence.vma.offset + sema->mem->start;
int ret;
if (dev_priv->chipset < 0x84) {
@@ -399,11 +403,14 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvSema);
- OUT_RING (chan, offset);
+ OUT_RING (chan, sema->mem->start);
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
@@ -416,6 +423,9 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
OUT_RING (chan, 1);
OUT_RING (chan, 2); /* RELEASE */
} else {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
@@ -519,7 +529,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
mem->start << PAGE_SHIFT,
mem->size, NV_MEM_ACCESS_RW,
NV_MEM_TARGET_VRAM, &obj);
@@ -530,13 +540,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;
- } else
- if (USE_SEMA(dev)) {
- /* map fence bo into channel's vm */
- ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
- &chan->fence.vma);
- if (ret)
- return ret;
}
atomic_set(&chan->fence.last_sequence_irq, 0);
@@ -546,10 +549,10 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *tmp, *fence;
spin_lock(&chan->fence.lock);
+
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
fence->signalled = true;
list_del(&fence->entry);
@@ -559,9 +562,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
kref_put(&fence->refcount, nouveau_fence_del);
}
- spin_unlock(&chan->fence.lock);
- nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
+ spin_unlock(&chan->fence.lock);
}
int
@@ -573,7 +575,7 @@ nouveau_fence_init(struct drm_device *dev)
/* Create a shared VRAM heap for cross-channel sync. */
if (USE_SEMA(dev)) {
- ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
0, 0, &dev_priv->fence.bo);
if (ret)
return ret;
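The semaphore hunks above restore the 3.0 addressing scheme: pre-NV84 chips emit the raw offset within the NvSema DMA object, while the NV50-family VM paths resolve through the fence BO's single embedded VMA instead of the removed per-channel chan->fence.vma:

struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;	/* NV84+ virtual address */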
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 7ce3fde..cee78b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -60,71 +60,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
}
int
-nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
-{
- struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
- struct nouveau_bo *nvbo = nouveau_gem_object(gem);
- struct nouveau_vma *vma;
- int ret;
-
- if (!fpriv->vm)
- return 0;
-
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
- if (ret)
- return ret;
-
- vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
- if (!vma) {
- vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- if (!vma) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
- if (ret) {
- kfree(vma);
- goto out;
- }
- } else {
- vma->refcount++;
- }
-
-out:
- ttm_bo_unreserve(&nvbo->bo);
- return ret;
-}
-
-void
-nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
-{
- struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
- struct nouveau_bo *nvbo = nouveau_gem_object(gem);
- struct nouveau_vma *vma;
- int ret;
-
- if (!fpriv->vm)
- return;
-
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
- if (ret)
- return;
-
- vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
- if (vma) {
- if (--vma->refcount == 0) {
- nouveau_bo_vma_del(nvbo, vma);
- kfree(vma);
- }
- }
- ttm_bo_unreserve(&nvbo->bo);
-}
-
-int
-nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
- uint32_t tile_mode, uint32_t tile_flags,
- struct nouveau_bo **pnvbo)
+nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
+ int size, int align, uint32_t domain, uint32_t tile_mode,
+ uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
@@ -138,7 +76,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;
- ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
+ ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
tile_flags, pnvbo);
if (ret)
return ret;
@@ -165,28 +103,17 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
}
static int
-nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
- struct drm_nouveau_gem_info *rep)
+nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
- struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
- struct nouveau_vma *vma;
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART;
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
- rep->offset = nvbo->bo.offset;
- if (fpriv->vm) {
- vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
- if (!vma)
- return -EINVAL;
-
- rep->offset = vma->offset;
- }
-
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ rep->offset = nvbo->bo.offset;
rep->map_handle = nvbo->bo.addr_space_offset;
rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags;
@@ -200,6 +127,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
+ struct nouveau_channel *chan = NULL;
int ret = 0;
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
@@ -210,21 +138,28 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = nouveau_gem_new(dev, req->info.size, req->align,
+ if (req->channel_hint) {
+ chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+ }
+
+ ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
+ if (chan)
+ nouveau_channel_put(&chan);
if (ret)
return ret;
- ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
- if (ret == 0) {
- ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
- if (ret)
- drm_gem_handle_delete(file_priv, req->info.handle);
- }
+ ret = nouveau_gem_info(nvbo->gem, &req->info);
+ if (ret)
+ goto out;
+ ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(nvbo->gem);
+out:
return ret;
}
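The restored ioctl resolves an optional channel hint before allocating and must drop that channel reference on every path. A condensed sketch of the flow, using only calls present in the hunk:

if (req->channel_hint) {
	chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
	if (IS_ERR(chan))
		return PTR_ERR(chan);
}

ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
		      req->info.domain, req->info.tile_mode,
		      req->info.tile_flags, &nvbo);
if (chan)
	nouveau_channel_put(&chan);	/* drop ref whether or not we failed */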
@@ -402,7 +337,6 @@ static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
(void __force __user *)(uintptr_t)user_pbbo_ptr;
struct drm_device *dev = chan->dev;
@@ -441,26 +375,24 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- if (dev_priv->card_type < NV_50) {
- if (nvbo->bo.offset == b->presumed.offset &&
- ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
- b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
- (nvbo->bo.mem.mem_type == TTM_PL_TT &&
- b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
- continue;
+ if (nvbo->bo.offset == b->presumed.offset &&
+ ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+ (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
+ continue;
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
- else
- b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
- b->presumed.offset = nvbo->bo.offset;
- b->presumed.valid = 0;
- relocs++;
-
- if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
- &b->presumed, sizeof(b->presumed)))
- return -EFAULT;
- }
+ if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
+ else
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+ b->presumed.offset = nvbo->bo.offset;
+ b->presumed.valid = 0;
+ relocs++;
+
+ if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+ &b->presumed, sizeof(b->presumed)))
+ return -EFAULT;
}
return relocs;
@@ -635,7 +567,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct nouveau_fence *fence = NULL;
int i, j, ret = 0, do_reloc = 0;
- chan = nouveau_channel_get(file_priv, req->channel);
+ chan = nouveau_channel_get(dev, file_priv, req->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
@@ -869,7 +801,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
if (!gem)
return -ENOENT;
- ret = nouveau_gem_info(file_priv, gem, req);
+ ret = nouveau_gem_info(gem, req);
drm_gem_object_unreference_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
deleted file mode 100644
index a580cc6..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_i2c.h"
-#include "nouveau_gpio.h"
-
-static u8 *
-dcb_gpio_table(struct drm_device *dev)
-{
- u8 *dcb = dcb_table(dev);
- if (dcb) {
- if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
- return ROMPTR(dev, dcb[0x0a]);
- if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
- return ROMPTR(dev, dcb[-15]);
- }
- return NULL;
-}
-
-static u8 *
-dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
-{
- u8 *table = dcb_gpio_table(dev);
- if (table) {
- *version = table[0];
- if (*version < 0x30 && ent < table[2])
- return table + 3 + (ent * table[1]);
- else if (ent < table[2])
- return table + table[1] + (ent * table[3]);
- }
- return NULL;
-}
-
-int
-nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
- return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
-}
-
-int
-nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
- return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
-}
-
-int
-nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
- struct gpio_func *gpio)
-{
- u8 *table, *entry, version;
- int i = -1;
-
- if (line == 0xff && func == 0xff)
- return -EINVAL;
-
- while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
- if (version < 0x40) {
- u16 data = ROM16(entry[0]);
- *gpio = (struct gpio_func) {
- .line = (data & 0x001f) >> 0,
- .func = (data & 0x07e0) >> 5,
- .log[0] = (data & 0x1800) >> 11,
- .log[1] = (data & 0x6000) >> 13,
- };
- } else
- if (version < 0x41) {
- *gpio = (struct gpio_func) {
- .line = entry[0] & 0x1f,
- .func = entry[1],
- .log[0] = (entry[3] & 0x18) >> 3,
- .log[1] = (entry[3] & 0x60) >> 5,
- };
- } else {
- *gpio = (struct gpio_func) {
- .line = entry[0] & 0x3f,
- .func = entry[1],
- .log[0] = (entry[4] & 0x30) >> 4,
- .log[1] = (entry[4] & 0xc0) >> 6,
- };
- }
-
- if ((line == 0xff || line == gpio->line) &&
- (func == 0xff || func == gpio->func))
- return 0;
- }
-
- /* DCB 2.2, fixed TVDAC GPIO data */
- if ((table = dcb_table(dev)) && table[0] >= 0x22) {
- if (func == DCB_GPIO_TVDAC0) {
- *gpio = (struct gpio_func) {
- .func = DCB_GPIO_TVDAC0,
- .line = table[-4] >> 4,
- .log[0] = !!(table[-5] & 2),
- .log[1] = !(table[-5] & 2),
- };
- return 0;
- }
- }
-
- /* Apple iMac G4 NV18 */
- if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
- if (func == DCB_GPIO_TVDAC0) {
- *gpio = (struct gpio_func) {
- .func = DCB_GPIO_TVDAC0,
- .line = 4,
- .log[0] = 0,
- .log[1] = 1,
- };
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-int
-nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
-{
- struct gpio_func gpio;
- int ret;
-
- ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
- if (ret == 0) {
- int dir = !!(gpio.log[state] & 0x02);
- int out = !!(gpio.log[state] & 0x01);
- ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
- }
-
- return ret;
-}
-
-int
-nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
-{
- struct gpio_func gpio;
- int ret;
-
- ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
- if (ret == 0) {
- ret = nouveau_gpio_sense(dev, idx, gpio.line);
- if (ret >= 0)
- ret = (ret == (gpio.log[1] & 1));
- }
-
- return ret;
-}
-
-int
-nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct gpio_func gpio;
- int ret;
-
- ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
- if (ret == 0) {
- if (idx == 0 && pgpio->irq_enable)
- pgpio->irq_enable(dev, gpio.line, on);
- else
- ret = -ENODEV;
- }
-
- return ret;
-}
-
-struct gpio_isr {
- struct drm_device *dev;
- struct list_head head;
- struct work_struct work;
- int idx;
- struct gpio_func func;
- void (*handler)(void *, int);
- void *data;
- bool inhibit;
-};
-
-static void
-nouveau_gpio_isr_bh(struct work_struct *work)
-{
- struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
- struct drm_device *dev = isr->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- unsigned long flags;
- int state;
-
- state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
- if (state >= 0)
- isr->handler(isr->data, state);
-
- spin_lock_irqsave(&pgpio->lock, flags);
- isr->inhibit = false;
- spin_unlock_irqrestore(&pgpio->lock, flags);
-}
-
-void
-nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct gpio_isr *isr;
-
- if (idx != 0)
- return;
-
- spin_lock(&pgpio->lock);
- list_for_each_entry(isr, &pgpio->isr, head) {
- if (line_mask & (1 << isr->func.line)) {
- if (isr->inhibit)
- continue;
- isr->inhibit = true;
- schedule_work(&isr->work);
- }
- }
- spin_unlock(&pgpio->lock);
-}
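
nouveau_gpio_isr() runs in hard-IRQ context, so it only latches a per-handler inhibit flag and queues the sleepable work; the bottom half samples the line and re-arms the latch, so at most one work item per handler can ever be pending. A kernel-context sketch of just that pattern (names are illustrative; INIT_WORK() is assumed to have been done at registration time, as in nouveau_gpio_isr_add() below):

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct isr_example {
	struct work_struct work;
	spinlock_t *lock;
	bool inhibit;
};

static void isr_example_bh(struct work_struct *work)
{
	struct isr_example *isr = container_of(work, struct isr_example, work);
	unsigned long flags;

	/* ... sample the GPIO and call the registered handler here ... */

	spin_lock_irqsave(isr->lock, flags);
	isr->inhibit = false;		/* re-arm for the next interrupt */
	spin_unlock_irqrestore(isr->lock, flags);
}

static void isr_example_top_half(struct isr_example *isr)
{
	/* caller holds isr->lock; hard-IRQ context, so no sleeping */
	if (!isr->inhibit) {
		isr->inhibit = true;
		schedule_work(&isr->work);	/* at most one pending instance */
	}
}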
-
-int
-nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
- void (*handler)(void *, int), void *data)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct gpio_isr *isr;
- unsigned long flags;
- int ret;
-
- isr = kzalloc(sizeof(*isr), GFP_KERNEL);
- if (!isr)
- return -ENOMEM;
-
- ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
- if (ret) {
- kfree(isr);
- return ret;
- }
-
- INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
- isr->dev = dev;
- isr->handler = handler;
- isr->data = data;
- isr->idx = idx;
-
- spin_lock_irqsave(&pgpio->lock, flags);
- list_add(&isr->head, &pgpio->isr);
- spin_unlock_irqrestore(&pgpio->lock, flags);
- return 0;
-}
-
-void
-nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
- void (*handler)(void *, int), void *data)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct gpio_isr *isr, *tmp;
- struct gpio_func func;
- unsigned long flags;
- LIST_HEAD(tofree);
- int ret;
-
- ret = nouveau_gpio_find(dev, idx, tag, line, &func);
- if (ret == 0) {
- spin_lock_irqsave(&pgpio->lock, flags);
- list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
- if (memcmp(&isr->func, &func, sizeof(func)) ||
- isr->idx != idx ||
- isr->handler != handler || isr->data != data)
- continue;
- list_move(&isr->head, &tofree);
- }
- spin_unlock_irqrestore(&pgpio->lock, flags);
-
- list_for_each_entry_safe(isr, tmp, &tofree, head) {
- flush_work_sync(&isr->work);
- kfree(isr);
- }
- }
-}
-
-int
-nouveau_gpio_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
- INIT_LIST_HEAD(&pgpio->isr);
- spin_lock_init(&pgpio->lock);
-
- return nouveau_gpio_init(dev);
-}
-
-void
-nouveau_gpio_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
- nouveau_gpio_fini(dev);
- BUG_ON(!list_empty(&pgpio->isr));
-}
-
-int
-nouveau_gpio_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- int ret = 0;
-
- if (pgpio->init)
- ret = pgpio->init(dev);
-
- return ret;
-}
-
-void
-nouveau_gpio_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
- if (pgpio->fini)
- pgpio->fini(dev);
-}
-
-void
-nouveau_gpio_reset(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u8 *entry, version;
- int ent = -1;
-
- while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
- u8 func = 0xff, line, defs, unk0, unk1;
- if (version >= 0x41) {
- defs = !!(entry[0] & 0x80);
- line = entry[0] & 0x3f;
- func = entry[1];
- unk0 = entry[2];
- unk1 = entry[3] & 0x1f;
- } else
- if (version >= 0x40) {
- line = entry[0] & 0x1f;
- func = entry[1];
- defs = !!(entry[3] & 0x01);
- unk0 = !!(entry[3] & 0x02);
- unk1 = !!(entry[3] & 0x04);
- } else {
- break;
- }
-
- if (func == 0xff)
- continue;
-
- nouveau_gpio_func_set(dev, func, defs);
-
- if (dev_priv->card_type >= NV_D0) {
- nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
- if (unk1--)
- nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
- } else
- if (dev_priv->card_type >= NV_50) {
- static const u32 regs[] = { 0xe100, 0xe28c };
- u32 val = (unk1 << 16) | unk0;
- u32 reg = regs[line >> 4]; line &= 0x0f;
-
- nv_mask(dev, reg, 0x00010001 << line, val << line);
- }
- }
-}
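
The NV50 branch of nouveau_gpio_reset() packs two one-bit fields per GPIO line into one register: one bit in the low halfword and one in the high halfword, both at the same line offset, so (unk1 << 16) | unk0 can be installed with a single masked write. A stand-alone sketch of that packing (masked_write() stands in for nv_mask()):

#include <stdint.h>
#include <stdio.h>

static uint32_t masked_write(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0;
	int line = 5;
	uint32_t val = (1u << 16) | 1u;		/* unk1 = 1, unk0 = 1 */

	reg = masked_write(reg, 0x00010001u << line, val << line);
	printf("reg = 0x%08x\n", reg);		/* prints 0x00200020 */
	return 0;
}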
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.h b/drivers/gpu/drm/nouveau/nouveau_gpio.h
deleted file mode 100644
index 64c5cb0..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NOUVEAU_GPIO_H__
-#define __NOUVEAU_GPIO_H__
-
-struct gpio_func {
- u8 func;
- u8 line;
- u8 log[2];
-};
-
-/* nouveau_gpio.c */
-int nouveau_gpio_create(struct drm_device *);
-void nouveau_gpio_destroy(struct drm_device *);
-int nouveau_gpio_init(struct drm_device *);
-void nouveau_gpio_fini(struct drm_device *);
-void nouveau_gpio_reset(struct drm_device *);
-int nouveau_gpio_drive(struct drm_device *, int idx, int line,
- int dir, int out);
-int nouveau_gpio_sense(struct drm_device *, int idx, int line);
-int nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
- struct gpio_func *);
-int nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
-int nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
-int nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
-void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
-int nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
- void (*)(void *, int state), void *data);
-void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
- void (*)(void *, int state), void *data);
-
-static inline bool
-nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
-{
- struct gpio_func func;
- return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
-}
-
-static inline int
-nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
-{
- return nouveau_gpio_set(dev, 0, tag, 0xff, state);
-}
-
-static inline int
-nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
-{
- return nouveau_gpio_get(dev, 0, tag, 0xff);
-}
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
deleted file mode 100644
index 59ea1c1..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-
-static bool
-hdmi_sor(struct drm_encoder *encoder)
-{
- struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
- if (dev_priv->chipset < 0xa3)
- return false;
- return true;
-}
-
-static inline u32
-hdmi_base(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- if (!hdmi_sor(encoder))
- return 0x616500 + (nv_crtc->index * 0x800);
- return 0x61c500 + (nv_encoder->or * 0x800);
-}
-
-static void
-hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
-{
- nv_wr32(encoder->dev, hdmi_base(encoder) + reg, val);
-}
-
-static u32
-hdmi_rd32(struct drm_encoder *encoder, u32 reg)
-{
- return nv_rd32(encoder->dev, hdmi_base(encoder) + reg);
-}
-
-static u32
-hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
-{
- u32 tmp = hdmi_rd32(encoder, reg);
- hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
- return tmp;
-}
-
-static void
-nouveau_audio_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- u32 or = nv_encoder->or * 0x800;
-
- if (hdmi_sor(encoder)) {
- nv_mask(dev, 0x61c448 + or, 0x00000003, 0x00000000);
- }
-}
-
-static void
-nouveau_audio_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
- struct drm_device *dev = encoder->dev;
- u32 or = nv_encoder->or * 0x800;
- int i;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_monitor_audio(nv_connector->edid)) {
- nouveau_audio_disconnect(encoder);
- return;
- }
-
- if (hdmi_sor(encoder)) {
- nv_mask(dev, 0x61c448 + or, 0x00000001, 0x00000001);
-
- drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
- if (nv_connector->base.eld[0]) {
- u8 *eld = nv_connector->base.eld;
- for (i = 0; i < eld[2] * 4; i++)
- nv_wr32(dev, 0x61c440 + or, (i << 8) | eld[i]);
- for (i = eld[2] * 4; i < 0x60; i++)
- nv_wr32(dev, 0x61c440 + or, (i << 8) | 0x00);
- nv_mask(dev, 0x61c448 + or, 0x00000002, 0x00000002);
- }
- }
-}
-
-static void
-nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
-{
- /* calculate checksum for the infoframe */
- u8 sum = 0, i;
- for (i = 0; i < frame[2]; i++)
- sum += frame[i];
- frame[3] = 256 - sum;
-
- /* disable infoframe, and write header */
- hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
- hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
-
- /* register scans suggest the audio infoframe has only one set of
- * subpack regs; according to tegra (it would be nice if nvidia
- * shared those docs too), the hdmi block pads out the rest of
- * the packet on its own.
- */
- if (ctrl == 0x020)
- frame[2] = 6;
-
- /* write out checksum and data in the odd 7-byte register pairs */
- for (i = 0; i < frame[2] + 1; i += 7) {
- u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
- u32 *subpack = (u32 *)&frame[3 + i];
- hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
- hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
- }
-
- /* enable the infoframe */
- hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
-}
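
For reference, the checksum rule behind the loop above: the CEA-861 checksum byte (frame[3] here) is chosen so that the header, the checksum slot itself and the payload sum to zero modulo 256. The driver bounds its sum by the length byte instead, which comes out the same whenever the trailing payload bytes are zero. A stand-alone sketch (helper name illustrative):

#include <stdint.h>
#include <stdio.h>

static uint8_t infoframe_checksum(const uint8_t *frame, size_t payload_len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < 4 + payload_len; i++)	/* 3 header bytes + checksum slot */
		sum += frame[i];
	return (uint8_t)(256 - sum);
}

int main(void)
{
	uint8_t avi[17] = { 0x82, 0x02, 0x0d };	/* AVI: type, version, length */

	avi[3] = 0;				/* slot must be 0 while summing */
	avi[3] = infoframe_checksum(avi, 0x0d);
	printf("AVI checksum: 0x%02x\n", avi[3]);	/* prints 0x6f */
	return 0;
}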
-
-static void
-nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
- const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
- const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
- u8 frame[20];
-
- frame[0x00] = 0x82; /* AVI infoframe */
- frame[0x01] = 0x02; /* version */
- frame[0x02] = 0x0d; /* length */
- frame[0x03] = 0x00;
- frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
- frame[0x05] = (C << 6) | (M << 4) | R;
- frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
- frame[0x07] = VIC;
- frame[0x08] = PR;
- frame[0x09] = bar_top & 0xff;
- frame[0x0a] = bar_top >> 8;
- frame[0x0b] = bar_bottom & 0xff;
- frame[0x0c] = bar_bottom >> 8;
- frame[0x0d] = bar_left & 0xff;
- frame[0x0e] = bar_left >> 8;
- frame[0x0f] = bar_right & 0xff;
- frame[0x10] = bar_right >> 8;
- frame[0x11] = 0x00;
- frame[0x12] = 0x00;
- frame[0x13] = 0x00;
-
- nouveau_hdmi_infoframe(encoder, 0x020, frame);
-}
-
-static void
-nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
- const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
- u8 frame[12];
-
- frame[0x00] = 0x84; /* Audio infoframe */
- frame[0x01] = 0x01; /* version */
- frame[0x02] = 0x0a; /* length */
- frame[0x03] = 0x00;
- frame[0x04] = (CT << 4) | CC;
- frame[0x05] = (SF << 2) | ceaSS;
- frame[0x06] = FMT;
- frame[0x07] = CA;
- frame[0x08] = (DM_INH << 7) | (LSV << 3);
- frame[0x09] = 0x00;
- frame[0x0a] = 0x00;
- frame[0x0b] = 0x00;
-
- nouveau_hdmi_infoframe(encoder, 0x000, frame);
-}
-
-static void
-nouveau_hdmi_disconnect(struct drm_encoder *encoder)
-{
- nouveau_audio_disconnect(encoder);
-
- /* disable audio and avi infoframes */
- hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
- hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
-
- /* disable hdmi */
- hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
-}
-
-void
-nouveau_hdmi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
- struct drm_device *dev = encoder->dev;
- u32 max_ac_packet, rekey;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!mode || !nv_connector || !nv_connector->edid ||
- !drm_detect_hdmi_monitor(nv_connector->edid)) {
- nouveau_hdmi_disconnect(encoder);
- return;
- }
-
- nouveau_hdmi_video_infoframe(encoder, mode);
- nouveau_hdmi_audio_infoframe(encoder, mode);
-
- hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
- hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
- hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
-
- nv_mask(dev, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
- nv_mask(dev, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
- nv_mask(dev, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
-
- /* value matches nvidia binary driver, and tegra constant */
- rekey = 56;
-
- max_ac_packet = mode->htotal - mode->hdisplay;
- max_ac_packet -= rekey;
- max_ac_packet -= 18; /* constant from tegra */
- max_ac_packet /= 32;
-
- /* enable hdmi */
- hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
- 0x1f000000 | /* unknown */
- max_ac_packet << 16 |
- rekey);
-
- nouveau_audio_mode_set(encoder, mode);
-}
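
The max_ac_packet value above is derived purely from the mode's horizontal blanking: subtract the rekey value and the fixed 18-cycle overhead borrowed from tegra, then divide by 32. Worked through for the common 1080p CEA timing (an illustrative input, hdisplay 1920 / htotal 2200):

#include <stdio.h>

int main(void)
{
	unsigned htotal = 2200, hdisplay = 1920, rekey = 56;
	unsigned max_ac_packet = (htotal - hdisplay - rekey - 18) / 32;

	printf("max_ac_packet = %u\n", max_ac_packet);	/* prints 6 */
	return 0;
}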
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
deleted file mode 100644
index 6976875..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_hwsq.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NOUVEAU_HWSQ_H__
-#define __NOUVEAU_HWSQ_H__
-
-struct hwsq_ucode {
- u8 data[0x200];
- union {
- u8 *u08;
- u16 *u16;
- u32 *u32;
- } ptr;
- u16 len;
-
- u32 reg;
- u32 val;
-};
-
-static inline void
-hwsq_init(struct hwsq_ucode *hwsq)
-{
- hwsq->ptr.u08 = hwsq->data;
- hwsq->reg = 0xffffffff;
- hwsq->val = 0xffffffff;
-}
-
-static inline void
-hwsq_fini(struct hwsq_ucode *hwsq)
-{
- do {
- *hwsq->ptr.u08++ = 0x7f;
- hwsq->len = hwsq->ptr.u08 - hwsq->data;
- } while (hwsq->len & 3);
- hwsq->ptr.u08 = hwsq->data;
-}
-
-static inline void
-hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
-{
- u32 shift = 0;
- while (usec & ~3) {
- usec >>= 2;
- shift++;
- }
-
- *hwsq->ptr.u08++ = (shift << 2) | usec;
-}
-
-static inline void
-hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
-{
- flag += 0x80;
- if (val >= 0)
- flag += 0x20;
- if (val >= 1)
- flag += 0x20;
- *hwsq->ptr.u08++ = flag;
-}
-
-static inline void
-hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
-{
- *hwsq->ptr.u08++ = 0x5f;
- *hwsq->ptr.u08++ = v0;
- *hwsq->ptr.u08++ = v1;
-}
-
-static inline void
-hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
-{
- if (val != hwsq->val) {
- if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
- *hwsq->ptr.u08++ = 0x42;
- *hwsq->ptr.u16++ = (val & 0x0000ffff);
- } else {
- *hwsq->ptr.u08++ = 0xe2;
- *hwsq->ptr.u32++ = val;
- }
-
- hwsq->val = val;
- }
-
- if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
- *hwsq->ptr.u08++ = 0x40;
- *hwsq->ptr.u16++ = (reg & 0x0000ffff);
- } else {
- *hwsq->ptr.u08++ = 0xe0;
- *hwsq->ptr.u32++ = reg;
- }
- hwsq->reg = reg;
-}
-
-#endif
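
The removed builder above is append-only: each helper advances the union pointer by the size it wrote, and hwsq_fini() pads with 0x7f no-ops to a 32-bit boundary. A user-space usage sketch, assuming the struct and inline helpers from above are pasted in alongside these typedefs (register address and value are arbitrary):

#include <stdint.h>
#include <stdio.h>

typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;

/* ... struct hwsq_ucode and the inline helpers from above go here ... */

int main(void)
{
	struct hwsq_ucode hwsq;
	int i;

	hwsq_init(&hwsq);
	hwsq_wr32(&hwsq, 0x001540, 0x00010001);	/* emits 0xe2 val, 0xe0 reg */
	hwsq_usec(&hwsq, 20);			/* delay, packed as (shift<<2)|usec */
	hwsq_fini(&hwsq);			/* pad with 0x7f to a word */

	for (i = 0; i < hwsq.len; i++)
		printf("%02x ", hwsq.data[i]);
	printf("\n");
	return 0;
}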
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 8f4f914..cb389d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -22,482 +22,257 @@
* Authors: Ben Skeggs
*/
-#include <linux/module.h>
-
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_i2c.h"
#include "nouveau_hw.h"
-#define T_TIMEOUT 2200000
-#define T_RISEFALL 1000
-#define T_HOLD 5000
-
static void
-i2c_drive_scl(void *data, int state)
+nv04_i2c_setscl(void *data, int state)
{
- struct nouveau_i2c_chan *port = data;
- if (port->type == 0) {
- u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
- if (state) val |= 0x20;
- else val &= 0xdf;
- NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
- } else
- if (port->type == 4) {
- nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
- } else
- if (port->type == 5) {
- if (state) port->state |= 0x01;
- else port->state &= 0xfe;
- nv_wr32(port->dev, port->drive, 4 | port->state);
- }
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+ uint8_t val;
+
+ val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
+ NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
}
static void
-i2c_drive_sda(void *data, int state)
+nv04_i2c_setsda(void *data, int state)
{
- struct nouveau_i2c_chan *port = data;
- if (port->type == 0) {
- u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
- if (state) val |= 0x10;
- else val &= 0xef;
- NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
- } else
- if (port->type == 4) {
- nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
- } else
- if (port->type == 5) {
- if (state) port->state |= 0x02;
- else port->state &= 0xfd;
- nv_wr32(port->dev, port->drive, 4 | port->state);
- }
-}
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+ uint8_t val;
-static int
-i2c_sense_scl(void *data)
-{
- struct nouveau_i2c_chan *port = data;
- struct drm_nouveau_private *dev_priv = port->dev->dev_private;
- if (port->type == 0) {
- return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
- } else
- if (port->type == 4) {
- return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
- } else
- if (port->type == 5) {
- if (dev_priv->card_type < NV_D0)
- return !!(nv_rd32(port->dev, port->sense) & 0x01);
- else
- return !!(nv_rd32(port->dev, port->sense) & 0x10);
- }
- return 0;
+ val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
+ NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
}
static int
-i2c_sense_sda(void *data)
+nv04_i2c_getscl(void *data)
{
- struct nouveau_i2c_chan *port = data;
- struct drm_nouveau_private *dev_priv = port->dev->dev_private;
- if (port->type == 0) {
- return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
- } else
- if (port->type == 4) {
- return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
- } else
- if (port->type == 5) {
- if (dev_priv->card_type < NV_D0)
- return !!(nv_rd32(port->dev, port->sense) & 0x02);
- else
- return !!(nv_rd32(port->dev, port->sense) & 0x20);
- }
- return 0;
-}
-
-static void
-i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
-{
- udelay((nsec + 500) / 1000);
-}
-
-static bool
-i2c_raise_scl(struct nouveau_i2c_chan *port)
-{
- u32 timeout = T_TIMEOUT / T_RISEFALL;
-
- i2c_drive_scl(port, 1);
- do {
- i2c_delay(port, T_RISEFALL);
- } while (!i2c_sense_scl(port) && --timeout);
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
- return timeout != 0;
+ return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
}
static int
-i2c_start(struct nouveau_i2c_chan *port)
+nv04_i2c_getsda(void *data)
{
- int ret = 0;
-
- port->state = i2c_sense_scl(port);
- port->state |= i2c_sense_sda(port) << 1;
- if (port->state != 3) {
- i2c_drive_scl(port, 0);
- i2c_drive_sda(port, 1);
- if (!i2c_raise_scl(port))
- ret = -EBUSY;
- }
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
- i2c_drive_sda(port, 0);
- i2c_delay(port, T_HOLD);
- i2c_drive_scl(port, 0);
- i2c_delay(port, T_HOLD);
- return ret;
+ return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
}
static void
-i2c_stop(struct nouveau_i2c_chan *port)
+nv4e_i2c_setscl(void *data, int state)
{
- i2c_drive_scl(port, 0);
- i2c_drive_sda(port, 0);
- i2c_delay(port, T_RISEFALL);
-
- i2c_drive_scl(port, 1);
- i2c_delay(port, T_HOLD);
- i2c_drive_sda(port, 1);
- i2c_delay(port, T_HOLD);
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+ uint8_t val;
+
+ val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
+ nv_wr32(dev, i2c->wr, val | 0x01);
}
-static int
-i2c_bitw(struct nouveau_i2c_chan *port, int sda)
+static void
+nv4e_i2c_setsda(void *data, int state)
{
- i2c_drive_sda(port, sda);
- i2c_delay(port, T_RISEFALL);
-
- if (!i2c_raise_scl(port))
- return -ETIMEDOUT;
- i2c_delay(port, T_HOLD);
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+ uint8_t val;
- i2c_drive_scl(port, 0);
- i2c_delay(port, T_HOLD);
- return 0;
+ val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
+ nv_wr32(dev, i2c->wr, val | 0x01);
}
static int
-i2c_bitr(struct nouveau_i2c_chan *port)
+nv4e_i2c_getscl(void *data)
{
- int sda;
-
- i2c_drive_sda(port, 1);
- i2c_delay(port, T_RISEFALL);
-
- if (!i2c_raise_scl(port))
- return -ETIMEDOUT;
- i2c_delay(port, T_HOLD);
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
- sda = i2c_sense_sda(port);
-
- i2c_drive_scl(port, 0);
- i2c_delay(port, T_HOLD);
- return sda;
+ return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
}
static int
-i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
+nv4e_i2c_getsda(void *data)
{
- int i, bit;
-
- *byte = 0;
- for (i = 7; i >= 0; i--) {
- bit = i2c_bitr(port);
- if (bit < 0)
- return bit;
- *byte |= bit << i;
- }
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
- return i2c_bitw(port, last ? 1 : 0);
+ return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
}
static int
-i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
+nv50_i2c_getscl(void *data)
{
- int i, ret;
- for (i = 7; i >= 0; i--) {
- ret = i2c_bitw(port, !!(byte & (1 << i)));
- if (ret < 0)
- return ret;
- }
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
- ret = i2c_bitr(port);
- if (ret == 1) /* nack */
- ret = -EIO;
- return ret;
+ return !!(nv_rd32(dev, i2c->rd) & 1);
}
-static int
-i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
-{
- u32 addr = msg->addr << 1;
- if (msg->flags & I2C_M_RD)
- addr |= 1;
- return i2c_put_byte(port, addr);
-}
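
i2c_addr() above builds the standard I2C address phase: the 7-bit slave address shifted left one bit, with the LSB selecting direction (1 = read). A stand-alone sketch using the EDID address 0x50 as an example (the flag macro is a local stand-in for I2C_M_RD):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_M_RD	0x0001	/* stand-in for I2C_M_RD */

static uint8_t i2c_addr_byte(uint8_t addr7, uint16_t flags)
{
	uint8_t byte = addr7 << 1;

	if (flags & EXAMPLE_M_RD)
		byte |= 1;
	return byte;
}

int main(void)
{
	printf("read  at 0x50 -> 0x%02x\n", i2c_addr_byte(0x50, EXAMPLE_M_RD));	/* 0xa1 */
	printf("write at 0x50 -> 0x%02x\n", i2c_addr_byte(0x50, 0));		/* 0xa0 */
	return 0;
}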
static int
-i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+nv50_i2c_getsda(void *data)
{
- struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
- struct i2c_msg *msg = msgs;
- int ret = 0, mcnt = num;
-
- while (!ret && mcnt--) {
- u8 remaining = msg->len;
- u8 *ptr = msg->buf;
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
- ret = i2c_start(port);
- if (ret == 0)
- ret = i2c_addr(port, msg);
-
- if (msg->flags & I2C_M_RD) {
- while (!ret && remaining--)
- ret = i2c_get_byte(port, ptr++, !remaining);
- } else {
- while (!ret && remaining--)
- ret = i2c_put_byte(port, *ptr++);
- }
+ return !!(nv_rd32(dev, i2c->rd) & 2);
+}
- msg++;
- }
+static void
+nv50_i2c_setscl(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
- i2c_stop(port);
- return (ret < 0) ? ret : num;
+ nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
}
-static u32
-i2c_bit_func(struct i2c_adapter *adap)
+static void
+nv50_i2c_setsda(void *data, int state)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
-const struct i2c_algorithm nouveau_i2c_bit_algo = {
- .master_xfer = i2c_bit_xfer,
- .functionality = i2c_bit_func
-};
+ nv_wr32(dev, i2c->wr,
+ (nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0));
+ i2c->data = state;
+}
static const uint32_t nv50_i2c_port[] = {
0x00e138, 0x00e150, 0x00e168, 0x00e180,
0x00e254, 0x00e274, 0x00e764, 0x00e780,
0x00e79c, 0x00e7b8
};
+#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
-static u8 *
-i2c_table(struct drm_device *dev, u8 *version)
+int
+nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
{
- u8 *dcb = dcb_table(dev), *i2c = NULL;
- if (dcb) {
- if (dcb[0] >= 0x15)
- i2c = ROMPTR(dev, dcb[2]);
- if (dcb[0] >= 0x30)
- i2c = ROMPTR(dev, dcb[4]);
- }
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *i2c;
+ int ret;
- /* early revisions had no version number, use dcb version */
- if (i2c) {
- *version = dcb[0];
- if (*version >= 0x30)
- *version = i2c[0];
- }
+ if (entry->chan)
+ return -EEXIST;
- return i2c;
-}
+ if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
+ NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
+ return -EINVAL;
+ }
-int
-nouveau_i2c_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- struct nouveau_i2c_chan *port;
- u8 *i2c, *entry, legacy[2][4] = {};
- u8 version, entries, recordlen;
- int ret, i;
-
- INIT_LIST_HEAD(&dev_priv->i2c_ports);
-
- i2c = i2c_table(dev, &version);
- if (!i2c) {
- u8 *bmp = &bios->data[bios->offset];
- if (bios->type != NVBIOS_BMP)
- return -ENODEV;
-
- legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
- legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
- legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
- legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
-
- /* BMP (from v4.0) has i2c info in the structure; it's at a
- * fixed location on earlier VBIOS
- */
- if (bmp[5] < 4)
- i2c = &bios->data[0x48];
- else
- i2c = &bmp[0x36];
-
- if (i2c[4]) legacy[0][0] = i2c[4];
- if (i2c[5]) legacy[0][1] = i2c[5];
- if (i2c[6]) legacy[1][0] = i2c[6];
- if (i2c[7]) legacy[1][1] = i2c[7];
+ i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
+ if (i2c == NULL)
+ return -ENOMEM;
+
+ switch (entry->port_type) {
+ case 0:
+ i2c->bit.setsda = nv04_i2c_setsda;
+ i2c->bit.setscl = nv04_i2c_setscl;
+ i2c->bit.getsda = nv04_i2c_getsda;
+ i2c->bit.getscl = nv04_i2c_getscl;
+ i2c->rd = entry->read;
+ i2c->wr = entry->write;
+ break;
+ case 4:
+ i2c->bit.setsda = nv4e_i2c_setsda;
+ i2c->bit.setscl = nv4e_i2c_setscl;
+ i2c->bit.getsda = nv4e_i2c_getsda;
+ i2c->bit.getscl = nv4e_i2c_getscl;
+ i2c->rd = 0x600800 + entry->read;
+ i2c->wr = 0x600800 + entry->write;
+ break;
+ case 5:
+ i2c->bit.setsda = nv50_i2c_setsda;
+ i2c->bit.setscl = nv50_i2c_setscl;
+ i2c->bit.getsda = nv50_i2c_getsda;
+ i2c->bit.getscl = nv50_i2c_getscl;
+ i2c->rd = nv50_i2c_port[entry->read];
+ i2c->wr = i2c->rd;
+ break;
+ case 6:
+ i2c->rd = entry->read;
+ i2c->wr = entry->write;
+ break;
+ default:
+ NV_ERROR(dev, "DCB I2C port type %d unknown\n",
+ entry->port_type);
+ kfree(i2c);
+ return -EINVAL;
}
- if (i2c && version >= 0x30) {
- entry = i2c[1] + i2c;
- entries = i2c[2];
- recordlen = i2c[3];
- } else
- if (i2c) {
- entry = i2c;
- entries = 16;
- recordlen = 4;
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "nouveau-%s-%d", pci_name(dev->pdev), index);
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+
+ if (entry->port_type < 6) {
+ i2c->adapter.algo_data = &i2c->bit;
+ i2c->bit.udelay = 40;
+ i2c->bit.timeout = usecs_to_jiffies(5000);
+ i2c->bit.data = i2c;
+ ret = i2c_bit_add_bus(&i2c->adapter);
} else {
- entry = legacy[0];
- entries = 2;
- recordlen = 4;
+ i2c->adapter.algo = &nouveau_dp_i2c_algo;
+ ret = i2c_add_adapter(&i2c->adapter);
}
- for (i = 0; i < entries; i++, entry += recordlen) {
- port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (port == NULL) {
- nouveau_i2c_fini(dev);
- return -ENOMEM;
- }
-
- port->type = entry[3];
- if (version < 0x30) {
- port->type &= 0x07;
- if (port->type == 0x07)
- port->type = 0xff;
- }
-
- if (port->type == 0xff) {
- kfree(port);
- continue;
- }
-
- switch (port->type) {
- case 0: /* NV04:NV50 */
- port->drive = entry[0];
- port->sense = entry[1];
- port->adapter.algo = &nouveau_i2c_bit_algo;
- break;
- case 4: /* NV4E */
- port->drive = 0x600800 + entry[1];
- port->sense = port->drive;
- port->adapter.algo = &nouveau_i2c_bit_algo;
- break;
- case 5: /* NV50- */
- port->drive = entry[0] & 0x0f;
- if (dev_priv->card_type < NV_D0) {
- if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
- break;
- port->drive = nv50_i2c_port[port->drive];
- port->sense = port->drive;
- } else {
- port->drive = 0x00d014 + (port->drive * 0x20);
- port->sense = port->drive;
- }
- port->adapter.algo = &nouveau_i2c_bit_algo;
- break;
- case 6: /* NV50- DP AUX */
- port->drive = entry[0];
- port->sense = port->drive;
- port->adapter.algo = &nouveau_dp_i2c_algo;
- break;
- default:
- break;
- }
-
- if (!port->adapter.algo) {
- NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
- i, port->type, port->drive, port->sense);
- kfree(port);
- continue;
- }
-
- snprintf(port->adapter.name, sizeof(port->adapter.name),
- "nouveau-%s-%d", pci_name(dev->pdev), i);
- port->adapter.owner = THIS_MODULE;
- port->adapter.dev.parent = &dev->pdev->dev;
- port->dev = dev;
- port->index = i;
- port->dcb = ROM32(entry[0]);
- i2c_set_adapdata(&port->adapter, i2c);
-
- ret = i2c_add_adapter(&port->adapter);
- if (ret) {
- NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
- kfree(port);
- continue;
- }
-
- list_add_tail(&port->head, &dev_priv->i2c_ports);
+ if (ret) {
+ NV_ERROR(dev, "Failed to register i2c %d\n", index);
+ kfree(i2c);
+ return ret;
}
+ entry->chan = i2c;
return 0;
}
void
-nouveau_i2c_fini(struct drm_device *dev)
+nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_i2c_chan *port, *tmp;
+ if (!entry->chan)
+ return;
- list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
- i2c_del_adapter(&port->adapter);
- kfree(port);
- }
+ i2c_del_adapter(&entry->chan->adapter);
+ kfree(entry->chan);
+ entry->chan = NULL;
}
struct nouveau_i2c_chan *
-nouveau_i2c_find(struct drm_device *dev, u8 index)
+nouveau_i2c_find(struct drm_device *dev, int index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_i2c_chan *port;
-
- if (index == NV_I2C_DEFAULT(0) ||
- index == NV_I2C_DEFAULT(1)) {
- u8 version, *i2c = i2c_table(dev, &version);
- if (i2c && version >= 0x30) {
- if (index == NV_I2C_DEFAULT(0))
- index = (i2c[4] & 0x0f);
- else
- index = (i2c[4] & 0xf0) >> 4;
- } else {
- index = 2;
- }
- }
-
- list_for_each_entry(port, &dev_priv->i2c_ports, head) {
- if (port->index == index)
- break;
- }
+ struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
- if (&port->head == &dev_priv->i2c_ports)
+ if (index >= DCB_MAX_NUM_I2C_ENTRIES)
return NULL;
- if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
- u32 reg = 0x00e500, val;
- if (port->type == 6) {
- reg += port->drive * 0x50;
+ if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) {
+ uint32_t reg = 0xe500, val;
+
+ if (i2c->port_type == 6) {
+ reg += i2c->read * 0x50;
val = 0x2002;
} else {
- reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
+ reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
val = 0xe001;
}
- /* unclear why, but neither auxch nor i2c works if it's 1 */
- nv_mask(dev, reg + 0x0c, 0x00000001, 0x00000000);
- /* unclear why, but this switches between auxch and normal i2c */
- nv_mask(dev, reg + 0x00, 0x0000f003, val);
+ nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val);
}
- return port;
+ if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
+ return NULL;
+ return i2c->chan;
}
bool
@@ -532,12 +307,8 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
int i;
- if (!i2c) {
- NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
- return -ENODEV;
- }
+ NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
- NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
for (i = 0; info[i].addr; i++) {
if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
(!match || match(i2c, &info[i]))) {
@@ -547,5 +318,6 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
}
NV_DEBUG(dev, "No devices found.\n");
+
return -ENODEV;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index 4d2e4e9..422b62f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -27,25 +27,20 @@
#include <linux/i2c-algo-bit.h>
#include "drm_dp_helper.h"
-#define NV_I2C_PORT(n) (0x00 + (n))
-#define NV_I2C_PORT_NUM 0x10
-#define NV_I2C_DEFAULT(n) (0x80 + (n))
+struct dcb_i2c_entry;
struct nouveau_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
- struct list_head head;
- u8 index;
- u8 type;
- u32 dcb;
- u32 drive;
- u32 sense;
- u32 state;
+ struct i2c_algo_bit_data bit;
+ unsigned rd;
+ unsigned wr;
+ unsigned data;
};
-int nouveau_i2c_init(struct drm_device *);
-void nouveau_i2c_fini(struct drm_device *);
-struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index);
+int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
+void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
+struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
int nouveau_i2c_identify(struct drm_device *dev, const char *what,
struct i2c_board_info *info,
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 868c7fd..2ba7265 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -79,7 +79,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
int i;
stat = nv_rd32(dev, NV03_PMC_INTR_0);
- if (stat == 0 || stat == ~0)
+ if (!stat)
return IRQ_NONE;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index b08065f..5ee14d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -26,8 +26,7 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors:
- * Ben Skeggs <bskeggs@redhat.com>
- * Roy Spliet <r.spliet@student.tudelft.nl>
+ * Keith Whitwell <keith@tungstengraphics.com>
*/
@@ -193,6 +192,75 @@ nouveau_mem_gart_fini(struct drm_device *dev)
}
}
+static uint32_t
+nouveau_mem_detect_nv04(struct drm_device *dev)
+{
+ uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
+
+ if (boot0 & 0x00000100)
+ return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
+
+ switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+ return 32 * 1024 * 1024;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+ return 16 * 1024 * 1024;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+ return 8 * 1024 * 1024;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+ return 4 * 1024 * 1024;
+ }
+
+ return 0;
+}
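
The decode above supports two encodings of BOOT_0. A stand-alone version of the newer one (bit 8 set: bits 15:12 give the size in 2 MiB steps, plus 2); the older fixed-size path just matches the NV04_PFB_BOOT_0_RAM_AMOUNT_* constants and is not reproduced here:

#include <stdint.h>
#include <stdio.h>

static uint32_t nv04_vram_bytes(uint32_t boot0)
{
	if (boot0 & 0x00000100)
		return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
	return 0;	/* fixed-size encoding handled by the driver's switch */
}

int main(void)
{
	printf("%u MiB\n", (unsigned)(nv04_vram_bytes(0x00002100) >> 20));	/* prints 6 */
	return 0;
}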
+
+static uint32_t
+nouveau_mem_detect_nforce(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pci_dev *bridge;
+ uint32_t mem;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+ if (!bridge) {
+ NV_ERROR(dev, "no bridge device\n");
+ return 0;
+ }
+
+ if (dev_priv->flags & NV_NFORCE) {
+ pci_read_config_dword(bridge, 0x7C, &mem);
+ return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
+ } else
+ if (dev_priv->flags & NV_NFORCE2) {
+ pci_read_config_dword(bridge, 0x84, &mem);
+ return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
+ }
+
+ NV_ERROR(dev, "impossible!\n");
+ return 0;
+}
+
+int
+nouveau_mem_detect(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->card_type == NV_04) {
+ dev_priv->vram_size = nouveau_mem_detect_nv04(dev);
+ } else
+ if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
+ dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
+ } else
+ if (dev_priv->card_type < NV_50) {
+ dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+ dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
+ }
+
+ if (dev_priv->vram_size)
+ return 0;
+ return -ENOMEM;
+}
+
bool
nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
{
@@ -317,29 +385,11 @@ nouveau_mem_init_agp(struct drm_device *dev)
return 0;
}
-static const struct vram_types {
- int value;
- const char *name;
-} vram_type_map[] = {
- { NV_MEM_TYPE_STOLEN , "stolen system memory" },
- { NV_MEM_TYPE_SGRAM , "SGRAM" },
- { NV_MEM_TYPE_SDRAM , "SDRAM" },
- { NV_MEM_TYPE_DDR1 , "DDR1" },
- { NV_MEM_TYPE_DDR2 , "DDR2" },
- { NV_MEM_TYPE_DDR3 , "DDR3" },
- { NV_MEM_TYPE_GDDR2 , "GDDR2" },
- { NV_MEM_TYPE_GDDR3 , "GDDR3" },
- { NV_MEM_TYPE_GDDR4 , "GDDR4" },
- { NV_MEM_TYPE_GDDR5 , "GDDR5" },
- { NV_MEM_TYPE_UNKNOWN, "unknown type" }
-};
-
int
nouveau_mem_vram_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
- const struct vram_types *vram_type;
int ret, dma_bits;
dma_bits = 32;
@@ -347,7 +397,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
dma_bits = 40;
} else
- if (0 && pci_is_pcie(dev->pdev) &&
+ if (0 && drm_pci_device_is_pcie(dev) &&
dev_priv->chipset > 0x40 &&
dev_priv->chipset != 0x45) {
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
@@ -357,12 +407,8 @@ nouveau_mem_vram_init(struct drm_device *dev)
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
if (ret)
return ret;
- ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
- if (ret) {
- /* Reset to default value. */
- pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
- }
+ dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
ret = nouveau_ttm_global_init(dev_priv);
if (ret)
@@ -377,21 +423,39 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
- vram_type = vram_type_map;
- while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
- if (nouveau_vram_type) {
- if (!strcasecmp(nouveau_vram_type, vram_type->name))
- break;
- dev_priv->vram_type = vram_type->value;
- } else {
- if (vram_type->value == dev_priv->vram_type)
- break;
- }
- vram_type++;
+ /* reserve space at end of VRAM for PRAMIN */
+ if (dev_priv->card_type >= NV_50) {
+ dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
+ } else
+ if (dev_priv->card_type >= NV_40) {
+ u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
+ u32 rsvd;
+
+ /* estimate grctx size, the magics come from nv40_grctx.c */
+ if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
+ else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
+ else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
+ else rsvd = 0x4a40 * vs;
+ rsvd += 16 * 1024;
+ rsvd *= dev_priv->engine.fifo.channels;
+
+ /* pciegart table */
+ if (drm_pci_device_is_pcie(dev))
+ rsvd += 512 * 1024;
+
+ /* object storage */
+ rsvd += 512 * 1024;
+
+ dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
+ } else {
+ dev_priv->ramin_rsvd_vram = 512 * 1024;
}
- NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
- (int)(dev_priv->vram_size >> 20), vram_type->name);
+ ret = dev_priv->engine.vram.init(dev);
+ if (ret)
+ return ret;
+
+ NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
if (dev_priv->vram_sys_base) {
NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
dev_priv->vram_sys_base);
@@ -415,7 +479,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
}
if (dev_priv->card_type < NV_50) {
- ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
0, 0, &dev_priv->vga_ram);
if (ret == 0)
ret = nouveau_bo_pin(dev_priv->vga_ram,
@@ -472,645 +536,230 @@ nouveau_mem_gart_init(struct drm_device *dev)
return 0;
}
-static int
-nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
-
- /* XXX: I don't trust the -1's and +1's... they must come
- * from somewhere! */
- t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
- 1 << 16 |
- (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
- (e->tCL + 2 - (t->tCWL - 1));
-
- t->reg[2] = 0x20200000 |
- ((t->tCWL - 1) << 24 |
- e->tRRD << 16 |
- e->tRCDWR << 8 |
- e->tRCDRD);
-
- NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
- t->reg[0], t->reg[1], t->reg[2]);
- return 0;
-}
-
-static int
-nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
+void
+nouveau_mem_timing_init(struct drm_device *dev)
{
+ /* cards < NVC0 only */
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
+ struct nvbios *bios = &dev_priv->vbios;
struct bit_entry P;
- uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
-
- if (bit_table(dev, 'P', &P))
- return -EINVAL;
-
- switch (min(len, (u8) 22)) {
- case 22:
- unk21 = e->tUNK_21;
- case 21:
- unk20 = e->tUNK_20;
- case 20:
- if (e->tCWL > 0)
- t->tCWL = e->tCWL;
- case 19:
- unk18 = e->tUNK_18;
- break;
- }
-
- t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
-
- t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
- max(unk18, (u8) 1) << 16 |
- (e->tWTR + 2 + (t->tCWL - 1)) << 8;
-
- t->reg[2] = ((t->tCWL - 1) << 24 |
- e->tRRD << 16 |
- e->tRCDWR << 8 |
- e->tRCDRD);
-
- t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;
-
- t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);
-
- t->reg[8] = boot->reg[8] & 0xffffff00;
-
- if (P.version == 1) {
- t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));
-
- t->reg[3] = (0x14 + e->tCL) << 24 |
- 0x16 << 16 |
- (e->tCL - 1) << 8 |
- (e->tCL - 1);
-
- t->reg[4] |= boot->reg[4] & 0xffff0000;
-
- t->reg[6] = (0x33 - t->tCWL) << 16 |
- t->tCWL << 8 |
- (0x2e + e->tCL - t->tCWL);
-
- t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
-
- /* XXX: P.version == 1 only has DDR2 and GDDR3? */
- if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) {
- t->reg[5] |= (e->tCL + 3) << 8;
- t->reg[6] |= (t->tCWL - 2) << 8;
- t->reg[8] |= (e->tCL - 4);
- } else {
- t->reg[5] |= (e->tCL + 2) << 8;
- t->reg[6] |= t->tCWL << 8;
- t->reg[8] |= (e->tCL - 2);
+ u8 tUNK_0, tUNK_1, tUNK_2;
+ u8 tRP; /* Byte 3 */
+ u8 tRAS; /* Byte 5 */
+ u8 tRFC; /* Byte 7 */
+ u8 tRC; /* Byte 9 */
+ u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
+ u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+ u8 magic_number = 0; /* Yeah... sorry */
+ u8 *mem = NULL, *entry;
+ int i, recordlen, entries;
+
+ if (bios->type == NVBIOS_BIT) {
+ if (bit_table(dev, 'P', &P))
+ return;
+
+ if (P.version == 1)
+ mem = ROMPTR(bios, P.data[4]);
+ else
+ if (P.version == 2)
+ mem = ROMPTR(bios, P.data[8]);
+ else {
+ NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
}
} else {
- t->reg[1] |= (5 + e->tCL - (t->tCWL));
-
- /* XXX: 0xb? 0x30? */
- t->reg[3] = (0x30 + e->tCL) << 24 |
- (boot->reg[3] & 0x00ff0000)|
- (0xb + e->tCL) << 8 |
- (e->tCL - 1);
-
- t->reg[4] |= (unk20 << 24 | unk21 << 16);
-
- /* XXX: +6? */
- t->reg[5] |= (t->tCWL + 6) << 8;
-
- t->reg[6] = (0x5a + e->tCL) << 16 |
- (6 - e->tCL + t->tCWL) << 8 |
- (0x50 + e->tCL - t->tCWL);
-
- tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
- t->reg[7] = (tmp7_3 << 24) |
- ((tmp7_3 - 6 + e->tCL) << 16) |
- 0x202;
- }
-
- NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
- t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
- NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
- t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
- NV_DEBUG(dev, " 240: %08x\n", t->reg[8]);
- return 0;
-}
-
-static int
-nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- if (e->tCWL > 0)
- t->tCWL = e->tCWL;
-
- t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
- e->tRFC << 8 | e->tRC);
-
- t->reg[1] = (boot->reg[1] & 0xff000000) |
- (e->tRCDWR & 0x0f) << 20 |
- (e->tRCDRD & 0x0f) << 14 |
- (t->tCWL << 7) |
- (e->tCL & 0x0f);
-
- t->reg[2] = (boot->reg[2] & 0xff0000ff) |
- e->tWR << 16 | e->tWTR << 8;
-
- t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
- (e->tUNK_21 & 0xf) << 5 |
- (e->tUNK_13 & 0x1f);
-
- t->reg[4] = (boot->reg[4] & 0xfff00fff) |
- (e->tRRD&0x1f) << 15;
-
- NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
- t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
- NV_DEBUG(dev, " 2a0: %08x\n", t->reg[4]);
- return 0;
-}
-
-/**
- * MR generation methods
- */
-
-static int
-nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- t->drive_strength = 0;
- if (len < 15) {
- t->odt = boot->odt;
- } else {
- t->odt = e->RAM_FT1 & 0x07;
- }
-
- if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
- NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
- return -ERANGE;
- }
-
- if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
- NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
- return -ERANGE;
- }
-
- if (t->odt > 3) {
- NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
- t->id, t->odt);
- t->odt = 0;
- }
-
- t->mr[0] = (boot->mr[0] & 0x100f) |
- (e->tCL) << 4 |
- (e->tWR - 1) << 9;
- t->mr[1] = (boot->mr[1] & 0x101fbb) |
- (t->odt & 0x1) << 2 |
- (t->odt & 0x2) << 5;
-
- NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
- return 0;
-}
-
-uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
- 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
-
-static int
-nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- u8 cl = e->tCL - 4;
-
- t->drive_strength = 0;
- if (len < 15) {
- t->odt = boot->odt;
- } else {
- t->odt = e->RAM_FT1 & 0x07;
- }
-
- if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
- NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
- return -ERANGE;
- }
-
- if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
- NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
- return -ERANGE;
- }
-
- if (e->tCWL < 5) {
- NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
- return -ERANGE;
- }
-
- t->mr[0] = (boot->mr[0] & 0x180b) |
- /* CAS */
- (cl & 0x7) << 4 |
- (cl & 0x8) >> 1 |
- (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
- t->mr[1] = (boot->mr[1] & 0x101dbb) |
- (t->odt & 0x1) << 2 |
- (t->odt & 0x2) << 5 |
- (t->odt & 0x4) << 7;
- t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
-
- NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
- return 0;
-}
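
The MR0 packing in the removed nouveau_mem_ddr3_mr() matches the JEDEC DDR3 layout: CAS latency is stored as CL - 4, with the low three bits in A6:A4 and the fourth bit in A2, alongside a tWR code from the lookup table. A stand-alone sketch of just the CAS part:

#include <stdint.h>
#include <stdio.h>

static uint32_t ddr3_mr0_cas(uint32_t mr0, uint8_t tCL)
{
	uint8_t cl = tCL - 4;

	mr0 &= ~0x74u;			/* clear A6:A4 and A2 */
	mr0 |= (cl & 0x7) << 4;		/* low three bits -> A6:A4 */
	mr0 |= (cl & 0x8) >> 1;		/* high bit -> A2 */
	return mr0;
}

int main(void)
{
	printf("MR0 CAS field for CL11: 0x%03x\n", ddr3_mr0_cas(0, 11));	/* 0x070 */
	printf("MR0 CAS field for CL12: 0x%03x\n", ddr3_mr0_cas(0, 12));	/* 0x004 */
	return 0;
}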
-
-uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
- 0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
-uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
- 0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
-
-static int
-nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- if (len < 15) {
- t->drive_strength = boot->drive_strength;
- t->odt = boot->odt;
- } else {
- t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
- t->odt = e->RAM_FT1 & 0x07;
+ NV_DEBUG(dev, "BMP version too old for memory\n");
+ return;
}
- if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
- NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
- return -ERANGE;
+ if (!mem) {
+ NV_DEBUG(dev, "memory timing table pointer invalid\n");
+ return;
}
- if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
- NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
- return -ERANGE;
+ if (mem[0] != 0x10) {
+ NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
+ return;
}
- if (t->odt > 3) {
- NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
- t->id, t->odt);
- t->odt = 0;
+ /* validate record length */
+ entries = mem[2];
+ recordlen = mem[3];
+ if (recordlen < 15) {
+ NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
+ return;
}
- t->mr[0] = (boot->mr[0] & 0xe0b) |
- /* CAS */
- ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
- ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
- t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
- (t->odt << 2) |
- (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
- t->mr[2] = boot->mr[2];
-
- NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
- t->mr[0], t->mr[1], t->mr[2]);
- return 0;
-}
-
-static int
-nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
- struct nouveau_pm_tbl_entry *e, u8 len,
- struct nouveau_pm_memtiming *boot,
- struct nouveau_pm_memtiming *t)
-{
- if (len < 15) {
- t->drive_strength = boot->drive_strength;
- t->odt = boot->odt;
- } else {
- t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
- t->odt = e->RAM_FT1 & 0x03;
- }
+ /* parse vbios entries into common format */
+ memtimings->timing =
+ kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+ if (!memtimings->timing)
+ return;
- if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
- NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
- return -ERANGE;
+ /* Get "some number" from the timing reg for NV_40 and NV_50;
+ * it is used in the calculations below. */
+ if (dev_priv->card_type >= NV_40 && dev_priv->chipset < 0x98) {
+ magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24;
}
- if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
- NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
- return -ERANGE;
- }
+ entry = mem + mem[1];
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
+ if (entry[0] == 0)
+ continue;
- if (t->odt > 3) {
- NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
- t->id, t->odt);
- t->odt = 0;
- }
+ tUNK_18 = 1;
+ tUNK_19 = 1;
+ tUNK_20 = 0;
+ tUNK_21 = 0;
+ switch (min(recordlen, 22)) {
+ case 22:
+ tUNK_21 = entry[21];
+ case 21:
+ tUNK_20 = entry[20];
+ case 20:
+ tUNK_19 = entry[19];
+ case 19:
+ tUNK_18 = entry[18];
+ default:
+ tUNK_0 = entry[0];
+ tUNK_1 = entry[1];
+ tUNK_2 = entry[2];
+ tRP = entry[3];
+ tRAS = entry[5];
+ tRFC = entry[7];
+ tRC = entry[9];
+ tUNK_10 = entry[10];
+ tUNK_11 = entry[11];
+ tUNK_12 = entry[12];
+ tUNK_13 = entry[13];
+ tUNK_14 = entry[14];
+ break;
+ }
- t->mr[0] = (boot->mr[0] & 0x007) |
- ((e->tCL - 5) << 3) |
- ((e->tWR - 4) << 8);
- t->mr[1] = (boot->mr[1] & 0x1007f0) |
- t->drive_strength |
- (t->odt << 2);
+ timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);
- NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
- return 0;
-}
+ /* XXX: I don't trust the -1's and +1's... they must come
+ * from somewhere! */
+ timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
+ max(tUNK_18, (u8) 1) << 16 |
+ (tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
+ if (dev_priv->chipset == 0xa8) {
+ timing->reg_100224 |= (tUNK_2 - 1);
+ } else {
+ timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
+ }
-int
-nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
- struct nouveau_pm_memtiming *t)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- struct nouveau_pm_memtiming *boot = &pm->boot.timing;
- struct nouveau_pm_tbl_entry *e;
- u8 ver, len, *ptr, *ramcfg;
- int ret;
+ timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
+ if (dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa)
+ timing->reg_100228 |= (tUNK_19 - 1) << 24;
+ else
+ timing->reg_100228 |= magic_number << 24;
+
+ if (dev_priv->card_type == NV_40) {
+ /* NV40: don't know what the rest of the regs are,
+ * and don't need to either */
+ timing->reg_100228 |= 0x20200000;
+ } else if (dev_priv->card_type >= NV_50) {
+ if (dev_priv->chipset < 0x98 ||
+ (dev_priv->chipset == 0x98 &&
+ dev_priv->stepping <= 0xa1)) {
+ timing->reg_10022c = (0x14 + tUNK_2) << 24 |
+ 0x16 << 16 |
+ (tUNK_2 - 1) << 8 |
+ (tUNK_2 - 1);
+ } else {
+ /* XXX: reg_10022c for more recent cards */
+ timing->reg_10022c = tUNK_2 - 1;
+ }
- ptr = nouveau_perf_timing(dev, freq, &ver, &len);
- if (!ptr || ptr[0] == 0x00) {
- *t = *boot;
- return 0;
- }
- e = (struct nouveau_pm_tbl_entry *)ptr;
-
- t->tCWL = boot->tCWL;
-
- switch (dev_priv->card_type) {
- case NV_40:
- ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
- break;
- case NV_50:
- ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
- break;
- case NV_C0:
- ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
- break;
- default:
- ret = -ENODEV;
- break;
- }
+ timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+ tUNK_13 << 8 | tUNK_13);
- switch (dev_priv->vram_type * !ret) {
- case NV_MEM_TYPE_GDDR3:
- ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
- break;
- case NV_MEM_TYPE_GDDR5:
- ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
- break;
- case NV_MEM_TYPE_DDR2:
- ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
- break;
- case NV_MEM_TYPE_DDR3:
- ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
- break;
- default:
- ret = -EINVAL;
- break;
- }
+ timing->reg_100234 = (tRAS << 24 | tRC);
+ timing->reg_100234 += max(tUNK_10, tUNK_11) << 16;
- ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
- if (ramcfg) {
- int dll_off;
+ if (dev_priv->chipset < 0x98 ||
+ (dev_priv->chipset == 0x98 &&
+ dev_priv->stepping <= 0xa1)) {
+ timing->reg_100234 |= (tUNK_2 + 2) << 8;
+ } else {
+ /* XXX: +6? */
+ timing->reg_100234 |= (tUNK_19 + 6) << 8;
+ }
- if (ver == 0x00)
- dll_off = !!(ramcfg[3] & 0x04);
- else
- dll_off = !!(ramcfg[2] & 0x40);
+ /* XXX: reg_100238, value unknown (reads as 0x00??????) */
+ timing->reg_10023c = 0x202;
+ if (dev_priv->chipset < 0x98 ||
+ (dev_priv->chipset == 0x98 &&
+ dev_priv->stepping <= 0xa1)) {
+ timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
+ } else {
+ /* XXX: reg_10023c
+ * currently unknown
+ * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
+ }
- switch (dev_priv->vram_type) {
- case NV_MEM_TYPE_GDDR3:
- t->mr[1] &= ~0x00000040;
- t->mr[1] |= 0x00000040 * dll_off;
- break;
- default:
- t->mr[1] &= ~0x00000001;
- t->mr[1] |= 0x00000001 * dll_off;
- break;
+ /* XXX: reg_100240? */
}
+ timing->id = i;
+
+ NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
+ timing->reg_100220, timing->reg_100224,
+ timing->reg_100228, timing->reg_10022c);
+ NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
+ timing->reg_100230, timing->reg_100234,
+ timing->reg_100238, timing->reg_10023c);
+ NV_DEBUG(dev, " 240: %08x\n", timing->reg_100240);
}
- return ret;
+ memtimings->nr_timing = entries;
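+ /* likely: these timings were only validated on chipsets up to 0x98 */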
+ memtimings->supported = (dev_priv->chipset <= 0x98);
}
void
-nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
+nouveau_mem_timing_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 timing_base, timing_regs, mr_base;
- int i;
-
- if (dev_priv->card_type >= 0xC0) {
- timing_base = 0x10f290;
- mr_base = 0x10f300;
- } else {
- timing_base = 0x100220;
- mr_base = 0x1002c0;
- }
+ struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
- t->id = -1;
-
- switch (dev_priv->card_type) {
- case NV_50:
- timing_regs = 9;
- break;
- case NV_C0:
- case NV_D0:
- timing_regs = 5;
- break;
- case NV_30:
- case NV_40:
- timing_regs = 3;
- break;
- default:
- timing_regs = 0;
- return;
- }
- for (i = 0; i < timing_regs; i++)
- t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));
-
- t->tCWL = 0;
- if (dev_priv->card_type < NV_C0) {
- t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
- } else if (dev_priv->card_type <= NV_D0) {
- t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
- }
-
- t->mr[0] = nv_rd32(dev, mr_base);
- t->mr[1] = nv_rd32(dev, mr_base + 0x04);
- t->mr[2] = nv_rd32(dev, mr_base + 0x20);
- t->mr[3] = nv_rd32(dev, mr_base + 0x24);
-
- t->odt = 0;
- t->drive_strength = 0;
-
- switch (dev_priv->vram_type) {
- case NV_MEM_TYPE_DDR3:
- t->odt |= (t->mr[1] & 0x200) >> 7;
- case NV_MEM_TYPE_DDR2:
- t->odt |= (t->mr[1] & 0x04) >> 2 |
- (t->mr[1] & 0x40) >> 5;
- break;
- case NV_MEM_TYPE_GDDR3:
- case NV_MEM_TYPE_GDDR5:
- t->drive_strength = t->mr[1] & 0x03;
- t->odt = (t->mr[1] & 0x0c) >> 2;
- break;
- default:
- break;
- }
+ kfree(mem->timing);
}
-int
-nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
- struct nouveau_pm_level *perflvl)
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
{
- struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
- struct nouveau_pm_memtiming *info = &perflvl->timing;
- u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
- u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
- u32 mr1_dlloff;
-
- switch (dev_priv->vram_type) {
- case NV_MEM_TYPE_DDR2:
- tDLLK = 2000;
- mr1_dlloff = 0x00000001;
- break;
- case NV_MEM_TYPE_DDR3:
- tDLLK = 12000;
- mr1_dlloff = 0x00000001;
- break;
- case NV_MEM_TYPE_GDDR3:
- tDLLK = 40000;
- mr1_dlloff = 0x00000040;
- break;
- default:
- NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
- return -ENODEV;
- }
-
- /* fetch current MRs */
- switch (dev_priv->vram_type) {
- case NV_MEM_TYPE_GDDR3:
- case NV_MEM_TYPE_DDR3:
- mr[2] = exec->mrg(exec, 2);
- default:
- mr[1] = exec->mrg(exec, 1);
- mr[0] = exec->mrg(exec, 0);
- break;
- }
-
- /* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
- if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
- exec->precharge(exec);
- exec->mrs (exec, 1, mr[1] | mr1_dlloff);
- exec->wait(exec, tMRD);
- }
-
- /* enter self-refresh mode */
- exec->precharge(exec);
- exec->refresh(exec);
- exec->refresh(exec);
- exec->refresh_auto(exec, false);
- exec->refresh_self(exec, true);
- exec->wait(exec, tCKSRE);
-
- /* modify input clock frequency */
- exec->clock_set(exec);
-
- /* exit self-refresh mode */
- exec->wait(exec, tCKSRX);
- exec->precharge(exec);
- exec->refresh_self(exec, false);
- exec->refresh_auto(exec, true);
- exec->wait(exec, tXS);
-
- /* update MRs */
- if (mr[2] != info->mr[2]) {
- exec->mrs (exec, 2, info->mr[2]);
- exec->wait(exec, tMRD);
- }
-
- if (mr[1] != info->mr[1]) {
- /* need to keep DLL off until later, at least on GDDR3 */
- exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
- exec->wait(exec, tMRD);
- }
-
- if (mr[0] != info->mr[0]) {
- exec->mrs (exec, 0, info->mr[0]);
- exec->wait(exec, tMRD);
- }
-
- /* update PFB timing registers */
- exec->timing_set(exec);
-
- /* DLL reset (and enable first, if it was off) */
- if (!(info->mr[1] & mr1_dlloff)) {
- if (mr[1] & mr1_dlloff) {
- exec->mrs (exec, 1, info->mr[1]);
- exec->wait(exec, tMRD);
- }
- exec->mrs (exec, 0, info->mr[0] | 0x00000100);
- exec->wait(exec, tMRD);
- exec->mrs (exec, 0, info->mr[0] | 0x00000000);
- exec->wait(exec, tMRD);
- exec->wait(exec, tDLLK);
- if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3)
- exec->precharge(exec);
- }
-
- return 0;
-}
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_mm *mm;
+ u64 size, block, rsvd;
+ int ret;
-int
-nouveau_mem_vbios_type(struct drm_device *dev)
-{
- struct bit_entry M;
- u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
- if (!bit_table(dev, 'M', &M) || M.version != 2 || M.length < 5) {
- u8 *table = ROMPTR(dev, M.data[3]);
- if (table && table[0] == 0x10 && ramcfg < table[3]) {
- u8 *entry = table + table[1] + (ramcfg * table[2]);
- switch (entry[0] & 0x0f) {
- case 0: return NV_MEM_TYPE_DDR2;
- case 1: return NV_MEM_TYPE_DDR3;
- case 2: return NV_MEM_TYPE_GDDR3;
- case 3: return NV_MEM_TYPE_GDDR5;
- default:
- break;
- }
+ rsvd = (256 * 1024); /* vga memory */
+ size = (p_size << PAGE_SHIFT) - rsvd;
+ block = dev_priv->vram_rblock_size;
- }
- }
- return NV_MEM_TYPE_UNKNOWN;
-}
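+ /* the allocator works in 4KiB units, hence the >> 12 conversions below */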
+ ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
+ if (ret)
+ return ret;
-static int
-nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
- /* nothing to do */
+ man->priv = mm;
return 0;
}
static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
- /* nothing to do */
- return 0;
-}
+ struct nouveau_mm *mm = man->priv;
+ int ret;
-static inline void
-nouveau_mem_node_cleanup(struct nouveau_mem *node)
-{
- if (node->vma[0].node) {
- nouveau_vm_unmap(&node->vma[0]);
- nouveau_vm_put(&node->vma[0]);
- }
+ ret = nouveau_mm_fini(&mm);
+ if (ret)
+ return ret;
- if (node->vma[1].node) {
- nouveau_vm_unmap(&node->vma[1]);
- nouveau_vm_put(&node->vma[1]);
- }
+ man->priv = NULL;
+ return 0;
}
static void
@@ -1119,9 +768,14 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nouveau_mem *node = mem->mm_node;
struct drm_device *dev = dev_priv->dev;
- nouveau_mem_node_cleanup(mem->mm_node);
+ if (node->tmp_vma.node) {
+ nouveau_vm_unmap(&node->tmp_vma);
+ nouveau_vm_put(&node->tmp_vma);
+ }
+
vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}
@@ -1140,7 +794,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
int ret;
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
- size_nc = 1 << nvbo->page_shift;
+ size_nc = 1 << nvbo->vma.node->type;
ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
@@ -1150,7 +804,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
return (ret == -ENOSPC) ? 0 : ret;
}
- node->page_shift = nvbo->page_shift;
+ node->page_shift = 12;
+ if (nvbo->vma.node)
+ node->page_shift = nvbo->vma.node->type;
mem->mm_node = node;
mem->start = node->offset >> PAGE_SHIFT;
@@ -1206,9 +862,15 @@ static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- nouveau_mem_node_cleanup(mem->mm_node);
- kfree(mem->mm_node);
+ struct nouveau_mem *node = mem->mm_node;
+
+ if (node->tmp_vma.node) {
+ nouveau_vm_unmap(&node->tmp_vma);
+ nouveau_vm_put(&node->tmp_vma);
+ }
+
mem->mm_node = NULL;
+ kfree(node);
}
static int
@@ -1218,7 +880,11 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vma *vma = &nvbo->vma;
+ struct nouveau_vm *vm = vma->vm;
struct nouveau_mem *node;
+ int ret;
if (unlikely((mem->num_pages << PAGE_SHIFT) >=
dev_priv->gart_info.aper_size))
@@ -1227,8 +893,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
- node->page_shift = 12;
+ /* This node must be for evicting large-paged VRAM
+ * to system memory. Due to a nv50 limitation of
+ * not being able to mix large/small pages within
+ * the same PDE, we need to create a temporary
+ * small-paged VMA for the eviction.
+ */
+ if (vma->node->type != vm->spg_shift) {
+ ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
+ vm->spg_shift, NV_MEM_ACCESS_RW,
+ &node->tmp_vma);
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+ }
+
+ node->page_shift = nvbo->vma.node->type;
mem->mm_node = node;
mem->start = 0;
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index b29ffb3..7609756 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -27,7 +27,7 @@
#include "nouveau_mm.h"
static inline void
-region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
+region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
{
list_del(&a->nl_entry);
list_del(&a->fl_entry);
@@ -35,7 +35,7 @@ region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
}
static struct nouveau_mm_node *
-region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
{
struct nouveau_mm_node *b;
@@ -57,33 +57,33 @@ region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
return b;
}
-#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
+#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
void
-nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
+nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
{
struct nouveau_mm_node *prev = node(this, prev);
struct nouveau_mm_node *next = node(this, next);
- list_add(&this->fl_entry, &mm->free);
+ list_add(&this->fl_entry, &rmm->free);
this->type = 0;
if (prev && prev->type == 0) {
prev->length += this->length;
- region_put(mm, this);
+ region_put(rmm, this);
this = prev;
}
if (next && next->type == 0) {
next->offset = this->offset;
next->length += this->length;
- region_put(mm, this);
+ region_put(rmm, this);
}
}
int
-nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
+nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
u32 align, struct nouveau_mm_node **pnode)
{
struct nouveau_mm_node *prev, *this, *next;
@@ -92,17 +92,17 @@ nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
u32 splitoff;
u32 s, e;
- list_for_each_entry(this, &mm->free, fl_entry) {
+ list_for_each_entry(this, &rmm->free, fl_entry) {
e = this->offset + this->length;
s = this->offset;
prev = node(this, prev);
if (prev && prev->type != type)
- s = roundup(s, mm->block_size);
+ s = roundup(s, rmm->block_size);
next = node(this, next);
if (next && next->type != type)
- e = rounddown(e, mm->block_size);
+ e = rounddown(e, rmm->block_size);
s = (s + align_mask) & ~align_mask;
e &= ~align_mask;
@@ -110,10 +110,10 @@ nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
continue;
splitoff = s - this->offset;
- if (splitoff && !region_split(mm, this, splitoff))
+ if (splitoff && !region_split(rmm, this, splitoff))
return -ENOMEM;
- this = region_split(mm, this, min(size, e - s));
+ this = region_split(rmm, this, min(size, e - s));
if (!this)
return -ENOMEM;
@@ -127,49 +127,45 @@ nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
}
int
-nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
+nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
{
- struct nouveau_mm_node *node;
-
- if (block) {
- mutex_init(&mm->mutex);
- INIT_LIST_HEAD(&mm->nodes);
- INIT_LIST_HEAD(&mm->free);
- mm->block_size = block;
- mm->heap_nodes = 0;
- }
+ struct nouveau_mm *rmm;
+ struct nouveau_mm_node *heap;
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap)
return -ENOMEM;
- node->offset = roundup(offset, mm->block_size);
- node->length = rounddown(offset + length, mm->block_size) - node->offset;
+ heap->offset = roundup(offset, block);
+ heap->length = rounddown(offset + length, block) - heap->offset;
- list_add_tail(&node->nl_entry, &mm->nodes);
- list_add_tail(&node->fl_entry, &mm->free);
- mm->heap_nodes++;
+ rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
+ if (!rmm) {
+ kfree(heap);
+ return -ENOMEM;
+ }
+ rmm->block_size = block;
+ mutex_init(&rmm->mutex);
+ INIT_LIST_HEAD(&rmm->nodes);
+ INIT_LIST_HEAD(&rmm->free);
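+ /* seed the allocator with a single free node spanning the whole heap */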
+ list_add(&heap->nl_entry, &rmm->nodes);
+ list_add(&heap->fl_entry, &rmm->free);
+
+ *prmm = rmm;
return 0;
}
int
-nouveau_mm_fini(struct nouveau_mm *mm)
+nouveau_mm_fini(struct nouveau_mm **prmm)
{
- struct nouveau_mm_node *node, *heap =
- list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
- int nodes = 0;
-
- list_for_each_entry(node, &mm->nodes, nl_entry) {
- if (nodes++ == mm->heap_nodes) {
- printk(KERN_ERR "nouveau_mm in use at destroy time!\n");
- list_for_each_entry(node, &mm->nodes, nl_entry) {
- printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
- node->type, node->offset, node->length);
- }
- WARN_ON(1);
- return -EBUSY;
- }
- }
+ struct nouveau_mm *rmm = *prmm;
+ struct nouveau_mm_node *heap =
+ list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
+
+ if (!list_is_singular(&rmm->nodes))
+ return -EBUSY;
kfree(heap);
+ kfree(rmm);
+ *prmm = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index 57a600c..1f7483a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -42,18 +42,16 @@ struct nouveau_mm {
struct mutex mutex;
u32 block_size;
- int heap_nodes;
};
-int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
-int nouveau_mm_fini(struct nouveau_mm *);
+int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
+int nouveau_mm_fini(struct nouveau_mm **);
int nouveau_mm_pre(struct nouveau_mm *);
int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
u32 align, struct nouveau_mm_node **);
void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
int nv50_vram_init(struct drm_device *);
-void nv50_vram_fini(struct drm_device *);
int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
u32 memtype, struct nouveau_mem **);
void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
deleted file mode 100644
index 07d0d1e..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_mxm.c
+++ /dev/null
@@ -1,723 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/acpi.h>
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-
-#define MXM_DBG(dev, fmt, args...) NV_DEBUG((dev), "MXM: " fmt, ##args)
-#define MXM_MSG(dev, fmt, args...) NV_INFO((dev), "MXM: " fmt, ##args)
-
-static u8 *
-mxms_data(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- return dev_priv->mxms;
-
-}
-
-static u16
-mxms_version(struct drm_device *dev)
-{
- u8 *mxms = mxms_data(dev);
- u16 version = (mxms[4] << 8) | mxms[5];
- switch (version) {
- case 0x0200:
- case 0x0201:
- case 0x0300:
- return version;
- default:
- break;
- }
-
- MXM_DBG(dev, "unknown version %d.%d\n", mxms[4], mxms[5]);
- return 0x0000;
-}
-
-static u16
-mxms_headerlen(struct drm_device *dev)
-{
- return 8;
-}
-
-static u16
-mxms_structlen(struct drm_device *dev)
-{
- return *(u16 *)&mxms_data(dev)[6];
-}
-
-static bool
-mxms_checksum(struct drm_device *dev)
-{
- u16 size = mxms_headerlen(dev) + mxms_structlen(dev);
- u8 *mxms = mxms_data(dev), sum = 0;
- while (size--)
- sum += *mxms++;
- if (sum) {
- MXM_DBG(dev, "checksum invalid\n");
- return false;
- }
- return true;
-}
-
-static bool
-mxms_valid(struct drm_device *dev)
-{
- u8 *mxms = mxms_data(dev);
- if (*(u32 *)mxms != 0x5f4d584d) {
- MXM_DBG(dev, "signature invalid\n");
- return false;
- }
-
- if (!mxms_version(dev) || !mxms_checksum(dev))
- return false;
-
- return true;
-}
-
-static bool
-mxms_foreach(struct drm_device *dev, u8 types,
- bool (*exec)(struct drm_device *, u8 *, void *), void *info)
-{
- u8 *mxms = mxms_data(dev);
- u8 *desc = mxms + mxms_headerlen(dev);
- u8 *fini = desc + mxms_structlen(dev) - 1;
- while (desc < fini) {
- u8 type = desc[0] & 0x0f;
- u8 headerlen = 0;
- u8 recordlen = 0;
- u8 entries = 0;
-
- switch (type) {
- case 0: /* Output Device Structure */
- if (mxms_version(dev) >= 0x0300)
- headerlen = 8;
- else
- headerlen = 6;
- break;
- case 1: /* System Cooling Capability Structure */
- case 2: /* Thermal Structure */
- case 3: /* Input Power Structure */
- headerlen = 4;
- break;
- case 4: /* GPIO Device Structure */
- headerlen = 4;
- recordlen = 2;
- entries = (ROM32(desc[0]) & 0x01f00000) >> 20;
- break;
- case 5: /* Vendor Specific Structure */
- headerlen = 8;
- break;
- case 6: /* Backlight Control Structure */
- if (mxms_version(dev) >= 0x0300) {
- headerlen = 4;
- recordlen = 8;
- entries = (desc[1] & 0xf0) >> 4;
- } else {
- headerlen = 8;
- }
- break;
- case 7: /* Fan Control Structure */
- headerlen = 8;
- recordlen = 4;
- entries = desc[1] & 0x07;
- break;
- default:
- MXM_DBG(dev, "unknown descriptor type %d\n", type);
- return false;
- }
-
- if ((drm_debug & DRM_UT_DRIVER) && (exec == NULL)) {
- static const char * mxms_desc_name[] = {
- "ODS", "SCCS", "TS", "IPS",
- "GSD", "VSS", "BCS", "FCS",
- };
- u8 *dump = desc;
- int i, j;
-
- MXM_DBG(dev, "%4s: ", mxms_desc_name[type]);
- for (j = headerlen - 1; j >= 0; j--)
- printk("%02x", dump[j]);
- printk("\n");
- dump += headerlen;
-
- for (i = 0; i < entries; i++, dump += recordlen) {
- MXM_DBG(dev, " ");
- for (j = recordlen - 1; j >= 0; j--)
- printk("%02x", dump[j]);
- printk("\n");
- }
- }
-
- if (types & (1 << type)) {
- if (!exec(dev, desc, info))
- return false;
- }
-
- desc += headerlen + (entries * recordlen);
- }
-
- return true;
-}
-
-static u8 *
-mxm_table(struct drm_device *dev, u8 *size)
-{
- struct bit_entry x;
-
- if (bit_table(dev, 'x', &x)) {
- MXM_DBG(dev, "BIT 'x' table not present\n");
- return NULL;
- }
-
- if (x.version != 1 || x.length < 3) {
- MXM_MSG(dev, "BIT x table %d/%d unknown\n",
- x.version, x.length);
- return NULL;
- }
-
- *size = x.length;
- return x.data;
-}
-
-/* These map MXM v2.x digital connection values to the appropriate SOR/link,
- * hopefully they're correct for all boards within the same chipset...
- *
- * MXM v3.x VBIOS are nicer and provide pointers to these tables.
- */
-static u8 nv84_sor_map[16] = {
- 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static u8 nv92_sor_map[16] = {
- 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
- 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static u8 nv94_sor_map[16] = {
- 0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
- 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static u8 nv96_sor_map[16] = {
- 0x00, 0x14, 0x24, 0x00, 0x34, 0x00, 0x11, 0x31,
- 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static u8 nv98_sor_map[16] = {
- 0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
- 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static u8
-mxm_sor_map(struct drm_device *dev, u8 conn)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u8 len, *mxm = mxm_table(dev, &len);
- if (mxm && len >= 6) {
- u8 *map = ROMPTR(dev, mxm[4]);
- if (map) {
- if (map[0] == 0x10) {
- if (conn < map[3])
- return map[map[1] + conn];
- return 0x00;
- }
-
- MXM_MSG(dev, "unknown sor map 0x%02x\n", map[0]);
- }
- }
-
- if (dev_priv->chipset == 0x84 || dev_priv->chipset == 0x86)
- return nv84_sor_map[conn];
- if (dev_priv->chipset == 0x92)
- return nv92_sor_map[conn];
- if (dev_priv->chipset == 0x94)
- return nv94_sor_map[conn];
- if (dev_priv->chipset == 0x96)
- return nv96_sor_map[conn];
- if (dev_priv->chipset == 0x98)
- return nv98_sor_map[conn];
-
- MXM_MSG(dev, "missing sor map\n");
- return 0x00;
-}
-
-static u8
-mxm_ddc_map(struct drm_device *dev, u8 port)
-{
- u8 len, *mxm = mxm_table(dev, &len);
- if (mxm && len >= 8) {
- u8 *map = ROMPTR(dev, mxm[6]);
- if (map) {
- if (map[0] == 0x10) {
- if (port < map[3])
- return map[map[1] + port];
- return 0x00;
- }
-
- MXM_MSG(dev, "unknown ddc map 0x%02x\n", map[0]);
- }
- }
-
- /* v2.x: directly write port as dcb i2cidx */
- return (port << 4) | port;
-}
-
-struct mxms_odev {
- u8 outp_type;
- u8 conn_type;
- u8 ddc_port;
- u8 dig_conn;
-};
-
-static void
-mxms_output_device(struct drm_device *dev, u8 *pdata, struct mxms_odev *desc)
-{
- u64 data = ROM32(pdata[0]);
- if (mxms_version(dev) >= 0x0300)
- data |= (u64)ROM16(pdata[4]) << 32;
-
- desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
- desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8;
- desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
- desc->dig_conn = (data & 0x0000000000780000ULL) >> 19;
-}
-
-struct context {
- u32 *outp;
- struct mxms_odev desc;
-};
-
-static bool
-mxm_match_tmds_partner(struct drm_device *dev, u8 *data, void *info)
-{
- struct context *ctx = info;
- struct mxms_odev desc;
-
- mxms_output_device(dev, data, &desc);
- if (desc.outp_type == 2 &&
- desc.dig_conn == ctx->desc.dig_conn)
- return false;
- return true;
-}
-
-static bool
-mxm_match_dcb(struct drm_device *dev, u8 *data, void *info)
-{
- struct context *ctx = info;
- u64 desc = *(u64 *)data;
-
- mxms_output_device(dev, data, &ctx->desc);
-
- /* match dcb encoder type to mxm-ods device type */
- if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
- return true;
-
- /* digital output, have some extra stuff to match here, there's a
- * table in the vbios that provides a mapping from the mxm digital
- * connection enum values to SOR/link
- */
- if ((desc & 0x00000000000000f0) >= 0x20) {
- /* check against sor index */
- u8 link = mxm_sor_map(dev, ctx->desc.dig_conn);
- if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
- return true;
-
- /* check dcb entry has a compatible link field */
- link = (link & 0x30) >> 4;
- if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
- return true;
- }
-
- /* mark this descriptor accounted for by setting invalid device type,
- * except of course some manufacturers don't follow specs properly and
- * we need to avoid killing off the TMDS function on DP connectors
- * if MXM-SIS is missing an entry for it.
- */
- data[0] &= ~0xf0;
- if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
- mxms_foreach(dev, 0x01, mxm_match_tmds_partner, ctx)) {
- data[0] |= 0x20; /* modify descriptor to match TMDS now */
- } else {
- data[0] |= 0xf0;
- }
-
- return false;
-}
-
-static int
-mxm_dcb_sanitise_entry(struct drm_device *dev, void *data, int idx, u8 *dcbe)
-{
- struct context ctx = { .outp = (u32 *)dcbe };
- u8 type, i2cidx, link;
- u8 *conn;
-
- /* look for an output device structure that matches this dcb entry.
- * if one isn't found, disable it.
- */
- if (mxms_foreach(dev, 0x01, mxm_match_dcb, &ctx)) {
- MXM_DBG(dev, "disable %d: 0x%08x 0x%08x\n",
- idx, ctx.outp[0], ctx.outp[1]);
- ctx.outp[0] |= 0x0000000f;
- return 0;
- }
-
- /* modify the output's ddc/aux port, there's a pointer to a table
- * with the mapping from mxm ddc/aux port to dcb i2c_index in the
- * vbios mxm table
- */
- i2cidx = mxm_ddc_map(dev, ctx.desc.ddc_port);
- if ((ctx.outp[0] & 0x0000000f) != OUTPUT_DP)
- i2cidx = (i2cidx & 0x0f) << 4;
- else
- i2cidx = (i2cidx & 0xf0);
-
- if (i2cidx != 0xf0) {
- ctx.outp[0] &= ~0x000000f0;
- ctx.outp[0] |= i2cidx;
- }
-
- /* override dcb sorconf.link, based on what mxm data says */
- switch (ctx.desc.outp_type) {
- case 0x00: /* Analog CRT */
- case 0x01: /* Analog TV/HDTV */
- break;
- default:
- link = mxm_sor_map(dev, ctx.desc.dig_conn) & 0x30;
- ctx.outp[1] &= ~0x00000030;
- ctx.outp[1] |= link;
- break;
- }
-
- /* we may need to fixup various other vbios tables based on what
- * the descriptor says the connector type should be.
- *
- * in a lot of cases, the vbios tables will claim DVI-I is possible,
- * and the mxm data says the connector is really HDMI. another
- * common example is DP->eDP.
- */
- conn = dcb_conn(dev, (ctx.outp[0] & 0x0000f000) >> 12);
- type = conn[0];
- switch (ctx.desc.conn_type) {
- case 0x01: /* LVDS */
- ctx.outp[1] |= 0x00000004; /* use_power_scripts */
- /* XXX: modify default link width in LVDS table */
- break;
- case 0x02: /* HDMI */
- type = DCB_CONNECTOR_HDMI_1;
- break;
- case 0x03: /* DVI-D */
- type = DCB_CONNECTOR_DVI_D;
- break;
- case 0x0e: /* eDP, falls through to DPint */
- ctx.outp[1] |= 0x00010000;
- case 0x07: /* DP internal, wtf is this?? HP8670w */
- ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
- type = DCB_CONNECTOR_eDP;
- break;
- default:
- break;
- }
-
- if (mxms_version(dev) >= 0x0300)
- conn[0] = type;
-
- return 0;
-}
-
-static bool
-mxm_show_unmatched(struct drm_device *dev, u8 *data, void *info)
-{
- u64 desc = *(u64 *)data;
- if ((desc & 0xf0) != 0xf0)
- MXM_MSG(dev, "unmatched output device 0x%016llx\n", desc);
- return true;
-}
-
-static void
-mxm_dcb_sanitise(struct drm_device *dev)
-{
- u8 *dcb = dcb_table(dev);
- if (!dcb || dcb[0] != 0x40) {
- MXM_DBG(dev, "unsupported DCB version\n");
- return;
- }
-
- dcb_outp_foreach(dev, NULL, mxm_dcb_sanitise_entry);
- mxms_foreach(dev, 0x01, mxm_show_unmatched, NULL);
-}
-
-static bool
-mxm_shadow_rom_fetch(struct nouveau_i2c_chan *i2c, u8 addr,
- u8 offset, u8 size, u8 *data)
-{
- struct i2c_msg msgs[] = {
- { .addr = addr, .flags = 0, .len = 1, .buf = &offset },
- { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
- };
-
- return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
-}
-
-static bool
-mxm_shadow_rom(struct drm_device *dev, u8 version)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_i2c_chan *i2c = NULL;
- u8 i2cidx, mxms[6], addr, size;
-
- i2cidx = mxm_ddc_map(dev, 1 /* LVDS_DDC */) & 0x0f;
- if (i2cidx < 0x0f)
- i2c = nouveau_i2c_find(dev, i2cidx);
- if (!i2c)
- return false;
-
- addr = 0x54;
- if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms)) {
- addr = 0x56;
- if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms))
- return false;
- }
-
- dev_priv->mxms = mxms;
- size = mxms_headerlen(dev) + mxms_structlen(dev);
- dev_priv->mxms = kmalloc(size, GFP_KERNEL);
-
- if (dev_priv->mxms &&
- mxm_shadow_rom_fetch(i2c, addr, 0, size, dev_priv->mxms))
- return true;
-
- kfree(dev_priv->mxms);
- dev_priv->mxms = NULL;
- return false;
-}
-
-#if defined(CONFIG_ACPI)
-static bool
-mxm_shadow_dsm(struct drm_device *dev, u8 version)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- static char muid[] = {
- 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
- 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
- };
- u32 mxms_args[] = { 0x00000000 };
- union acpi_object args[4] = {
- /* _DSM MUID */
- { .buffer.type = 3,
- .buffer.length = sizeof(muid),
- .buffer.pointer = muid,
- },
- /* spec says this can be zero to mean "highest revision", but
- * of course there's at least one bios out there which fails
- * unless you pass in exactly the version it supports..
- */
- { .integer.type = ACPI_TYPE_INTEGER,
- .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
- },
- /* MXMS function */
- { .integer.type = ACPI_TYPE_INTEGER,
- .integer.value = 0x00000010,
- },
- /* Pointer to MXMS arguments */
- { .buffer.type = ACPI_TYPE_BUFFER,
- .buffer.length = sizeof(mxms_args),
- .buffer.pointer = (char *)mxms_args,
- },
- };
- struct acpi_object_list list = { ARRAY_SIZE(args), args };
- struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- acpi_handle handle;
- int ret;
-
- handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
- if (!handle)
- return false;
-
- ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
- if (ret) {
- MXM_DBG(dev, "DSM MXMS failed: %d\n", ret);
- return false;
- }
-
- obj = retn.pointer;
- if (obj->type == ACPI_TYPE_BUFFER) {
- dev_priv->mxms = kmemdup(obj->buffer.pointer,
- obj->buffer.length, GFP_KERNEL);
- } else
- if (obj->type == ACPI_TYPE_INTEGER) {
- MXM_DBG(dev, "DSM MXMS returned 0x%llx\n", obj->integer.value);
- }
-
- kfree(obj);
- return dev_priv->mxms != NULL;
-}
-#endif
-
-#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
-
-#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
-
-static u8
-wmi_wmmx_mxmi(struct drm_device *dev, u8 version)
-{
- u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
- struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
- struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- acpi_status status;
-
- status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
- if (ACPI_FAILURE(status)) {
- MXM_DBG(dev, "WMMX MXMI returned %d\n", status);
- return 0x00;
- }
-
- obj = retn.pointer;
- if (obj->type == ACPI_TYPE_INTEGER) {
- version = obj->integer.value;
- MXM_DBG(dev, "WMMX MXMI version %d.%d\n",
- (version >> 4), version & 0x0f);
- } else {
- version = 0;
- MXM_DBG(dev, "WMMX MXMI returned non-integer\n");
- }
-
- kfree(obj);
- return version;
-}
-
-static bool
-mxm_shadow_wmi(struct drm_device *dev, u8 version)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
- struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
- struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- acpi_status status;
-
- if (!wmi_has_guid(WMI_WMMX_GUID)) {
- MXM_DBG(dev, "WMMX GUID not found\n");
- return false;
- }
-
- mxms_args[1] = wmi_wmmx_mxmi(dev, 0x00);
- if (!mxms_args[1])
- mxms_args[1] = wmi_wmmx_mxmi(dev, version);
- if (!mxms_args[1])
- return false;
-
- status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
- if (ACPI_FAILURE(status)) {
- MXM_DBG(dev, "WMMX MXMS returned %d\n", status);
- return false;
- }
-
- obj = retn.pointer;
- if (obj->type == ACPI_TYPE_BUFFER) {
- dev_priv->mxms = kmemdup(obj->buffer.pointer,
- obj->buffer.length, GFP_KERNEL);
- }
-
- kfree(obj);
- return dev_priv->mxms != NULL;
-}
-#endif
-
-struct mxm_shadow_h {
- const char *name;
- bool (*exec)(struct drm_device *, u8 version);
-} _mxm_shadow[] = {
- { "ROM", mxm_shadow_rom },
-#if defined(CONFIG_ACPI)
- { "DSM", mxm_shadow_dsm },
-#endif
-#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
- { "WMI", mxm_shadow_wmi },
-#endif
- {}
-};
-
-static int
-mxm_shadow(struct drm_device *dev, u8 version)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct mxm_shadow_h *shadow = _mxm_shadow;
- do {
- MXM_DBG(dev, "checking %s\n", shadow->name);
- if (shadow->exec(dev, version)) {
- if (mxms_valid(dev))
- return 0;
- kfree(dev_priv->mxms);
- dev_priv->mxms = NULL;
- }
- } while ((++shadow)->name);
- return -ENOENT;
-}
-
-int
-nouveau_mxm_init(struct drm_device *dev)
-{
- u8 mxm_size, *mxm = mxm_table(dev, &mxm_size);
- if (!mxm || !mxm[0]) {
- MXM_MSG(dev, "no VBIOS data, nothing to do\n");
- return 0;
- }
-
- MXM_MSG(dev, "BIOS version %d.%d\n", mxm[0] >> 4, mxm[0] & 0x0f);
-
- if (mxm_shadow(dev, mxm[0])) {
- MXM_MSG(dev, "failed to locate valid SIS\n");
-#if 0
- /* we should, perhaps, fall back to some kind of limited
- * mode here if the x86 vbios hasn't already done the
- * work for us (so we prevent loading with completely
- * whacked vbios tables).
- */
- return -EINVAL;
-#else
- return 0;
-#endif
- }
-
- MXM_MSG(dev, "MXMS Version %d.%d\n",
- mxms_version(dev) >> 8, mxms_version(dev) & 0xff);
- mxms_foreach(dev, 0, NULL, NULL);
-
- if (nouveau_mxmdcb)
- mxm_dcb_sanitise(dev);
- return 0;
-}
-
-void
-nouveau_mxm_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- kfree(dev_priv->mxms);
- dev_priv->mxms = NULL;
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2ef883c..5b39718 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,7 +34,6 @@ int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *ntfy = NULL;
uint32_t flags, ttmpl;
int ret;
@@ -47,7 +46,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
ttmpl = TTM_PL_FLAG_TT;
}
- ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
+ ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
if (ret)
return ret;
@@ -59,22 +58,14 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
if (ret)
goto out_err;
- if (dev_priv->card_type >= NV_50) {
- ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma);
- if (ret)
- goto out_err;
- }
-
ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
if (ret)
goto out_err;
chan->notifier_bo = ntfy;
out_err:
- if (ret) {
- nouveau_bo_vma_del(ntfy, &chan->notifier_vma);
+ if (ret)
drm_gem_object_unreference_unlocked(ntfy->gem);
- }
return ret;
}
@@ -87,7 +78,6 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
if (!chan->notifier_bo)
return;
- nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma);
nouveau_bo_unmap(chan->notifier_bo);
mutex_lock(&dev->struct_mutex);
nouveau_bo_unpin(chan->notifier_bo);
@@ -115,7 +105,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct drm_mm_node *mem;
- uint64_t offset;
+ uint32_t offset;
int target, ret;
mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
@@ -132,10 +122,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
target = NV_MEM_TARGET_VRAM;
else
target = NV_MEM_TARGET_GART;
- offset = chan->notifier_bo->bo.offset;
+ offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
} else {
target = NV_MEM_TARGET_VM;
- offset = chan->notifier_vma.offset;
+ offset = chan->notifier_bo->vma.offset;
}
offset += mem->start;
@@ -193,7 +183,7 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
if (unlikely(dev_priv->card_type >= NV_C0))
return -EINVAL;
- chan = nouveau_channel_get(file_priv, na->channel);
+ chan = nouveau_channel_get(dev, file_priv, na->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index cc419fa..8f97016 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -125,7 +125,7 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
int ret = -EINVAL;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
+ if (chid > 0 && chid < dev_priv->engine.fifo.channels)
chan = dev_priv->channels.ptr[chid];
if (chan)
ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
@@ -191,7 +191,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
spin_unlock(&dev_priv->ramin_lock);
- if (!(flags & NVOBJ_FLAG_VM) && chan) {
+ if (chan) {
ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
if (ramin)
ramin = drm_mm_get_block(ramin, size, align);
@@ -208,7 +208,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
gpuobj->vinst = ramin->start + chan->ramin->vinst;
gpuobj->node = ramin;
} else {
- ret = instmem->get(gpuobj, chan, size, align);
+ ret = instmem->get(gpuobj, size, align);
if (ret) {
nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
return ret;
}
- ret = drm_mm_init(&chan->ramin_heap, base, size - base);
+ ret = drm_mm_init(&chan->ramin_heap, base, size);
if (ret) {
NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
nouveau_gpuobj_ref(NULL, &chan->ramin);
@@ -690,68 +690,35 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
return 0;
}
-static int
-nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *pgd = NULL;
- struct nouveau_vm_pgd *vpgd;
- int ret, i;
-
- ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
- if (ret)
- return ret;
-
- /* create page directory for this vm if none currently exists,
- * will be destroyed automagically when last reference to the
- * vm is removed
- */
- if (list_empty(&vm->pgd_list)) {
- ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
- if (ret)
- return ret;
- }
- nouveau_vm_ref(vm, &chan->vm, pgd);
- nouveau_gpuobj_ref(NULL, &pgd);
-
- /* point channel at vm's page directory */
- vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
- nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
- nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
- nv_wo32(chan->ramin, 0x0208, 0xffffffff);
- nv_wo32(chan->ramin, 0x020c, 0x000000ff);
-
- /* map display semaphore buffers into channel's vm */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo;
- if (dev_priv->card_type >= NV_D0)
- bo = nvd0_display_crtc_sema(dev, i);
- else
- bo = nv50_display(dev)->crtc[i].sem.bo;
-
- ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
uint32_t vram_h, uint32_t tt_h)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
- struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
int ret, i;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
- if (dev_priv->card_type >= NV_C0)
- return nvc0_gpuobj_channel_init(chan, vm);
+
+ if (dev_priv->card_type == NV_C0) {
+ struct nouveau_vm *vm = dev_priv->chan_vm;
+ struct nouveau_vm_pgd *vpgd;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
+ &chan->ramin);
+ if (ret)
+ return ret;
+
+ nouveau_vm_ref(vm, &chan->vm, NULL);
+
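+ /* write the VM's page directory address and limit into the channel's instance block */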
+ vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
+ nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
+ nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
+ nv_wo32(chan->ramin, 0x0208, 0xffffffff);
+ nv_wo32(chan->ramin, 0x020c, 0x000000ff);
+ return 0;
+ }
/* Allocate a chunk of memory for per-channel object storage */
ret = nouveau_gpuobj_channel_init_pramin(chan);
@@ -764,7 +731,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
* - Allocate per-channel page-directory
* - Link with shared channel VM
*/
- if (vm) {
+ if (dev_priv->chan_vm) {
u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
u64 vm_vinst = chan->ramin->vinst + pgd_offs;
u32 vm_pinst = chan->ramin->pinst;
@@ -777,7 +744,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
if (ret)
return ret;
- nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
+ nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
}
/* RAMHT */
@@ -797,11 +764,11 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
return ret;
/* dma objects for display sync channel semaphore blocks */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ for (i = 0; i < 2; i++) {
struct nouveau_gpuobj *sem = NULL;
struct nv50_display_crtc *dispc =
&nv50_display(dev)->crtc[i];
- u64 offset = dispc->sem.bo->bo.offset;
+ u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
NV_MEM_ACCESS_RW,
@@ -874,24 +841,10 @@ void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (dev_priv->card_type >= NV_D0) {
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
- }
- } else
- if (dev_priv->card_type >= NV_50) {
- struct nv50_display *disp = nv50_display(dev);
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nv50_display_crtc *dispc = &disp->crtc[i];
- nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
- }
- }
+ nouveau_ramht_ref(NULL, &chan->ramht, chan);
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
@@ -956,7 +909,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
if (init->handle == ~0)
return -EINVAL;
- chan = nouveau_channel_get(file_priv, init->channel);
+ chan = nouveau_channel_get(dev, file_priv, init->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
@@ -983,7 +936,7 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
struct nouveau_channel *chan;
int ret;
- chan = nouveau_channel_get(file_priv, objfree->channel);
+ chan = nouveau_channel_get(dev, file_priv, objfree->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 69a528d..ef9dec0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -27,178 +27,6 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
-static u8 *
-nouveau_perf_table(struct drm_device *dev, u8 *ver)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- struct bit_entry P;
-
- if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) {
- u8 *perf = ROMPTR(dev, P.data[0]);
- if (perf) {
- *ver = perf[0];
- return perf;
- }
- }
-
- if (bios->type == NVBIOS_BMP) {
- if (bios->data[bios->offset + 6] >= 0x25) {
- u8 *perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
- if (perf) {
- *ver = perf[1];
- return perf;
- }
- }
- }
-
- return NULL;
-}
-
-static u8 *
-nouveau_perf_entry(struct drm_device *dev, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
-{
- u8 *perf = nouveau_perf_table(dev, ver);
- if (perf) {
- if (*ver >= 0x12 && *ver < 0x20 && idx < perf[2]) {
- *hdr = perf[3];
- *cnt = 0;
- *len = 0;
- return perf + perf[0] + idx * perf[3];
- } else
- if (*ver >= 0x20 && *ver < 0x40 && idx < perf[2]) {
- *hdr = perf[3];
- *cnt = perf[4];
- *len = perf[5];
- return perf + perf[1] + idx * (*hdr + (*cnt * *len));
- } else
- if (*ver >= 0x40 && *ver < 0x41 && idx < perf[5]) {
- *hdr = perf[2];
- *cnt = perf[4];
- *len = perf[3];
- return perf + perf[1] + idx * (*hdr + (*cnt * *len));
- }
- }
- return NULL;
-}
-
-static u8 *
-nouveau_perf_rammap(struct drm_device *dev, u32 freq,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct bit_entry P;
- u8 *perf, i = 0;
-
- if (!bit_table(dev, 'P', &P) && P.version == 2) {
- u8 *rammap = ROMPTR(dev, P.data[4]);
- if (rammap) {
- u8 *ramcfg = rammap + rammap[1];
-
- *ver = rammap[0];
- *hdr = rammap[2];
- *cnt = rammap[4];
- *len = rammap[3];
-
- freq /= 1000;
- for (i = 0; i < rammap[5]; i++) {
- if (freq >= ROM16(ramcfg[0]) &&
- freq <= ROM16(ramcfg[2]))
- return ramcfg;
-
- ramcfg += *hdr + (*cnt * *len);
- }
- }
-
- return NULL;
- }
-
- if (dev_priv->chipset == 0x49 ||
- dev_priv->chipset == 0x4b)
- freq /= 2;
-
- while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) {
- if (*ver >= 0x20 && *ver < 0x25) {
- if (perf[0] != 0xff && freq <= ROM16(perf[11]) * 1000)
- break;
- } else
- if (*ver >= 0x25 && *ver < 0x40) {
- if (perf[0] != 0xff && freq <= ROM16(perf[12]) * 1000)
- break;
- }
- }
-
- if (perf) {
- u8 *ramcfg = perf + *hdr;
- *ver = 0x00;
- *hdr = 0;
- return ramcfg;
- }
-
- return NULL;
-}
-
-u8 *
-nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- u8 strap, hdr, cnt;
- u8 *rammap;
-
- strap = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
- if (bios->ram_restrict_tbl_ptr)
- strap = bios->data[bios->ram_restrict_tbl_ptr + strap];
-
- rammap = nouveau_perf_rammap(dev, freq, ver, &hdr, &cnt, len);
- if (rammap && strap < cnt)
- return rammap + hdr + (strap * *len);
-
- return NULL;
-}
-
-u8 *
-nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- struct bit_entry P;
- u8 *perf, *timing = NULL;
- u8 i = 0, hdr, cnt;
-
- if (bios->type == NVBIOS_BMP) {
- while ((perf = nouveau_perf_entry(dev, i++, ver, &hdr, &cnt,
- len)) && *ver == 0x15) {
- if (freq <= ROM32(perf[5]) * 20) {
- *ver = 0x00;
- *len = 14;
- return perf + 41;
- }
- }
- return NULL;
- }
-
- if (!bit_table(dev, 'P', &P)) {
- if (P.version == 1)
- timing = ROMPTR(dev, P.data[4]);
- else
- if (P.version == 2)
- timing = ROMPTR(dev, P.data[8]);
- }
-
- if (timing && timing[0] == 0x10) {
- u8 *ramcfg = nouveau_perf_ramcfg(dev, freq, ver, len);
- if (ramcfg && ramcfg[1] < timing[2]) {
- *ver = timing[0];
- *len = timing[3];
- return timing + timing[1] + (ramcfg[1] * timing[3]);
- }
- }
-
- return NULL;
-}
-
static void
legacy_perf_init(struct drm_device *dev)
{
@@ -213,7 +41,7 @@ legacy_perf_init(struct drm_device *dev)
return;
}
- perf = ROMPTR(dev, bmp[0x73]);
+ perf = ROMPTR(bios, bmp[0x73]);
if (!perf) {
NV_DEBUG(dev, "No memclock table pointer found.\n");
return;
@@ -244,46 +72,66 @@ legacy_perf_init(struct drm_device *dev)
pm->nr_perflvl = 1;
}
-static void
-nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+static struct nouveau_pm_memtiming *
+nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
+ u16 memclk, u8 *entry, u8 recordlen, u8 entries)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct bit_entry P;
- u8 *vmap;
- int id;
-
- id = perflvl->volt_min;
- perflvl->volt_min = 0;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nvbios *bios = &dev_priv->vbios;
+ u8 ramcfg;
+ int i;
- /* boards using voltage table version <0x40 store the voltage
- * level directly in the perflvl entry as a multiple of 10mV
+ /* perf v2 has a separate "timing map" table, we have to match
+ * the target memory clock to a specific entry, *then* use
+ * ramcfg to select the correct subentry
*/
- if (dev_priv->engine.pm.voltage.version < 0x40) {
- perflvl->volt_min = id * 10000;
- perflvl->volt_max = perflvl->volt_min;
- return;
- }
+ if (P->version == 2) {
+ u8 *tmap = ROMPTR(bios, P->data[4]);
+ if (!tmap) {
+ NV_DEBUG(dev, "no timing map pointer\n");
+ return NULL;
+ }
- /* on newer ones, the perflvl stores an index into yet another
- * vbios table containing a min/max voltage value for the perflvl
- */
- if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) {
- NV_DEBUG(dev, "where's our volt map table ptr? %d %d\n",
- P.version, P.length);
- return;
+ if (tmap[0] != 0x10) {
+ NV_WARN(dev, "timing map 0x%02x unknown\n", tmap[0]);
+ return NULL;
+ }
+
+ entry = tmap + tmap[1];
+ recordlen = tmap[2] + (tmap[4] * tmap[3]);
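+ /* scan the timing map for the entry whose clock range contains memclk */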
+ for (i = 0; i < tmap[5]; i++, entry += recordlen) {
+ if (memclk >= ROM16(entry[0]) &&
+ memclk <= ROM16(entry[2]))
+ break;
+ }
+
+ if (i == tmap[5]) {
+ NV_WARN(dev, "no match in timing map table\n");
+ return NULL;
+ }
+
+ entry += tmap[2];
+ recordlen = tmap[3];
+ entries = tmap[4];
}
- vmap = ROMPTR(dev, P.data[32]);
- if (!vmap) {
- NV_DEBUG(dev, "volt map table pointer invalid\n");
- return;
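+ /* the ramcfg strap (possibly remapped) picks the board-specific subentry */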
+ ramcfg = (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
+ if (bios->ram_restrict_tbl_ptr)
+ ramcfg = bios->data[bios->ram_restrict_tbl_ptr + ramcfg];
+
+ if (ramcfg >= entries) {
+ NV_WARN(dev, "ramcfg strap out of bounds!\n");
+ return NULL;
}
- if (id < vmap[3]) {
- vmap += vmap[1] + (vmap[2] * id);
- perflvl->volt_min = ROM32(vmap[0]);
- perflvl->volt_max = ROM32(vmap[4]);
+ entry += ramcfg * recordlen;
+ if (entry[1] >= pm->memtimings.nr_timing) {
+ NV_WARN(dev, "timingset %d does not exist\n", entry[1]);
+ return NULL;
}
+
+ return &pm->memtimings.timing[entry[1]];
}
void
@@ -292,120 +140,153 @@ nouveau_perf_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct nvbios *bios = &dev_priv->vbios;
- u8 *perf, ver, hdr, cnt, len;
- int ret, vid, i = -1;
+ struct bit_entry P;
+ u8 version, headerlen, recordlen, entries;
+ u8 *perf, *entry;
+ int vid, i;
- if (bios->type == NVBIOS_BMP && bios->data[bios->offset + 6] < 0x25) {
- legacy_perf_init(dev);
- return;
+ if (bios->type == NVBIOS_BIT) {
+ if (bit_table(dev, 'P', &P))
+ return;
+
+ if (P.version != 1 && P.version != 2) {
+ NV_WARN(dev, "unknown perf for BIT P %d\n", P.version);
+ return;
+ }
+
+ perf = ROMPTR(bios, P.data[0]);
+ version = perf[0];
+ headerlen = perf[1];
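+ /* version 0x40 moved the record-size and entry-count fields in the header */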
+ if (version < 0x40) {
+ recordlen = perf[3] + (perf[4] * perf[5]);
+ entries = perf[2];
+ } else {
+ recordlen = perf[2] + (perf[3] * perf[4]);
+ entries = perf[5];
+ }
+ } else {
+ if (bios->data[bios->offset + 6] < 0x25) {
+ legacy_perf_init(dev);
+ return;
+ }
+
+ perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
+ if (!perf) {
+ NV_DEBUG(dev, "perf table pointer invalid\n");
+ return;
+ }
+
+ version = perf[1];
+ headerlen = perf[0];
+ recordlen = perf[3];
+ entries = perf[2];
}
- perf = nouveau_perf_table(dev, &ver);
- if (ver >= 0x20 && ver < 0x40)
- pm->fan.pwm_divisor = ROM16(perf[6]);
+ if (entries > NOUVEAU_PM_MAX_LEVEL) {
+ NV_DEBUG(dev, "perf table has too many entries - buggy vbios?\n");
+ entries = NOUVEAU_PM_MAX_LEVEL;
+ }
- while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) {
+ entry = perf + headerlen;
+ for (i = 0; i < entries; i++) {
struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
- if (perf[0] == 0xff)
+ perflvl->timing = NULL;
+
+ if (entry[0] == 0xff) {
+ entry += recordlen;
continue;
+ }
- switch (ver) {
+ switch (version) {
case 0x12:
case 0x13:
case 0x15:
- perflvl->fanspeed = perf[55];
- if (hdr > 56)
- perflvl->volt_min = perf[56];
- perflvl->core = ROM32(perf[1]) * 10;
- perflvl->memory = ROM32(perf[5]) * 20;
+ perflvl->fanspeed = entry[55];
+ perflvl->voltage = (recordlen > 56) ? entry[56] : 0;
+ perflvl->core = ROM32(entry[1]) * 10;
+ perflvl->memory = ROM32(entry[5]) * 20;
break;
case 0x21:
case 0x23:
case 0x24:
- perflvl->fanspeed = perf[4];
- perflvl->volt_min = perf[5];
- perflvl->shader = ROM16(perf[6]) * 1000;
- perflvl->core = perflvl->shader;
- perflvl->core += (signed char)perf[8] * 1000;
+ perflvl->fanspeed = entry[4];
+ perflvl->voltage = entry[5];
+ perflvl->core = ROM16(entry[6]) * 1000;
+
if (dev_priv->chipset == 0x49 ||
dev_priv->chipset == 0x4b)
- perflvl->memory = ROM16(perf[11]) * 1000;
+ perflvl->memory = ROM16(entry[11]) * 1000;
else
- perflvl->memory = ROM16(perf[11]) * 2000;
+ perflvl->memory = ROM16(entry[11]) * 2000;
+
break;
case 0x25:
- perflvl->fanspeed = perf[4];
- perflvl->volt_min = perf[5];
- perflvl->core = ROM16(perf[6]) * 1000;
- perflvl->shader = ROM16(perf[10]) * 1000;
- perflvl->memory = ROM16(perf[12]) * 1000;
+ perflvl->fanspeed = entry[4];
+ perflvl->voltage = entry[5];
+ perflvl->core = ROM16(entry[6]) * 1000;
+ perflvl->shader = ROM16(entry[10]) * 1000;
+ perflvl->memory = ROM16(entry[12]) * 1000;
break;
case 0x30:
- perflvl->memscript = ROM16(perf[2]);
+ perflvl->memscript = ROM16(entry[2]);
case 0x35:
- perflvl->fanspeed = perf[6];
- perflvl->volt_min = perf[7];
- perflvl->core = ROM16(perf[8]) * 1000;
- perflvl->shader = ROM16(perf[10]) * 1000;
- perflvl->memory = ROM16(perf[12]) * 1000;
- perflvl->vdec = ROM16(perf[16]) * 1000;
- perflvl->dom6 = ROM16(perf[20]) * 1000;
+ perflvl->fanspeed = entry[6];
+ perflvl->voltage = entry[7];
+ perflvl->core = ROM16(entry[8]) * 1000;
+ perflvl->shader = ROM16(entry[10]) * 1000;
+ perflvl->memory = ROM16(entry[12]) * 1000;
+ /*XXX: confirm on 0x35 */
+ perflvl->unk05 = ROM16(entry[16]) * 1000;
break;
case 0x40:
-#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000)
+#define subent(n) entry[perf[2] + ((n) * perf[3])]
perflvl->fanspeed = 0; /*XXX*/
- perflvl->volt_min = perf[2];
+ perflvl->voltage = entry[2];
if (dev_priv->card_type == NV_50) {
- perflvl->core = subent(0);
- perflvl->shader = subent(1);
- perflvl->memory = subent(2);
- perflvl->vdec = subent(3);
- perflvl->unka0 = subent(4);
+ perflvl->core = ROM16(subent(0)) & 0xfff;
+ perflvl->shader = ROM16(subent(1)) & 0xfff;
+ perflvl->memory = ROM16(subent(2)) & 0xfff;
} else {
- perflvl->hub06 = subent(0);
- perflvl->hub01 = subent(1);
- perflvl->copy = subent(2);
- perflvl->shader = subent(3);
- perflvl->rop = subent(4);
- perflvl->memory = subent(5);
- perflvl->vdec = subent(6);
- perflvl->daemon = subent(10);
- perflvl->hub07 = subent(11);
+ perflvl->shader = ROM16(subent(3)) & 0xfff;
perflvl->core = perflvl->shader / 2;
+ perflvl->unk0a = ROM16(subent(4)) & 0xfff;
+ perflvl->memory = ROM16(subent(5)) & 0xfff;
}
+
+ perflvl->core *= 1000;
+ perflvl->shader *= 1000;
+ perflvl->memory *= 1000;
+ perflvl->unk0a *= 1000;
break;
}
/* make sure vid is valid */
- nouveau_perf_voltage(dev, perflvl);
- if (pm->voltage.supported && perflvl->volt_min) {
- vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
+ if (pm->voltage.supported && perflvl->voltage) {
+ vid = nouveau_volt_vid_lookup(dev, perflvl->voltage);
if (vid < 0) {
- NV_DEBUG(dev, "perflvl %d, bad vid\n", i);
+ NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i);
+ entry += recordlen;
continue;
}
}
/* get the corresponding memory timings */
- ret = nouveau_mem_timing_calc(dev, perflvl->memory,
- &perflvl->timing);
- if (ret) {
- NV_DEBUG(dev, "perflvl %d, bad timing: %d\n", i, ret);
- continue;
+ if (version > 0x15) {
+ /* last 3 args are for < 0x40, ignored for >= 0x40 */
+ perflvl->timing =
+ nouveau_perf_timing(dev, &P,
+ perflvl->memory / 1000,
+ entry + perf[3],
+ perf[5], perf[4]);
}
snprintf(perflvl->name, sizeof(perflvl->name),
"performance_level_%d", i);
perflvl->id = i;
-
- snprintf(perflvl->profile.name, sizeof(perflvl->profile.name),
- "%d", perflvl->id);
- perflvl->profile.func = &nouveau_pm_static_profile_func;
- list_add_tail(&perflvl->profile.head, &pm->profiles);
-
-
pm->nr_perflvl++;
+
+ entry += recordlen;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 34d591b..da8d994 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -26,7 +26,6 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
-#include "nouveau_gpio.h"
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
@@ -36,98 +35,22 @@
#include <linux/hwmon-sysfs.h>
static int
-nouveau_pwmfan_get(struct drm_device *dev)
+nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ u8 id, u32 khz)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- struct gpio_func gpio;
- u32 divs, duty;
- int ret;
-
- if (!pm->pwm_get)
- return -ENODEV;
-
- ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
- if (ret == 0) {
- ret = pm->pwm_get(dev, gpio.line, &divs, &duty);
- if (ret == 0 && divs) {
- divs = max(divs, duty);
- if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
- duty = divs - duty;
- return (duty * 100) / divs;
- }
-
- return nouveau_gpio_func_get(dev, gpio.func) * 100;
- }
-
- return -ENODEV;
-}
-
-static int
-nouveau_pwmfan_set(struct drm_device *dev, int percent)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- struct gpio_func gpio;
- u32 divs, duty;
- int ret;
-
- if (!pm->pwm_set)
- return -ENODEV;
-
- ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
- if (ret == 0) {
- divs = pm->fan.pwm_divisor;
- if (pm->fan.pwm_freq) {
- /*XXX: PNVIO clock more than likely... */
- divs = 135000 / pm->fan.pwm_freq;
- if (dev_priv->chipset < 0xa3)
- divs /= 4;
- }
-
- duty = ((divs * percent) + 99) / 100;
- if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
- duty = divs - duty;
-
- ret = pm->pwm_set(dev, gpio.line, divs, duty);
- if (!ret)
- pm->fan.percent = percent;
- return ret;
- }
-
- return -ENODEV;
-}
-
-static int
-nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- struct nouveau_pm_level *a, struct nouveau_pm_level *b)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- int ret;
+ void *pre_state;
- /*XXX: not on all boards, we should control based on temperature
- * on recent boards.. or maybe on some other factor we don't
- * know about?
- */
- if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
- ret = nouveau_pwmfan_set(dev, perflvl->fanspeed);
- if (ret && ret != -ENODEV) {
- NV_ERROR(dev, "fanspeed set failed: %d\n", ret);
- return ret;
- }
- }
+ if (khz == 0)
+ return 0;
- if (pm->voltage.supported && pm->voltage_set) {
- if (perflvl->volt_min && b->volt_min > a->volt_min) {
- ret = pm->voltage_set(dev, perflvl->volt_min);
- if (ret) {
- NV_ERROR(dev, "voltage set failed: %d\n", ret);
- return ret;
- }
- }
- }
+ pre_state = pm->clock_pre(dev, perflvl, id, khz);
+ if (IS_ERR(pre_state))
+ return PTR_ERR(pre_state);
+ if (pre_state)
+ pm->clock_set(dev, pre_state);
return 0;
}
@@ -136,90 +59,26 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- void *state;
int ret;
if (perflvl == pm->cur)
return 0;
- ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
- if (ret)
- return ret;
-
- state = pm->clocks_pre(dev, perflvl);
- if (IS_ERR(state)) {
- ret = PTR_ERR(state);
- goto error;
+ if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) {
+ ret = pm->voltage_set(dev, perflvl->voltage);
+ if (ret) {
+ NV_ERROR(dev, "voltage_set %d failed: %d\n",
+ perflvl->voltage, ret);
+ }
}
- ret = pm->clocks_set(dev, state);
- if (ret)
- goto error;
- ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
- if (ret)
- return ret;
+ nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
+ nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
+ nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
+ nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
pm->cur = perflvl;
return 0;
-
-error:
- /* restore the fan speed and voltage before leaving */
- nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
- return ret;
-}
-
-void
-nouveau_pm_trigger(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- struct nouveau_pm_profile *profile = NULL;
- struct nouveau_pm_level *perflvl = NULL;
- int ret;
-
- /* select power profile based on current power source */
- if (power_supply_is_system_supplied())
- profile = pm->profile_ac;
- else
- profile = pm->profile_dc;
-
- if (profile != pm->profile) {
- pm->profile->func->fini(pm->profile);
- pm->profile = profile;
- pm->profile->func->init(pm->profile);
- }
-
- /* select performance level based on profile */
- perflvl = profile->func->select(profile);
-
- /* change perflvl, if necessary */
- if (perflvl != pm->cur) {
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- u64 time0 = ptimer->read(dev);
-
- NV_INFO(dev, "setting performance level: %d", perflvl->id);
- ret = nouveau_pm_perflvl_set(dev, perflvl);
- if (ret)
- NV_INFO(dev, "> reclocking failed: %d\n\n", ret);
-
- NV_INFO(dev, "> reclocking took %lluns\n\n",
- ptimer->read(dev) - time0);
- }
-}
-
-static struct nouveau_pm_profile *
-profile_find(struct drm_device *dev, const char *string)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- struct nouveau_pm_profile *profile;
-
- list_for_each_entry(profile, &pm->profiles, head) {
- if (!strncmp(profile->name, string, sizeof(profile->name)))
- return profile;
- }
-
- return NULL;
}
static int
@@ -227,53 +86,35 @@ nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- struct nouveau_pm_profile *ac = NULL, *dc = NULL;
- char string[16], *cur = string, *ptr;
+ struct nouveau_pm_level *perflvl = NULL;
/* safety precaution, for now */
if (nouveau_perflvl_wr != 7777)
return -EPERM;
- strncpy(string, profile, sizeof(string));
- if ((ptr = strchr(string, '\n')))
- *ptr = '\0';
-
- ptr = strsep(&cur, ",");
- if (ptr)
- ac = profile_find(dev, ptr);
-
- ptr = strsep(&cur, ",");
- if (ptr)
- dc = profile_find(dev, ptr);
- else
- dc = ac;
-
- if (ac == NULL || dc == NULL)
+ if (!pm->clock_set)
return -EINVAL;
- pm->profile_ac = ac;
- pm->profile_dc = dc;
- nouveau_pm_trigger(dev);
- return 0;
-}
+ if (!strncmp(profile, "boot", 4))
+ perflvl = &pm->boot;
+ else {
+ int pl = simple_strtol(profile, NULL, 10);
+ int i;
-static void
-nouveau_pm_static_dummy(struct nouveau_pm_profile *profile)
-{
-}
+ for (i = 0; i < pm->nr_perflvl; i++) {
+ if (pm->perflvl[i].id == pl) {
+ perflvl = &pm->perflvl[i];
+ break;
+ }
+ }
-static struct nouveau_pm_level *
-nouveau_pm_static_select(struct nouveau_pm_profile *profile)
-{
- return container_of(profile, struct nouveau_pm_level, profile);
-}
+ if (!perflvl)
+ return -EINVAL;
+ }
-const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = {
- .destroy = nouveau_pm_static_dummy,
- .init = nouveau_pm_static_dummy,
- .fini = nouveau_pm_static_dummy,
- .select = nouveau_pm_static_select,
-};
+ NV_INFO(dev, "setting performance level: %s\n", profile);
+ return nouveau_pm_perflvl_set(dev, perflvl);
+}
static int
nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
@@ -282,34 +123,40 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
int ret;
+ if (!pm->clock_get)
+ return -EINVAL;
+
memset(perflvl, 0, sizeof(*perflvl));
- if (pm->clocks_get) {
- ret = pm->clocks_get(dev, perflvl);
- if (ret)
- return ret;
- }
+ ret = pm->clock_get(dev, PLL_CORE);
+ if (ret > 0)
+ perflvl->core = ret;
+
+ ret = pm->clock_get(dev, PLL_MEMORY);
+ if (ret > 0)
+ perflvl->memory = ret;
+
+ ret = pm->clock_get(dev, PLL_SHADER);
+ if (ret > 0)
+ perflvl->shader = ret;
+
+ ret = pm->clock_get(dev, PLL_UNK05);
+ if (ret > 0)
+ perflvl->unk05 = ret;
if (pm->voltage.supported && pm->voltage_get) {
ret = pm->voltage_get(dev);
- if (ret > 0) {
- perflvl->volt_min = ret;
- perflvl->volt_max = ret;
- }
+ if (ret > 0)
+ perflvl->voltage = ret;
}
- ret = nouveau_pwmfan_get(dev);
- if (ret > 0)
- perflvl->fanspeed = ret;
-
- nouveau_mem_timing_read(dev, &perflvl->timing);
return 0;
}
static void
nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
{
- char c[16], s[16], v[32], f[16], m[16];
+ char c[16], s[16], v[16], f[16], t[16];
c[0] = '\0';
if (perflvl->core)
@@ -319,37 +166,31 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
if (perflvl->shader)
snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
- m[0] = '\0';
- if (perflvl->memory)
- snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000);
-
v[0] = '\0';
- if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) {
- snprintf(v, sizeof(v), " voltage %dmV-%dmV",
- perflvl->volt_min / 1000, perflvl->volt_max / 1000);
- } else
- if (perflvl->volt_min) {
- snprintf(v, sizeof(v), " voltage %dmV",
- perflvl->volt_min / 1000);
- }
+ if (perflvl->voltage)
+ snprintf(v, sizeof(v), " voltage %dmV", perflvl->voltage * 10);
f[0] = '\0';
if (perflvl->fanspeed)
snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
- snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f);
+ t[0] = '\0';
+ if (perflvl->timing)
+ snprintf(t, sizeof(t), " timing %d", perflvl->timing->id);
+
+ snprintf(ptr, len, "memory %dMHz%s%s%s%s%s\n", perflvl->memory / 1000,
+ c, s, v, f, t);
}
static ssize_t
nouveau_pm_get_perflvl_info(struct device *d,
struct device_attribute *a, char *buf)
{
- struct nouveau_pm_level *perflvl =
- container_of(a, struct nouveau_pm_level, dev_attr);
+ struct nouveau_pm_level *perflvl = (struct nouveau_pm_level *)a;
char *ptr = buf;
int len = PAGE_SIZE;
- snprintf(ptr, len, "%d:", perflvl->id);
+ snprintf(ptr, len, "%d: ", perflvl->id);
ptr += strlen(buf);
len -= strlen(buf);
@@ -367,8 +208,12 @@ nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
int len = PAGE_SIZE, ret;
char *ptr = buf;
- snprintf(ptr, len, "profile: %s, %s\nc:",
- pm->profile_ac->name, pm->profile_dc->name);
+ if (!pm->cur)
+ snprintf(ptr, len, "setting: boot\n");
+ else if (pm->cur == &pm->boot)
+ snprintf(ptr, len, "setting: boot\nc: ");
+ else
+ snprintf(ptr, len, "setting: static %d\nc: ", pm->cur->id);
ptr += strlen(buf);
len -= strlen(buf);
@@ -447,7 +292,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
}
}
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+#ifdef CONFIG_HWMON
static ssize_t
nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
{
@@ -480,7 +325,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
long value;
- if (kstrtol(buf, 10, &value) == -EINVAL)
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
return count;
temp->down_clock = value/1000;
@@ -515,7 +360,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
long value;
- if (kstrtol(buf, 10, &value) == -EINVAL)
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
return count;
temp->critical = value/1000;
@@ -547,172 +392,6 @@ static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
nouveau_hwmon_show_update_rate,
NULL, 0);
-static ssize_t
-nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *dev = dev_get_drvdata(d);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- struct gpio_func gpio;
- u32 cycles, cur, prev;
- u64 start;
- int ret;
-
- ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio);
- if (ret)
- return ret;
-
- /* Monitor the GPIO input 0x3b for 250ms.
- * When the fan spins, it changes the value of GPIO FAN_SENSE.
- * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation.
- */
- start = ptimer->read(dev);
- prev = nouveau_gpio_sense(dev, 0, gpio.line);
- cycles = 0;
- do {
- cur = nouveau_gpio_sense(dev, 0, gpio.line);
- if (prev != cur) {
- cycles++;
- prev = cur;
- }
-
- usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
- } while (ptimer->read(dev) - start < 250000000);
-
- /* interpolate to get rpm */
- return sprintf(buf, "%i\n", cycles / 4 * 4 * 60);
-}
-static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
- NULL, 0);
-
-static ssize_t
-nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf)
-{
- struct drm_device *dev = dev_get_drvdata(d);
- int ret;
-
- ret = nouveau_pwmfan_get(dev);
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t
-nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
- const char *buf, size_t count)
-{
- struct drm_device *dev = dev_get_drvdata(d);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- int ret = -ENODEV;
- long value;
-
- if (nouveau_perflvl_wr != 7777)
- return -EPERM;
-
- if (kstrtol(buf, 10, &value) == -EINVAL)
- return -EINVAL;
-
- if (value < pm->fan.min_duty)
- value = pm->fan.min_duty;
- if (value > pm->fan.max_duty)
- value = pm->fan.max_duty;
-
- ret = nouveau_pwmfan_set(dev, value);
- if (ret)
- return ret;
-
- return count;
-}
-
-static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR,
- nouveau_hwmon_get_pwm0,
- nouveau_hwmon_set_pwm0, 0);
-
-static ssize_t
-nouveau_hwmon_get_pwm0_min(struct device *d,
- struct device_attribute *a, char *buf)
-{
- struct drm_device *dev = dev_get_drvdata(d);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-
- return sprintf(buf, "%i\n", pm->fan.min_duty);
-}
-
-static ssize_t
-nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a,
- const char *buf, size_t count)
-{
- struct drm_device *dev = dev_get_drvdata(d);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- long value;
-
- if (kstrtol(buf, 10, &value) == -EINVAL)
- return -EINVAL;
-
- if (value < 0)
- value = 0;
-
- if (pm->fan.max_duty - value < 10)
- value = pm->fan.max_duty - 10;
-
- if (value < 10)
- pm->fan.min_duty = 10;
- else
- pm->fan.min_duty = value;
-
- return count;
-}
-
-static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR,
- nouveau_hwmon_get_pwm0_min,
- nouveau_hwmon_set_pwm0_min, 0);
-
-static ssize_t
-nouveau_hwmon_get_pwm0_max(struct device *d,
- struct device_attribute *a, char *buf)
-{
- struct drm_device *dev = dev_get_drvdata(d);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-
- return sprintf(buf, "%i\n", pm->fan.max_duty);
-}
-
-static ssize_t
-nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a,
- const char *buf, size_t count)
-{
- struct drm_device *dev = dev_get_drvdata(d);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- long value;
-
- if (kstrtol(buf, 10, &value) == -EINVAL)
- return -EINVAL;
-
- if (value < 0)
- value = 0;
-
- if (value - pm->fan.min_duty < 10)
- value = pm->fan.min_duty + 10;
-
- if (value > 100)
- pm->fan.max_duty = 100;
- else
- pm->fan.max_duty = value;
-
- return count;
-}
-
-static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR,
- nouveau_hwmon_get_pwm0_max,
- nouveau_hwmon_set_pwm0_max, 0);
-
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -721,36 +400,20 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_update_rate.dev_attr.attr,
NULL
};
-static struct attribute *hwmon_fan_rpm_attributes[] = {
- &sensor_dev_attr_fan0_input.dev_attr.attr,
- NULL
-};
-static struct attribute *hwmon_pwm_fan_attributes[] = {
- &sensor_dev_attr_pwm0.dev_attr.attr,
- &sensor_dev_attr_pwm0_min.dev_attr.attr,
- &sensor_dev_attr_pwm0_max.dev_attr.attr,
- NULL
-};
static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes,
};
-static const struct attribute_group hwmon_fan_rpm_attrgroup = {
- .attrs = hwmon_fan_rpm_attributes,
-};
-static const struct attribute_group hwmon_pwm_fan_attrgroup = {
- .attrs = hwmon_pwm_fan_attributes,
-};
#endif
static int
nouveau_hwmon_init(struct drm_device *dev)
{
+#ifdef CONFIG_HWMON
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct device *hwmon_dev;
- int ret = 0;
+ int ret;
if (!pm->temp_get)
return -ENODEV;
@@ -763,62 +426,28 @@ nouveau_hwmon_init(struct drm_device *dev)
return ret;
}
dev_set_drvdata(hwmon_dev, dev);
-
- /* default sysfs entries */
ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
if (ret) {
- if (ret)
- goto error;
- }
-
- /* if the card has a pwm fan */
- /*XXX: incorrect, need better detection for this, some boards have
- * the gpio entries for pwm fan control even when there's no
- * actual fan connected to it... therm table? */
- if (nouveau_pwmfan_get(dev) >= 0) {
- ret = sysfs_create_group(&dev->pdev->dev.kobj,
- &hwmon_pwm_fan_attrgroup);
- if (ret)
- goto error;
- }
-
- /* if the card can read the fan rpm */
- if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) {
- ret = sysfs_create_group(&dev->pdev->dev.kobj,
- &hwmon_fan_rpm_attrgroup);
- if (ret)
- goto error;
+ NV_ERROR(dev,
+ "Unable to create hwmon sysfs file: %d\n", ret);
+ hwmon_device_unregister(hwmon_dev);
+ return ret;
}
pm->hwmon = hwmon_dev;
-
- return 0;
-
-error:
- NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret);
- hwmon_device_unregister(hwmon_dev);
- pm->hwmon = NULL;
- return ret;
-#else
- pm->hwmon = NULL;
- return 0;
#endif
+ return 0;
}
static void
nouveau_hwmon_fini(struct drm_device *dev)
{
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+#ifdef CONFIG_HWMON
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
if (pm->hwmon) {
sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
- sysfs_remove_group(&dev->pdev->dev.kobj,
- &hwmon_pwm_fan_attrgroup);
- sysfs_remove_group(&dev->pdev->dev.kobj,
- &hwmon_fan_rpm_attrgroup);
-
hwmon_device_unregister(pm->hwmon);
}
#endif
@@ -837,7 +466,6 @@ nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
bool ac = power_supply_is_system_supplied();
NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
- nouveau_pm_trigger(dev);
}
return NOTIFY_OK;
@@ -852,48 +480,35 @@ nouveau_pm_init(struct drm_device *dev)
char info[256];
int ret, i;
- /* parse aux tables from vbios */
+ nouveau_mem_timing_init(dev);
nouveau_volt_init(dev);
- nouveau_temp_init(dev);
-
- /* determine current ("boot") performance level */
- ret = nouveau_pm_perflvl_get(dev, &pm->boot);
- if (ret) {
- NV_ERROR(dev, "failed to determine boot perflvl\n");
- return ret;
- }
-
- strncpy(pm->boot.name, "boot", 4);
- strncpy(pm->boot.profile.name, "boot", 4);
- pm->boot.profile.func = &nouveau_pm_static_profile_func;
-
- INIT_LIST_HEAD(&pm->profiles);
- list_add(&pm->boot.profile.head, &pm->profiles);
-
- pm->profile_ac = &pm->boot.profile;
- pm->profile_dc = &pm->boot.profile;
- pm->profile = &pm->boot.profile;
- pm->cur = &pm->boot;
-
- /* add performance levels from vbios */
nouveau_perf_init(dev);
+ nouveau_temp_init(dev);
- /* display available performance levels */
NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
for (i = 0; i < pm->nr_perflvl; i++) {
nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
- NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info);
+ NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info);
}
- nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
- NV_INFO(dev, "c:%s", info);
+ /* determine current ("boot") performance level */
+ ret = nouveau_pm_perflvl_get(dev, &pm->boot);
+ if (ret == 0) {
+ strncpy(pm->boot.name, "boot", 4);
+ pm->cur = &pm->boot;
- /* switch performance levels now if requested */
- if (nouveau_perflvl != NULL)
- nouveau_pm_profile_set(dev, nouveau_perflvl);
+ nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
+ NV_INFO(dev, "c: %s", info);
+ }
- /* determine the current fan speed */
- pm->fan.percent = nouveau_pwmfan_get(dev);
+ /* switch performance levels now if requested */
+ if (nouveau_perflvl != NULL) {
+ ret = nouveau_pm_profile_set(dev, nouveau_perflvl);
+ if (ret) {
+ NV_ERROR(dev, "error setting perflvl \"%s\": %d\n",
+ nouveau_perflvl, ret);
+ }
+ }
nouveau_sysfs_init(dev);
nouveau_hwmon_init(dev);
@@ -910,12 +525,6 @@ nouveau_pm_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- struct nouveau_pm_profile *profile, *tmp;
-
- list_for_each_entry_safe(profile, tmp, &pm->profiles, head) {
- list_del(&profile->head);
- profile->func->destroy(profile);
- }
if (pm->cur != &pm->boot)
nouveau_pm_perflvl_set(dev, &pm->boot);
@@ -923,6 +532,7 @@ nouveau_pm_fini(struct drm_device *dev)
nouveau_temp_fini(dev);
nouveau_perf_fini(dev);
nouveau_volt_fini(dev);
+ nouveau_mem_timing_fini(dev);
#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
unregister_acpi_notifier(&pm->acpi_nb);
@@ -944,5 +554,4 @@ nouveau_pm_resume(struct drm_device *dev)
perflvl = pm->cur;
pm->cur = &pm->boot;
nouveau_pm_perflvl_set(dev, perflvl);
- nouveau_pwmfan_set(dev, pm->fan.percent);
}
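
The restored nouveau_pm_clock_set() above leans on the kernel's ERR_PTR convention: the per-chipset clock_pre() hook returns either an opaque state object, NULL for "nothing to program", or an error code folded into the pointer value, and clock_set() then commits that state. A hedged sketch of what such a backend might look like (struct pll_state and the coefficient math are hypothetical placeholders, not the driver's actual types):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct pll_state { int n, m, p; };	/* hypothetical coefficients */

	static void *clock_pre_stub(int khz)
	{
		struct pll_state *state;

		if (khz == 0)
			return NULL;			/* caller skips clock_set() */
		if (khz > 1000000)
			return ERR_PTR(-EINVAL);	/* caller decodes via PTR_ERR() */

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return ERR_PTR(-ENOMEM);
		/* ... compute n/m/p for the requested frequency ... */
		return state;
	}
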
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 3f82dfe..4a9838dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -25,30 +25,10 @@
#ifndef __NOUVEAU_PM_H__
#define __NOUVEAU_PM_H__
-struct nouveau_mem_exec_func {
- struct drm_device *dev;
- void (*precharge)(struct nouveau_mem_exec_func *);
- void (*refresh)(struct nouveau_mem_exec_func *);
- void (*refresh_auto)(struct nouveau_mem_exec_func *, bool);
- void (*refresh_self)(struct nouveau_mem_exec_func *, bool);
- void (*wait)(struct nouveau_mem_exec_func *, u32 nsec);
- u32 (*mrg)(struct nouveau_mem_exec_func *, int mr);
- void (*mrs)(struct nouveau_mem_exec_func *, int mr, u32 data);
- void (*clock_set)(struct nouveau_mem_exec_func *);
- void (*timing_set)(struct nouveau_mem_exec_func *);
- void *priv;
-};
-
-/* nouveau_mem.c */
-int nouveau_mem_exec(struct nouveau_mem_exec_func *,
- struct nouveau_pm_level *);
-
/* nouveau_pm.c */
int nouveau_pm_init(struct drm_device *dev);
void nouveau_pm_fini(struct drm_device *dev);
void nouveau_pm_resume(struct drm_device *dev);
-extern const struct nouveau_pm_profile_func nouveau_pm_static_profile_func;
-void nouveau_pm_trigger(struct drm_device *dev);
/* nouveau_volt.c */
void nouveau_volt_init(struct drm_device *);
@@ -61,41 +41,28 @@ int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
/* nouveau_perf.c */
void nouveau_perf_init(struct drm_device *);
void nouveau_perf_fini(struct drm_device *);
-u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
-u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
/* nouveau_mem.c */
void nouveau_mem_timing_init(struct drm_device *);
void nouveau_mem_timing_fini(struct drm_device *);
/* nv04_pm.c */
-int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nv04_pm_clocks_set(struct drm_device *, void *);
-
-/* nv40_pm.c */
-int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nv40_pm_clocks_set(struct drm_device *, void *);
-int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
-int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
+int nv04_pm_clock_get(struct drm_device *, u32 id);
+void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+ u32 id, int khz);
+void nv04_pm_clock_set(struct drm_device *, void *);
/* nv50_pm.c */
-int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nv50_pm_clocks_set(struct drm_device *, void *);
-int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
-int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
+int nv50_pm_clock_get(struct drm_device *, u32 id);
+void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+ u32 id, int khz);
+void nv50_pm_clock_set(struct drm_device *, void *);
/* nva3_pm.c */
-int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nva3_pm_clocks_set(struct drm_device *, void *);
-
-/* nvc0_pm.c */
-int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nvc0_pm_clocks_set(struct drm_device *, void *);
+int nva3_pm_clock_get(struct drm_device *, u32 id);
+void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+ u32 id, int khz);
+void nva3_pm_clock_set(struct drm_device *, void *);
/* nouveau_temp.c */
void nouveau_temp_init(struct drm_device *dev);
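
The header changes above swap the newer clocks_get/clocks_pre/clocks_set trio, which operates on a whole perflvl at once, for the older per-PLL interface where each clock is addressed by an id. Reading a single clock then looks roughly like this (PLL_CORE comes from the pll_types enum in nouveau_bios.h; a negative return signals an error):

	static int report_core_clock(struct drm_device *dev)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;
		struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
		int khz;

		if (!pm->clock_get)
			return -EINVAL;

		khz = pm->clock_get(dev, PLL_CORE);
		if (khz > 0)
			NV_INFO(dev, "core: %d MHz\n", khz / 1000);
		return khz;
	}
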
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 43a96b9..f18cdfc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -826,12 +826,9 @@
#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000
#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000
#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000
-#define NV50_PDISP_SOR_PWM_DIV(i) (0x0061c080 + (i) * 0x800)
-#define NV50_PDISP_SOR_PWM_CTL(i) (0x0061c084 + (i) * 0x800)
-#define NV50_PDISP_SOR_PWM_CTL_NEW 0x80000000
-#define NVA3_PDISP_SOR_PWM_CTL_UNK 0x40000000
-#define NV50_PDISP_SOR_PWM_CTL_VAL 0x000007ff
-#define NVA3_PDISP_SOR_PWM_CTL_VAL 0x00ffffff
+#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084
+#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000
+#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff
#define NV50_SOR_DP_CTRL(i, l) (0x0061c10c + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_CTRL_ENABLED 0x00000001
#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000
@@ -846,7 +843,7 @@
#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
#define NV50_SOR_DP_UNK118(i, l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK120(i, l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
-#define NV50_SOR_DP_SCFG(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK128(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK130(i, l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
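
The register hunk above drops the generalized per-SOR PWM defines in favour of the single fixed NV50_PDISPLAY_SOR_BACKLIGHT register, with an enable bit and a 12-bit level field. A minimal read-modify-write sketch using nouveau's MMIO accessors (the old backlight code does essentially this; clamping and connector checks are omitted):

	static void nv50_backlight_set(struct drm_device *dev, u32 level)
	{
		u32 val = nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT);

		val &= ~NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL;
		val |= level & NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL;
		nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT,
			val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE);
	}
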
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 47f245e..ca6028f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -8,30 +8,112 @@
#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
struct nouveau_sgdma_be {
- /* this has to be the first field so populate/unpopulated in
- * nouve_bo.c works properly, otherwise have to move them here
- */
- struct ttm_dma_tt ttm;
+ struct ttm_backend backend;
struct drm_device *dev;
+
+ dma_addr_t *pages;
+ bool *ttm_alloced;
+ unsigned nr_pages;
+
u64 offset;
+ bool bound;
};
+static int
+nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
+ struct page **pages, struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_device *dev = nvbe->dev;
+
+ NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
+
+ if (nvbe->pages)
+ return -EINVAL;
+
+ nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
+ if (!nvbe->pages)
+ return -ENOMEM;
+
+ nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+ if (!nvbe->ttm_alloced) {
+ kfree(nvbe->pages);
+ nvbe->pages = NULL;
+ return -ENOMEM;
+ }
+
+ nvbe->nr_pages = 0;
+ while (num_pages--) {
+ /* this code path isn't called and is incorrect anyways */
+ if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/
+ nvbe->pages[nvbe->nr_pages] =
+ dma_addrs[nvbe->nr_pages];
+ nvbe->ttm_alloced[nvbe->nr_pages] = true;
+ } else {
+ nvbe->pages[nvbe->nr_pages] =
+ pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev,
+ nvbe->pages[nvbe->nr_pages])) {
+ be->func->clear(be);
+ return -EFAULT;
+ }
+ nvbe->ttm_alloced[nvbe->nr_pages] = false;
+ }
+
+ nvbe->nr_pages++;
+ }
+
+ return 0;
+}
+
static void
-nouveau_sgdma_destroy(struct ttm_tt *ttm)
+nouveau_sgdma_clear(struct ttm_backend *be)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_device *dev;
+
+ if (nvbe && nvbe->pages) {
+ dev = nvbe->dev;
+ NV_DEBUG(dev, "\n");
+
+ if (nvbe->bound)
+ be->func->unbind(be);
+
+ while (nvbe->nr_pages--) {
+ if (!nvbe->ttm_alloced[nvbe->nr_pages])
+ pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ kfree(nvbe->pages);
+ kfree(nvbe->ttm_alloced);
+ nvbe->pages = NULL;
+ nvbe->ttm_alloced = NULL;
+ nvbe->nr_pages = 0;
+ }
+}
- if (ttm) {
+static void
+nouveau_sgdma_destroy(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+
+ if (be) {
NV_DEBUG(nvbe->dev, "\n");
- ttm_dma_tt_fini(&nvbe->ttm);
- kfree(nvbe);
+
+ if (nvbe) {
+ if (nvbe->pages)
+ be->func->clear(be);
+ kfree(nvbe);
+ }
}
}
static int
-nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -41,23 +123,24 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
nvbe->offset = mem->start << PAGE_SHIFT;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- for (i = 0; i < ttm->num_pages; i++) {
- dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
+ for (i = 0; i < nvbe->nr_pages; i++) {
+ dma_addr_t dma_offset = nvbe->pages[i];
uint32_t offset_l = lower_32_bits(dma_offset);
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
- offset_l += NV_CTXDMA_PAGE_SIZE;
+ dma_offset += NV_CTXDMA_PAGE_SIZE;
}
}
+ nvbe->bound = true;
return 0;
}
static int
-nv04_sgdma_unbind(struct ttm_tt *ttm)
+nv04_sgdma_unbind(struct ttm_backend *be)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -65,19 +148,22 @@ nv04_sgdma_unbind(struct ttm_tt *ttm)
NV_DEBUG(dev, "\n");
- if (ttm->state != tt_bound)
+ if (!nvbe->bound)
return 0;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- for (i = 0; i < ttm->num_pages; i++) {
+ for (i = 0; i < nvbe->nr_pages; i++) {
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
}
+ nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv04_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
.bind = nv04_sgdma_bind,
.unbind = nv04_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
@@ -96,14 +182,14 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
}
static int
-nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
- dma_addr_t *list = nvbe->ttm.dma_address;
+ dma_addr_t *list = nvbe->pages;
u32 pte = mem->start << 2;
- u32 cnt = ttm->num_pages;
+ u32 cnt = nvbe->nr_pages;
nvbe->offset = mem->start << PAGE_SHIFT;
@@ -113,17 +199,18 @@ nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
}
nv41_sgdma_flush(nvbe);
+ nvbe->bound = true;
return 0;
}
static int
-nv41_sgdma_unbind(struct ttm_tt *ttm)
+nv41_sgdma_unbind(struct ttm_backend *be)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
- u32 cnt = ttm->num_pages;
+ u32 cnt = nvbe->nr_pages;
while (cnt--) {
nv_wo32(pgt, pte, 0x00000000);
@@ -131,22 +218,24 @@ nv41_sgdma_unbind(struct ttm_tt *ttm)
}
nv41_sgdma_flush(nvbe);
+ nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv41_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
.bind = nv41_sgdma_bind,
.unbind = nv41_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static void
-nv44_sgdma_flush(struct ttm_tt *ttm)
+nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
- nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
+ nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -205,14 +294,14 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
}
static int
-nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
- dma_addr_t *list = nvbe->ttm.dma_address;
+ dma_addr_t *list = nvbe->pages;
u32 pte = mem->start << 2, tmp[4];
- u32 cnt = ttm->num_pages;
+ u32 cnt = nvbe->nr_pages;
int i;
nvbe->offset = mem->start << PAGE_SHIFT;
@@ -240,18 +329,19 @@ nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
if (cnt)
nv44_sgdma_fill(pgt, list, pte, cnt);
- nv44_sgdma_flush(ttm);
+ nv44_sgdma_flush(nvbe);
+ nvbe->bound = true;
return 0;
}
static int
-nv44_sgdma_unbind(struct ttm_tt *ttm)
+nv44_sgdma_unbind(struct ttm_backend *be)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
- u32 cnt = ttm->num_pages;
+ u32 cnt = nvbe->nr_pages;
if (pte & 0x0000000c) {
u32 max = 4 - ((pte >> 2) & 0x3);
@@ -273,47 +363,55 @@ nv44_sgdma_unbind(struct ttm_tt *ttm)
if (cnt)
nv44_sgdma_fill(pgt, NULL, pte, cnt);
- nv44_sgdma_flush(ttm);
+ nv44_sgdma_flush(nvbe);
+ nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv44_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
.bind = nv44_sgdma_bind,
.unbind = nv44_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static int
-nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct nouveau_mem *node = mem->mm_node;
-
/* noop: bound in move_notify() */
- node->pages = nvbe->ttm.dma_address;
+ node->pages = nvbe->pages;
+ nvbe->pages = (dma_addr_t *)node;
+ nvbe->bound = true;
return 0;
}
static int
-nv50_sgdma_unbind(struct ttm_tt *ttm)
+nv50_sgdma_unbind(struct ttm_backend *be)
{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
/* noop: unbound in move_notify() */
+ nvbe->pages = node->pages;
+ node->pages = NULL;
+ nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv50_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
.bind = nv50_sgdma_bind,
.unbind = nv50_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
-struct ttm_tt *
-nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+struct ttm_backend *
+nouveau_sgdma_init_ttm(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_sgdma_be *nvbe;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -321,13 +419,9 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
return NULL;
nvbe->dev = dev;
- nvbe->ttm.ttm.func = dev_priv->gart_info.func;
- if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
- kfree(nvbe);
- return NULL;
- }
- return &nvbe->ttm.ttm;
+ nvbe->backend.func = dev_priv->gart_info.func;
+ return &nvbe->backend;
}
int
@@ -338,7 +432,7 @@ nouveau_sgdma_init(struct drm_device *dev)
u32 aper_size, align;
int ret;
- if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
+ if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
aper_size = 512 * 1024 * 1024;
else
aper_size = 64 * 1024 * 1024;
@@ -367,7 +461,7 @@ nouveau_sgdma_init(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_HW;
dev_priv->gart_info.func = &nv50_sgdma_backend;
} else
- if (0 && pci_is_pcie(dev->pdev) &&
+ if (0 && drm_pci_device_is_pcie(dev) &&
dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
if (nv44_graph_class(dev)) {
dev_priv->gart_info.func = &nv44_sgdma_backend;
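
Taken together, the sgdma hunks revert from TTM's ttm_dma_tt to the older ttm_backend interface, in which the driver itself tracks the per-page DMA mappings and the bound/unbound state. The restored lifecycle, summarized from the code above (clear() unbinds first if the backend is still bound):

	/*
	 * be = nouveau_sgdma_init_ttm(dev);
	 * be->func->populate(be, n, pages, dummy, dma_addrs); pci_map_page() each page
	 * be->func->bind(be, mem);    write PTEs into the GART page table, flush
	 * ...
	 * be->func->unbind(be);       zero the PTEs, flush
	 * be->func->clear(be);        pci_unmap_page() + free bookkeeping arrays
	 * be->func->destroy(be);      kfree() the backend itself
	 */
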
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 9c144fb..731acea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -36,7 +36,6 @@
#include "nouveau_drm.h"
#include "nouveau_fbcon.h"
#include "nouveau_ramht.h"
-#include "nouveau_gpio.h"
#include "nouveau_pm.h"
#include "nv50_display.h"
@@ -81,14 +80,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.destroy = nv04_display_destroy;
engine->display.init = nv04_display_init;
- engine->display.fini = nv04_display_fini;
- engine->pm.clocks_get = nv04_pm_clocks_get;
- engine->pm.clocks_pre = nv04_pm_clocks_pre;
- engine->pm.clocks_set = nv04_pm_clocks_set;
- engine->vram.init = nv04_fb_vram_init;
- engine->vram.takedown = nouveau_stub_takedown;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = NULL;
+ engine->gpio.set = NULL;
+ engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
+ engine->vram.init = nouveau_mem_detect;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x10:
@@ -126,20 +128,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.destroy = nv04_display_destroy;
engine->display.init = nv04_display_init;
- engine->display.fini = nv04_display_fini;
- engine->gpio.drive = nv10_gpio_drive;
- engine->gpio.sense = nv10_gpio_sense;
- engine->pm.clocks_get = nv04_pm_clocks_get;
- engine->pm.clocks_pre = nv04_pm_clocks_pre;
- engine->pm.clocks_set = nv04_pm_clocks_set;
- if (dev_priv->chipset == 0x1a ||
- dev_priv->chipset == 0x1f)
- engine->vram.init = nv1a_fb_vram_init;
- else
- engine->vram.init = nv10_fb_vram_init;
- engine->vram.takedown = nouveau_stub_takedown;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv10_gpio_get;
+ engine->gpio.set = nv10_gpio_set;
+ engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
+ engine->vram.init = nouveau_mem_detect;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x20:
@@ -157,11 +156,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.init = nv04_timer_init;
engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
- engine->fb.init = nv20_fb_init;
- engine->fb.takedown = nv20_fb_takedown;
- engine->fb.init_tile_region = nv20_fb_init_tile_region;
- engine->fb.set_tile_region = nv20_fb_set_tile_region;
- engine->fb.free_tile_region = nv20_fb_free_tile_region;
+ engine->fb.init = nv10_fb_init;
+ engine->fb.takedown = nv10_fb_takedown;
+ engine->fb.init_tile_region = nv10_fb_init_tile_region;
+ engine->fb.set_tile_region = nv10_fb_set_tile_region;
+ engine->fb.free_tile_region = nv10_fb_free_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->fifo.takedown = nv04_fifo_fini;
@@ -177,16 +176,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.destroy = nv04_display_destroy;
engine->display.init = nv04_display_init;
- engine->display.fini = nv04_display_fini;
- engine->gpio.drive = nv10_gpio_drive;
- engine->gpio.sense = nv10_gpio_sense;
- engine->pm.clocks_get = nv04_pm_clocks_get;
- engine->pm.clocks_pre = nv04_pm_clocks_pre;
- engine->pm.clocks_set = nv04_pm_clocks_set;
- engine->vram.init = nv20_fb_vram_init;
- engine->vram.takedown = nouveau_stub_takedown;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv10_gpio_get;
+ engine->gpio.set = nv10_gpio_set;
+ engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
+ engine->vram.init = nouveau_mem_detect;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x30:
@@ -224,18 +224,19 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.destroy = nv04_display_destroy;
engine->display.init = nv04_display_init;
- engine->display.fini = nv04_display_fini;
- engine->gpio.drive = nv10_gpio_drive;
- engine->gpio.sense = nv10_gpio_sense;
- engine->pm.clocks_get = nv04_pm_clocks_get;
- engine->pm.clocks_pre = nv04_pm_clocks_pre;
- engine->pm.clocks_set = nv04_pm_clocks_set;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv10_gpio_get;
+ engine->gpio.set = nv10_gpio_set;
+ engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
- engine->vram.init = nv20_fb_vram_init;
- engine->vram.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x40:
@@ -274,30 +275,26 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.destroy = nv04_display_destroy;
engine->display.init = nv04_display_init;
- engine->display.fini = nv04_display_fini;
- engine->gpio.init = nv10_gpio_init;
- engine->gpio.fini = nv10_gpio_fini;
- engine->gpio.drive = nv10_gpio_drive;
- engine->gpio.sense = nv10_gpio_sense;
- engine->gpio.irq_enable = nv10_gpio_irq_enable;
- engine->pm.clocks_get = nv40_pm_clocks_get;
- engine->pm.clocks_pre = nv40_pm_clocks_pre;
- engine->pm.clocks_set = nv40_pm_clocks_set;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv10_gpio_get;
+ engine->gpio.set = nv10_gpio_set;
+ engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->pm.temp_get = nv40_temp_get;
- engine->pm.pwm_get = nv40_pm_pwm_get;
- engine->pm.pwm_set = nv40_pm_pwm_set;
- engine->vram.init = nv40_fb_vram_init;
- engine->vram.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */
case 0x90:
- case 0xa0:
+ case 0xA0:
engine->instmem.init = nv50_instmem_init;
engine->instmem.takedown = nv50_instmem_takedown;
engine->instmem.suspend = nv50_instmem_suspend;
@@ -332,13 +329,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
- engine->display.destroy = nv50_display_destroy;
engine->display.init = nv50_display_init;
- engine->display.fini = nv50_display_fini;
+ engine->display.destroy = nv50_display_destroy;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.fini = nv50_gpio_fini;
- engine->gpio.drive = nv50_gpio_drive;
- engine->gpio.sense = nv50_gpio_sense;
+ engine->gpio.takedown = nv50_gpio_fini;
+ engine->gpio.get = nv50_gpio_get;
+ engine->gpio.set = nv50_gpio_set;
+ engine->gpio.irq_register = nv50_gpio_irq_register;
+ engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
switch (dev_priv->chipset) {
case 0x84:
@@ -351,14 +349,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
case 0xaa:
case 0xac:
case 0x50:
- engine->pm.clocks_get = nv50_pm_clocks_get;
- engine->pm.clocks_pre = nv50_pm_clocks_pre;
- engine->pm.clocks_set = nv50_pm_clocks_set;
+ engine->pm.clock_get = nv50_pm_clock_get;
+ engine->pm.clock_pre = nv50_pm_clock_pre;
+ engine->pm.clock_set = nv50_pm_clock_set;
break;
default:
- engine->pm.clocks_get = nva3_pm_clocks_get;
- engine->pm.clocks_pre = nva3_pm_clocks_pre;
- engine->pm.clocks_set = nva3_pm_clocks_set;
+ engine->pm.clock_get = nva3_pm_clock_get;
+ engine->pm.clock_pre = nva3_pm_clock_pre;
+ engine->pm.clock_set = nva3_pm_clock_set;
break;
}
engine->pm.voltage_get = nouveau_voltage_gpio_get;
@@ -367,15 +365,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.temp_get = nv84_temp_get;
else
engine->pm.temp_get = nv40_temp_get;
- engine->pm.pwm_get = nv50_pm_pwm_get;
- engine->pm.pwm_set = nv50_pm_pwm_set;
engine->vram.init = nv50_vram_init;
- engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nv50_vram_new;
engine->vram.put = nv50_vram_del;
engine->vram.flags_valid = nv50_vram_flags_valid;
break;
- case 0xc0:
+ case 0xC0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
@@ -406,93 +401,25 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
- engine->display.destroy = nv50_display_destroy;
engine->display.init = nv50_display_init;
- engine->display.fini = nv50_display_fini;
- engine->gpio.init = nv50_gpio_init;
- engine->gpio.fini = nv50_gpio_fini;
- engine->gpio.drive = nv50_gpio_drive;
- engine->gpio.sense = nv50_gpio_sense;
- engine->gpio.irq_enable = nv50_gpio_irq_enable;
- engine->vram.init = nvc0_vram_init;
- engine->vram.takedown = nv50_vram_fini;
- engine->vram.get = nvc0_vram_new;
- engine->vram.put = nv50_vram_del;
- engine->vram.flags_valid = nvc0_vram_flags_valid;
- engine->pm.temp_get = nv84_temp_get;
- engine->pm.clocks_get = nvc0_pm_clocks_get;
- engine->pm.clocks_pre = nvc0_pm_clocks_pre;
- engine->pm.clocks_set = nvc0_pm_clocks_set;
- engine->pm.voltage_get = nouveau_voltage_gpio_get;
- engine->pm.voltage_set = nouveau_voltage_gpio_set;
- engine->pm.pwm_get = nv50_pm_pwm_get;
- engine->pm.pwm_set = nv50_pm_pwm_set;
- break;
- case 0xd0:
- engine->instmem.init = nvc0_instmem_init;
- engine->instmem.takedown = nvc0_instmem_takedown;
- engine->instmem.suspend = nvc0_instmem_suspend;
- engine->instmem.resume = nvc0_instmem_resume;
- engine->instmem.get = nv50_instmem_get;
- engine->instmem.put = nv50_instmem_put;
- engine->instmem.map = nv50_instmem_map;
- engine->instmem.unmap = nv50_instmem_unmap;
- engine->instmem.flush = nv84_instmem_flush;
- engine->mc.init = nv50_mc_init;
- engine->mc.takedown = nv50_mc_takedown;
- engine->timer.init = nv04_timer_init;
- engine->timer.read = nv04_timer_read;
- engine->timer.takedown = nv04_timer_takedown;
- engine->fb.init = nvc0_fb_init;
- engine->fb.takedown = nvc0_fb_takedown;
- engine->fifo.channels = 128;
- engine->fifo.init = nvc0_fifo_init;
- engine->fifo.takedown = nvc0_fifo_takedown;
- engine->fifo.disable = nvc0_fifo_disable;
- engine->fifo.enable = nvc0_fifo_enable;
- engine->fifo.reassign = nvc0_fifo_reassign;
- engine->fifo.channel_id = nvc0_fifo_channel_id;
- engine->fifo.create_context = nvc0_fifo_create_context;
- engine->fifo.destroy_context = nvc0_fifo_destroy_context;
- engine->fifo.load_context = nvc0_fifo_load_context;
- engine->fifo.unload_context = nvc0_fifo_unload_context;
- engine->display.early_init = nouveau_stub_init;
- engine->display.late_takedown = nouveau_stub_takedown;
- engine->display.create = nvd0_display_create;
- engine->display.destroy = nvd0_display_destroy;
- engine->display.init = nvd0_display_init;
- engine->display.fini = nvd0_display_fini;
+ engine->display.destroy = nv50_display_destroy;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.fini = nv50_gpio_fini;
- engine->gpio.drive = nvd0_gpio_drive;
- engine->gpio.sense = nvd0_gpio_sense;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv50_gpio_get;
+ engine->gpio.set = nv50_gpio_set;
+ engine->gpio.irq_register = nv50_gpio_irq_register;
+ engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
engine->vram.init = nvc0_vram_init;
- engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nvc0_vram_new;
engine->vram.put = nv50_vram_del;
engine->vram.flags_valid = nvc0_vram_flags_valid;
- engine->pm.temp_get = nv84_temp_get;
- engine->pm.clocks_get = nvc0_pm_clocks_get;
- engine->pm.clocks_pre = nvc0_pm_clocks_pre;
- engine->pm.clocks_set = nvc0_pm_clocks_set;
- engine->pm.voltage_get = nouveau_voltage_gpio_get;
- engine->pm.voltage_set = nouveau_voltage_gpio_set;
break;
default:
NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
return 1;
}
- /* headless mode */
- if (nouveau_modeset == 2) {
- engine->display.early_init = nouveau_stub_init;
- engine->display.late_takedown = nouveau_stub_takedown;
- engine->display.create = nouveau_stub_init;
- engine->display.init = nouveau_stub_init;
- engine->display.destroy = nouveau_stub_takedown;
- }
-
return 0;
}
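
nouveau_init_engine_ptrs() is a pure dispatch table: one switch on the chipset family fills in every engine hook, so later code never branches on the hardware again. A hypothetical reduction of that pattern for the pm clock hooks alone (the real switch above distinguishes far more cases, including the nva3 path for newer NV50-family chips):

	static void pick_clock_backend(struct drm_device *dev)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;
		struct nouveau_engine *engine = &dev_priv->engine;

		if (dev_priv->card_type < NV_50) {
			engine->pm.clock_get = nv04_pm_clock_get;
			engine->pm.clock_pre = nv04_pm_clock_pre;
			engine->pm.clock_set = nv04_pm_clock_set;
		} else {
			engine->pm.clock_get = nv50_pm_clock_get;
			engine->pm.clock_pre = nv50_pm_clock_pre;
			engine->pm.clock_set = nv50_pm_clock_set;
		}
	}
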
@@ -514,6 +441,21 @@ nouveau_vga_set_decode(void *priv, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
+static int
+nouveau_card_init_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = nouveau_channel_alloc(dev, &dev_priv->channel,
+ (struct drm_file *)-2, NvDmaFB, NvDmaTT);
+ if (ret)
+ return ret;
+
+ mutex_unlock(&dev_priv->channel->mutex);
+ return 0;
+}
+
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
@@ -529,7 +471,6 @@ static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(dev);
- nouveau_switcheroo_optimus_dsm();
nouveau_pci_suspend(pdev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
@@ -584,55 +525,45 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_display_early;
- /* workaround an odd issue on nvc1 by disabling the device's
- * nosnoop capability. hopefully won't cause issues until a
- * better fix is found - assuming there is one...
- */
- if (dev_priv->chipset == 0xc1) {
- nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
- }
+ nouveau_pm_init(dev);
- /* PMC */
- ret = engine->mc.init(dev);
+ ret = nouveau_mem_vram_init(dev);
if (ret)
goto out_bios;
- /* PTIMER */
- ret = engine->timer.init(dev);
- if (ret)
- goto out_mc;
-
- /* PFB */
- ret = engine->fb.init(dev);
+ ret = nouveau_gpuobj_init(dev);
if (ret)
- goto out_timer;
+ goto out_vram;
- ret = engine->vram.init(dev);
+ ret = engine->instmem.init(dev);
if (ret)
- goto out_fb;
+ goto out_gpuobj;
- /* PGPIO */
- ret = nouveau_gpio_create(dev);
+ ret = nouveau_mem_gart_init(dev);
if (ret)
- goto out_vram;
+ goto out_instmem;
- ret = nouveau_gpuobj_init(dev);
+ /* PMC */
+ ret = engine->mc.init(dev);
if (ret)
- goto out_gpio;
+ goto out_gart;
- ret = engine->instmem.init(dev);
+ /* PGPIO */
+ ret = engine->gpio.init(dev);
if (ret)
- goto out_gpuobj;
+ goto out_mc;
- ret = nouveau_mem_vram_init(dev);
+ /* PTIMER */
+ ret = engine->timer.init(dev);
if (ret)
- goto out_instmem;
+ goto out_gpio;
- ret = nouveau_mem_gart_init(dev);
+ /* PFB */
+ ret = engine->fb.init(dev);
if (ret)
- goto out_ttmvram;
+ goto out_timer;
- if (!dev_priv->noaccel) {
+ if (!nouveau_noaccel) {
switch (dev_priv->card_type) {
case NV_04:
nv04_graph_create(dev);
@@ -651,7 +582,6 @@ nouveau_card_init(struct drm_device *dev)
nv50_graph_create(dev);
break;
case NV_C0:
- case NV_D0:
nvc0_graph_create(dev);
break;
default:
@@ -667,11 +597,6 @@ nouveau_card_init(struct drm_device *dev)
case 0xa0:
nv84_crypt_create(dev);
break;
- case 0x98:
- case 0xaa:
- case 0xac:
- nv98_crypt_create(dev);
- break;
}
switch (dev_priv->card_type) {
@@ -693,25 +618,12 @@ nouveau_card_init(struct drm_device *dev)
break;
}
- if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
- nv84_bsp_create(dev);
- nv84_vp_create(dev);
- nv98_ppp_create(dev);
- } else
- if (dev_priv->chipset >= 0x84) {
- nv50_mpeg_create(dev);
- nv84_bsp_create(dev);
- nv84_vp_create(dev);
- } else
- if (dev_priv->chipset >= 0x50) {
+ if (dev_priv->card_type == NV_40)
+ nv40_mpeg_create(dev);
+ else
+ if (dev_priv->card_type == NV_50 &&
+ (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
nv50_mpeg_create(dev);
- } else
- if (dev_priv->card_type == NV_40 ||
- dev_priv->chipset == 0x31 ||
- dev_priv->chipset == 0x34 ||
- dev_priv->chipset == 0x36) {
- nv31_mpeg_create(dev);
- }
for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
if (dev_priv->eng[e]) {
@@ -727,80 +639,71 @@ nouveau_card_init(struct drm_device *dev)
goto out_engine;
}
- ret = nouveau_irq_init(dev);
+ ret = engine->display.create(dev);
if (ret)
goto out_fifo;
- ret = nouveau_display_create(dev);
+ ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
if (ret)
- goto out_irq;
+ goto out_vblank;
- nouveau_backlight_init(dev);
- nouveau_pm_init(dev);
+ ret = nouveau_irq_init(dev);
+ if (ret)
+ goto out_vblank;
+
+ /* what about PVIDEO/PCRTC/PRAMDAC etc? */
if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
ret = nouveau_fence_init(dev);
if (ret)
- goto out_pm;
+ goto out_irq;
- ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
- NvDmaFB, NvDmaTT);
+ ret = nouveau_card_init_channel(dev);
if (ret)
goto out_fence;
-
- mutex_unlock(&dev_priv->channel->mutex);
- }
-
- if (dev->mode_config.num_crtc) {
- ret = nouveau_display_init(dev);
- if (ret)
- goto out_chan;
-
- nouveau_fbcon_init(dev);
}
+ nouveau_fbcon_init(dev);
+ drm_kms_helper_poll_init(dev);
return 0;
-out_chan:
- nouveau_channel_put_unlocked(&dev_priv->channel);
out_fence:
nouveau_fence_fini(dev);
-out_pm:
- nouveau_pm_fini(dev);
- nouveau_backlight_exit(dev);
- nouveau_display_destroy(dev);
out_irq:
nouveau_irq_fini(dev);
+out_vblank:
+ drm_vblank_cleanup(dev);
+ engine->display.destroy(dev);
out_fifo:
- if (!dev_priv->noaccel)
+ if (!nouveau_noaccel)
engine->fifo.takedown(dev);
out_engine:
- if (!dev_priv->noaccel) {
+ if (!nouveau_noaccel) {
for (e = e - 1; e >= 0; e--) {
if (!dev_priv->eng[e])
continue;
- dev_priv->eng[e]->fini(dev, e, false);
+ dev_priv->eng[e]->fini(dev, e);
dev_priv->eng[e]->destroy(dev, e);
}
}
+
+ engine->fb.takedown(dev);
+out_timer:
+ engine->timer.takedown(dev);
+out_gpio:
+ engine->gpio.takedown(dev);
+out_mc:
+ engine->mc.takedown(dev);
+out_gart:
nouveau_mem_gart_fini(dev);
-out_ttmvram:
- nouveau_mem_vram_fini(dev);
out_instmem:
engine->instmem.takedown(dev);
out_gpuobj:
nouveau_gpuobj_takedown(dev);
-out_gpio:
- nouveau_gpio_destroy(dev);
out_vram:
- engine->vram.takedown(dev);
-out_fb:
- engine->fb.takedown(dev);
-out_timer:
- engine->timer.takedown(dev);
-out_mc:
- engine->mc.takedown(dev);
+ nouveau_mem_vram_fini(dev);
out_bios:
+ nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
out_display_early:
engine->display.late_takedown(dev);
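The error-unwind reshuffle above follows the standard kernel goto ladder: each init step that succeeds gains a label that tears it down, and a failure jumps to the label of the last completed step so teardown runs in strict reverse order. A minimal sketch of the idiom, with hypothetical init_*/fini_* helpers standing in for the engine hooks:

/* Minimal sketch of the goto-unwind ladder used by nouveau_card_init();
 * init_a/init_b/init_c and the fini_* counterparts are hypothetical. */
static int card_init_sketch(struct drm_device *dev)
{
	int ret;

	ret = init_a(dev);
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = init_b(dev);
	if (ret)
		goto out_a;		/* undo init_a only */

	ret = init_c(dev);
	if (ret)
		goto out_b;		/* undo init_b, then fall through */

	return 0;

out_b:
	fini_b(dev);
out_a:
	fini_a(dev);
	return ret;
}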
@@ -815,29 +718,25 @@ static void nouveau_card_takedown(struct drm_device *dev)
struct nouveau_engine *engine = &dev_priv->engine;
int e;
- if (dev->mode_config.num_crtc) {
- nouveau_fbcon_fini(dev);
- nouveau_display_fini(dev);
- }
-
if (dev_priv->channel) {
- nouveau_channel_put_unlocked(&dev_priv->channel);
nouveau_fence_fini(dev);
+ nouveau_channel_put_unlocked(&dev_priv->channel);
}
- nouveau_pm_fini(dev);
- nouveau_backlight_exit(dev);
- nouveau_display_destroy(dev);
-
- if (!dev_priv->noaccel) {
+ if (!nouveau_noaccel) {
engine->fifo.takedown(dev);
for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
if (dev_priv->eng[e]) {
- dev_priv->eng[e]->fini(dev, e, false);
+ dev_priv->eng[e]->fini(dev, e);
dev_priv->eng[e]->destroy(dev, e);
}
}
}
+ engine->fb.takedown(dev);
+ engine->timer.takedown(dev);
+ engine->gpio.takedown(dev);
+ engine->mc.takedown(dev);
+ engine->display.late_takedown(dev);
if (dev_priv->vga_ram) {
nouveau_bo_unpin(dev_priv->vga_ram);
@@ -849,60 +748,20 @@ static void nouveau_card_takedown(struct drm_device *dev)
ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
mutex_unlock(&dev->struct_mutex);
nouveau_mem_gart_fini(dev);
- nouveau_mem_vram_fini(dev);
engine->instmem.takedown(dev);
nouveau_gpuobj_takedown(dev);
+ nouveau_mem_vram_fini(dev);
- nouveau_gpio_destroy(dev);
- engine->vram.takedown(dev);
- engine->fb.takedown(dev);
- engine->timer.takedown(dev);
- engine->mc.takedown(dev);
+ nouveau_irq_fini(dev);
+ drm_vblank_cleanup(dev);
+ nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
- engine->display.late_takedown(dev);
-
- nouveau_irq_fini(dev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
-int
-nouveau_open(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fpriv *fpriv;
- int ret;
-
- fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
- if (unlikely(!fpriv))
- return -ENOMEM;
-
- spin_lock_init(&fpriv->lock);
- INIT_LIST_HEAD(&fpriv->channels);
-
- if (dev_priv->card_type == NV_50) {
- ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
- &fpriv->vm);
- if (ret) {
- kfree(fpriv);
- return ret;
- }
- } else
- if (dev_priv->card_type >= NV_C0) {
- ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
- &fpriv->vm);
- if (ret) {
- kfree(fpriv);
- return ret;
- }
- }
-
- file_priv->driver_priv = fpriv;
- return 0;
-}
-
/* here a client dies, release the stuff that was allocated for its
* file_priv */
void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
@@ -910,14 +769,6 @@ void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
nouveau_channel_cleanup(dev, file_priv);
}
-void
-nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
- nouveau_vm_ref(NULL, &fpriv->vm, NULL);
- kfree(fpriv);
-}
-
/* first module load, setup the mmio/fb mapping */
/* KMS: we need mmio at load time, not when the first drm client opens. */
int nouveau_firstopen(struct drm_device *dev)
@@ -993,7 +844,7 @@ static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
struct drm_nouveau_private *dev_priv;
- uint32_t reg0 = ~0, strap;
+ uint32_t reg0;
resource_size_t mmio_start_offs;
int ret;
@@ -1005,72 +856,15 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = dev_priv;
dev_priv->dev = dev;
- pci_set_master(dev->pdev);
-
dev_priv->flags = flags & NOUVEAU_FLAGS;
NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
dev->pci_vendor, dev->pci_device, dev->pdev->class);
- /* first up, map the start of mmio and determine the chipset */
- dev_priv->mmio = ioremap(pci_resource_start(dev->pdev, 0), PAGE_SIZE);
- if (dev_priv->mmio) {
-#ifdef __BIG_ENDIAN
- /* put the card into big-endian mode if it's not */
- if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
- nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
- DRM_MEMORYBARRIER();
-#endif
-
- /* determine chipset and derive architecture from it */
- reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
- if ((reg0 & 0x0f000000) > 0) {
- dev_priv->chipset = (reg0 & 0xff00000) >> 20;
- switch (dev_priv->chipset & 0xf0) {
- case 0x10:
- case 0x20:
- case 0x30:
- dev_priv->card_type = dev_priv->chipset & 0xf0;
- break;
- case 0x40:
- case 0x60:
- dev_priv->card_type = NV_40;
- break;
- case 0x50:
- case 0x80:
- case 0x90:
- case 0xa0:
- dev_priv->card_type = NV_50;
- break;
- case 0xc0:
- dev_priv->card_type = NV_C0;
- break;
- case 0xd0:
- dev_priv->card_type = NV_D0;
- break;
- default:
- break;
- }
- } else
- if ((reg0 & 0xff00fff0) == 0x20004000) {
- if (reg0 & 0x00f00000)
- dev_priv->chipset = 0x05;
- else
- dev_priv->chipset = 0x04;
- dev_priv->card_type = NV_04;
- }
-
- iounmap(dev_priv->mmio);
- }
-
- if (!dev_priv->card_type) {
- NV_ERROR(dev, "unsupported chipset 0x%08x\n", reg0);
- ret = -EINVAL;
- goto err_priv;
- }
-
- NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
- dev_priv->card_type, reg0);
+ /* resource 0 is mmio regs */
+ /* resource 1 is linear FB */
+ /* resource 2 is RAMIN (mmio regs + 0x1000000) */
+ /* resource 6 is bios */
/* map the mmio regs */
mmio_start_offs = pci_resource_start(dev->pdev, 0);
@@ -1084,41 +878,61 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
(unsigned long long)mmio_start_offs);
- /* determine frequency of timing crystal */
- strap = nv_rd32(dev, 0x101000);
- if ( dev_priv->chipset < 0x17 ||
- (dev_priv->chipset >= 0x20 && dev_priv->chipset <= 0x25))
- strap &= 0x00000040;
- else
- strap &= 0x00400040;
+#ifdef __BIG_ENDIAN
+ /* Put the card in BE mode if it's not */
+ if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
+ nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
- switch (strap) {
- case 0x00000000: dev_priv->crystal = 13500; break;
- case 0x00000040: dev_priv->crystal = 14318; break;
- case 0x00400000: dev_priv->crystal = 27000; break;
- case 0x00400040: dev_priv->crystal = 25000; break;
- }
+ DRM_MEMORYBARRIER();
+#endif
- NV_DEBUG(dev, "crystal freq: %dKHz\n", dev_priv->crystal);
+ /* Time to determine the card architecture */
+ reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
+ dev_priv->stepping = 0; /* XXX: add stepping for pre-NV10? */
+
+ /* We're dealing with >=NV10 */
+ if ((reg0 & 0x0f000000) > 0) {
+ /* Bits 27-20 contain the chipset id in hex */
+ dev_priv->chipset = (reg0 & 0xff00000) >> 20;
+ dev_priv->stepping = (reg0 & 0xff);
+ /* NV04 or NV05 */
+ } else if ((reg0 & 0xff00fff0) == 0x20004000) {
+ if (reg0 & 0x00f00000)
+ dev_priv->chipset = 0x05;
+ else
+ dev_priv->chipset = 0x04;
+ } else
+ dev_priv->chipset = 0xff;
- /* Determine whether we'll attempt acceleration or not, some
- * cards are disabled by default here due to them being known
- * non-functional, or never been tested due to lack of hw.
- */
- dev_priv->noaccel = !!nouveau_noaccel;
- if (nouveau_noaccel == -1) {
- switch (dev_priv->chipset) {
- case 0xd9: /* known broken */
- NV_INFO(dev, "acceleration disabled by default, pass "
- "noaccel=0 to force enable\n");
- dev_priv->noaccel = true;
- break;
- default:
- dev_priv->noaccel = false;
- break;
- }
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x00:
+ case 0x10:
+ case 0x20:
+ case 0x30:
+ dev_priv->card_type = dev_priv->chipset & 0xf0;
+ break;
+ case 0x40:
+ case 0x60:
+ dev_priv->card_type = NV_40;
+ break;
+ case 0x50:
+ case 0x80:
+ case 0x90:
+ case 0xa0:
+ dev_priv->card_type = NV_50;
+ break;
+ case 0xc0:
+ dev_priv->card_type = NV_C0;
+ break;
+ default:
+ NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
+ ret = -EINVAL;
+ goto err_mmio;
}
+ NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
+ dev_priv->card_type, reg0);
+
ret = nouveau_remove_conflicting_drivers(dev);
if (ret)
goto err_mmio;
@@ -1134,7 +948,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
ioremap(pci_resource_start(dev->pdev, ramin_bar),
dev_priv->ramin_size);
if (!dev_priv->ramin) {
- NV_ERROR(dev, "Failed to map PRAMIN BAR\n");
+ NV_ERROR(dev, "Failed to PRAMIN BAR");
ret = -ENOMEM;
goto err_mmio;
}
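For reference, the detection logic this hunk restores reduces to a small pure decoder over the NV03_PMC_BOOT_0 value. A condensed sketch using the same masks as above (not a drop-in replacement; NV_40/NV_50/NV_C0 are this era's nouveau_card_type values):

/* Condensed sketch of the chipset/card-type decode restored above. */
static int decode_boot0_sketch(u32 reg0, int *chipset, int *card_type)
{
	if ((reg0 & 0x0f000000) > 0)			/* NV10 and newer */
		*chipset = (reg0 & 0x0ff00000) >> 20;
	else if ((reg0 & 0xff00fff0) == 0x20004000)	/* NV04 or NV05 */
		*chipset = (reg0 & 0x00f00000) ? 0x05 : 0x04;
	else
		return -EINVAL;

	switch (*chipset & 0xf0) {
	case 0x00: case 0x10: case 0x20: case 0x30:
		*card_type = *chipset & 0xf0;	/* NV_04 == 0x00 here */
		break;
	case 0x40: case 0x60:
		*card_type = NV_40;
		break;
	case 0x50: case 0x80: case 0x90: case 0xa0:
		*card_type = NV_50;
		break;
	case 0xc0:
		*card_type = NV_C0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}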
@@ -1183,7 +997,11 @@ void nouveau_lastclose(struct drm_device *dev)
int nouveau_unload(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ drm_kms_helper_poll_fini(dev);
+ nouveau_fbcon_fini(dev);
+ engine->display.destroy(dev);
nouveau_card_takedown(dev);
iounmap(dev_priv->mmio);
@@ -1213,7 +1031,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_BUS_TYPE:
if (drm_pci_device_is_agp(dev))
getparam->value = NV_AGP;
- else if (pci_is_pcie(dev->pdev))
+ else if (drm_pci_device_is_pcie(dev))
getparam->value = NV_PCIE;
else
getparam->value = NV_PCI;
@@ -1302,23 +1120,6 @@ nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
return false;
}
-/* Wait until cond(data) == true, up until timeout has hit */
-bool
-nouveau_wait_cb(struct drm_device *dev, u64 timeout,
- bool (*cond)(void *), void *data)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- u64 start = ptimer->read(dev);
-
- do {
- if (cond(data) == true)
- return true;
- } while (ptimer->read(dev) - start < timeout);
-
- return false;
-}
-
/* Waits for PGRAPH to go completely idle */
bool nouveau_wait_for_idle(struct drm_device *dev)
{
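nouveau_wait_cb() is dropped by this merge, but its shape is the same PTIMER busy-wait that the surviving nouveau_wait_ne()/nouveau_wait_for_idle() use. Restated as a standalone sketch, assuming ptimer->read() returns a monotonic nanosecond counter, and with the redundant '== true' comparison dropped:

/* Sketch of the PTIMER busy-wait shape shared by the nouveau_wait_*()
 * helpers; assumes ptimer->read() returns a monotonic ns counter. */
static bool wait_cond_sketch(struct drm_device *dev, u64 timeout_ns,
			     bool (*cond)(void *), void *data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	u64 start = ptimer->read(dev);

	do {
		if (cond(data))
			return true;
	} while (ptimer->read(dev) - start < timeout_ns);

	return false;	/* timed out */
}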
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 0f5a301..649b041 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -22,8 +22,6 @@
* Authors: Martin Peres
*/
-#include <linux/module.h>
-
#include "drmP.h"
#include "nouveau_drv.h"
@@ -45,7 +43,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
/* Set the default sensor's constants */
sensor->offset_constant = 0;
- sensor->offset_mult = 0;
+ sensor->offset_mult = 1;
sensor->offset_div = 1;
sensor->slope_mult = 1;
sensor->slope_div = 1;
@@ -55,10 +53,6 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
temps->down_clock = 100;
temps->fan_boost = 90;
- /* Set the default range for the pwm fan */
- pm->fan.min_duty = 30;
- pm->fan.max_duty = 100;
-
/* Set the known default values to setup the temperature sensor */
if (dev_priv->card_type >= NV_40) {
switch (dev_priv->chipset) {
@@ -105,13 +99,6 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
sensor->slope_mult = 431;
sensor->slope_div = 10000;
break;
-
- case 0x67:
- sensor->offset_mult = -26149;
- sensor->offset_div = 100;
- sensor->slope_mult = 484;
- sensor->slope_div = 10000;
- break;
}
}
@@ -122,7 +109,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
/* Read the entries from the table */
for (i = 0; i < entries; i++) {
- s16 value = ROM16(temp[1]);
+ u16 value = ROM16(temp[1]);
switch (temp[0]) {
case 0x01:
@@ -160,26 +147,11 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
case 0x13:
sensor->slope_div = value;
break;
- case 0x22:
- pm->fan.min_duty = value & 0xff;
- pm->fan.max_duty = (value & 0xff00) >> 8;
- break;
- case 0x26:
- pm->fan.pwm_freq = value;
- break;
}
temp += recordlen;
}
nouveau_temp_safety_checks(dev);
-
- /* check the fan min/max settings */
- if (pm->fan.min_duty < 10)
- pm->fan.min_duty = 10;
- if (pm->fan.max_duty > 100)
- pm->fan.max_duty = 100;
- if (pm->fan.max_duty < pm->fan.min_duty)
- pm->fan.max_duty = pm->fan.min_duty;
}
static int
@@ -188,8 +160,8 @@ nv40_sensor_setup(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
- s32 offset = sensor->offset_mult / sensor->offset_div;
- s32 sensor_calibration;
+ u32 offset = sensor->offset_mult / sensor->offset_div;
+ u32 sensor_calibration;
/* set up the sensors */
sensor_calibration = 120 - offset - sensor->offset_constant;
@@ -286,6 +258,8 @@ probe_monitoring_device(struct nouveau_i2c_chan *i2c,
static void
nouveau_temp_probe_i2c(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct i2c_board_info info[] = {
{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
{ I2C_BOARD_INFO("w83781d", 0x2d) },
@@ -294,9 +268,11 @@ nouveau_temp_probe_i2c(struct drm_device *dev)
{ I2C_BOARD_INFO("lm99", 0x4c) },
{ }
};
+ int idx = (dcb->version >= 0x40 ?
+ dcb->i2c_default_indices & 0xf : 2);
nouveau_i2c_identify(dev, "monitoring device", info,
- probe_monitoring_device, NV_I2C_DEFAULT(0));
+ probe_monitoring_device, idx);
}
void
@@ -312,9 +288,9 @@ nouveau_temp_init(struct drm_device *dev)
return;
if (P.version == 1)
- temp = ROMPTR(dev, P.data[12]);
+ temp = ROMPTR(bios, P.data[12]);
else if (P.version == 2)
- temp = ROMPTR(dev, P.data[16]);
+ temp = ROMPTR(bios, P.data[16]);
else
NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
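The constants parsed in this file feed a linear calibration of the raw sensor reading. A hedged sketch of how they are typically combined (the shape approximates nv40_temp_get(); chipset-specific masking and fixups are omitted):

/* Approximate shape of the calibration applied to a raw sensor value;
 * mirrors nv40_temp_get() up to chipset-specific details. */
static int calibrated_temp_sketch(struct nouveau_pm_temp_sensor_constants *s,
				  int raw)
{
	int t = raw;

	t = t * s->slope_mult / s->slope_div;	/* linear slope */
	t = t + s->offset_mult / s->offset_div;	/* board-specific offset */
	t = t + s->offset_constant;		/* per-family constant */

	return t;	/* roughly degrees Celsius */
}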
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 2bf6c03..519a6b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -78,10 +78,9 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
- struct nouveau_mem *mem)
+ struct nouveau_mem *mem, dma_addr_t *list)
{
struct nouveau_vm *vm = vma->vm;
- dma_addr_t *list = mem->pages;
int big = vma->node->type != vm->spg_shift;
u32 offset = vma->node->offset + (delta >> 12);
u32 bits = vma->node->type - 12;
@@ -173,9 +172,9 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
vm->map_pgt(vpgd->obj, pde, vpgt->obj);
}
- mutex_unlock(&vm->mm.mutex);
+ mutex_unlock(&vm->mm->mutex);
nouveau_gpuobj_ref(NULL, &pgt);
- mutex_lock(&vm->mm.mutex);
+ mutex_lock(&vm->mm->mutex);
}
}
@@ -192,18 +191,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
pgt_size *= 8;
- mutex_unlock(&vm->mm.mutex);
+ mutex_unlock(&vm->mm->mutex);
ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &pgt);
- mutex_lock(&vm->mm.mutex);
+ mutex_lock(&vm->mm->mutex);
if (unlikely(ret))
return ret;
/* someone beat us to filling the PDE while we didn't have the lock */
if (unlikely(vpgt->refcount[big]++)) {
- mutex_unlock(&vm->mm.mutex);
+ mutex_unlock(&vm->mm->mutex);
nouveau_gpuobj_ref(NULL, &pgt);
- mutex_lock(&vm->mm.mutex);
+ mutex_lock(&vm->mm->mutex);
return 0;
}
@@ -224,10 +223,10 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
u32 fpde, lpde, pde;
int ret;
- mutex_lock(&vm->mm.mutex);
- ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
+ mutex_lock(&vm->mm->mutex);
+ ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
if (unlikely(ret != 0)) {
- mutex_unlock(&vm->mm.mutex);
+ mutex_unlock(&vm->mm->mutex);
return ret;
}
@@ -246,13 +245,13 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
if (ret) {
if (pde != fpde)
nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
- nouveau_mm_put(&vm->mm, vma->node);
- mutex_unlock(&vm->mm.mutex);
+ nouveau_mm_put(vm->mm, vma->node);
+ mutex_unlock(&vm->mm->mutex);
vma->node = NULL;
return ret;
}
}
- mutex_unlock(&vm->mm.mutex);
+ mutex_unlock(&vm->mm->mutex);
vma->vm = vm;
vma->offset = (u64)vma->node->offset << 12;
@@ -271,11 +270,11 @@ nouveau_vm_put(struct nouveau_vma *vma)
fpde = (vma->node->offset >> vm->pgt_bits);
lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
- mutex_lock(&vm->mm.mutex);
+ mutex_lock(&vm->mm->mutex);
nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
- nouveau_mm_put(&vm->mm, vma->node);
+ nouveau_mm_put(vm->mm, vma->node);
vma->node = NULL;
- mutex_unlock(&vm->mm.mutex);
+ mutex_unlock(&vm->mm->mutex);
}
int
@@ -307,7 +306,7 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
block = length;
} else
- if (dev_priv->card_type >= NV_C0) {
+ if (dev_priv->card_type == NV_C0) {
vm->map_pgt = nvc0_vm_map_pgt;
vm->map = nvc0_vm_map;
vm->map_sg = nvc0_vm_map_sg;
@@ -361,35 +360,32 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
nouveau_gpuobj_ref(pgd, &vpgd->obj);
- mutex_lock(&vm->mm.mutex);
+ mutex_lock(&vm->mm->mutex);
for (i = vm->fpde; i <= vm->lpde; i++)
vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
list_add(&vpgd->head, &vm->pgd_list);
- mutex_unlock(&vm->mm.mutex);
+ mutex_unlock(&vm->mm->mutex);
return 0;
}
static void
-nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
struct nouveau_vm_pgd *vpgd, *tmp;
- struct nouveau_gpuobj *pgd = NULL;
- if (!mpgd)
+ if (!pgd)
return;
- mutex_lock(&vm->mm.mutex);
+ mutex_lock(&vm->mm->mutex);
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
- if (vpgd->obj == mpgd) {
- pgd = vpgd->obj;
- list_del(&vpgd->head);
- kfree(vpgd);
- break;
- }
- }
- mutex_unlock(&vm->mm.mutex);
+ if (vpgd->obj != pgd)
+ continue;
- nouveau_gpuobj_ref(NULL, &pgd);
+ list_del(&vpgd->head);
+ nouveau_gpuobj_ref(NULL, &vpgd->obj);
+ kfree(vpgd);
+ }
+ mutex_unlock(&vm->mm->mutex);
}
static void
@@ -400,8 +396,8 @@ nouveau_vm_del(struct nouveau_vm *vm)
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
nouveau_vm_unlink(vm, vpgd->obj);
}
+ WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
- nouveau_mm_fini(&vm->mm);
kfree(vm->pgt);
kfree(vm);
}
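The mm.mutex churn above preserves a subtle pattern in nouveau_vm_map_pgt(): the mutex is dropped around nouveau_gpuobj_new(), which may sleep, then retaken, and the PDE refcount is rechecked because another thread may have filled the slot in the window. The idiom, sketched with hypothetical alloc_obj()/free_obj() helpers (called with the lock held):

/* Sketch of the drop-lock/allocate/relock-and-recheck idiom used by
 * nouveau_vm_map_pgt(); alloc_obj()/free_obj() are hypothetical. */
static int fill_slot_sketch(struct mutex *lock, void **slot)
{
	void *obj;

	mutex_unlock(lock);		/* never allocate under the lock */
	obj = alloc_obj();
	mutex_lock(lock);
	if (!obj)
		return -ENOMEM;

	if (*slot) {			/* someone beat us to it */
		mutex_unlock(lock);
		free_obj(obj);
		mutex_lock(lock);
		return 0;
	}

	*slot = obj;
	return 0;
}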
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 4fb6e72..c48a9fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -41,8 +41,6 @@ struct nouveau_vm_pgd {
};
struct nouveau_vma {
- struct list_head head;
- int refcount;
struct nouveau_vm *vm;
struct nouveau_mm_node *node;
u64 offset;
@@ -51,7 +49,7 @@ struct nouveau_vma {
struct nouveau_vm {
struct drm_device *dev;
- struct nouveau_mm mm;
+ struct nouveau_mm *mm;
int refcount;
struct list_head pgd_list;
@@ -89,7 +87,7 @@ void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
- struct nouveau_mem *);
+ struct nouveau_mem *, dma_addr_t *);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
index b010cb9..75e87274 100644
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -26,15 +26,15 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
-#include "nouveau_gpio.h"
-static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
+static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a };
static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
int
nouveau_voltage_gpio_get(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
u8 vid = 0;
int i;
@@ -43,7 +43,7 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
if (!(volt->vid_mask & (1 << i)))
continue;
- vid |= nouveau_gpio_func_get(dev, vidtag[i]) << i;
+ vid |= gpio->get(dev, vidtag[i]) << i;
}
return nouveau_volt_lvl_lookup(dev, vid);
@@ -53,6 +53,7 @@ int
nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
int vid, i;
@@ -64,7 +65,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
if (!(volt->vid_mask & (1 << i)))
continue;
- nouveau_gpio_func_set(dev, vidtag[i], !!(vid & (1 << i)));
+ gpio->set(dev, vidtag[i], !!(vid & (1 << i)));
}
return 0;
@@ -116,10 +117,10 @@ nouveau_volt_init(struct drm_device *dev)
return;
if (P.version == 1)
- volt = ROMPTR(dev, P.data[16]);
+ volt = ROMPTR(bios, P.data[16]);
else
if (P.version == 2)
- volt = ROMPTR(dev, P.data[12]);
+ volt = ROMPTR(bios, P.data[12]);
else {
NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
}
@@ -129,7 +130,7 @@ nouveau_volt_init(struct drm_device *dev)
return;
}
- volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
+ volt = ROMPTR(bios, bios->data[bios->offset + 0x98]);
}
if (!volt) {
@@ -169,13 +170,6 @@ nouveau_volt_init(struct drm_device *dev)
*/
vidshift = 2;
break;
- case 0x40:
- headerlen = volt[1];
- recordlen = volt[2];
- entries = volt[3]; /* not a clue what the entries are for.. */
- vidmask = volt[11]; /* guess.. */
- vidshift = 0;
- break;
default:
NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]);
return;
@@ -193,7 +187,7 @@ nouveau_volt_init(struct drm_device *dev)
return;
}
- if (!nouveau_gpio_func_valid(dev, vidtag[i])) {
+ if (!nouveau_bios_gpio_entry(dev, vidtag[i])) {
NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
return;
}
@@ -203,37 +197,16 @@ nouveau_volt_init(struct drm_device *dev)
}
/* parse vbios entries into common format */
- voltage->version = volt[0];
- if (voltage->version < 0x40) {
- voltage->nr_level = entries;
- voltage->level =
- kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
- if (!voltage->level)
- return;
-
- entry = volt + headerlen;
- for (i = 0; i < entries; i++, entry += recordlen) {
- voltage->level[i].voltage = entry[0] * 10000;
- voltage->level[i].vid = entry[1] >> vidshift;
- }
- } else {
- u32 volt_uv = ROM32(volt[4]);
- s16 step_uv = ROM16(volt[8]);
- u8 vid;
-
- voltage->nr_level = voltage->vid_mask + 1;
- voltage->level = kcalloc(voltage->nr_level,
- sizeof(*voltage->level), GFP_KERNEL);
- if (!voltage->level)
- return;
+ voltage->level = kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
+ if (!voltage->level)
+ return;
- for (vid = 0; vid <= voltage->vid_mask; vid++) {
- voltage->level[vid].voltage = volt_uv;
- voltage->level[vid].vid = vid;
- volt_uv += step_uv;
- }
+ entry = volt + headerlen;
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ voltage->level[i].voltage = entry[0];
+ voltage->level[i].vid = entry[1] >> vidshift;
}
-
+ voltage->nr_level = entries;
voltage->supported = true;
}
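nouveau_voltage_gpio_get() above assembles the VID by sampling one GPIO per set bit of vid_mask, then nouveau_volt_lvl_lookup() scans the parsed table for a match. A condensed sketch of that flow, with gpio_get() standing in for the per-chipset gpio->get() hook:

/* Condensed sketch of VID sampling plus table lookup, following
 * nouveau_voltage_gpio_get()/nouveau_volt_lvl_lookup() above. */
static int voltage_get_sketch(struct nouveau_pm_voltage *volt,
			      int (*gpio_get)(enum dcb_gpio_tag tag))
{
	u8 vid = 0;
	int i;

	for (i = 0; i < nr_vidtag; i++)		/* one GPIO per VID bit */
		if (volt->vid_mask & (1 << i))
			vid |= gpio_get(vidtag[i]) << i;

	for (i = 0; i < volt->nr_level; i++)
		if (volt->level[i].vid == vid)
			return volt->level[i].voltage;

	return -ENOENT;				/* no matching table entry */
}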
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 728d075..f1a3ae4 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -364,7 +364,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
/* framebuffer can be larger than crtc scanout area. */
- regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitches[0] / 8;
+ regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
@@ -377,9 +377,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
/* framebuffer can be larger than crtc scanout area. */
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
- XLATE(fb->pitches[0] / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
regp->CRTC[NV_CIO_CRE_42] =
- XLATE(fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+ XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
@@ -781,20 +781,11 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
- struct drm_framebuffer *drm_fb;
- struct nouveau_framebuffer *fb;
+ struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
int arb_burst, arb_lwm;
int ret;
- NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
-
- /* no fb bound */
- if (!atomic && !crtc->fb) {
- NV_DEBUG_KMS(dev, "No FB bound\n");
- return 0;
- }
-
-
/* If atomic, we want to switch to the fb we were passed, so
* now we update pointers to do that. (We don't pin; just
* assume we're already pinned and update the base address.)
@@ -803,8 +794,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
drm_fb = passed_fb;
fb = nouveau_framebuffer(passed_fb);
} else {
- drm_fb = crtc->fb;
- fb = nouveau_framebuffer(crtc->fb);
/* If not atomic, we can go ahead and pin, and unpin the
* old fb we were passed.
*/
@@ -835,18 +824,18 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
regp->ramdac_gen_ctrl);
- regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitches[0] >> 3;
+ regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
- XLATE(drm_fb->pitches[0] >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
regp->CRTC[NV_CIO_CRE_42] =
- XLATE(drm_fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+ XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
/* Update the framebuffer location. */
regp->fb_start = nv_crtc->fb.offset & ~3;
- regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8);
+ regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
/* Update the arbitration parameters. */
@@ -1046,7 +1035,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
- ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
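The pitch writes this file reverts to split the scanline offset (pitch / 8) across three CRTC registers: the low 8 bits land in CR13 (NV_CIO_CR_OFFSET_INDEX), bits 10:8 in CRE_RPC0 bits 7:5, and bit 11 in CRE_42. A worked sketch with XLATE() reduced to explicit shifts and masks (the CRE_42 bit position is an assumption based on NV_CIO_CRE_42_OFFSET_11):

/* Worked sketch of the pitch split used above; XLATE() reduced to
 * plain shift/mask. The CRE_42 bit position is assumed to be bit 6,
 * per NV_CIO_CRE_42_OFFSET_11 in nvreg.h. */
static void split_pitch_sketch(u32 pitch, u8 *cr13, u8 *rpc0, u8 *cre42)
{
	u32 off = pitch / 8;			/* offset unit: 8 bytes */

	*cr13  = off & 0xff;			/* bits 7:0 */
	*rpc0  = ((off >> 8) & 0x7) << 5;	/* bits 10:8 -> RPC0[7:5] */
	*cre42 = ((off >> 11) & 0x1) << 6;	/* bit 11 -> CRE_42 (assumed) */
}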
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index f180dcf..2d6bfd0 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -32,7 +32,6 @@
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "nouveau_hw.h"
-#include "nouveau_gpio.h"
#include "nvreg.h"
int nv04_dac_output_offset(struct drm_encoder *encoder)
@@ -221,6 +220,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -252,11 +252,11 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
}
- saved_gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
- saved_gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
+ saved_gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
+ saved_gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
- nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
- nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+ gpio->set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
+ gpio->set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
msleep(4);
@@ -306,8 +306,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
- nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
- nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+ gpio->set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
+ gpio->set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
return sample;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 473f30a..752440c 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -289,7 +289,6 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_display_mode *output_mode = &nv_encoder->mode;
- struct drm_connector *connector = &nv_connector->base;
uint32_t mode_ratio, panel_ratio;
NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
@@ -341,15 +340,10 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
output_mode->clock > 165000)
regp->fp_control |= (2 << 24);
if (nv_encoder->dcb->type == OUTPUT_LVDS) {
- bool duallink = false, dummy;
- if (nv_connector->edid &&
- nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
- duallink = (((u8 *)nv_connector->edid)[121] == 2);
- } else {
- nouveau_bios_parse_lvds_table(dev, output_mode->clock,
- &duallink, &dummy);
- }
+ bool duallink, dummy;
+ nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+ &duallink, &dummy);
if (duallink)
regp->fp_control |= (8 << 28);
} else
@@ -413,9 +407,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
}
/* Output property. */
- if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
- (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
- encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
+ if (nv_connector->use_dithering) {
if (dev_priv->chipset == 0x11)
regp->dither = savep->dither | 0x00010000;
else {
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 7047d37..1715e14 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -126,6 +126,27 @@ nv04_display_create(struct drm_device *dev)
nouveau_hw_save_vga_fonts(dev, 1);
+ drm_mode_config_init(dev);
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dithering_property(dev);
+
+ dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ switch (dev_priv->card_type) {
+ case NV_04:
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ break;
+ default:
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ break;
+ }
+
+ dev->mode_config.fb_base = dev_priv->fb_phys;
+
nv04_crtc_create(dev, 0);
if (nv_two_heads(dev))
nv04_crtc_create(dev, 1);
@@ -214,6 +235,8 @@ nv04_display_destroy(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->restore(crtc);
+ drm_mode_config_cleanup(dev);
+
nouveau_hw_save_vga_fonts(dev, 0);
}
@@ -243,11 +266,6 @@ nv04_display_init(struct drm_device *dev)
return 0;
}
-void
-nv04_display_fini(struct drm_device *dev)
-{
-}
-
static void
nv04_vblank_crtc0_isr(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
index d5eedd6..638cf60 100644
--- a/drivers/gpu/drm/nouveau/nv04_fb.c
+++ b/drivers/gpu/drm/nouveau/nv04_fb.c
@@ -4,40 +4,6 @@
#include "nouveau_drm.h"
int
-nv04_fb_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
-
- if (boot0 & 0x00000100) {
- dev_priv->vram_size = ((boot0 >> 12) & 0xf) * 2 + 2;
- dev_priv->vram_size *= 1024 * 1024;
- } else {
- switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
- case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
- dev_priv->vram_size = 32 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
- dev_priv->vram_size = 16 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
- dev_priv->vram_size = 8 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
- dev_priv->vram_size = 4 * 1024 * 1024;
- break;
- }
- }
-
- if ((boot0 & 0x00000038) <= 0x10)
- dev_priv->vram_type = NV_MEM_TYPE_SGRAM;
- else
- dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
-
- return 0;
-}
-
-int
nv04_fb_init(struct drm_device *dev)
{
/* This is what the DDX did for NV_ARCH_04, but an mmio-trace shows
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index dbdea8e..3626ee7 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -450,13 +450,13 @@ nv04_graph_context_del(struct nouveau_channel *chan, int engine)
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ nv04_graph_fifo_access(dev, false);
/* Unload the context if it's the currently active one */
if (nv04_graph_channel(dev) == chan)
nv04_graph_unload_context(dev);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ nv04_graph_fifo_access(dev, true);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
@@ -538,18 +538,24 @@ nv04_graph_init(struct drm_device *dev, int engine)
}
static int
-nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
+nv04_graph_fini(struct drm_device *dev, int engine)
{
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- return -EBUSY;
- }
nv04_graph_unload_context(dev);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
return 0;
}
+void
+nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
+{
+ if (enabled)
+ nv_wr32(dev, NV04_PGRAPH_FIFO,
+ nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
+ else
+ nv_wr32(dev, NV04_PGRAPH_FIFO,
+ nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
+}
+
static int
nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
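nv04_graph_fifo_access(), reintroduced above, is the bracket every context-teardown hunk in this patch uses: stop PGRAPH's FIFO fetches, swap the context out, re-enable, all under context_switch_lock. The caller shape, sketched:

/* Sketch of the disable/operate/re-enable bracket that callers of
 * nv04_graph_fifo_access() use in the hunks above. */
static void with_fifo_stopped_sketch(struct drm_device *dev,
				     void (*op)(struct drm_device *))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv04_graph_fifo_access(dev, false);	/* stall PGRAPH fetches */
	op(dev);				/* e.g. unload the context */
	nv04_graph_fifo_access(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}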
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index c1248e0..b8611b9 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -28,31 +28,6 @@ int nv04_instmem_init(struct drm_device *dev)
/* RAMIN always available */
dev_priv->ramin_available = true;
- /* Reserve space at end of VRAM for PRAMIN */
- if (dev_priv->card_type >= NV_40) {
- u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
- u32 rsvd;
-
- /* estimate grctx size, the magics come from nv40_grctx.c */
- if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
- else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
- else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
- else rsvd = 0x4a40 * vs;
- rsvd += 16 * 1024;
- rsvd *= dev_priv->engine.fifo.channels;
-
- /* pciegart table */
- if (pci_is_pcie(dev->pdev))
- rsvd += 512 * 1024;
-
- /* object storage */
- rsvd += 512 * 1024;
-
- dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
- } else {
- dev_priv->ramin_rsvd_vram = 512 * 1024;
- }
-
/* Setup shared RAMHT */
ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
NVOBJ_FLAG_ZERO_ALLOC, &ramht);
@@ -137,8 +112,7 @@ nv04_instmem_resume(struct drm_device *dev)
}
int
-nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
- u32 size, u32 align)
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_mm_node *ramin = NULL;
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 6e75899..eb1c70d 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -27,111 +27,64 @@
#include "nouveau_hw.h"
#include "nouveau_pm.h"
-int
-nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- int ret;
-
- ret = nouveau_hw_get_clock(dev, PLL_CORE);
- if (ret < 0)
- return ret;
- perflvl->core = ret;
-
- ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
- if (ret < 0)
- return ret;
- perflvl->memory = ret;
-
- return 0;
-}
-
-struct nv04_pm_clock {
+struct nv04_pm_state {
struct pll_lims pll;
struct nouveau_pll_vals calc;
};
-struct nv04_pm_state {
- struct nv04_pm_clock core;
- struct nv04_pm_clock memory;
-};
-
-static int
-calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
+int
+nv04_pm_clock_get(struct drm_device *dev, u32 id)
{
- int ret;
-
- ret = get_pll_limits(dev, id, &clk->pll);
- if (ret)
- return ret;
-
- ret = nouveau_calc_pll_mnp(dev, &clk->pll, khz, &clk->calc);
- if (!ret)
- return -EINVAL;
-
- return 0;
+ return nouveau_hw_get_clock(dev, id);
}
void *
-nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ u32 id, int khz)
{
- struct nv04_pm_state *info;
+ struct nv04_pm_state *state;
int ret;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
return ERR_PTR(-ENOMEM);
- ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
- if (ret)
- goto error;
+ ret = get_pll_limits(dev, id, &state->pll);
+ if (ret) {
+ kfree(state);
+ return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+ }
- if (perflvl->memory) {
- ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
- if (ret)
- goto error;
+ ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc);
+ if (!ret) {
+ kfree(state);
+ return ERR_PTR(-EINVAL);
}
- return info;
-error:
- kfree(info);
- return ERR_PTR(ret);
+ return state;
}
-static void
-prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
+void
+nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 reg = clk->pll.reg;
+ struct nv04_pm_state *state = pre_state;
+ u32 reg = state->pll.reg;
/* thank the insane nouveau_hw_setpll() interface for this */
if (dev_priv->card_type >= NV_40)
reg += 4;
- nouveau_hw_setpll(dev, reg, &clk->calc);
-}
-
-int
-nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- struct nv04_pm_state *state = pre_state;
+ nouveau_hw_setpll(dev, reg, &state->calc);
- prog_pll(dev, &state->core);
+ if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) {
+ if (dev_priv->card_type == NV_20)
+ nv_mask(dev, 0x1002c4, 0, 1 << 20);
- if (state->memory.pll.reg) {
- prog_pll(dev, &state->memory);
- if (dev_priv->card_type < NV_30) {
- if (dev_priv->card_type == NV_20)
- nv_mask(dev, 0x1002c4, 0, 1 << 20);
-
- /* Reset the DLLs */
- nv_mask(dev, 0x1002c0, 0, 1 << 8);
- }
+ /* Reset the DLLs */
+ nv_mask(dev, 0x1002c0, 0, 1 << 8);
}
- ptimer->init(dev);
-
kfree(state);
- return 0;
}
+
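The restored nv04_pm_clock_pre()/nv04_pm_clock_set() pair is a two-phase reclock: phase one reads the PLL limits and computes M/N/P without touching hardware, so it can fail safely; phase two programs the PLL and frees the state. A sketch of the calling convention (perflvl is unused by the body above, so it is passed as NULL here; target_khz is hypothetical):

/* Sketch of the two-phase reclock convention above: pre() may return
 * NULL (no such PLL, nothing to do), an ERR_PTR (abort, HW untouched),
 * or a state blob that set() later programs and frees. */
static int reclock_sketch(struct drm_device *dev, u32 pll_id, int target_khz)
{
	void *state = nv04_pm_clock_pre(dev, NULL, pll_id, target_khz);

	if (!state)
		return 0;
	if (IS_ERR(state))
		return PTR_ERR(state);

	nv04_pm_clock_set(dev, state);	/* programs the PLL, frees state */
	return 0;
}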
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
index 55c9452..1d09ddd 100644
--- a/drivers/gpu/drm/nouveau/nv04_timer.c
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -2,80 +2,47 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
-#include "nouveau_hw.h"
int
nv04_timer_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 m, n, d;
-
nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
- /* aim for 31.25MHz, which gives us nanosecond timestamps */
- d = 1000000 / 32;
-
- /* determine base clock for timer source */
- if (dev_priv->chipset < 0x40) {
- n = nouveau_hw_get_clock(dev, PLL_CORE);
- } else
- if (dev_priv->chipset == 0x40) {
- /*XXX: figure this out */
- n = 0;
- } else {
- n = dev_priv->crystal;
- m = 1;
- while (n < (d * 2)) {
- n += (n / m);
- m++;
- }
-
- nv_wr32(dev, 0x009220, m - 1);
- }
-
- if (!n) {
- NV_WARN(dev, "PTIMER: unknown input clock freq\n");
- if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
- !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
- nv_wr32(dev, NV04_PTIMER_NUMERATOR, 1);
- nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 1);
- }
- return 0;
- }
-
- /* reduce ratio to acceptable values */
- while (((n % 5) == 0) && ((d % 5) == 0)) {
- n /= 5;
- d /= 5;
+ /* Just use the pre-existing values when possible for now; these regs
+ * are not written in nv (driver writer missed a /4 on the address), and
+ * writing 8 and 3 to the correct regs breaks the timings on the LVDS
+ * hardware sequencing microcode.
+ * A correct solution (involving calculations with the GPU PLL) can
+ * be done when kernel modesetting lands
+ */
+ if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
+ !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
+ nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008);
+ nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003);
}
- while (((n % 2) == 0) && ((d % 2) == 0)) {
- n /= 2;
- d /= 2;
- }
-
- while (n > 0xffff || d > 0xffff) {
- n >>= 1;
- d >>= 1;
- }
-
- nv_wr32(dev, NV04_PTIMER_NUMERATOR, n);
- nv_wr32(dev, NV04_PTIMER_DENOMINATOR, d);
return 0;
}
-u64
+uint64_t
nv04_timer_read(struct drm_device *dev)
{
- u32 hi, lo;
-
+ uint32_t low;
+ /* From kmmio dumps on nv28 this looks like how the blob does this.
+ * It reads the high dword twice, before and after.
+ * The only explanation seems to be that the 64-bit timer counter
+ * advances between high and low dword reads and may corrupt the
+ * result. Not confirmed.
+ */
+ uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
+ uint32_t high1;
do {
- hi = nv_rd32(dev, NV04_PTIMER_TIME_1);
- lo = nv_rd32(dev, NV04_PTIMER_TIME_0);
- } while (hi != nv_rd32(dev, NV04_PTIMER_TIME_1));
-
- return ((u64)hi << 32 | lo);
+ high1 = high2;
+ low = nv_rd32(dev, NV04_PTIMER_TIME_0);
+ high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
+ } while (high1 != high2);
+ return (((uint64_t)high2) << 32) | (uint64_t)low;
}
void
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index 420b1608..f78181a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,16 +3,81 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+static struct drm_mm_node *
+nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct drm_mm_node *mem;
+ int ret;
+
+ ret = drm_mm_pre_get(&pfb->tag_heap);
+ if (ret)
+ return NULL;
+
+ spin_lock(&dev_priv->tile.lock);
+ mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
+ if (mem)
+ mem = drm_mm_get_block_atomic(mem, size, 0);
+ spin_unlock(&dev_priv->tile.lock);
+
+ return mem;
+}
+
+static void
+nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ spin_lock(&dev_priv->tile.lock);
+ drm_mm_put_block(mem);
+ spin_unlock(&dev_priv->tile.lock);
+}
+
void
nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
uint32_t size, uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+ int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
- tile->addr = 0x80000000 | addr;
+ tile->addr = addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
+
+ if (dev_priv->card_type == NV_20) {
+ if (flags & NOUVEAU_GEM_TILE_ZETA) {
+ /*
+ * Allocate some of the on-die tag memory,
+ * used to store Z compression meta-data (most
+ * likely just a bitmap determining if a given
+ * tile is compressed or not).
+ */
+ tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
+
+ if (tile->tag_mem) {
+ /* Enable Z compression */
+ if (dev_priv->chipset >= 0x25)
+ tile->zcomp = tile->tag_mem->start |
+ (bpp == 16 ?
+ NV25_PFB_ZCOMP_MODE_16 :
+ NV25_PFB_ZCOMP_MODE_32);
+ else
+ tile->zcomp = tile->tag_mem->start |
+ NV20_PFB_ZCOMP_EN |
+ (bpp == 16 ? 0 :
+ NV20_PFB_ZCOMP_MODE_32);
+ }
+
+ tile->addr |= 3;
+ } else {
+ tile->addr |= 1;
+ }
+
+ } else {
+ tile->addr |= 1 << 31;
+ }
}
void
@@ -21,6 +86,11 @@ nv10_fb_free_tile_region(struct drm_device *dev, int i)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+ if (tile->tag_mem) {
+ nv20_fb_free_tag(dev, tile->tag_mem);
+ tile->tag_mem = NULL;
+ }
+
tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
}
@@ -33,48 +103,9 @@ nv10_fb_set_tile_region(struct drm_device *dev, int i)
nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
-}
-
-int
-nv1a_fb_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct pci_dev *bridge;
- uint32_t mem, mib;
-
- bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
- if (!bridge) {
- NV_ERROR(dev, "no bridge device\n");
- return 0;
- }
-
- if (dev_priv->chipset == 0x1a) {
- pci_read_config_dword(bridge, 0x7c, &mem);
- mib = ((mem >> 6) & 31) + 1;
- } else {
- pci_read_config_dword(bridge, 0x84, &mem);
- mib = ((mem >> 4) & 127) + 1;
- }
-
- dev_priv->vram_size = mib * 1024 * 1024;
- return 0;
-}
-
-int
-nv10_fb_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 fifo_data = nv_rd32(dev, NV04_PFB_FIFO_DATA);
- u32 cfg0 = nv_rd32(dev, 0x100200);
- dev_priv->vram_size = fifo_data & NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
-
- if (cfg0 & 0x00000001)
- dev_priv->vram_type = NV_MEM_TYPE_DDR1;
- else
- dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
-
- return 0;
+ if (dev_priv->card_type == NV_20)
+ nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
}
int
@@ -84,8 +115,14 @@ nv10_fb_init(struct drm_device *dev)
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
int i;
- /* Turn all the tiling regions off. */
pfb->num_tiles = NV10_PFB_TILE__SIZE;
+
+ if (dev_priv->card_type == NV_20)
+ drm_mm_init(&pfb->tag_heap, 0,
+ (dev_priv->chipset >= 0x25 ?
+ 64 * 1024 : 32 * 1024));
+
+ /* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
pfb->set_tile_region(dev, i);
@@ -101,4 +138,7 @@ nv10_fb_takedown(struct drm_device *dev)
for (i = 0; i < pfb->num_tiles; i++)
pfb->free_tile_region(dev, i);
+
+ if (dev_priv->card_type == NV_20)
+ drm_mm_takedown(&pfb->tag_heap);
}
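The size / 256 passed to nv20_fb_alloc_tag() above implies one tag unit per 256 bytes of tiled VRAM, so even a modest zeta region is a large slice of the 32-64 KiB tag heap (an 8 MiB region already needs 32 KiB), which is why the allocation is allowed to fail and Z compression is then simply left disabled. The sizing as a one-line check:

/* One tag unit per 256 bytes of tiled VRAM; units assumed from the
 * nv20_fb_alloc_tag(dev, size / 256) call above. */
static u32 zcomp_tag_size_sketch(u32 region_bytes)
{
	return region_bytes / 256;	/* 8 MiB region -> 32 KiB of tags */
}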
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
index 550ad3f..007fc29 100644
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
@@ -27,97 +27,66 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_hw.h"
-#include "nouveau_gpio.h"
-int
-nv10_gpio_sense(struct drm_device *dev, int line)
+static bool
+get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
+ uint32_t *mask)
{
- if (line < 2) {
- line = line * 16;
- line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
- return !!(line & 0x0100);
- } else
- if (line < 10) {
- line = (line - 2) * 4;
- line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
- return !!(line & 0x04);
- } else
- if (line < 14) {
- line = (line - 10) * 4;
- line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
- return !!(line & 0x04);
- }
+ if (ent->line < 2) {
+ *reg = NV_PCRTC_GPIO;
+ *shift = ent->line * 16;
+ *mask = 0x11;
- return -EINVAL;
-}
+ } else if (ent->line < 10) {
+ *reg = NV_PCRTC_GPIO_EXT;
+ *shift = (ent->line - 2) * 4;
+ *mask = 0x3;
+
+ } else if (ent->line < 14) {
+ *reg = NV_PCRTC_850;
+ *shift = (ent->line - 10) * 4;
+ *mask = 0x3;
-int
-nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
-{
- u32 reg, mask, data;
-
- if (line < 2) {
- line = line * 16;
- reg = NV_PCRTC_GPIO;
- mask = 0x00000011;
- data = (dir << 4) | out;
- } else
- if (line < 10) {
- line = (line - 2) * 4;
- reg = NV_PCRTC_GPIO_EXT;
- mask = 0x00000003 << ((line - 2) * 4);
- data = (dir << 1) | out;
- } else
- if (line < 14) {
- line = (line - 10) * 4;
- reg = NV_PCRTC_850;
- mask = 0x00000003;
- data = (dir << 1) | out;
} else {
- return -EINVAL;
+ return false;
}
- mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
- NVWriteCRTC(dev, 0, reg, mask | (data << line));
- return 0;
+ return true;
}
-void
-nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
+int
+nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
{
- u32 mask = 0x00010001 << line;
+ struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
+ uint32_t reg, shift, mask, value;
- nv_wr32(dev, 0x001104, mask);
- nv_mask(dev, 0x001144, mask, on ? mask : 0);
-}
+ if (!ent)
+ return -ENODEV;
-static void
-nv10_gpio_isr(struct drm_device *dev)
-{
- u32 intr = nv_rd32(dev, 0x1104);
- u32 hi = (intr & 0x0000ffff) >> 0;
- u32 lo = (intr & 0xffff0000) >> 16;
+ if (!get_gpio_location(ent, &reg, &shift, &mask))
+ return -ENODEV;
- nouveau_gpio_isr(dev, 0, hi | lo);
+ value = NVReadCRTC(dev, 0, reg) >> shift;
- nv_wr32(dev, 0x001104, intr);
+ return (ent->invert ? 1 : 0) ^ (value & 1);
}
int
-nv10_gpio_init(struct drm_device *dev)
+nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
{
- nv_wr32(dev, 0x001140, 0x00000000);
- nv_wr32(dev, 0x001100, 0xffffffff);
- nv_wr32(dev, 0x001144, 0x00000000);
- nv_wr32(dev, 0x001104, 0xffffffff);
- nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
- return 0;
-}
+ struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
+ uint32_t reg, shift, mask, value;
-void
-nv10_gpio_fini(struct drm_device *dev)
-{
- nv_wr32(dev, 0x001140, 0x00000000);
- nv_wr32(dev, 0x001144, 0x00000000);
- nouveau_irq_unregister(dev, 28);
+ if (!ent)
+ return -ENODEV;
+
+ if (!get_gpio_location(ent, &reg, &shift, &mask))
+ return -ENODEV;
+
+ value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
+ mask = ~(mask << shift);
+
+ NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
+
+ return 0;
}
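With the table-driven nv10_gpio_get()/nv10_gpio_set() restored here, callers address lines by DCB tag instead of raw CRTC registers; the TV-DAC load-detection code later in this patch is the typical consumer. A usage sketch along those lines:

/* Usage sketch: sample a GPIO by DCB tag, drive it, then restore it,
 * following the load-detect callers later in this patch. */
static int toggle_tvdac_sketch(struct drm_device *dev)
{
	int saved = nv10_gpio_get(dev, DCB_GPIO_TVDAC0);

	if (saved < 0)			/* no DCB entry for this tag */
		return saved;

	nv10_gpio_set(dev, DCB_GPIO_TVDAC0, 1);		/* drive active */
	/* ... perform load detection ... */
	nv10_gpio_set(dev, DCB_GPIO_TVDAC0, saved);	/* restore state */
	return 0;
}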
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 7255e4a..0930c6c 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -708,8 +708,8 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ nv04_graph_fifo_access(dev, true);
+ nv04_graph_fifo_access(dev, false);
/* Restore the FIFO state */
for (i = 0; i < ARRAY_SIZE(fifo); i++)
@@ -879,13 +879,13 @@ nv10_graph_context_del(struct nouveau_channel *chan, int engine)
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ nv04_graph_fifo_access(dev, false);
/* Unload the context if it's the currently active one */
if (nv10_graph_channel(dev) == chan)
nv10_graph_unload_context(dev);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ nv04_graph_fifo_access(dev, true);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
@@ -957,13 +957,8 @@ nv10_graph_init(struct drm_device *dev, int engine)
}
static int
-nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
+nv10_graph_fini(struct drm_device *dev, int engine)
{
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- return -EBUSY;
- }
nv10_graph_unload_context(dev);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 696d7e7..3900ceb 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -30,7 +30,6 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
-#include "nouveau_gpio.h"
#include "nouveau_hw.h"
#include "nv17_tv.h"
@@ -38,6 +37,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -53,8 +53,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
head = (dacclk & 0x100) >> 8;
/* Save the previous state. */
- gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
- gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
+ gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
+ gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -65,8 +65,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
/* Prepare the DAC for load detection. */
- nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true);
- nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true);
+ gpio->set(dev, DCB_GPIO_TVDAC1, true);
+ gpio->set(dev, DCB_GPIO_TVDAC0, true);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -111,8 +111,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
- nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1);
- nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0);
+ gpio->set(dev, DCB_GPIO_TVDAC1, gpio1);
+ gpio->set(dev, DCB_GPIO_TVDAC0, gpio0);
return sample;
}
@@ -357,6 +357,8 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
@@ -381,8 +383,8 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
nv_load_ptv(dev, regs, 200);
- nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
- nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+ gpio->set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
+ gpio->set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
diff --git a/drivers/gpu/drm/nouveau/nv20_fb.c b/drivers/gpu/drm/nouveau/nv20_fb.c
deleted file mode 100644
index 19bd640..0000000
--- a/drivers/gpu/drm/nouveau/nv20_fb.c
+++ /dev/null
@@ -1,148 +0,0 @@
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-
-static struct drm_mm_node *
-nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct drm_mm_node *mem;
- int ret;
-
- ret = drm_mm_pre_get(&pfb->tag_heap);
- if (ret)
- return NULL;
-
- spin_lock(&dev_priv->tile.lock);
- mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
- if (mem)
- mem = drm_mm_get_block_atomic(mem, size, 0);
- spin_unlock(&dev_priv->tile.lock);
-
- return mem;
-}
-
-static void
-nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node **pmem)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_mm_node *mem = *pmem;
- if (mem) {
- spin_lock(&dev_priv->tile.lock);
- drm_mm_put_block(mem);
- spin_unlock(&dev_priv->tile.lock);
- *pmem = NULL;
- }
-}
-
-void
-nv20_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch, uint32_t flags)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
-
- tile->addr = 0x00000001 | addr;
- tile->limit = max(1u, addr + size) - 1;
- tile->pitch = pitch;
-
- /* Allocate some of the on-die tag memory, used to store Z
- * compression meta-data (most likely just a bitmap determining
- * if a given tile is compressed or not).
- */
- if (flags & NOUVEAU_GEM_TILE_ZETA) {
- tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
- if (tile->tag_mem) {
- /* Enable Z compression */
- tile->zcomp = tile->tag_mem->start;
- if (dev_priv->chipset >= 0x25) {
- if (bpp == 16)
- tile->zcomp |= NV25_PFB_ZCOMP_MODE_16;
- else
- tile->zcomp |= NV25_PFB_ZCOMP_MODE_32;
- } else {
- tile->zcomp |= NV20_PFB_ZCOMP_EN;
- if (bpp != 16)
- tile->zcomp |= NV20_PFB_ZCOMP_MODE_32;
- }
- }
-
- tile->addr |= 2;
- }
-}
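
The Z-compression setup in the deleted nv20_fb_init_tile_region() reduces to
a small flag derivation. A sketch of just that step, reusing the register
flags from the code above (the helper itself is hypothetical):

static uint32_t nv20_zcomp_flags(int chipset, int bpp, uint32_t tag_start)
{
	uint32_t zcomp = tag_start;	/* start offset in on-die tag RAM */

	if (chipset >= 0x25) {
		zcomp |= (bpp == 16) ? NV25_PFB_ZCOMP_MODE_16 :
				       NV25_PFB_ZCOMP_MODE_32;
	} else {
		zcomp |= NV20_PFB_ZCOMP_EN;
		if (bpp != 16)
			zcomp |= NV20_PFB_ZCOMP_MODE_32;
	}
	return zcomp;
}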
-
-void
-nv20_fb_free_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
- nv20_fb_free_tag(dev, &tile->tag_mem);
-}
-
-void
-nv20_fb_set_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
- nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
-}
-
-int
-nv20_fb_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 mem_size = nv_rd32(dev, 0x10020c);
- u32 pbus1218 = nv_rd32(dev, 0x001218);
-
- dev_priv->vram_size = mem_size & 0xff000000;
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_GDDR2; break;
- }
-
- return 0;
-}
-
-int
-nv20_fb_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- int i;
-
- if (dev_priv->chipset >= 0x25)
- drm_mm_init(&pfb->tag_heap, 0, 64 * 1024);
- else
- drm_mm_init(&pfb->tag_heap, 0, 32 * 1024);
-
- /* Turn all the tiling regions off. */
- pfb->num_tiles = NV10_PFB_TILE__SIZE;
- for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_tile_region(dev, i);
-
- return 0;
-}
-
-void
-nv20_fb_takedown(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- int i;
-
- for (i = 0; i < pfb->num_tiles; i++)
- pfb->free_tile_region(dev, i);
-
- drm_mm_takedown(&pfb->tag_heap);
-}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 183e375..affc7d7 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -454,13 +454,13 @@ nv20_graph_context_del(struct nouveau_channel *chan, int engine)
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ nv04_graph_fifo_access(dev, false);
/* Unload the context if it's the currently active one */
if (nv10_graph_channel(dev) == chan)
nv20_graph_unload_context(dev);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ nv04_graph_fifo_access(dev, true);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
@@ -654,13 +654,8 @@ nv30_graph_init(struct drm_device *dev, int engine)
}
int
-nv20_graph_fini(struct drm_device *dev, int engine, bool suspend)
+nv20_graph_fini(struct drm_device *dev, int engine)
{
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- return -EBUSY;
- }
nv20_graph_unload_context(dev);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
return 0;
@@ -758,7 +753,6 @@ nv20_graph_create(struct drm_device *dev)
break;
default:
NV_ERROR(dev, "PGRAPH: unknown chipset\n");
- kfree(pgraph);
return 0;
}
} else {
@@ -780,7 +774,6 @@ nv20_graph_create(struct drm_device *dev)
break;
default:
NV_ERROR(dev, "PGRAPH: unknown chipset\n");
- kfree(pgraph);
return 0;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv31_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c
deleted file mode 100644
index 6f06a07..0000000
--- a/drivers/gpu/drm/nouveau/nv31_mpeg.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
-
-struct nv31_mpeg_engine {
- struct nouveau_exec_engine base;
- atomic_t refcount;
-};
-
-
-static int
-nv31_mpeg_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
-
- if (!atomic_add_unless(&pmpeg->refcount, 1, 1))
- return -EBUSY;
-
- chan->engctx[engine] = (void *)0xdeadcafe;
- return 0;
-}
-
-static void
-nv31_mpeg_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
- atomic_dec(&pmpeg->refcount);
- chan->engctx[engine] = NULL;
-}
-
-static int
-nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ctx = NULL;
- unsigned long flags;
- int ret;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &ctx);
- if (ret)
- return ret;
-
- nv_wo32(ctx, 0x78, 0x02001ec1);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
- if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
- nv_wr32(dev, 0x00330c, ctx->pinst >> 4);
- nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4);
- nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- chan->engctx[engine] = ctx;
- return 0;
-}
-
-static void
-nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_gpuobj *ctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- unsigned long flags;
- u32 inst = 0x80000000 | (ctx->pinst >> 4);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
- if (nv_rd32(dev, 0x00b318) == inst)
- nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- nouveau_gpuobj_ref(NULL, &ctx);
- chan->engctx[engine] = NULL;
-}
-
-static int
-nv31_mpeg_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &obj);
- if (ret)
- return ret;
- obj->engine = 2;
- obj->class = class;
-
- nv_wo32(obj, 0x00, class);
-
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
-}
-
-static int
-nv31_mpeg_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
- int i;
-
- /* VPE init */
- nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
- nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
- nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
-
- for (i = 0; i < dev_priv->engine.fb.num_tiles; i++)
- pmpeg->base.set_tile_region(dev, i);
-
- /* PMPEG init */
- nv_wr32(dev, 0x00b32c, 0x00000000);
- nv_wr32(dev, 0x00b314, 0x00000100);
- nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031);
- nv_wr32(dev, 0x00b300, 0x02001ec1);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
-
- nv_wr32(dev, 0x00b100, 0xffffffff);
- nv_wr32(dev, 0x00b140, 0xffffffff);
-
- if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
-nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
-{
- /*XXX: context save? */
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
- nv_wr32(dev, 0x00b140, 0x00000000);
- return 0;
-}
-
-static int
-nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
- struct drm_device *dev = chan->dev;
- u32 inst = data << 4;
- u32 dma0 = nv_ri32(dev, inst + 0);
- u32 dma1 = nv_ri32(dev, inst + 4);
- u32 dma2 = nv_ri32(dev, inst + 8);
- u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
- u32 size = dma1 + 1;
-
- /* only allow linear DMA objects */
- if (!(dma0 & 0x00002000))
- return -EINVAL;
-
- if (mthd == 0x0190) {
- /* DMA_CMD */
- nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000));
- nv_wr32(dev, 0x00b334, base);
- nv_wr32(dev, 0x00b324, size);
- } else
- if (mthd == 0x01a0) {
- /* DMA_DATA */
- nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
- nv_wr32(dev, 0x00b360, base);
- nv_wr32(dev, 0x00b364, size);
- } else {
- /* DMA_IMAGE, VRAM only */
- if (dma0 & 0x000c0000)
- return -EINVAL;
-
- nv_wr32(dev, 0x00b370, base);
- nv_wr32(dev, 0x00b374, size);
- }
-
- return 0;
-}
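
The deleted method handler validates and unpacks a DMA object straight from
instance memory. A sketch of that decode with the same field extraction; the
helper name is hypothetical, and reading bits 16-17 as a memory-target field
is an assumption based on how they are masked into 0x00b300 above:

static int mpeg_dma_decode_sketch(struct drm_device *dev, u32 inst,
				  u32 *base, u32 *size)
{
	u32 dma0 = nv_ri32(dev, inst + 0);	/* flags (bit 13 = linear) */
	u32 dma1 = nv_ri32(dev, inst + 4);	/* limit, inclusive */
	u32 dma2 = nv_ri32(dev, inst + 8);	/* base address */

	if (!(dma0 & 0x00002000))		/* reject non-linear objects */
		return -EINVAL;

	*base = (dma2 & 0xfffff000) | (dma0 >> 20);
	*size = dma1 + 1;
	return 0;
}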
-
-static int
-nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ctx;
- unsigned long flags;
- int i;
-
- /* hardcode drm channel id on nv3x, so swmthd lookup works */
- if (dev_priv->card_type < NV_40)
- return 0;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (!dev_priv->channels.ptr[i])
- continue;
-
- ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
- if (ctx && ctx->pinst == inst)
- break;
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return i;
-}
-
-static void
-nv31_vpe_set_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch);
- nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit);
- nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr);
-}
-
-static void
-nv31_mpeg_isr(struct drm_device *dev)
-{
- u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
- u32 chid = nv31_mpeg_isr_chid(dev, inst);
- u32 stat = nv_rd32(dev, 0x00b100);
- u32 type = nv_rd32(dev, 0x00b230);
- u32 mthd = nv_rd32(dev, 0x00b234);
- u32 data = nv_rd32(dev, 0x00b238);
- u32 show = stat;
-
- if (stat & 0x01000000) {
- /* happens on initial binding of the object */
- if (type == 0x00000020 && mthd == 0x0000) {
- nv_mask(dev, 0x00b308, 0x00000000, 0x00000000);
- show &= ~0x01000000;
- }
-
- if (type == 0x00000010) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, 0x3174, mthd, data))
- show &= ~0x01000000;
- }
- }
-
- nv_wr32(dev, 0x00b100, stat);
- nv_wr32(dev, 0x00b230, 0x00000001);
-
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PMPEG: Ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- chid, inst, stat, type, mthd, data);
- }
-}
-
-static void
-nv31_vpe_isr(struct drm_device *dev)
-{
- if (nv_rd32(dev, 0x00b100))
- nv31_mpeg_isr(dev);
-
- if (nv_rd32(dev, 0x00b800)) {
- u32 stat = nv_rd32(dev, 0x00b800);
- NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
- nv_wr32(dev, 0xb800, stat);
- }
-}
-
-static void
-nv31_mpeg_destroy(struct drm_device *dev, int engine)
-{
- struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 0);
-
- NVOBJ_ENGINE_DEL(dev, MPEG);
- kfree(pmpeg);
-}
-
-int
-nv31_mpeg_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv31_mpeg_engine *pmpeg;
-
- pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
- if (!pmpeg)
- return -ENOMEM;
- atomic_set(&pmpeg->refcount, 0);
-
- pmpeg->base.destroy = nv31_mpeg_destroy;
- pmpeg->base.init = nv31_mpeg_init;
- pmpeg->base.fini = nv31_mpeg_fini;
- if (dev_priv->card_type < NV_40) {
- pmpeg->base.context_new = nv31_mpeg_context_new;
- pmpeg->base.context_del = nv31_mpeg_context_del;
- } else {
- pmpeg->base.context_new = nv40_mpeg_context_new;
- pmpeg->base.context_del = nv40_mpeg_context_del;
- }
- pmpeg->base.object_new = nv31_mpeg_object_new;
-
- /* ISR vector, PMC_ENABLE bit, and TILE regs are shared between
- * all VPE engines, for this driver's purposes the PMPEG engine
- * will be treated as the "master" and handle the global VPE
- * bits too
- */
- pmpeg->base.set_tile_region = nv31_vpe_set_tile_region;
- nouveau_irq_register(dev, 0, nv31_vpe_isr);
-
- NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
- NVOBJ_CLASS(dev, 0x3174, MPEG);
- NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma);
- NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma);
- NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma);
-
-#if 0
- NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
- NVOBJ_CLASS(dev, 0x4075, ME);
-#endif
- return 0;
-
-}
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index 7fbcb33..f0ac2a7 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -72,51 +72,6 @@ nv44_fb_init_gart(struct drm_device *dev)
}
int
-nv40_fb_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- /* 0x001218 is actually present on a few other NV4X I looked at,
- * and even contains sane values matching 0x100474. From looking
- * at various vbios images however, this isn't the case everywhere.
- * So, I chose to use the same regs I've seen NVIDIA reading around
- * the memory detection, hopefully that'll get us the right numbers
- */
- if (dev_priv->chipset == 0x40) {
- u32 pbus1218 = nv_rd32(dev, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
- }
- } else
- if (dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
- u32 pfb914 = nv_rd32(dev, 0x100914);
- switch (pfb914 & 0x00000003) {
- case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
- case 0x00000001: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
- case 0x00000002: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000003: break;
- }
- } else
- if (dev_priv->chipset != 0x4e) {
- u32 pfb474 = nv_rd32(dev, 0x100474);
- if (pfb474 & 0x00000004)
- dev_priv->vram_type = NV_MEM_TYPE_GDDR3;
- if (pfb474 & 0x00000002)
- dev_priv->vram_type = NV_MEM_TYPE_DDR2;
- if (pfb474 & 0x00000001)
- dev_priv->vram_type = NV_MEM_TYPE_DDR1;
- } else {
- dev_priv->vram_type = NV_MEM_TYPE_STOLEN;
- }
-
- dev_priv->vram_size = nv_rd32(dev, 0x10020c) & 0xff000000;
- return 0;
-}
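
The deleted detection logic is a per-chipset register decode; a condensed
sketch using the same registers and NV_MEM_TYPE_* constants (the helper name
is hypothetical):

static int nv40_vram_type_sketch(struct drm_device *dev, int chipset)
{
	int type = NV_MEM_TYPE_UNKNOWN;
	u32 pfb474;

	if (chipset == 0x40) {
		switch (nv_rd32(dev, 0x001218) & 0x00000300) {
		case 0x00000000: return NV_MEM_TYPE_SDRAM;
		case 0x00000100: return NV_MEM_TYPE_DDR1;
		case 0x00000200: return NV_MEM_TYPE_GDDR3;
		default:         return NV_MEM_TYPE_DDR2;
		}
	}
	if (chipset == 0x49 || chipset == 0x4b) {
		switch (nv_rd32(dev, 0x100914) & 0x00000003) {
		case 0x00000000: return NV_MEM_TYPE_DDR1;
		case 0x00000001: return NV_MEM_TYPE_DDR2;
		case 0x00000002: return NV_MEM_TYPE_GDDR3;
		default:         return NV_MEM_TYPE_UNKNOWN;
		}
	}
	if (chipset == 0x4e)
		return NV_MEM_TYPE_STOLEN;	/* IGP: carved from system RAM */

	/* note: later matches deliberately override earlier ones, exactly
	 * like the original sequential-if chain */
	pfb474 = nv_rd32(dev, 0x100474);
	if (pfb474 & 0x00000004) type = NV_MEM_TYPE_GDDR3;
	if (pfb474 & 0x00000002) type = NV_MEM_TYPE_DDR2;
	if (pfb474 & 0x00000001) type = NV_MEM_TYPE_DDR1;
	return type;
}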
-
-int
nv40_fb_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index ba14a93..5beb01b 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -35,6 +35,89 @@ struct nv40_graph_engine {
u32 grctx_size;
};
+static struct nouveau_channel *
+nv40_graph_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *grctx;
+ uint32_t inst;
+ int i;
+
+ inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
+ if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
+ return NULL;
+ inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ if (!dev_priv->channels.ptr[i])
+ continue;
+
+ grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
+ if (grctx && grctx->pinst == inst)
+ return dev_priv->channels.ptr[i];
+ }
+
+ return NULL;
+}
+
+static int
+nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
+{
+ uint32_t old_cp, tv = 1000, tmp;
+ int i;
+
+ old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+
+ tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
+ tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
+ NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
+
+ tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
+ tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
+
+ nouveau_wait_for_idle(dev);
+
+ for (i = 0; i < tv; i++) {
+ if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
+ break;
+ }
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
+
+ if (i == tv) {
+ uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
+ NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
+ NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
+ ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
+ ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
+ NV_ERROR(dev, "0x40030C = 0x%08x\n",
+ nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv40_graph_unload_context(struct drm_device *dev)
+{
+ uint32_t inst;
+ int ret;
+
+ inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
+ if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
+ return 0;
+ inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+ ret = nv40_graph_transfer_context(dev, inst, 1);
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
+ return ret;
+}
+
static int
nv40_graph_context_new(struct nouveau_channel *chan, int engine)
{
@@ -80,16 +163,16 @@ nv40_graph_context_del(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *grctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 inst = 0x01000000 | (grctx->pinst >> 4);
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
- if (nv_rd32(dev, 0x40032c) == inst)
- nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
- if (nv_rd32(dev, 0x400330) == inst)
- nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
- nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
+ nv04_graph_fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (nv40_graph_channel(dev) == chan)
+ nv40_graph_unload_context(dev);
+
+ nv04_graph_fifo_access(dev, true);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
@@ -346,20 +429,9 @@ nv40_graph_init(struct drm_device *dev, int engine)
}
static int
-nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
+nv40_graph_fini(struct drm_device *dev, int engine)
{
- u32 inst = nv_rd32(dev, 0x40032c);
- if (inst & 0x01000000) {
- nv_wr32(dev, 0x400720, 0x00000000);
- nv_wr32(dev, 0x400784, inst);
- nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
- nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
- if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
- u32 insn = nv_rd32(dev, 0x400308);
- NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
- }
- nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
- }
+ nv40_graph_unload_context(dev);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
deleted file mode 100644
index c761538..0000000
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ /dev/null
@@ -1,394 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_bios.h"
-#include "nouveau_pm.h"
-#include "nouveau_hw.h"
-
-#define min2(a,b) ((a) < (b) ? (a) : (b))
-
-static u32
-read_pll_1(struct drm_device *dev, u32 reg)
-{
- u32 ctrl = nv_rd32(dev, reg + 0x00);
- int P = (ctrl & 0x00070000) >> 16;
- int N = (ctrl & 0x0000ff00) >> 8;
- int M = (ctrl & 0x000000ff) >> 0;
- u32 ref = 27000, clk = 0;
-
- if (ctrl & 0x80000000)
- clk = ref * N / M;
-
- return clk >> P;
-}
-
-static u32
-read_pll_2(struct drm_device *dev, u32 reg)
-{
- u32 ctrl = nv_rd32(dev, reg + 0x00);
- u32 coef = nv_rd32(dev, reg + 0x04);
- int N2 = (coef & 0xff000000) >> 24;
- int M2 = (coef & 0x00ff0000) >> 16;
- int N1 = (coef & 0x0000ff00) >> 8;
- int M1 = (coef & 0x000000ff) >> 0;
- int P = (ctrl & 0x00070000) >> 16;
- u32 ref = 27000, clk = 0;
-
- if ((ctrl & 0x80000000) && M1) {
- clk = ref * N1 / M1;
- if ((ctrl & 0x40000100) == 0x40000000) {
- if (M2)
- clk = clk * N2 / M2;
- else
- clk = 0;
- }
- }
-
- return clk >> P;
-}
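
Both readback helpers implement the usual clk = ref * N / M >> P relation,
with read_pll_2 adding a second cascaded N2/M2 stage. A worked example with
the 27000 kHz reference used above (the coefficients are hypothetical):

/* Single stage: N1 = 100, M1 = 9, P = 1
 *	27000 * 100 / 9 = 300000 kHz, >> 1  ->  150000 kHz (150 MHz)
 * Second stage enabled ((ctrl & 0x40000100) == 0x40000000), N2 = 4, M2 = 3:
 *	300000 * 4 / 3 = 400000 kHz, >> 1  ->  200000 kHz (200 MHz) */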
-
-static u32
-read_clk(struct drm_device *dev, u32 src)
-{
- switch (src) {
- case 3:
- return read_pll_2(dev, 0x004000);
- case 2:
- return read_pll_1(dev, 0x004008);
- default:
- break;
- }
-
- return 0;
-}
-
-int
-nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- u32 ctrl = nv_rd32(dev, 0x00c040);
-
- perflvl->core = read_clk(dev, (ctrl & 0x00000003) >> 0);
- perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
- perflvl->memory = read_pll_2(dev, 0x4020);
- return 0;
-}
-
-struct nv40_pm_state {
- u32 ctrl;
- u32 npll_ctrl;
- u32 npll_coef;
- u32 spll;
- u32 mpll_ctrl;
- u32 mpll_coef;
-};
-
-static int
-nv40_calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
- u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
-{
- struct nouveau_pll_vals coef;
- int ret;
-
- ret = get_pll_limits(dev, reg, pll);
- if (ret)
- return ret;
-
- if (clk < pll->vco1.maxfreq)
- pll->vco2.maxfreq = 0;
-
- ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
- if (ret == 0)
- return -ERANGE;
-
- *N1 = coef.N1;
- *M1 = coef.M1;
- if (N2 && M2) {
- if (pll->vco2.maxfreq) {
- *N2 = coef.N2;
- *M2 = coef.M2;
- } else {
- *N2 = 1;
- *M2 = 1;
- }
- }
- *log2P = coef.log2P;
- return 0;
-}
-
-void *
-nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct nv40_pm_state *info;
- struct pll_lims pll;
- int N1, N2, M1, M2, log2P;
- int ret;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
-
- /* core/geometric clock */
- ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core,
- &N1, &M1, &N2, &M2, &log2P);
- if (ret < 0)
- goto out;
-
- if (N2 == M2) {
- info->npll_ctrl = 0x80000100 | (log2P << 16);
- info->npll_coef = (N1 << 8) | M1;
- } else {
- info->npll_ctrl = 0xc0000000 | (log2P << 16);
- info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
- }
-
- /* use the second PLL for shader/rop clock, if it differs from core */
- if (perflvl->shader && perflvl->shader != perflvl->core) {
- ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader,
- &N1, &M1, NULL, NULL, &log2P);
- if (ret < 0)
- goto out;
-
- info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
- info->ctrl = 0x00000223;
- } else {
- info->spll = 0x00000000;
- info->ctrl = 0x00000333;
- }
-
- /* memory clock */
- if (!perflvl->memory) {
- info->mpll_ctrl = 0x00000000;
- goto out;
- }
-
- ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
- &N1, &M1, &N2, &M2, &log2P);
- if (ret < 0)
- goto out;
-
- info->mpll_ctrl = 0x80000000 | (log2P << 16);
- info->mpll_ctrl |= min2(pll.log2p_bias + log2P, pll.max_log2p) << 20;
- if (N2 == M2) {
- info->mpll_ctrl |= 0x00000100;
- info->mpll_coef = (N1 << 8) | M1;
- } else {
- info->mpll_ctrl |= 0x40000000;
- info->mpll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
- }
-
-out:
- if (ret < 0) {
- kfree(info);
- info = ERR_PTR(ret);
- }
- return info;
-}
-
-static bool
-nv40_pm_gr_idle(void *data)
-{
- struct drm_device *dev = data;
-
- if ((nv_rd32(dev, 0x400760) & 0x000000f0) >> 4 !=
- (nv_rd32(dev, 0x400760) & 0x0000000f))
- return false;
-
- if (nv_rd32(dev, 0x400700))
- return false;
-
- return true;
-}
-
-int
-nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv40_pm_state *info = pre_state;
- unsigned long flags;
- struct bit_entry M;
- u32 crtc_mask = 0;
- u8 sr1[2];
- int i, ret = -EAGAIN;
-
- /* determine which CRTCs are active, fetch VGA_SR1 for each */
- for (i = 0; i < 2; i++) {
- u32 vbl = nv_rd32(dev, 0x600808 + (i * 0x2000));
- u32 cnt = 0;
- do {
- if (vbl != nv_rd32(dev, 0x600808 + (i * 0x2000))) {
- nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
- sr1[i] = nv_rd08(dev, 0x0c03c5 + (i * 0x2000));
- if (!(sr1[i] & 0x20))
- crtc_mask |= (1 << i);
- break;
- }
- udelay(1);
- } while (cnt++ < 32);
- }
-
- /* halt and idle engines */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
- if (!nv_wait(dev, 0x002500, 0x00000010, 0x00000000))
- goto resume;
- nv_mask(dev, 0x003220, 0x00000001, 0x00000000);
- if (!nv_wait(dev, 0x003220, 0x00000010, 0x00000000))
- goto resume;
- nv_mask(dev, 0x003200, 0x00000001, 0x00000000);
- nv04_fifo_cache_pull(dev, false);
-
- if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
- goto resume;
-
- ret = 0;
-
- /* set engine clocks */
- nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
- nv_wr32(dev, 0x004004, info->npll_coef);
- nv_mask(dev, 0x004000, 0xc0070100, info->npll_ctrl);
- nv_mask(dev, 0x004008, 0xc007ffff, info->spll);
- mdelay(5);
- nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);
-
- if (!info->mpll_ctrl)
- goto resume;
-
- /* wait for vblank start on active crtcs, disable memory access */
- for (i = 0; i < 2; i++) {
- if (!(crtc_mask & (1 << i)))
- continue;
- nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
- nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
- nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
- nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
- }
-
- /* prepare ram for reclocking */
- nv_wr32(dev, 0x1002d4, 0x00000001); /* precharge */
- nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
- nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
- nv_mask(dev, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
- nv_wr32(dev, 0x1002dc, 0x00000001); /* enable self-refresh */
-
- /* change the PLL of each memory partition */
- nv_mask(dev, 0x00c040, 0x0000c000, 0x00000000);
- switch (dev_priv->chipset) {
- case 0x40:
- case 0x45:
- case 0x41:
- case 0x42:
- case 0x47:
- nv_mask(dev, 0x004044, 0xc0771100, info->mpll_ctrl);
- nv_mask(dev, 0x00402c, 0xc0771100, info->mpll_ctrl);
- nv_wr32(dev, 0x004048, info->mpll_coef);
- nv_wr32(dev, 0x004030, info->mpll_coef);
- case 0x43:
- case 0x49:
- case 0x4b:
- nv_mask(dev, 0x004038, 0xc0771100, info->mpll_ctrl);
- nv_wr32(dev, 0x00403c, info->mpll_coef);
- default:
- nv_mask(dev, 0x004020, 0xc0771100, info->mpll_ctrl);
- nv_wr32(dev, 0x004024, info->mpll_coef);
- break;
- }
- udelay(100);
- nv_mask(dev, 0x00c040, 0x0000c000, 0x0000c000);
-
- /* re-enable normal operation of memory controller */
- nv_wr32(dev, 0x1002dc, 0x00000000);
- nv_mask(dev, 0x100210, 0x80000000, 0x80000000);
- udelay(100);
-
- /* execute memory reset script from vbios */
- if (!bit_table(dev, 'M', &M))
- nouveau_bios_init_exec(dev, ROM16(M.data[0]));
-
- /* make sure we're in vblank (hopefully the same one as before), and
- * then re-enable crtc memory access
- */
- for (i = 0; i < 2; i++) {
- if (!(crtc_mask & (1 << i)))
- continue;
- nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
- nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
- nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i]);
- }
-
- /* resume engines */
-resume:
- nv_wr32(dev, 0x003250, 0x00000001);
- nv_mask(dev, 0x003220, 0x00000001, 0x00000001);
- nv_wr32(dev, 0x003200, 0x00000001);
- nv_wr32(dev, 0x002500, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- kfree(info);
- return ret;
-}
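
The CRTC scan at the top of the deleted nv40_pm_clocks_set() deserves a note:
a head counts as active when its vblank counter (0x600808 + head * 0x2000)
changes within ~32 us and VGA sequencer register SR1, read through the
0x03c4/0x03c5 index/data pair, has bit 5 (screen off) clear. A sketch of that
probe under those assumptions (the helper name is hypothetical):

static bool crtc_active_sketch(struct drm_device *dev, int head, u8 *sr1)
{
	u32 reg = 0x600808 + (head * 0x2000);
	u32 vbl = nv_rd32(dev, reg);
	int cnt;

	for (cnt = 0; cnt < 32; cnt++) {
		if (vbl != nv_rd32(dev, reg)) {	/* counter moved: scanning */
			nv_wr08(dev, 0x0c03c4 + (head * 0x2000), 0x01);
			*sr1 = nv_rd08(dev, 0x0c03c5 + (head * 0x2000));
			return !(*sr1 & 0x20);	/* bit 5 set = screen off */
		}
		udelay(1);
	}
	return false;	/* no vblank activity: head is idle */
}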
-
-int
-nv40_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
-{
- if (line == 2) {
- u32 reg = nv_rd32(dev, 0x0010f0);
- if (reg & 0x80000000) {
- *duty = (reg & 0x7fff0000) >> 16;
- *divs = (reg & 0x00007fff);
- return 0;
- }
- } else
- if (line == 9) {
- u32 reg = nv_rd32(dev, 0x0015f4);
- if (reg & 0x80000000) {
- *divs = nv_rd32(dev, 0x0015f8);
- *duty = (reg & 0x7fffffff);
- return 0;
- }
- } else {
- NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
- return -ENODEV;
- }
-
- return -EINVAL;
-}
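
divs and duty form a simple ratio: the PWM duty cycle is duty / divs. A
worked example for line 2 with a hypothetical register value:

/*	nv_rd32(dev, 0x0010f0) == 0x83e80fa0
 *	bit 31 set            -> PWM controller enabled
 *	duty = (reg & 0x7fff0000) >> 16 = 0x03e8 = 1000
 *	divs =  reg & 0x00007fff        = 0x0fa0 = 4000
 *	duty cycle = 1000 / 4000 = 25% fan speed */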
-
-int
-nv40_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
-{
- if (line == 2) {
- nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
- } else
- if (line == 9) {
- nv_wr32(dev, 0x0015f8, divs);
- nv_wr32(dev, 0x0015f4, duty | 0x80000000);
- } else {
- NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
- return -ENODEV;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 701b927..ebabacf 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -104,7 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
OUT_RING(evo, nv_crtc->lut.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF :
NV50_EVO_CRTC_CLUT_MODE_ON);
- OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
+ OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
if (dev_priv->chipset != 0x50) {
BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
OUT_RING(evo, NvEvoVRAM);
@@ -132,74 +132,30 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
}
static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- int head = nv_crtc->index, ret;
- u32 mode = 0x00;
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
-
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
- }
-
- ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
- if (ret == 0) {
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
- OUT_RING (evo, mode);
- if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
- }
- }
-
- return ret;
-}
-
-static int
-nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
{
struct drm_device *dev = nv_crtc->base.dev;
struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
- int adj;
- u32 hue, vib;
- NV_DEBUG_KMS(dev, "vibrance = %i, hue = %i\n",
- nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
+ NV_DEBUG_KMS(dev, "\n");
ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
if (ret) {
- NV_ERROR(dev, "no space while setting color vibrance\n");
+ NV_ERROR(dev, "no space while setting dither\n");
return ret;
}
- adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
- vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
-
- hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
-
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
- OUT_RING (evo, (hue << 20) | (vib << 8));
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
+ if (on)
+ OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
+ else
+ OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
if (update) {
BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
}
return 0;
@@ -224,103 +180,80 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
}
static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
{
- struct nouveau_connector *nv_connector;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct drm_device *dev = crtc->dev;
+ struct nouveau_connector *nv_connector =
+ nouveau_crtc_connector_get(nv_crtc);
+ struct drm_device *dev = nv_crtc->base.dev;
struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_display_mode *umode = &crtc->mode;
- struct drm_display_mode *omode;
- int scaling_mode, ret;
- u32 ctrl = 0, oX, oY;
+ struct drm_display_mode *native_mode = NULL;
+ struct drm_display_mode *mode = &nv_crtc->base.mode;
+ uint32_t outX, outY, horiz, vert;
+ int ret;
NV_DEBUG_KMS(dev, "\n");
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (!nv_connector || !nv_connector->native_mode) {
- NV_ERROR(dev, "no native mode, forcing panel scaling\n");
- scaling_mode = DRM_MODE_SCALE_NONE;
- } else {
- scaling_mode = nv_connector->scaling_mode;
- }
-
- /* start off at the resolution we programmed the crtc for, this
- * effectively handles NONE/FULL scaling
- */
- if (scaling_mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
-
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
-
- /* add overscan compensation if necessary, will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- nv_connector->edid &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
-
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ switch (scaling_mode) {
+ case DRM_MODE_SCALE_NONE:
+ break;
+ default:
+ if (!nv_connector || !nv_connector->native_mode) {
+ NV_ERROR(dev, "No native mode, forcing panel scaling\n");
+ scaling_mode = DRM_MODE_SCALE_NONE;
} else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ native_mode = nv_connector->native_mode;
}
+ break;
}
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
switch (scaling_mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
+ horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
+ vert = (native_mode->vdisplay << 19) / mode->vdisplay;
+
+ if (vert > horiz) {
+ outX = (mode->hdisplay * horiz) >> 19;
+ outY = (mode->vdisplay * horiz) >> 19;
} else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ outX = (mode->hdisplay * vert) >> 19;
+ outY = (mode->vdisplay * vert) >> 19;
}
break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ outX = native_mode->hdisplay;
+ outY = native_mode->vdisplay;
+ break;
+ case DRM_MODE_SCALE_CENTER:
+ case DRM_MODE_SCALE_NONE:
default:
+ outX = mode->hdisplay;
+ outY = mode->vdisplay;
break;
}
- if (umode->hdisplay != oX || umode->vdisplay != oY ||
- umode->flags & DRM_MODE_FLAG_INTERLACE ||
- umode->flags & DRM_MODE_FLAG_DBLSCAN)
- ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
-
- ret = RING_SPACE(evo, 5);
+ ret = RING_SPACE(evo, update ? 7 : 5);
if (ret)
return ret;
+ /* Got a better name for SCALER_ACTIVE? */
+ /* One day I've got to really figure out why this is needed. */
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
- OUT_RING (evo, ctrl);
+ if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
+ (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ mode->hdisplay != outX || mode->vdisplay != outY) {
+ OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
+ } else {
+ OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
+ }
+
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
- OUT_RING (evo, oY << 16 | oX);
- OUT_RING (evo, oY << 16 | oX);
+ OUT_RING(evo, outY << 16 | outX);
+ OUT_RING(evo, outY << 16 | outX);
if (update) {
- nv50_display_flip_stop(crtc);
- nv50_display_sync(dev);
- nv50_display_flip_next(crtc, crtc->fb, NULL);
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
}
return 0;
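
The restored DRM_MODE_SCALE_ASPECT path works in 19-bit fixed point: the
smaller of the two scale factors is applied to both axes so the image fits
the panel. A worked example, assuming a hypothetical 1280x1024 native panel
driven with a 1024x768 mode:

/* horiz = (1280 << 19) / 1024 = 1.25   (13.19 fixed point)
 * vert  = (1024 << 19) /  768 ~ 1.3333
 * vert > horiz, so the horizontal factor is used for both axes:
 *	outX = (1024 * horiz) >> 19 = 1280
 *	outY = ( 768 * horiz) >> 19 =  960
 * i.e. a 1280x960 image on the 1280x1024 panel, preserving 4:3. */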
@@ -396,10 +329,13 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(&nv_crtc->base);
+ nv50_cursor_fini(nv_crtc);
+
nouveau_bo_unmap(nv_crtc->lut.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ kfree(nv_crtc->mode);
kfree(nv_crtc);
}
@@ -436,7 +372,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nouveau_bo_unmap(cursor);
- nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
nv_crtc->cursor.show(nv_crtc, true);
out:
@@ -507,6 +443,39 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
+static int
+nv50_crtc_wait_complete(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_channel *evo = disp->master;
+ u64 start;
+ int ret;
+
+ ret = RING_SPACE(evo, 6);
+ if (ret)
+ return ret;
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x80000000);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0);
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000000);
+
+ nv_wo32(disp->ntfy, 0x000, 0x00000000);
+ FIRE_RING (evo);
+
+ start = ptimer->read(dev);
+ do {
+ if (nv_ro32(disp->ntfy, 0x000))
+ return 0;
+ } while (ptimer->read(dev) - start < 2000000000ULL);
+
+ return -EBUSY;
+}
+
static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
@@ -530,7 +499,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
nv50_crtc_blank(nv_crtc, false);
drm_vblank_post_modeset(dev, nv_crtc->index);
- nv50_display_sync(dev);
+ nv50_crtc_wait_complete(crtc);
nv50_display_flip_next(crtc, crtc->fb, NULL);
}
@@ -550,18 +519,12 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_framebuffer *drm_fb;
- struct nouveau_framebuffer *fb;
+ struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
- /* no fb bound */
- if (!atomic && !crtc->fb) {
- NV_DEBUG_KMS(dev, "No FB bound\n");
- return 0;
- }
-
/* If atomic, we want to switch to the fb we were passed, so
* now we update pointers to do that. (We don't pin; just
* assume we're already pinned and update the base address.)
@@ -570,8 +533,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
drm_fb = passed_fb;
fb = nouveau_framebuffer(passed_fb);
} else {
- drm_fb = crtc->fb;
- fb = nouveau_framebuffer(crtc->fb);
/* If not atomic, we can go ahead and pin, and unpin the
* old fb we were passed.
*/
@@ -585,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
}
- nv_crtc->fb.offset = fb->nvbo->bo.offset;
+ nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
@@ -612,6 +573,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
OUT_RING (evo, fb->base.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
+ OUT_RING (evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
OUT_RING (evo, (y << 16) | x);
@@ -624,77 +587,90 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 head = nv_crtc->index * 0x400;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1;
+ struct nouveau_connector *nv_connector = NULL;
+ uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
+ uint32_t hunk1, vunk1, vunk2a, vunk2b;
int ret;
- /* hw timing description looks like this:
- *
- * <sync> <back porch> <---------display---------> <front porch>
- * ______
- * |____________|---------------------------|____________|
- *
- * ^ synce ^ blanke ^ blanks ^ active
- *
- * interlaced modes also have 2 additional values pointing at the end
- * and start of the next field's blanking period.
- */
+ /* Find the connector attached to this CRTC */
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+
+ *nv_crtc->mode = *adjusted_mode;
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+
+ hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
+ vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
+ hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
+ vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
+ /* I can't give this a proper name, anyone else can? */
+ hunk1 = adjusted_mode->htotal -
+ adjusted_mode->hsync_start + adjusted_mode->hdisplay;
+ vunk1 = adjusted_mode->vtotal -
+ adjusted_mode->vsync_start + adjusted_mode->vdisplay;
+ /* Another strange value, this time only for interlaced modes. */
+ vunk2a = 2 * adjusted_mode->vtotal -
+ adjusted_mode->vsync_start + adjusted_mode->vdisplay;
+ vunk2b = adjusted_mode->vtotal -
+ adjusted_mode->vsync_start + adjusted_mode->vtotal;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vsync_dur /= 2;
+ vsync_start_to_end /= 2;
+ vunk1 /= 2;
+ vunk2a /= 2;
+ vunk2b /= 2;
+ /* magic */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ vsync_start_to_end -= 1;
+ vunk1 -= 1;
+ vunk2a -= 1;
+ vunk2b -= 1;
+ }
}
- ret = RING_SPACE(evo, 18);
- if (ret == 0) {
- BEGIN_RING(evo, 0, 0x0804 + head, 2);
- OUT_RING (evo, 0x00800000 | mode->clock);
- OUT_RING (evo, (ilace == 2) ? 2 : 0);
- BEGIN_RING(evo, 0, 0x0810 + head, 6);
- OUT_RING (evo, 0x00000000); /* border colour */
- OUT_RING (evo, (vactive << 16) | hactive);
- OUT_RING (evo, ( vsynce << 16) | hsynce);
- OUT_RING (evo, (vblanke << 16) | hblanke);
- OUT_RING (evo, (vblanks << 16) | hblanks);
- OUT_RING (evo, (vblan2e << 16) | vblan2s);
- BEGIN_RING(evo, 0, 0x082c + head, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0900 + head, 1);
- OUT_RING (evo, 0x00000311); /* makes sync channel work */
- BEGIN_RING(evo, 0, 0x08c8 + head, 1);
- OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
- BEGIN_RING(evo, 0, 0x08d4 + head, 1);
- OUT_RING (evo, 0x00000000); /* screen position */
+ ret = RING_SPACE(evo, 17);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
+ OUT_RING(evo, adjusted_mode->clock | 0x800000);
+ OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
+ OUT_RING(evo, 0);
+ OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
+ OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
+ OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
+ (hsync_start_to_end - 1));
+ OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
+ OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
+ } else {
+ OUT_RING(evo, 0);
+ OUT_RING(evo, 0);
}
- nv_crtc->set_dither(nv_crtc, false);
- nv_crtc->set_scale(nv_crtc, false);
- nv_crtc->set_color_vibrance(nv_crtc, false);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
+ OUT_RING(evo, 0);
+
+ /* This is the actual resolution of the mode. */
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
+ OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
+ OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
+
+ nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
+ nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
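
The removed comment's timing diagram maps directly onto the restored
hsync_dur/hsync_start_to_end/hunk1 values. A worked example with a standard
1024x768 mode (hdisplay 1024, hsync_start 1048, hsync_end 1184, htotal 1344):

/* hsync_dur          = 1184 - 1048        = 136  (sync pulse width)
 * hsync_start_to_end = 1344 - 1048        = 296  (sync + back porch)
 * hunk1              = 1344 - 1048 + 1024 = 1320 (total minus front porch)
 * The EVO methods are written minus one, which lands exactly on the
 * hsynce = 135, hblanke = 295 and hblanks = 1319 the old code derived. */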
@@ -710,7 +686,7 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (ret)
return ret;
- ret = nv50_display_sync(crtc->dev);
+ ret = nv50_crtc_wait_complete(crtc);
if (ret)
return ret;
@@ -729,7 +705,7 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
if (ret)
return ret;
- return nv50_display_sync(crtc->dev);
+ return nv50_crtc_wait_complete(crtc);
}
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -755,8 +731,11 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (!nv_crtc)
return -ENOMEM;
- nv_crtc->color_vibrance = 50;
- nv_crtc->vibrant_hue = 0;
+ nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
+ if (!nv_crtc->mode) {
+ kfree(nv_crtc);
+ return -ENOMEM;
+ }
/* Default CLUT parameters, will be activated on the hw upon
* first mode set.
@@ -768,7 +747,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
}
nv_crtc->lut.depth = 0;
- ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
@@ -779,6 +758,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
}
if (ret) {
+ kfree(nv_crtc->mode);
kfree(nv_crtc);
return ret;
}
@@ -788,13 +768,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
/* set function pointers */
nv_crtc->set_dither = nv50_crtc_set_dither;
nv_crtc->set_scale = nv50_crtc_set_scale;
- nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
- ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index adfc9b6..9752c35 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -137,3 +137,21 @@ nv50_cursor_init(struct nouveau_crtc *nv_crtc)
nv_crtc->cursor.show = nv50_cursor_show;
return 0;
}
+
+void
+nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ int idx = nv_crtc->index;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
+ if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
+ NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
+ NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
+ NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
+ nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx)));
+ }
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 55c5633..808f3ec 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -190,13 +190,21 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
- connector->native_mode)
- drm_mode_copy(adjusted_mode, connector->native_mode);
+ connector->native_mode) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
return true;
}
static void
+nv50_dac_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void
nv50_dac_commit(struct drm_encoder *encoder)
{
}
@@ -258,7 +266,7 @@ static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
.save = nv50_dac_save,
.restore = nv50_dac_restore,
.mode_fixup = nv50_dac_mode_fixup,
- .prepare = nv50_dac_disconnect,
+ .prepare = nv50_dac_prepare,
.commit = nv50_dac_commit,
.mode_set = nv50_dac_mode_set,
.get_crtc = nv50_dac_crtc_get,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 0e47a89..08da478 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -50,76 +50,9 @@ nv50_sor_nr(struct drm_device *dev)
return 4;
}
-u32
-nv50_display_active_crtcs(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 mask = 0;
- int i;
-
- if (dev_priv->chipset < 0x90 ||
- dev_priv->chipset == 0x92 ||
- dev_priv->chipset == 0xa0) {
- for (i = 0; i < 2; i++)
- mask |= nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
- } else {
- for (i = 0; i < 4; i++)
- mask |= nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
- }
-
- for (i = 0; i < 3; i++)
- mask |= nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
-
- return mask & 3;
-}
-
-static int
-evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
-{
- int ret = 0;
- nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000001);
- nv_wr32(dev, 0x610304 + (ch * 0x08), data);
- nv_wr32(dev, 0x610300 + (ch * 0x08), 0x80000001 | mthd);
- if (!nv_wait(dev, 0x610300 + (ch * 0x08), 0x80000000, 0x00000000))
- ret = -EBUSY;
- if (ret || (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO))
- NV_INFO(dev, "EvoPIO: %d 0x%04x 0x%08x\n", ch, mthd, data);
- nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000000);
- return ret;
-}
-
int
nv50_display_early_init(struct drm_device *dev)
{
- u32 ctrl = nv_rd32(dev, 0x610200);
- int i;
-
- /* check if master evo channel is already active, a good a sign as any
- * that the display engine is in a weird state (hibernate/kexec), if
- * it is, do our best to reset the display engine...
- */
- if ((ctrl & 0x00000003) == 0x00000003) {
- NV_INFO(dev, "PDISP: EVO(0) 0x%08x, resetting...\n", ctrl);
-
- /* deactivate both heads first, PDISP will disappear forever
- * (well, until you power cycle) on some boards as soon as
- * PMC_ENABLE is hit unless they are..
- */
- for (i = 0; i < 2; i++) {
- evo_icmd(dev, 0, 0x0880 + (i * 0x400), 0x05000000);
- evo_icmd(dev, 0, 0x089c + (i * 0x400), 0);
- evo_icmd(dev, 0, 0x0840 + (i * 0x400), 0);
- evo_icmd(dev, 0, 0x0844 + (i * 0x400), 0);
- evo_icmd(dev, 0, 0x085c + (i * 0x400), 0);
- evo_icmd(dev, 0, 0x0874 + (i * 0x400), 0);
- }
- evo_icmd(dev, 0, 0x0080, 0);
-
- /* reset PDISP */
- nv_mask(dev, 0x000200, 0x40000000, 0x00000000);
- nv_mask(dev, 0x000200, 0x40000000, 0x40000000);
- }
-
return 0;
}
@@ -129,40 +62,11 @@ nv50_display_late_takedown(struct drm_device *dev)
}
int
-nv50_display_sync(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo = disp->master;
- u64 start;
- int ret;
-
- ret = RING_SPACE(evo, 6);
- if (ret == 0) {
- BEGIN_RING(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x80000000);
- BEGIN_RING(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000000);
-
- nv_wo32(disp->ntfy, 0x000, 0x00000000);
- FIRE_RING (evo);
-
- start = ptimer->read(dev);
- do {
- if (nv_ro32(disp->ntfy, 0x000))
- return 0;
- } while (ptimer->read(dev) - start < 2000000000ULL);
- }
-
- return -EBUSY;
-}
-
-int
nv50_display_init(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct drm_connector *connector;
struct nouveau_channel *evo;
int ret, i;
u32 val;
@@ -257,6 +161,16 @@ nv50_display_init(struct drm_device *dev)
NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
+ /* enable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+
+ if (conn->dcb->gpio_tag == 0xff)
+ continue;
+
+ pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
+ }
+
ret = nv50_evo_init(dev);
if (ret)
return ret;
@@ -264,19 +178,36 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
- ret = RING_SPACE(evo, 3);
+ ret = RING_SPACE(evo, 15);
if (ret)
return ret;
BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
- OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
- OUT_RING (evo, NvEvoSync);
+ OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
+ OUT_RING(evo, NvEvoSync);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
+ OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
+ OUT_RING(evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
+ OUT_RING(evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
+ OUT_RING(evo, 0);
+ /* required to make display sync channels not hate life */
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
+ OUT_RING (evo, 0x00000311);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
+ OUT_RING (evo, 0x00000311);
+ FIRE_RING(evo);
+ if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
+ NV_ERROR(dev, "evo pushbuf stalled\n");
- return nv50_display_sync(dev);
+
+ return 0;
}
-void
-nv50_display_fini(struct drm_device *dev)
+static int nv50_display_disable(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo = disp->master;
struct drm_crtc *drm_crtc;
@@ -316,16 +247,6 @@ nv50_display_fini(struct drm_device *dev)
}
}
- for (i = 0; i < 2; i++) {
- nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
- if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
- NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
- NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
- nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- }
- }
-
nv50_evo_fini(dev);
for (i = 0; i < 3; i++) {
@@ -339,10 +260,18 @@ nv50_display_fini(struct drm_device *dev)
/* disable interrupts. */
nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
+
+ /* disable hotplug interrupts */
+ nv_wr32(dev, 0xe054, 0xffffffff);
+ nv_wr32(dev, 0xe050, 0x00000000);
+ if (dev_priv->chipset >= 0x90) {
+ nv_wr32(dev, 0xe074, 0xffffffff);
+ nv_wr32(dev, 0xe070, 0x00000000);
+ }
+ return 0;
}
-int
-nv50_display_create(struct drm_device *dev)
+int nv50_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
@@ -357,6 +286,23 @@ nv50_display_create(struct drm_device *dev)
return -ENOMEM;
dev_priv->engine.display.priv = priv;
+ /* init basic kernel modesetting */
+ drm_mode_config_init(dev);
+
+ /* Initialise some optional connector properties. */
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dithering_property(dev);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+
+ dev->mode_config.fb_base = dev_priv->fb_phys;
+
/* Create CRTC objects */
for (i = 0; i < 2; i++)
nv50_crtc_create(dev, i);
@@ -402,7 +348,7 @@ nv50_display_create(struct drm_device *dev)
tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nv50_display_isr);
- ret = nv50_evo_create(dev);
+ ret = nv50_display_init(dev);
if (ret) {
nv50_display_destroy(dev);
return ret;
@@ -418,7 +364,9 @@ nv50_display_destroy(struct drm_device *dev)
NV_DEBUG_KMS(dev, "\n");
- nv50_evo_destroy(dev);
+ drm_mode_config_cleanup(dev);
+
+ nv50_display_disable(dev);
nouveau_irq_unregister(dev, 26);
kfree(disp);
}
@@ -467,6 +415,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* synchronise with the rendering channel, if necessary */
if (likely(chan)) {
+ u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset;
+
ret = RING_SPACE(chan, 10);
if (ret) {
WIND_RING(evo);
@@ -488,8 +438,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
else
OUT_RING (chan, chan->vram_handle);
} else {
- u64 offset = chan->dispc_vma[nv_crtc->index].offset;
- offset += dispc->sem.offset;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
@@ -536,7 +484,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x0800, 5);
- OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
+ OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
OUT_RING (evo, 0);
OUT_RING (evo, (fb->height << 16) | fb->width);
OUT_RING (evo, nv_fb->r_pitch);
@@ -582,7 +530,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
} else {
/* determine number of lvds links */
if (nv_connector && nv_connector->edid &&
- nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+ nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
/* http://www.spwg.org */
if (((u8 *)nv_connector->edid)[121] == 2)
script |= 0x0100;
@@ -677,7 +625,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
- int i, crtc, or = 0, type = OUTPUT_ANY;
+ int i, crtc, or, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
disp->irq.dcb = NULL;
@@ -750,7 +698,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
if (dcb->type == type && (dcb->or & (1 << or))) {
- nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
+ nouveau_bios_run_display_table(dev, dcb, 0, -1);
disp->irq.dcb = dcb;
goto ack;
}
@@ -763,18 +711,49 @@ ack:
}
static void
+nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
+{
+ int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+ struct drm_encoder *encoder;
+ uint32_t tmp, unk0 = 0, unk1 = 0;
+
+ if (dcb->type != OUTPUT_DP)
+ return;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb == dcb) {
+ unk0 = nv_encoder->dp.unk0;
+ unk1 = nv_encoder->dp.unk1;
+ break;
+ }
+ }
+
+ if (unk0 || unk1) {
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= 0xfffffe03;
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);
+
+ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+ tmp &= 0xfef080c0;
+ nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
+ }
+}
+
+static void
nv50_display_unk20_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
- int i, crtc, or = 0, type = OUTPUT_ANY;
+ int i, crtc, or, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
dcb = disp->irq.dcb;
if (dcb) {
- nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
+ nouveau_bios_run_display_table(dev, dcb, 0, -2);
disp->irq.dcb = NULL;
}
@@ -783,8 +762,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
if (crtc >= 0) {
pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
pclk &= 0x003fffff;
- if (pclk)
- nv50_crtc_set_clock(dev, crtc, pclk);
+
+ nv50_crtc_set_clock(dev, crtc, pclk);
tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
tmp &= ~0x000000f;
@@ -858,15 +837,9 @@ nv50_display_unk20_handler(struct drm_device *dev)
}
script = nv50_display_script_select(dev, dcb, mc, pclk);
- nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
+ nouveau_bios_run_display_table(dev, dcb, script, pclk);
- if (type == OUTPUT_DP) {
- int link = !(dcb->dpconf.sor.link & 1);
- if ((mc & 0x000f0000) == 0x00020000)
- nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
- else
- nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
- }
+ nv50_display_unk20_dp_hack(dev, dcb);
if (dcb->type != OUTPUT_ANALOG) {
tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
@@ -931,7 +904,7 @@ nv50_display_unk40_handler(struct drm_device *dev)
if (!dcb)
goto ack;
- nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1);
+ nouveau_bios_run_display_table(dev, dcb, script, -pclk);
nv50_display_unk40_dp_set_tmds(dev, dcb);
ack:
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 5d3dd14..c2da503 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -69,20 +69,14 @@ int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
int nv50_display_init(struct drm_device *dev);
-void nv50_display_fini(struct drm_device *dev);
void nv50_display_destroy(struct drm_device *dev);
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
-u32 nv50_display_active_crtcs(struct drm_device *);
-
-int nv50_display_sync(struct drm_device *);
int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
struct nouveau_channel *chan);
void nv50_display_flip_stop(struct drm_crtc *);
-int nv50_evo_create(struct drm_device *dev);
-void nv50_evo_destroy(struct drm_device *dev);
int nv50_evo_init(struct drm_device *dev);
void nv50_evo_fini(struct drm_device *dev);
void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 9b962e9..c8e83c1 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -38,7 +38,6 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
return;
*pevo = NULL;
- nouveau_ramht_ref(NULL, &evo->ramht, evo);
nouveau_gpuobj_channel_takedown(evo);
nouveau_bo_unmap(evo->pushbuf_bo);
nouveau_bo_ref(NULL, &evo->pushbuf_bo);
@@ -117,7 +116,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
evo->user_get = 4;
evo->user_put = 0;
- ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+ ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
&evo->pushbuf_bo);
if (ret == 0)
ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
@@ -154,7 +153,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
{
struct drm_device *dev = evo->dev;
int id = evo->id, ret, i;
- u64 pushbuf = evo->pushbuf_bo->bo.offset;
+ u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
u32 tmp;
tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
@@ -218,7 +217,7 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
}
}
-void
+static void
nv50_evo_destroy(struct drm_device *dev)
{
struct nv50_display *disp = nv50_display(dev);
@@ -235,7 +234,7 @@ nv50_evo_destroy(struct drm_device *dev)
nv50_evo_channel_del(&disp->master);
}
-int
+static int
nv50_evo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -332,15 +331,16 @@ nv50_evo_create(struct drm_device *dev)
if (ret)
goto err;
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, &dispc->sem.bo);
if (!ret) {
+ offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+
ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
if (!ret)
ret = nouveau_bo_map(dispc->sem.bo);
if (ret)
nouveau_bo_ref(NULL, &dispc->sem.bo);
- offset = dispc->sem.bo->bo.offset;
}
if (ret)
@@ -388,6 +388,12 @@ nv50_evo_init(struct drm_device *dev)
struct nv50_display *disp = nv50_display(dev);
int ret, i;
+ if (!disp->master) {
+ ret = nv50_evo_create(dev);
+ if (ret)
+ return ret;
+ }
+
ret = nv50_evo_channel_init(disp->master);
if (ret)
return ret;
@@ -414,4 +420,6 @@ nv50_evo_fini(struct drm_device *dev)
if (disp->master)
nv50_evo_channel_fini(disp->master);
+
+ nv50_evo_destroy(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index 771d879..3860ca6 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -104,8 +104,7 @@
#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000
#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009
#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8
-#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE 0x000fff00
-#define NV50_EVO_CRTC_COLOR_CTRL_HUE 0xfff00000
+#define NV50_EVO_CRTC_COLOR_CTRL_COLOR 0x00040000
#define NV50_EVO_CRTC_FB_POS 0x000008c0
#define NV50_EVO_CRTC_REAL_RES 0x000008c8
#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index dc75a72..791ded1 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -159,7 +159,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+ struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
int ret, format;
switch (info->var.bits_per_pixel) {
@@ -247,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb->vma.offset));
- OUT_RING(chan, lower_32_bits(fb->vma.offset));
+ OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
BEGIN_RING(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -256,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb->vma.offset));
- OUT_RING(chan, lower_32_bits(fb->vma.offset));
+ OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
return 0;
}
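The fbcon setup pushes the 64-bit buffer address as two dwords via upper_32_bits()/lower_32_bits(). The split, for reference, with an example address:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t offset = 0x000000012345f000ULL;	/* example GPU VA */

		/* the same split upper_32_bits()/lower_32_bits() perform */
		printf("hi 0x%08x lo 0x%08x\n",
		       (uint32_t)(offset >> 32), (uint32_t)offset);
		return 0;	/* hi 0x00000001 lo 0x2345f000 */
	}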
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 3bc2a56..c34a074 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -230,7 +230,6 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramfc = NULL;
- uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
unsigned long flags;
int ret;
@@ -281,9 +280,8 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
nv_wo32(ramfc, 0x7c, 0x30000001);
nv_wo32(ramfc, 0x78, 0x00000000);
nv_wo32(ramfc, 0x3c, 0x403f6078);
- nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
- nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
- drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
+ nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);
if (dev_priv->chipset != 0x50) {
nv_wo32(chan->ramin, 0, chan->id);
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index f429e6a..d4f4206 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -25,95 +25,198 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_hw.h"
-#include "nouveau_gpio.h"
#include "nv50_display.h"
+static void nv50_gpio_isr(struct drm_device *dev);
+static void nv50_gpio_isr_bh(struct work_struct *work);
+
+struct nv50_gpio_priv {
+ struct list_head handlers;
+ spinlock_t lock;
+};
+
+struct nv50_gpio_handler {
+ struct drm_device *dev;
+ struct list_head head;
+ struct work_struct work;
+ bool inhibit;
+
+ struct dcb_gpio_entry *gpio;
+
+ void (*handler)(void *data, int state);
+ void *data;
+};
+
static int
-nv50_gpio_location(int line, u32 *reg, u32 *shift)
+nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
{
const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
- if (line >= 32)
+ if (gpio->line >= 32)
return -EINVAL;
- *reg = nv50_gpio_reg[line >> 3];
- *shift = (line & 7) << 2;
+ *reg = nv50_gpio_reg[gpio->line >> 3];
+ *shift = (gpio->line & 7) << 2;
return 0;
}
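nv50_gpio_location() packs each GPIO line into a 4-bit field, eight lines per register, across 0xe104/0xe108/0xe280/0xe284. A runnable sketch of the index math:

	#include <stdint.h>
	#include <stdio.h>

	static const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };

	int main(void)
	{
		int line;

		for (line = 0; line < 32; line += 9) {
			uint32_t reg   = gpio_reg[line >> 3];	/* 8 lines/register */
			uint32_t shift = (line & 7) << 2;	/* 4 bits/line */
			printf("line %2d -> reg 0x%04x, bits [%u:%u]\n",
			       line, reg, shift + 3, shift);
		}
		return 0;	/* e.g. line 18 -> reg 0xe280, bits [11:8] */
	}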
int
-nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
+nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
{
- u32 reg, shift;
+ struct dcb_gpio_entry *gpio;
+ uint32_t r, s, v;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return -ENOENT;
- if (nv50_gpio_location(line, &reg, &shift))
+ if (nv50_gpio_location(gpio, &r, &s))
return -EINVAL;
- nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
- return 0;
+ v = nv_rd32(dev, r) >> (s + 2);
+ return ((v & 1) == (gpio->state[1] & 1));
}
int
-nv50_gpio_sense(struct drm_device *dev, int line)
+nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
{
- u32 reg, shift;
+ struct dcb_gpio_entry *gpio;
+ uint32_t r, s, v;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return -ENOENT;
- if (nv50_gpio_location(line, &reg, &shift))
+ if (nv50_gpio_location(gpio, &r, &s))
return -EINVAL;
- return !!(nv_rd32(dev, reg) & (4 << shift));
+ v = nv_rd32(dev, r) & ~(0x3 << s);
+ v |= (gpio->state[state] ^ 2) << s;
+ nv_wr32(dev, r, v);
+ return 0;
}
-void
-nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
+int
+nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
+ void (*handler)(void *, int), void *data)
{
- u32 reg = line < 16 ? 0xe050 : 0xe070;
- u32 mask = 0x00010001 << (line & 0xf);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ struct nv50_gpio_handler *gpioh;
+ struct dcb_gpio_entry *gpio;
+ unsigned long flags;
- nv_wr32(dev, reg + 4, mask);
- nv_mask(dev, reg + 0, mask, on ? mask : 0);
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return -ENOENT;
+
+ gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
+ if (!gpioh)
+ return -ENOMEM;
+
+ INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
+ gpioh->dev = dev;
+ gpioh->gpio = gpio;
+ gpioh->handler = handler;
+ gpioh->data = data;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_add(&gpioh->head, &priv->handlers);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return 0;
}
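Registration follows the usual hard-IRQ-defers-to-process-context shape: entries sit on a list guarded by a spinlock the ISR shares, and the callback itself runs from a work item where it may sleep. A stripped-down sketch of that pattern (names are illustrative, not the driver's):

	#include <linux/kernel.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct hp_handler {
		struct list_head head;
		struct work_struct work;
		void (*cb)(void *data, int state);
		void *data;
	};

	static LIST_HEAD(handlers);
	static DEFINE_SPINLOCK(handlers_lock);

	static void hp_work(struct work_struct *work)
	{
		struct hp_handler *h = container_of(work, struct hp_handler, work);

		h->cb(h->data, 1);	/* process context: free to sleep */
	}

	static int hp_register(void (*cb)(void *, int), void *data)
	{
		struct hp_handler *h = kzalloc(sizeof(*h), GFP_KERNEL);
		unsigned long flags;

		if (!h)
			return -ENOMEM;

		INIT_WORK(&h->work, hp_work);
		h->cb = cb;
		h->data = data;

		spin_lock_irqsave(&handlers_lock, flags);	/* shared with ISR */
		list_add(&h->head, &handlers);
		spin_unlock_irqrestore(&handlers_lock, flags);
		return 0;
	}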
-int
-nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
+void
+nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
+ void (*handler)(void *, int), void *data)
{
- u32 data = ((dir ^ 1) << 13) | (out << 12);
- nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
- nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
- return 0;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ struct nv50_gpio_handler *gpioh, *tmp;
+ struct dcb_gpio_entry *gpio;
+ LIST_HEAD(tofree);
+ unsigned long flags;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
+ if (gpioh->gpio != gpio ||
+ gpioh->handler != handler ||
+ gpioh->data != data)
+ continue;
+ list_move(&gpioh->head, &tofree);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
+ flush_work_sync(&gpioh->work);
+ kfree(gpioh);
+ }
}
-int
-nvd0_gpio_sense(struct drm_device *dev, int line)
+bool
+nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
{
- return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
+ struct dcb_gpio_entry *gpio;
+ u32 reg, mask;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return false;
+
+ reg = gpio->line < 16 ? 0xe050 : 0xe070;
+ mask = 0x00010001 << (gpio->line & 0xf);
+
+ nv_wr32(dev, reg + 4, mask);
+ reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
+ return (reg & mask) == mask;
}
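The enable mask places one bit in each halfword of the bank register, covering the rising- and falling-edge enables for the line at once; writing the same mask to reg + 4 first acks any latched event. Worked example:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int line = 18;	/* example GPIO line in the second bank */

		uint32_t reg  = line < 16 ? 0xe050 : 0xe070;
		uint32_t mask = 0x00010001u << (line & 0xf);

		/* one bit per halfword: both edge enables for the line */
		printf("line %d -> reg 0x%04x, mask 0x%08x\n", line, reg, mask);
		return 0;	/* line 18 -> reg 0xe070, mask 0x00040004 */
	}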
-static void
-nv50_gpio_isr(struct drm_device *dev)
+static int
+nv50_gpio_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 intr0, intr1 = 0;
- u32 hi, lo;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv;
- intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
- if (dev_priv->chipset >= 0x90)
- intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- hi = (intr0 & 0x0000ffff) | (intr1 << 16);
- lo = (intr0 >> 16) | (intr1 & 0xffff0000);
- nouveau_gpio_isr(dev, 0, hi | lo);
+ INIT_LIST_HEAD(&priv->handlers);
+ spin_lock_init(&priv->lock);
+ pgpio->priv = priv;
+ return 0;
+}
- nv_wr32(dev, 0xe054, intr0);
- if (dev_priv->chipset >= 0x90)
- nv_wr32(dev, 0xe074, intr1);
+static void
+nv50_gpio_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ kfree(pgpio->priv);
+ pgpio->priv = NULL;
}
int
nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ int ret;
+
+ if (!pgpio->priv) {
+ ret = nv50_gpio_create(dev);
+ if (ret)
+ return ret;
+ }
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
@@ -136,4 +239,64 @@ nv50_gpio_fini(struct drm_device *dev)
if (dev_priv->chipset >= 0x90)
nv_wr32(dev, 0xe070, 0x00000000);
nouveau_irq_unregister(dev, 21);
+
+ nv50_gpio_destroy(dev);
+}
+
+static void
+nv50_gpio_isr_bh(struct work_struct *work)
+{
+ struct nv50_gpio_handler *gpioh =
+ container_of(work, struct nv50_gpio_handler, work);
+ struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ unsigned long flags;
+ int state;
+
+ state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
+ if (state < 0)
+ return;
+
+ gpioh->handler(gpioh->data, state);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ gpioh->inhibit = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nv50_gpio_isr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ struct nv50_gpio_handler *gpioh;
+ u32 intr0, intr1 = 0;
+ u32 hi, lo, ch;
+
+ intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ if (dev_priv->chipset >= 0x90)
+ intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+
+ hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+ lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+ ch = hi | lo;
+
+ nv_wr32(dev, 0xe054, intr0);
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe074, intr1);
+
+ spin_lock(&priv->lock);
+ list_for_each_entry(gpioh, &priv->handlers, head) {
+ if (!(ch & (1 << gpioh->gpio->line)))
+ continue;
+
+ if (gpioh->inhibit)
+ continue;
+ gpioh->inhibit = true;
+
+ schedule_work(&gpioh->work);
+ }
+ spin_unlock(&priv->lock);
}
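Each bank register appears to latch rising edges in its low halfword and falling edges in its high halfword, which is why the ISR reassembles per-line hi/lo masks before OR-ing them into ch. A runnable check of the bit shuffling:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* pretend 0xe054 latched: line 2 rose, line 1 fell */
		uint32_t intr0 = 0x00020004, intr1 = 0x00000000;

		uint32_t hi = (intr0 & 0x0000ffff) | (intr1 << 16);
		uint32_t lo = (intr0 >> 16) | (intr1 & 0xffff0000);
		uint32_t ch = hi | lo;	/* one bit per line that changed */

		printf("hi 0x%08x lo 0x%08x ch 0x%08x\n", hi, lo, ch);
		return 0;	/* ch == 0x00000006: lines 1 and 2 fired */
	}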
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 33d5711..e25cbb4 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -31,6 +31,7 @@
#include "nouveau_grctx.h"
#include "nouveau_dma.h"
#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
#include "nv50_evo.h"
struct nv50_graph_engine {
@@ -120,62 +121,71 @@ nv50_graph_unload_context(struct drm_device *dev)
return 0;
}
-static int
-nv50_graph_init(struct drm_device *dev, int engine)
+static void
+nv50_graph_init_reset(struct drm_device *dev)
+{
+ uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
+
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
+}
+
+static void
+nv50_graph_init_intr(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
+}
+
+static void
+nv50_graph_init_regs__nv(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
- u32 units = nv_rd32(dev, 0x001540);
+ uint32_t units = nv_rd32(dev, 0x1540);
int i;
NV_DEBUG(dev, "\n");
- /* master reset */
- nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
- nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
-
- /* reset/enable traps and interrupts */
nv_wr32(dev, 0x400804, 0xc0000000);
nv_wr32(dev, 0x406800, 0xc0000000);
nv_wr32(dev, 0x400c04, 0xc0000000);
nv_wr32(dev, 0x401800, 0xc0000000);
nv_wr32(dev, 0x405018, 0xc0000000);
nv_wr32(dev, 0x402000, 0xc0000000);
- for (i = 0; i < 16; i++) {
- if (!(units & (1 << i)))
- continue;
- if (dev_priv->chipset < 0xa0) {
- nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
- nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
- nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
- } else {
- nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
- nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
- nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
+ for (i = 0; i < 16; i++) {
+ if (units & 1 << i) {
+ if (dev_priv->chipset < 0xa0) {
+ nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
+ nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
+ nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
+ } else {
+ nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
+ nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
+ nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
+ }
}
}
nv_wr32(dev, 0x400108, 0xffffffff);
- nv_wr32(dev, 0x400138, 0xffffffff);
- nv_wr32(dev, 0x400100, 0xffffffff);
- nv_wr32(dev, 0x40013c, 0xffffffff);
+
+ nv_wr32(dev, 0x400824, 0x00004000);
nv_wr32(dev, 0x400500, 0x00010001);
+}
+
+static void
+nv50_graph_init_zcull(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ NV_DEBUG(dev, "\n");
- /* upload context program, initialise ctxctl defaults */
- nv_wr32(dev, 0x400324, 0x00000000);
- for (i = 0; i < pgraph->ctxprog_size; i++)
- nv_wr32(dev, 0x400328, pgraph->ctxprog[i]);
- nv_wr32(dev, 0x400824, 0x00000000);
- nv_wr32(dev, 0x400828, 0x00000000);
- nv_wr32(dev, 0x40082c, 0x00000000);
- nv_wr32(dev, 0x400830, 0x00000000);
- nv_wr32(dev, 0x400724, 0x00000000);
- nv_wr32(dev, 0x40032c, 0x00000000);
- nv_wr32(dev, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
-
- /* some unknown zcull magic */
switch (dev_priv->chipset & 0xf0) {
case 0x50:
case 0x80:
@@ -204,18 +214,50 @@ nv50_graph_init(struct drm_device *dev, int engine)
nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
}
+}
+
+static int
+nv50_graph_init_ctxctl(struct drm_device *dev)
+{
+ struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int i;
+
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < pgraph->ctxprog_size; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]);
+
+ nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
+ nv_wr32(dev, 0x400320, 4);
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
+ return 0;
+}
+
+static int
+nv50_graph_init(struct drm_device *dev, int engine)
+{
+ int ret;
+ NV_DEBUG(dev, "\n");
+
+ nv50_graph_init_reset(dev);
+ nv50_graph_init_regs__nv(dev);
+ nv50_graph_init_zcull(dev);
+
+ ret = nv50_graph_init_ctxctl(dev);
+ if (ret)
+ return ret;
+
+ nv50_graph_init_intr(dev);
return 0;
}
static int
-nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
+nv50_graph_fini(struct drm_device *dev, int engine)
{
- nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
- if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
- nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
- return -EBUSY;
- }
+ NV_DEBUG(dev, "\n");
nv50_graph_unload_context(dev);
nv_wr32(dev, 0x40013c, 0x00000000);
return 0;
@@ -616,9 +658,9 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
}
break;
case 7: /* MP error */
- if (ustatus & 0x04030000) {
+ if (ustatus & 0x00010000) {
nv50_pgraph_mp_trap(dev, i, display);
- ustatus &= ~0x04030000;
+ ustatus &= ~0x00010000;
}
break;
case 8: /* TPDMA error */
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index 4b46d69..de9abff 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -40,12 +40,6 @@
#define CP_FLAG_UNK0B ((0 * 32) + 0xb)
#define CP_FLAG_UNK0B_CLEAR 0
#define CP_FLAG_UNK0B_SET 1
-#define CP_FLAG_XFER_SWITCH ((0 * 32) + 0xe)
-#define CP_FLAG_XFER_SWITCH_DISABLE 0
-#define CP_FLAG_XFER_SWITCH_ENABLE 1
-#define CP_FLAG_STATE ((0 * 32) + 0x1c)
-#define CP_FLAG_STATE_STOPPED 0
-#define CP_FLAG_STATE_RUNNING 1
#define CP_FLAG_UNK1D ((0 * 32) + 0x1d)
#define CP_FLAG_UNK1D_CLEAR 0
#define CP_FLAG_UNK1D_SET 1
@@ -200,9 +194,6 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
"the devs.\n");
return -ENOSYS;
}
-
- cp_set (ctx, STATE, RUNNING);
- cp_set (ctx, XFER_SWITCH, ENABLE);
/* decide whether we're loading/unloading the context */
cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
@@ -269,8 +260,6 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
cp_name(ctx, cp_exit);
cp_set (ctx, USER_SAVE, NOT_PENDING);
cp_set (ctx, USER_LOAD, NOT_PENDING);
- cp_set (ctx, XFER_SWITCH, DISABLE);
- cp_set (ctx, STATE, STOPPED);
cp_out (ctx, CP_END);
ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */
@@ -601,7 +590,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, offset + 0x1c, 0x00880000);
break;
case 0x86:
- gr_def(ctx, offset + 0x1c, 0x018c0000);
+ gr_def(ctx, offset + 0x1c, 0x008c0000);
break;
case 0x92:
case 0x96:
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a7c12c9..4f95a1e 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -305,9 +305,9 @@ struct nv50_gpuobj_node {
u32 align;
};
+
int
-nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
- u32 size, u32 align)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
{
struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -336,7 +336,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
flags |= NV_MEM_ACCESS_SYS;
- ret = nouveau_vm_get(chan->vm, size, 12, flags,
+ ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
&node->chan_vma);
if (ret) {
vram->put(dev, &node->vram);
@@ -345,7 +345,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
}
nouveau_vm_map(&node->chan_vma, node->vram);
- gpuobj->linst = node->chan_vma.offset;
+ gpuobj->vinst = node->chan_vma.offset;
}
gpuobj->size = size;
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
index b57a2d1..1dc5913 100644
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -160,7 +160,7 @@ nv50_mpeg_init(struct drm_device *dev, int engine)
}
static int
-nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
+nv50_mpeg_fini(struct drm_device *dev, int engine)
{
/*XXX: context save for s/r */
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index d020ed4..8a28100 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -25,862 +25,122 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_bios.h"
-#include "nouveau_hw.h"
#include "nouveau_pm.h"
-#include "nouveau_hwsq.h"
-#include "nv50_display.h"
-
-enum clk_src {
- clk_src_crystal,
- clk_src_href,
- clk_src_hclk,
- clk_src_hclkm3,
- clk_src_hclkm3d2,
- clk_src_host,
- clk_src_nvclk,
- clk_src_sclk,
- clk_src_mclk,
- clk_src_vdec,
- clk_src_dom6
-};
-
-static u32 read_clk(struct drm_device *, enum clk_src);
-
-static u32
-read_div(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- switch (dev_priv->chipset) {
- case 0x50: /* it exists, but only has bit 31, not the dividers.. */
- case 0x84:
- case 0x86:
- case 0x98:
- case 0xa0:
- return nv_rd32(dev, 0x004700);
- case 0x92:
- case 0x94:
- case 0x96:
- return nv_rd32(dev, 0x004800);
- default:
- return 0x00000000;
- }
-}
-
-static u32
-read_pll_src(struct drm_device *dev, u32 base)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 coef, ref = read_clk(dev, clk_src_crystal);
- u32 rsel = nv_rd32(dev, 0x00e18c);
- int P, N, M, id;
-
- switch (dev_priv->chipset) {
- case 0x50:
- case 0xa0:
- switch (base) {
- case 0x4020:
- case 0x4028: id = !!(rsel & 0x00000004); break;
- case 0x4008: id = !!(rsel & 0x00000008); break;
- case 0x4030: id = 0; break;
- default:
- NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
- return 0;
- }
-
- coef = nv_rd32(dev, 0x00e81c + (id * 0x0c));
- ref *= (coef & 0x01000000) ? 2 : 4;
- P = (coef & 0x00070000) >> 16;
- N = ((coef & 0x0000ff00) >> 8) + 1;
- M = ((coef & 0x000000ff) >> 0) + 1;
- break;
- case 0x84:
- case 0x86:
- case 0x92:
- coef = nv_rd32(dev, 0x00e81c);
- P = (coef & 0x00070000) >> 16;
- N = (coef & 0x0000ff00) >> 8;
- M = (coef & 0x000000ff) >> 0;
- break;
- case 0x94:
- case 0x96:
- case 0x98:
- rsel = nv_rd32(dev, 0x00c050);
- switch (base) {
- case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
- case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
- case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
- case 0x4030: rsel = 3; break;
- default:
- NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
- return 0;
- }
-
- switch (rsel) {
- case 0: id = 1; break;
- case 1: return read_clk(dev, clk_src_crystal);
- case 2: return read_clk(dev, clk_src_href);
- case 3: id = 0; break;
- }
-
- coef = nv_rd32(dev, 0x00e81c + (id * 0x28));
- P = (nv_rd32(dev, 0x00e824 + (id * 0x28)) >> 16) & 7;
- P += (coef & 0x00070000) >> 16;
- N = (coef & 0x0000ff00) >> 8;
- M = (coef & 0x000000ff) >> 0;
- break;
- default:
- BUG_ON(1);
- }
-
- if (M)
- return (ref * N / M) >> P;
- return 0;
-}
-
-static u32
-read_pll_ref(struct drm_device *dev, u32 base)
-{
- u32 src, mast = nv_rd32(dev, 0x00c040);
-
- switch (base) {
- case 0x004028:
- src = !!(mast & 0x00200000);
- break;
- case 0x004020:
- src = !!(mast & 0x00400000);
- break;
- case 0x004008:
- src = !!(mast & 0x00010000);
- break;
- case 0x004030:
- src = !!(mast & 0x02000000);
- break;
- case 0x00e810:
- return read_clk(dev, clk_src_crystal);
- default:
- NV_ERROR(dev, "bad pll 0x%06x\n", base);
- return 0;
- }
-
- if (src)
- return read_clk(dev, clk_src_href);
- return read_pll_src(dev, base);
-}
-
-static u32
-read_pll(struct drm_device *dev, u32 base)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 mast = nv_rd32(dev, 0x00c040);
- u32 ctrl = nv_rd32(dev, base + 0);
- u32 coef = nv_rd32(dev, base + 4);
- u32 ref = read_pll_ref(dev, base);
- u32 clk = 0;
- int N1, N2, M1, M2;
-
- if (base == 0x004028 && (mast & 0x00100000)) {
- /* wtf, appears to only disable post-divider on nva0 */
- if (dev_priv->chipset != 0xa0)
- return read_clk(dev, clk_src_dom6);
- }
-
- N2 = (coef & 0xff000000) >> 24;
- M2 = (coef & 0x00ff0000) >> 16;
- N1 = (coef & 0x0000ff00) >> 8;
- M1 = (coef & 0x000000ff);
- if ((ctrl & 0x80000000) && M1) {
- clk = ref * N1 / M1;
- if ((ctrl & 0x40000100) == 0x40000000) {
- if (M2)
- clk = clk * N2 / M2;
- else
- clk = 0;
- }
- }
-
- return clk;
-}
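With both stages locked, the readback above amounts to f = refclk * N1/M1 * N2/M2, the second stage applying only when (ctrl & 0x40000100) == 0x40000000. A worked example with made-up coefficients:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* example coefficients only; real values come from base+0/+4 */
		uint32_t ref = 27000;	/* kHz */
		int N1 = 100, M1 = 3;	/* stage 1: 27000 * 100 / 3 = 900000 */
		int N2 = 2,   M2 = 4;	/* stage 2: 900000 * 2 / 4 = 450000 */

		uint32_t clk = ref * N1 / M1;
		clk = clk * N2 / M2;

		printf("%u kHz\n", clk);	/* 450000 kHz = 450 MHz */
		return 0;
	}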
-
-static u32
-read_clk(struct drm_device *dev, enum clk_src src)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 mast = nv_rd32(dev, 0x00c040);
- u32 P = 0;
-
- switch (src) {
- case clk_src_crystal:
- return dev_priv->crystal;
- case clk_src_href:
- return 100000; /* PCIE reference clock */
- case clk_src_hclk:
- return read_clk(dev, clk_src_href) * 27778 / 10000;
- case clk_src_hclkm3:
- return read_clk(dev, clk_src_hclk) * 3;
- case clk_src_hclkm3d2:
- return read_clk(dev, clk_src_hclk) * 3 / 2;
- case clk_src_host:
- switch (mast & 0x30000000) {
- case 0x00000000: return read_clk(dev, clk_src_href);
- case 0x10000000: break;
- case 0x20000000: /* !0x50 */
- case 0x30000000: return read_clk(dev, clk_src_hclk);
- }
- break;
- case clk_src_nvclk:
- if (!(mast & 0x00100000))
- P = (nv_rd32(dev, 0x004028) & 0x00070000) >> 16;
- switch (mast & 0x00000003) {
- case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
- case 0x00000001: return read_clk(dev, clk_src_dom6);
- case 0x00000002: return read_pll(dev, 0x004020) >> P;
- case 0x00000003: return read_pll(dev, 0x004028) >> P;
- }
- break;
- case clk_src_sclk:
- P = (nv_rd32(dev, 0x004020) & 0x00070000) >> 16;
- switch (mast & 0x00000030) {
- case 0x00000000:
- if (mast & 0x00000080)
- return read_clk(dev, clk_src_host) >> P;
- return read_clk(dev, clk_src_crystal) >> P;
- case 0x00000010: break;
- case 0x00000020: return read_pll(dev, 0x004028) >> P;
- case 0x00000030: return read_pll(dev, 0x004020) >> P;
- }
- break;
- case clk_src_mclk:
- P = (nv_rd32(dev, 0x004008) & 0x00070000) >> 16;
- if (nv_rd32(dev, 0x004008) & 0x00000200) {
- switch (mast & 0x0000c000) {
- case 0x00000000:
- return read_clk(dev, clk_src_crystal) >> P;
- case 0x00008000:
- case 0x0000c000:
- return read_clk(dev, clk_src_href) >> P;
- }
- } else {
- return read_pll(dev, 0x004008) >> P;
- }
- break;
- case clk_src_vdec:
- P = (read_div(dev) & 0x00000700) >> 8;
- switch (dev_priv->chipset) {
- case 0x84:
- case 0x86:
- case 0x92:
- case 0x94:
- case 0x96:
- case 0xa0:
- switch (mast & 0x00000c00) {
- case 0x00000000:
- if (dev_priv->chipset == 0xa0) /* wtf?? */
- return read_clk(dev, clk_src_nvclk) >> P;
- return read_clk(dev, clk_src_crystal) >> P;
- case 0x00000400:
- return 0;
- case 0x00000800:
- if (mast & 0x01000000)
- return read_pll(dev, 0x004028) >> P;
- return read_pll(dev, 0x004030) >> P;
- case 0x00000c00:
- return read_clk(dev, clk_src_nvclk) >> P;
- }
- break;
- case 0x98:
- switch (mast & 0x00000c00) {
- case 0x00000000:
- return read_clk(dev, clk_src_nvclk) >> P;
- case 0x00000400:
- return 0;
- case 0x00000800:
- return read_clk(dev, clk_src_hclkm3d2) >> P;
- case 0x00000c00:
- return read_clk(dev, clk_src_mclk) >> P;
- }
- break;
- }
- break;
- case clk_src_dom6:
- switch (dev_priv->chipset) {
- case 0x50:
- case 0xa0:
- return read_pll(dev, 0x00e810) >> 2;
- case 0x84:
- case 0x86:
- case 0x92:
- case 0x94:
- case 0x96:
- case 0x98:
- P = (read_div(dev) & 0x00000007) >> 0;
- switch (mast & 0x0c000000) {
- case 0x00000000: return read_clk(dev, clk_src_href);
- case 0x04000000: break;
- case 0x08000000: return read_clk(dev, clk_src_hclk);
- case 0x0c000000:
- return read_clk(dev, clk_src_hclkm3) >> P;
- }
- break;
- default:
- break;
- }
- default:
- break;
- }
-
- NV_DEBUG(dev, "unknown clock source %d 0x%08x\n", src, mast);
- return 0;
-}
-
-int
-nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->chipset == 0xaa ||
- dev_priv->chipset == 0xac)
- return 0;
-
- perflvl->core = read_clk(dev, clk_src_nvclk);
- perflvl->shader = read_clk(dev, clk_src_sclk);
- perflvl->memory = read_clk(dev, clk_src_mclk);
- if (dev_priv->chipset != 0x50) {
- perflvl->vdec = read_clk(dev, clk_src_vdec);
- perflvl->dom6 = read_clk(dev, clk_src_dom6);
- }
-
- return 0;
-}
struct nv50_pm_state {
struct nouveau_pm_level *perflvl;
- struct hwsq_ucode eclk_hwsq;
- struct hwsq_ucode mclk_hwsq;
- u32 mscript;
- u32 mmast;
- u32 mctrl;
- u32 mcoef;
+ struct pll_lims pll;
+ enum pll_types type;
+ int N, M, P;
};
-static u32
-calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
- u32 clk, int *N1, int *M1, int *log2P)
+int
+nv50_pm_clock_get(struct drm_device *dev, u32 id)
{
- struct nouveau_pll_vals coef;
- int ret;
+ struct pll_lims pll;
+ int P, N, M, ret;
+ u32 reg0, reg1;
- ret = get_pll_limits(dev, reg, pll);
+ ret = get_pll_limits(dev, id, &pll);
if (ret)
- return 0;
-
- pll->vco2.maxfreq = 0;
- pll->refclk = read_pll_ref(dev, reg);
- if (!pll->refclk)
- return 0;
-
- ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
- if (ret == 0)
- return 0;
-
- *N1 = coef.N1;
- *M1 = coef.M1;
- *log2P = coef.log2P;
- return ret;
-}
+ return ret;
-static inline u32
-calc_div(u32 src, u32 target, int *div)
-{
- u32 clk0 = src, clk1 = src;
- for (*div = 0; *div <= 7; (*div)++) {
- if (clk0 <= target) {
- clk1 = clk0 << (*div ? 1 : 0);
- break;
+ reg0 = nv_rd32(dev, pll.reg + 0);
+ reg1 = nv_rd32(dev, pll.reg + 4);
+
+ if ((reg0 & 0x80000000) == 0) {
+ if (id == PLL_SHADER) {
+ NV_DEBUG(dev, "Shader PLL is disabled. "
+ "Shader clock is twice the core\n");
+ ret = nv50_pm_clock_get(dev, PLL_CORE);
+ if (ret > 0)
+ return ret << 1;
+ } else if (id == PLL_MEMORY) {
+ NV_DEBUG(dev, "Memory PLL is disabled. "
+ "Memory clock is equal to the ref_clk\n");
+ return pll.refclk;
}
- clk0 >>= 1;
- }
-
- if (target - clk0 <= clk1 - target)
- return clk0;
- (*div)--;
- return clk1;
-}
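calc_div() halves the source clock up to seven times until it drops to or below the target, then returns whichever neighbouring value lands closer. A runnable copy with a sample trace:

	#include <stdint.h>
	#include <stdio.h>

	/* same shape as the deleted helper */
	static uint32_t calc_div(uint32_t src, uint32_t target, int *div)
	{
		uint32_t clk0 = src, clk1 = src;

		for (*div = 0; *div <= 7; (*div)++) {
			if (clk0 <= target) {
				clk1 = clk0 << (*div ? 1 : 0);
				break;
			}
			clk0 >>= 1;	/* halve until at or below target */
		}

		if (target - clk0 <= clk1 - target)
			return clk0;
		(*div)--;
		return clk1;
	}

	int main(void)
	{
		int div;
		uint32_t clk = calc_div(1000, 300, &div);

		/* 1000 -> 500 -> 250 <= 300; 250 is closer than 500 */
		printf("%u @ div %d\n", clk, div);	/* 250 @ div 2 */
		return 0;
	}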
-
-static inline u32
-clk_same(u32 a, u32 b)
-{
- return ((a / 1000) == (b / 1000));
-}
-
-static void
-mclk_precharge(struct nouveau_mem_exec_func *exec)
-{
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
- hwsq_wr32(hwsq, 0x1002d4, 0x00000001);
-}
-
-static void
-mclk_refresh(struct nouveau_mem_exec_func *exec)
-{
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
- hwsq_wr32(hwsq, 0x1002d0, 0x00000001);
-}
-
-static void
-mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
-{
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
- hwsq_wr32(hwsq, 0x100210, enable ? 0x80000000 : 0x00000000);
-}
-
-static void
-mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
-{
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
- hwsq_wr32(hwsq, 0x1002dc, enable ? 0x00000001 : 0x00000000);
-}
-
-static void
-mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
-{
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
- if (nsec > 1000)
- hwsq_usec(hwsq, (nsec + 500) / 1000);
-}
-
-static u32
-mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
-{
- if (mr <= 1)
- return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
- if (mr <= 3)
- return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
- return 0;
-}
-
-static void
-mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
-{
- struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
- if (mr <= 1) {
- if (dev_priv->vram_rank_B)
- hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
- hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
- } else
- if (mr <= 3) {
- if (dev_priv->vram_rank_B)
- hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
- hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
- }
-}
-
-static void
-mclk_clock_set(struct nouveau_mem_exec_func *exec)
-{
- struct nv50_pm_state *info = exec->priv;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
- u32 ctrl = nv_rd32(exec->dev, 0x004008);
-
- info->mmast = nv_rd32(exec->dev, 0x00c040);
- info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
- info->mmast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
-
- hwsq_wr32(hwsq, 0xc040, info->mmast);
- hwsq_wr32(hwsq, 0x4008, ctrl | 0x00000200); /* bypass MPLL */
- if (info->mctrl & 0x80000000)
- hwsq_wr32(hwsq, 0x400c, info->mcoef);
- hwsq_wr32(hwsq, 0x4008, info->mctrl);
-}
-
-static void
-mclk_timing_set(struct nouveau_mem_exec_func *exec)
-{
- struct drm_device *dev = exec->dev;
- struct nv50_pm_state *info = exec->priv;
- struct nouveau_pm_level *perflvl = info->perflvl;
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
- int i;
-
- for (i = 0; i < 9; i++) {
- u32 reg = 0x100220 + (i * 4);
- u32 val = nv_rd32(dev, reg);
- if (val != perflvl->timing.reg[i])
- hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
- }
-}
-
-static int
-calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- struct nv50_pm_state *info)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 crtc_mask = nv50_display_active_crtcs(dev);
- struct nouveau_mem_exec_func exec = {
- .dev = dev,
- .precharge = mclk_precharge,
- .refresh = mclk_refresh,
- .refresh_auto = mclk_refresh_auto,
- .refresh_self = mclk_refresh_self,
- .wait = mclk_wait,
- .mrg = mclk_mrg,
- .mrs = mclk_mrs,
- .clock_set = mclk_clock_set,
- .timing_set = mclk_timing_set,
- .priv = info
- };
- struct hwsq_ucode *hwsq = &info->mclk_hwsq;
- struct pll_lims pll;
- int N, M, P;
- int ret;
-
- /* use pcie refclock if possible, otherwise use mpll */
- info->mctrl = nv_rd32(dev, 0x004008);
- info->mctrl &= ~0x81ff0200;
- if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
- info->mctrl |= 0x00000200 | (pll.log2p_bias << 19);
- } else {
- ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
- if (ret == 0)
- return -EINVAL;
-
- info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
- info->mctrl |= pll.log2p_bias << 19;
- info->mcoef = (N << 8) | M;
}
- /* build the ucode which will reclock the memory for us */
- hwsq_init(hwsq);
- if (crtc_mask) {
- hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
- hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
- }
- if (dev_priv->chipset >= 0x92)
- hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
- hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
- hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
+ P = (reg0 & 0x00070000) >> 16;
+ N = (reg1 & 0x0000ff00) >> 8;
+ M = (reg1 & 0x000000ff);
- ret = nouveau_mem_exec(&exec, perflvl);
- if (ret)
- return ret;
-
- hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
- hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
- if (dev_priv->chipset >= 0x92)
- hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
- hwsq_fini(hwsq);
- return 0;
+ return ((pll.refclk * N / M) >> P);
}
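The readback reduces to f = (refclk * N / M) >> P with the fields unpacked as above. A worked example with illustrative register values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* field layout per the readback: P in reg0[18:16],
		 * N in reg1[15:8], M in reg1[7:0] -- example values only */
		uint32_t refclk = 27000;	/* kHz */
		uint32_t reg0 = 0x80010000;	/* enabled, P = 1 */
		uint32_t reg1 = 0x00006403;	/* N = 0x64 = 100, M = 3 */

		int P = (reg0 & 0x00070000) >> 16;
		int N = (reg1 & 0x0000ff00) >> 8;
		int M = (reg1 & 0x000000ff);

		printf("%u kHz\n", (refclk * N / M) >> P);	/* 450000 */
		return 0;
	}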
void *
-nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ u32 id, int khz)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_pm_state *info;
- struct hwsq_ucode *hwsq;
- struct pll_lims pll;
- u32 out, mast, divs, ctrl;
- int clk, ret = -EINVAL;
- int N, M, P1, P2;
+ struct nv50_pm_state *state;
+ int dummy, ret;
- if (dev_priv->chipset == 0xaa ||
- dev_priv->chipset == 0xac)
- return ERR_PTR(-ENODEV);
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
return ERR_PTR(-ENOMEM);
- info->perflvl = perflvl;
-
- /* memory: build hwsq ucode which we'll use to reclock memory.
- * use pcie refclock if possible, otherwise use mpll */
- info->mclk_hwsq.len = 0;
- if (perflvl->memory) {
- ret = calc_mclk(dev, perflvl, info);
- if (ret)
- goto error;
- info->mscript = perflvl->memscript;
- }
-
- divs = read_div(dev);
- mast = info->mmast;
-
- /* start building HWSQ script for engine reclocking */
- hwsq = &info->eclk_hwsq;
- hwsq_init(hwsq);
- hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
- hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? */
-
- /* vdec/dom6: switch to "safe" clocks temporarily */
- if (perflvl->vdec) {
- mast &= ~0x00000c00;
- divs &= ~0x00000700;
- }
-
- if (perflvl->dom6) {
- mast &= ~0x0c000000;
- divs &= ~0x00000007;
- }
-
- hwsq_wr32(hwsq, 0x00c040, mast);
-
- /* vdec: avoid modifying xpll until we know exactly how the other
- * clock domains work, i suspect at least some of them can also be
- * tied to xpll...
- */
- if (perflvl->vdec) {
- /* see how close we can get using nvclk as a source */
- clk = calc_div(perflvl->core, perflvl->vdec, &P1);
-
- /* see how close we can get using xpll/hclk as a source */
- if (dev_priv->chipset != 0x98)
- out = read_pll(dev, 0x004030);
- else
- out = read_clk(dev, clk_src_hclkm3d2);
- out = calc_div(out, perflvl->vdec, &P2);
-
- /* select whichever gets us closest */
- if (abs((int)perflvl->vdec - clk) <=
- abs((int)perflvl->vdec - out)) {
- if (dev_priv->chipset != 0x98)
- mast |= 0x00000c00;
- divs |= P1 << 8;
- } else {
- mast |= 0x00000800;
- divs |= P2 << 8;
- }
- }
-
- /* dom6: no idea what this is, but we're limited to various combinations
- * of the host clock frequency
- */
- if (perflvl->dom6) {
- if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
- mast |= 0x00000000;
- } else
- if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
- mast |= 0x08000000;
- } else {
- clk = read_clk(dev, clk_src_hclk) * 3;
- clk = calc_div(clk, perflvl->dom6, &P1);
-
- mast |= 0x0c000000;
- divs |= P1;
- }
- }
-
- /* vdec/dom6: complete switch to new clocks */
- switch (dev_priv->chipset) {
- case 0x92:
- case 0x94:
- case 0x96:
- hwsq_wr32(hwsq, 0x004800, divs);
- break;
- default:
- hwsq_wr32(hwsq, 0x004700, divs);
- break;
- }
-
- hwsq_wr32(hwsq, 0x00c040, mast);
-
- /* core/shader: make sure sclk/nvclk are disconnected from their
- * PLLs (nvclk to dom6, sclk to hclk)
- */
- if (dev_priv->chipset < 0x92)
- mast = (mast & ~0x001000b0) | 0x00100080;
- else
- mast = (mast & ~0x000000b3) | 0x00000081;
-
- hwsq_wr32(hwsq, 0x00c040, mast);
-
- /* core: for the moment at least, always use nvpll */
- clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
- if (clk == 0)
- goto error;
-
- ctrl = nv_rd32(dev, 0x004028) & ~0xc03f0100;
- mast &= ~0x00100000;
- mast |= 3;
-
- hwsq_wr32(hwsq, 0x004028, 0x80000000 | (P1 << 19) | (P1 << 16) | ctrl);
- hwsq_wr32(hwsq, 0x00402c, (N << 8) | M);
-
- /* shader: tie to nvclk if possible, otherwise use spll. have to be
- * very careful that the shader clock is at least twice the core, or
- * some chipsets will be very unhappy. I expect most or all of these
- * cases will be handled by tying to nvclk, but there may be corner
- * cases left over.
- */
- ctrl = nv_rd32(dev, 0x004020) & ~0xc03f0100;
-
- if (P1-- && perflvl->shader == (perflvl->core << 1)) {
- hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
- hwsq_wr32(hwsq, 0x00c040, 0x00000020 | mast);
- } else {
- clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
- if (clk == 0)
- goto error;
- ctrl |= 0x80000000;
-
- hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
- hwsq_wr32(hwsq, 0x004024, (N << 8) | M);
- hwsq_wr32(hwsq, 0x00c040, 0x00000030 | mast);
- }
-
- hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
- hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? */
- hwsq_fini(hwsq);
-
- return info;
-error:
- kfree(info);
- return ERR_PTR(ret);
-}
-
-static int
-prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 hwsq_data, hwsq_kick;
- int i;
+ state->type = id;
+ state->perflvl = perflvl;
- if (dev_priv->chipset < 0x94) {
- hwsq_data = 0x001400;
- hwsq_kick = 0x00000003;
- } else {
- hwsq_data = 0x080000;
- hwsq_kick = 0x00000001;
+ ret = get_pll_limits(dev, id, &state->pll);
+ if (ret < 0) {
+ kfree(state);
+ return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
}
- /* upload hwsq ucode */
- nv_mask(dev, 0x001098, 0x00000008, 0x00000000);
- nv_wr32(dev, 0x001304, 0x00000000);
- if (dev_priv->chipset >= 0x92)
- nv_wr32(dev, 0x001318, 0x00000000);
- for (i = 0; i < hwsq->len / 4; i++)
- nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
- nv_mask(dev, 0x001098, 0x00000018, 0x00000018);
-
- /* launch, and wait for completion */
- nv_wr32(dev, 0x00130c, hwsq_kick);
- if (!nv_wait(dev, 0x001308, 0x00000100, 0x00000000)) {
- NV_ERROR(dev, "hwsq ucode exec timed out\n");
- NV_ERROR(dev, "0x001308: 0x%08x\n", nv_rd32(dev, 0x001308));
- for (i = 0; i < hwsq->len / 4; i++) {
- NV_ERROR(dev, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
- nv_rd32(dev, 0x001400 + (i * 4)));
- }
- return -EIO;
+ ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M,
+ &dummy, &dummy, &state->P);
+ if (ret < 0) {
+ kfree(state);
+ return ERR_PTR(ret);
}
- return 0;
+ return state;
}
-int
-nv50_pm_clocks_set(struct drm_device *dev, void *data)
+void
+nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
{
- struct nv50_pm_state *info = data;
- struct bit_entry M;
- int ret = -EBUSY;
-
- /* halt and idle execution engines */
- nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
- if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010))
- goto resume;
- if (!nv_wait(dev, 0x00251c, 0x0000003f, 0x0000003f))
- goto resume;
+ struct nv50_pm_state *state = pre_state;
+ struct nouveau_pm_level *perflvl = state->perflvl;
+ u32 reg = state->pll.reg, tmp;
+ struct bit_entry BIT_M;
+ u16 script;
+ int N = state->N;
+ int M = state->M;
+ int P = state->P;
- /* program memory clock, if necessary - must come before engine clock
- * reprogramming due to how we construct the hwsq scripts in pre()
- */
- if (info->mclk_hwsq.len) {
- /* execute some scripts that do ??? from the vbios.. */
- if (!bit_table(dev, 'M', &M) && M.version == 1) {
- if (M.length >= 6)
- nouveau_bios_init_exec(dev, ROM16(M.data[5]));
- if (M.length >= 8)
- nouveau_bios_init_exec(dev, ROM16(M.data[7]));
- if (M.length >= 10)
- nouveau_bios_init_exec(dev, ROM16(M.data[9]));
- nouveau_bios_init_exec(dev, info->mscript);
- }
+ if (state->type == PLL_MEMORY && perflvl->memscript &&
+ bit_table(dev, 'M', &BIT_M) == 0 &&
+ BIT_M.version == 1 && BIT_M.length >= 0x0b) {
+ script = ROM16(BIT_M.data[0x05]);
+ if (script)
+ nouveau_bios_run_init_table(dev, script, NULL);
+ script = ROM16(BIT_M.data[0x07]);
+ if (script)
+ nouveau_bios_run_init_table(dev, script, NULL);
+ script = ROM16(BIT_M.data[0x09]);
+ if (script)
+ nouveau_bios_run_init_table(dev, script, NULL);
- ret = prog_hwsq(dev, &info->mclk_hwsq);
- if (ret)
- goto resume;
+ nouveau_bios_run_init_table(dev, perflvl->memscript, NULL);
}
- /* program engine clocks */
- ret = prog_hwsq(dev, &info->eclk_hwsq);
-
-resume:
- nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
- kfree(info);
- return ret;
-}
-
-static int
-pwm_info(struct drm_device *dev, int *line, int *ctrl, int *indx)
-{
- if (*line == 0x04) {
- *ctrl = 0x00e100;
- *line = 4;
- *indx = 0;
- } else
- if (*line == 0x09) {
- *ctrl = 0x00e100;
- *line = 9;
- *indx = 1;
- } else
- if (*line == 0x10) {
- *ctrl = 0x00e28c;
- *line = 0;
- *indx = 0;
- } else {
- NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", *line);
- return -ENODEV;
+ if (state->type == PLL_MEMORY) {
+ nv_wr32(dev, 0x100210, 0);
+ nv_wr32(dev, 0x1002dc, 1);
}
- return 0;
-}
-
-int
-nv50_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
-{
- int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
- if (ret)
- return ret;
+ tmp = nv_rd32(dev, reg + 0) & 0xfff8ffff;
+ tmp |= 0x80000000 | (P << 16);
+ nv_wr32(dev, reg + 0, tmp);
+ nv_wr32(dev, reg + 4, (N << 8) | M);
- if (nv_rd32(dev, ctrl) & (1 << line)) {
- *divs = nv_rd32(dev, 0x00e114 + (id * 8));
- *duty = nv_rd32(dev, 0x00e118 + (id * 8));
- return 0;
+ if (state->type == PLL_MEMORY) {
+ nv_wr32(dev, 0x1002dc, 0);
+ nv_wr32(dev, 0x100210, 0x80000000);
}
- return -EINVAL;
+ kfree(state);
}
-int
-nv50_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
-{
- int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
- if (ret)
- return ret;
-
- nv_mask(dev, ctrl, 0x00010001 << line, 0x00000001 << line);
- nv_wr32(dev, 0x00e114 + (id * 8), divs);
- nv_wr32(dev, 0x00e118 + (id * 8), duty | 0x80000000);
- return 0;
-}
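The deleted PWM helpers expose a divider/duty register pair per controller (0x00e114/0x00e118 + id * 8); the fan duty cycle is presumably just duty/divs. Worked example:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* per-controller pair from the deleted helpers:
		 * 0x00e114 + id*8 = divider, 0x00e118 + id*8 = duty */
		uint32_t divs = 0x0000021c;	/* example: 540 */
		uint32_t duty = 0x0000010e;	/* example: 270 */

		printf("fan pwm at %u%%\n", duty * 100 / divs);	/* 50% */
		return 0;
	}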
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index a7844ab..c25c5938 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -36,193 +36,6 @@
#include "nouveau_crtc.h"
#include "nv50_display.h"
-static u32
-nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
- static const u8 nv50[] = { 16, 8, 0, 24 };
- if (dev_priv->card_type == 0xaf)
- return nvaf[lane];
- return nv50[lane];
-}
-
-static void
-nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern)
-{
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
-}
-
-static void
-nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
- u8 lane, u8 swing, u8 preem)
-{
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
- u32 mask = 0x000000ff << shift;
- u8 *table, *entry, *config;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
- NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- config = entry + table[4];
- while (config[0] != swing || config[1] != preem) {
- config += table[5];
- if (config >= entry + table[4] + entry[4] * table[5])
- return;
- }
-
- nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
- nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
- nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
-}
-
-static void
-nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
- int link_nr, u32 link_bw, bool enhframe)
-{
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
- u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800)) & ~0x000c0000;
- u8 *table, *entry, mask;
- int i;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
- NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- entry = ROMPTR(dev, entry[10]);
- if (entry) {
- while (link_bw < ROM16(entry[0]) * 10)
- entry += 4;
-
- nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc);
- }
-
- dpctrl |= ((1 << link_nr) - 1) << 16;
- if (enhframe)
- dpctrl |= 0x00004000;
-
- if (link_bw > 162000)
- clksor |= 0x00040000;
-
- nv_wr32(dev, 0x614300 + (or * 0x800), clksor);
- nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), dpctrl);
-
- mask = 0;
- for (i = 0; i < link_nr; i++)
- mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
- nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
-}
-
-static void
-nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
-{
- u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
- u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800));
- if (clksor & 0x000c0000)
- *bw = 270000;
- else
- *bw = 162000;
-
- if (dpctrl > 0x00030000) *nr = 4;
- else if (dpctrl > 0x00010000) *nr = 2;
- else *nr = 1;
-}
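/* A worked decode of the two register reads above, on assumed raw values:
 *   dpctrl & 0x000f0000 == 0x000f0000  ->  *nr = 4 (one bit per lane)
 *   dpctrl & 0x000f0000 == 0x00030000  ->  *nr = 2
 *   clksor & 0x000c0000 nonzero        ->  *bw = 270000, else 162000
 */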
-
-void
-nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
-{
- const u32 symbol = 100000;
- int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
- int TU, VTUi, VTUf, VTUa;
- u64 link_data_rate, link_ratio, unk;
- u32 best_diff = 64 * symbol;
- u32 link_nr, link_bw, r;
-
- /* calculate packed data rate for each lane */
- nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw);
- link_data_rate = (clk * bpp / 8) / link_nr;
-
- /* calculate ratio of packed data rate to link symbol rate */
- link_ratio = link_data_rate * symbol;
- r = do_div(link_ratio, link_bw);
-
- for (TU = 64; TU >= 32; TU--) {
- /* calculate average number of valid symbols in each TU */
- u32 tu_valid = link_ratio * TU;
- u32 calc, diff;
-
- /* find a hw representation for the fraction.. */
- VTUi = tu_valid / symbol;
- calc = VTUi * symbol;
- diff = tu_valid - calc;
- if (diff) {
- if (diff >= (symbol / 2)) {
- VTUf = symbol / (symbol - diff);
- if (symbol - (VTUf * diff))
- VTUf++;
-
- if (VTUf <= 15) {
- VTUa = 1;
- calc += symbol - (symbol / VTUf);
- } else {
- VTUa = 0;
- VTUf = 1;
- calc += symbol;
- }
- } else {
- VTUa = 0;
- VTUf = min((int)(symbol / diff), 15);
- calc += symbol / VTUf;
- }
-
- diff = calc - tu_valid;
- } else {
- /* no remainder, but the hw doesn't like the fractional
- * part to be zero. decrement the integer part and
- * have the fraction add a whole symbol back
- */
- VTUa = 0;
- VTUf = 1;
- VTUi--;
- }
-
- if (diff < best_diff) {
- best_diff = diff;
- bestTU = TU;
- bestVTUa = VTUa;
- bestVTUf = VTUf;
- bestVTUi = VTUi;
- if (diff == 0)
- break;
- }
- }
-
- if (!bestTU) {
- NV_ERROR(dev, "DP: unable to find suitable config\n");
- return;
- }
-
- /* XXX close to vbios numbers, but not right */
- unk = (symbol - link_ratio) * bestTU;
- unk *= link_ratio;
- r = do_div(unk, symbol);
- r = do_div(unk, symbol);
- unk += 6;
-
- nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
- nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
- bestVTUf << 16 |
- bestVTUi << 8 |
- unk);
-}
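/* A worked pass of the search above, assuming a 148500kHz, 24bpp mode
 * driven over two lanes at 270000kHz:
 *   link_data_rate = (148500 * 24 / 8) / 2     = 222750
 *   link_ratio     = 222750 * 100000 / 270000  =  82500
 *   TU = 64: tu_valid = 82500 * 64 = 5280000, VTUi = 52; the
 *            remainder 80000 >= symbol/2, so VTUf = 6, VTUa = 1,
 *            giving calc = 5283334 and diff = 3334.
 * The loop keeps whichever TU produces the smallest such diff.
 */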
static void
nv50_sor_disconnect(struct drm_encoder *encoder)
{
@@ -247,8 +60,6 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
- nouveau_hdmi_mode_set(encoder, NULL);
-
nv_encoder->crtc = NULL;
nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
}
@@ -304,13 +115,20 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
if (nv_encoder->dcb->type == OUTPUT_DP) {
- struct dp_train_func func = {
- .link_set = nv50_sor_dp_link_set,
- .train_set = nv50_sor_dp_train_set,
- .train_adj = nv50_sor_dp_train_adj
- };
+ struct nouveau_i2c_chan *auxch;
+
+ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!auxch)
+ return;
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
+ if (mode == DRM_MODE_DPMS_ON) {
+ u8 status = DP_SET_POWER_D0;
+ nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
+ nouveau_dp_link_train(encoder);
+ } else {
+ u8 status = DP_SET_POWER_D3;
+ nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
+ }
}
}
@@ -342,8 +160,11 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
- connector->native_mode)
- drm_mode_copy(adjusted_mode, connector->native_mode);
+ connector->native_mode) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
return true;
}
@@ -351,12 +172,6 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
static void
nv50_sor_prepare(struct drm_encoder *encoder)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- nv50_sor_disconnect(encoder);
- if (nv_encoder->dcb->type == OUTPUT_DP) {
- /* avoid race between link training and supervisor intr */
- nv50_display_sync(encoder->dev);
- }
}
static void
@@ -365,43 +180,33 @@ nv50_sor_commit(struct drm_encoder *encoder)
}
static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
uint32_t mode_ctl = 0;
int ret;
NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
nv_encoder->or, nv_encoder->dcb->type, crtc->index);
- nv_encoder->crtc = encoder->crtc;
+
+ nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
switch (nv_encoder->dcb->type) {
case OUTPUT_TMDS:
if (nv_encoder->dcb->sorconf.link & 1) {
- if (mode->clock < 165000)
+ if (adjusted_mode->clock < 165000)
mode_ctl = 0x0100;
else
mode_ctl = 0x0500;
} else
mode_ctl = 0x0200;
-
- nouveau_hdmi_mode_set(encoder, mode);
break;
case OUTPUT_DP:
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
- mode_ctl |= 0x00020000;
- } else {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
- mode_ctl |= 0x00050000;
- }
-
+ mode_ctl |= (nv_encoder->dp.mc_unknown << 16);
if (nv_encoder->dcb->sorconf.link & 1)
mode_ctl |= 0x00000800;
else
@@ -416,22 +221,21 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
else
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
- nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
ret = RING_SPACE(evo, 2);
if (ret) {
NV_ERROR(dev, "no space while connecting SOR\n");
- nv_encoder->crtc = NULL;
return;
}
BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
OUT_RING(evo, mode_ctl);
+
+ nv_encoder->crtc = encoder->crtc;
}
static struct drm_crtc *
@@ -509,6 +313,29 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry)
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
+ uint32_t tmp;
+
+ tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
+
+ switch ((tmp & 0x00000f00) >> 8) {
+ case 8:
+ case 9:
+ nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16;
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ nv_encoder->dp.unk0 = tmp & 0x000001fc;
+ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+ nv_encoder->dp.unk1 = tmp & 0x010f7f3f;
+ break;
+ default:
+ break;
+ }
+
+ if (!nv_encoder->dp.mc_unknown)
+ nv_encoder->dp.mc_unknown = 5;
+ }
+
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 44fbac9..1a0dd49 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -48,7 +48,7 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
phys |= 0x60;
else if (coverage <= 64 * 1024 * 1024)
phys |= 0x40;
- else if (coverage <= 128 * 1024 * 1024)
+ else if (coverage < 128 * 1024 * 1024)
phys |= 0x20;
}
@@ -57,15 +57,27 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
}
static inline u64
-vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
+ struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
+
phys |= 1; /* present */
phys |= (u64)memtype << 40;
+
+ /* IGPs don't have real VRAM, re-target to stolen system memory */
+ if (target == 0 && dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ target = 3;
+ }
+
phys |= target << 4;
+
if (vma->access & NV_MEM_ACCESS_SYS)
phys |= (1 << 6);
+
if (!(vma->access & NV_MEM_ACCESS_WO))
phys |= (1 << 3);
+
return phys;
}
@@ -73,19 +85,11 @@ void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
- struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
u32 comp = (mem->memtype & 0x180) >> 7;
- u32 block, target;
+ u32 block;
int i;
- /* IGPs don't have real VRAM, re-target to stolen system memory */
- target = 0;
- if (dev_priv->vram_sys_base) {
- phys += dev_priv->vram_sys_base;
- target = 3;
- }
-
- phys = vm_addr(vma, phys, mem->memtype, target);
+ phys = nv50_vm_addr(vma, phys, mem->memtype, 0);
pte <<= 3;
cnt <<= 3;
@@ -121,10 +125,9 @@ void
nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
- u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
pte <<= 3;
while (cnt--) {
- u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
+ u64 phys = nv50_vm_addr(vma, (u64)*list++, mem->memtype, 2);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
@@ -153,7 +156,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
pinstmem->flush(vm->dev);
/* BAR */
- if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
+ if (vm != dev_priv->chan_vm) {
nv50_vm_flush_engine(vm->dev, 6);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 9ed9ae39..ffbc3d8 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -51,7 +51,9 @@ void
nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *this;
struct nouveau_mem *mem;
@@ -82,7 +84,9 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
u32 memtype, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
struct nouveau_mem *mem;
int comp = (memtype & 0x300) >> 8;
@@ -160,7 +164,7 @@ nv50_vram_rblock(struct drm_device *dev)
colbits = (r4 & 0x0000f000) >> 12;
rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
- banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
+ banks = ((r4 & 0x01000000) ? 8 : 4);
rowsize = parts * banks * (1 << colbits) * 8;
predicted = rowsize << rowbitsa;
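/* Example of the row-size arithmetic above, assuming parts = 8,
 * colbits = 10, rowbitsa = 12 and the 4-bank case:
 *   rowsize   = 8 * 4 * (1 << 10) * 8 = 262144 bytes
 *   predicted = 262144 << 12          = 1GiB
 */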
@@ -186,52 +190,22 @@ int
nv50_vram_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 pfb714 = nv_rd32(dev, 0x100714);
- u32 rblock, length;
-
- switch (pfb714 & 0x00000007) {
- case 0: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
- case 1:
- if (nouveau_mem_vbios_type(dev) == NV_MEM_TYPE_DDR3)
- dev_priv->vram_type = NV_MEM_TYPE_DDR3;
- else
- dev_priv->vram_type = NV_MEM_TYPE_DDR2;
- break;
- case 2: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
- case 3: dev_priv->vram_type = NV_MEM_TYPE_GDDR4; break;
- case 4: dev_priv->vram_type = NV_MEM_TYPE_GDDR5; break;
- default:
- break;
- }
- dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x100200) & 0x4);
dev_priv->vram_size = nv_rd32(dev, 0x10020c);
dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
dev_priv->vram_size &= 0xffffffff00ULL;
- /* IGPs, no funky reordering happens here, they don't have VRAM */
- if (dev_priv->chipset == 0xaa ||
- dev_priv->chipset == 0xac ||
- dev_priv->chipset == 0xaf) {
+ switch (dev_priv->chipset) {
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
- rblock = 4096 >> 12;
- } else {
- rblock = nv50_vram_rblock(dev) >> 12;
+ dev_priv->vram_rblock_size = 4096;
+ break;
+ default:
+ dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
+ break;
}
- length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
-
- return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
-}
-
-void
-nv50_vram_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-
- nouveau_mm_fini(&vram->mm);
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv84_bsp.c b/drivers/gpu/drm/nouveau/nv84_bsp.c
deleted file mode 100644
index 7487573..0000000
--- a/drivers/gpu/drm/nouveau/nv84_bsp.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
- * more than just an enable/disable stub this needs to be split out to
- * nv98_bsp.c...
- */
-
-struct nv84_bsp_engine {
- struct nouveau_exec_engine base;
-};
-
-static int
-nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
-{
- if (!(nv_rd32(dev, 0x000200) & 0x00008000))
- return 0;
-
- nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
- return 0;
-}
-
-static int
-nv84_bsp_init(struct drm_device *dev, int engine)
-{
- nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
- return 0;
-}
-
-static void
-nv84_bsp_destroy(struct drm_device *dev, int engine)
-{
- struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, BSP);
-
- kfree(pbsp);
-}
-
-int
-nv84_bsp_create(struct drm_device *dev)
-{
- struct nv84_bsp_engine *pbsp;
-
- pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
- if (!pbsp)
- return -ENOMEM;
-
- pbsp->base.destroy = nv84_bsp_destroy;
- pbsp->base.init = nv84_bsp_init;
- pbsp->base.fini = nv84_bsp_fini;
-
- NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index edece9c..75b809a 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -138,7 +138,7 @@ nv84_crypt_isr(struct drm_device *dev)
}
static int
-nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+nv84_crypt_fini(struct drm_device *dev, int engine)
{
nv_wr32(dev, 0x102140, 0x00000000);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv84_vp.c b/drivers/gpu/drm/nouveau/nv84_vp.c
deleted file mode 100644
index 6570d30..0000000
--- a/drivers/gpu/drm/nouveau/nv84_vp.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
- * more than just an enable/disable stub this needs to be split out to
- * nv98_vp.c...
- */
-
-struct nv84_vp_engine {
- struct nouveau_exec_engine base;
-};
-
-static int
-nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
-{
- if (!(nv_rd32(dev, 0x000200) & 0x00020000))
- return 0;
-
- nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
- return 0;
-}
-
-static int
-nv84_vp_init(struct drm_device *dev, int engine)
-{
- nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
- return 0;
-}
-
-static void
-nv84_vp_destroy(struct drm_device *dev, int engine)
-{
- struct nv84_vp_engine *pvp = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, VP);
-
- kfree(pvp);
-}
-
-int
-nv84_vp_create(struct drm_device *dev)
-{
- struct nv84_vp_engine *pvp;
-
- pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
- if (!pvp)
- return -ENOMEM;
-
- pvp->base.destroy = nv84_vp_destroy;
- pvp->base.init = nv84_vp_init;
- pvp->base.fini = nv84_vp_fini;
-
- NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
deleted file mode 100644
index db94ff0..0000000
--- a/drivers/gpu/drm/nouveau/nv98_crypt.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-struct nv98_crypt_engine {
- struct nouveau_exec_engine base;
-};
-
-static int
-nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
-{
- if (!(nv_rd32(dev, 0x000200) & 0x00004000))
- return 0;
-
- nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
- return 0;
-}
-
-static int
-nv98_crypt_init(struct drm_device *dev, int engine)
-{
- nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
- return 0;
-}
-
-static void
-nv98_crypt_destroy(struct drm_device *dev, int engine)
-{
- struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, CRYPT);
-
- kfree(pcrypt);
-}
-
-int
-nv98_crypt_create(struct drm_device *dev)
-{
- struct nv98_crypt_engine *pcrypt;
-
- pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
- if (!pcrypt)
- return -ENOMEM;
-
- pcrypt->base.destroy = nv98_crypt_destroy;
- pcrypt->base.init = nv98_crypt_init;
- pcrypt->base.fini = nv98_crypt_fini;
-
- NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv98_ppp.c b/drivers/gpu/drm/nouveau/nv98_ppp.c
deleted file mode 100644
index a987dd6..0000000
--- a/drivers/gpu/drm/nouveau/nv98_ppp.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-struct nv98_ppp_engine {
- struct nouveau_exec_engine base;
-};
-
-static int
-nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
-{
- if (!(nv_rd32(dev, 0x000200) & 0x00000002))
- return 0;
-
- nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
- return 0;
-}
-
-static int
-nv98_ppp_init(struct drm_device *dev, int engine)
-{
- nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
- return 0;
-}
-
-static void
-nv98_ppp_destroy(struct drm_device *dev, int engine)
-{
- struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, PPP);
-
- kfree(pppp);
-}
-
-int
-nv98_ppp_create(struct drm_device *dev)
-{
- struct nv98_ppp_engine *pppp;
-
- pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
- if (!pppp)
- return -ENOMEM;
-
- pppp->base.destroy = nv98_ppp_destroy;
- pppp->base.init = nv98_ppp_init;
- pppp->base.fini = nv98_ppp_fini;
-
- NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
index 8f356d5..b86820a 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -140,7 +140,7 @@ nva3_copy_init(struct drm_device *dev, int engine)
}
static int
-nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
+nva3_copy_fini(struct drm_device *dev, int engine)
{
nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
index abc3662..eaf35f8 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
@@ -31,9 +31,8 @@
*/
ifdef(`NVA3',
-.section #nva3_pcopy_data
-,
-.section #nvc0_pcopy_data
+.section nva3_pcopy_data,
+.section nvc0_pcopy_data
)
ctx_object: .b32 0
@@ -43,7 +42,7 @@ ctx_dma_query: .b32 0
ctx_dma_src: .b32 0
ctx_dma_dst: .b32 0
,)
-.equ #ctx_dma_count 3
+.equ ctx_dma_count 3
ctx_query_address_high: .b32 0
ctx_query_address_low: .b32 0
ctx_query_counter: .b32 0
@@ -79,65 +78,64 @@ ctx_ycnt: .b32 0
dispatch_table:
// mthd 0x0000, NAME
.b16 0x000 1
-.b32 #ctx_object ~0xffffffff
+.b32 ctx_object ~0xffffffff
// mthd 0x0100, NOP
.b16 0x040 1
-.b32 0x00010000 + #cmd_nop ~0xffffffff
+.b32 0x00010000 + cmd_nop ~0xffffffff
// mthd 0x0140, PM_TRIGGER
.b16 0x050 1
-.b32 0x00010000 + #cmd_pm_trigger ~0xffffffff
+.b32 0x00010000 + cmd_pm_trigger ~0xffffffff
ifdef(`NVA3', `
// mthd 0x0180-0x018c, DMA_
-.b16 0x060 #ctx_dma_count
+.b16 0x060 ctx_dma_count
dispatch_dma:
-.b32 0x00010000 + #cmd_dma ~0xffffffff
-.b32 0x00010000 + #cmd_dma ~0xffffffff
-.b32 0x00010000 + #cmd_dma ~0xffffffff
+.b32 0x00010000 + cmd_dma ~0xffffffff
+.b32 0x00010000 + cmd_dma ~0xffffffff
+.b32 0x00010000 + cmd_dma ~0xffffffff
',)
// mthd 0x0200-0x0218, SRC_TILE
.b16 0x80 7
-.b32 #ctx_src_tile_mode ~0x00000fff
-.b32 #ctx_src_xsize ~0x0007ffff
-.b32 #ctx_src_ysize ~0x00001fff
-.b32 #ctx_src_zsize ~0x000007ff
-.b32 #ctx_src_zoff ~0x00000fff
-.b32 #ctx_src_xoff ~0x0007ffff
-.b32 #ctx_src_yoff ~0x00001fff
+.b32 ctx_src_tile_mode ~0x00000fff
+.b32 ctx_src_xsize ~0x0007ffff
+.b32 ctx_src_ysize ~0x00001fff
+.b32 ctx_src_zsize ~0x000007ff
+.b32 ctx_src_zoff ~0x00000fff
+.b32 ctx_src_xoff ~0x0007ffff
+.b32 ctx_src_yoff ~0x00001fff
// mthd 0x0220-0x0238, DST_TILE
.b16 0x88 7
-.b32 #ctx_dst_tile_mode ~0x00000fff
-.b32 #ctx_dst_xsize ~0x0007ffff
-.b32 #ctx_dst_ysize ~0x00001fff
-.b32 #ctx_dst_zsize ~0x000007ff
-.b32 #ctx_dst_zoff ~0x00000fff
-.b32 #ctx_dst_xoff ~0x0007ffff
-.b32 #ctx_dst_yoff ~0x00001fff
+.b32 ctx_dst_tile_mode ~0x00000fff
+.b32 ctx_dst_xsize ~0x0007ffff
+.b32 ctx_dst_ysize ~0x00001fff
+.b32 ctx_dst_zsize ~0x000007ff
+.b32 ctx_dst_zoff ~0x00000fff
+.b32 ctx_dst_xoff ~0x0007ffff
+.b32 ctx_dst_yoff ~0x00001fff
// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
.b16 0xc0 2
-.b32 0x00010000 + #cmd_exec ~0xffffffff
-.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff
+.b32 0x00010000 + cmd_exec ~0xffffffff
+.b32 0x00010000 + cmd_wrcache_flush ~0xffffffff
// mthd 0x030c-0x0340, various stuff
.b16 0xc3 14
-.b32 #ctx_src_address_high ~0x000000ff
-.b32 #ctx_src_address_low ~0xfffffff0
-.b32 #ctx_dst_address_high ~0x000000ff
-.b32 #ctx_dst_address_low ~0xfffffff0
-.b32 #ctx_src_pitch ~0x0007ffff
-.b32 #ctx_dst_pitch ~0x0007ffff
-.b32 #ctx_xcnt ~0x0000ffff
-.b32 #ctx_ycnt ~0x00001fff
-.b32 #ctx_format ~0x0333ffff
-.b32 #ctx_swz_const0 ~0xffffffff
-.b32 #ctx_swz_const1 ~0xffffffff
-.b32 #ctx_query_address_high ~0x000000ff
-.b32 #ctx_query_address_low ~0xffffffff
-.b32 #ctx_query_counter ~0xffffffff
+.b32 ctx_src_address_high ~0x000000ff
+.b32 ctx_src_address_low ~0xfffffff0
+.b32 ctx_dst_address_high ~0x000000ff
+.b32 ctx_dst_address_low ~0xfffffff0
+.b32 ctx_src_pitch ~0x0007ffff
+.b32 ctx_dst_pitch ~0x0007ffff
+.b32 ctx_xcnt ~0x0000ffff
+.b32 ctx_ycnt ~0x00001fff
+.b32 ctx_format ~0x0333ffff
+.b32 ctx_swz_const0 ~0xffffffff
+.b32 ctx_swz_const1 ~0xffffffff
+.b32 ctx_query_address_high ~0x000000ff
+.b32 ctx_query_address_low ~0xffffffff
+.b32 ctx_query_counter ~0xffffffff
.b16 0x800 0
ifdef(`NVA3',
-.section #nva3_pcopy_code
-,
-.section #nvc0_pcopy_code
+.section nva3_pcopy_code,
+.section nvc0_pcopy_code
)
main:
@@ -145,12 +143,12 @@ main:
mov $sp $r0
// setup i0 handler and route fifo and ctxswitch to it
- mov $r1 #ih
+ mov $r1 ih
mov $iv0 $r1
mov $r1 0x400
movw $r2 0xfff3
sethi $r2 0
- iowr I[$r1 + 0x300] $r2
+ iowr I[$r2 + 0x300] $r2
// enable interrupts
or $r2 0xc
@@ -166,19 +164,19 @@ main:
bset $flags $p0
spin:
sleep $p0
- bra #spin
+ bra spin
// i0 handler
ih:
iord $r1 I[$r0 + 0x200]
and $r2 $r1 0x00000008
- bra e #ih_no_chsw
- call #chsw
+ bra e ih_no_chsw
+ call chsw
ih_no_chsw:
and $r2 $r1 0x00000004
- bra e #ih_no_cmd
- call #dispatch
+ bra e ih_no_cmd
+ call dispatch
ih_no_cmd:
and $r1 $r1 0x0000000c
@@ -237,9 +235,9 @@ ifdef(`NVA3', `
sethi $r4 0x60000
// swap!
- bra $p1 #swctx_load
+ bra $p1 swctx_load
xdst $r0 $r4
- bra #swctx_done
+ bra swctx_done
swctx_load:
xdld $r0 $r4
swctx_done:
@@ -253,9 +251,9 @@ chsw:
// if it's active, unload it and return
xbit $r15 $r3 0x1e
- bra e #chsw_no_unload
+ bra e chsw_no_unload
bclr $flags $p1
- call #swctx
+ call swctx
bclr $r3 0x1e
iowr I[$r2] $r3
mov $r4 1
@@ -268,20 +266,20 @@ chsw:
// is there a channel waiting to be loaded?
xbit $r13 $r3 0x1e
- bra e #chsw_finish_load
+ bra e chsw_finish_load
bset $flags $p1
- call #swctx
+ call swctx
ifdef(`NVA3',
// load dma objects back into TARGET regs
- mov $r5 #ctx_dma
- mov $r6 #ctx_dma_count
+ mov $r5 ctx_dma
+ mov $r6 ctx_dma_count
chsw_load_ctx_dma:
ld b32 $r7 D[$r5 + $r6 * 4]
add b32 $r8 $r6 0x180
shl b32 $r8 8
iowr I[$r8] $r7
sub b32 $r6 1
- bra nc #chsw_load_ctx_dma
+ bra nc chsw_load_ctx_dma
,)
chsw_finish_load:
@@ -299,7 +297,7 @@ dispatch:
shl b32 $r2 0x10
// lookup method in the dispatch table, ILLEGAL_MTHD if not found
- mov $r5 #dispatch_table
+ mov $r5 dispatch_table
clear b32 $r6
clear b32 $r7
dispatch_loop:
@@ -307,14 +305,14 @@ dispatch:
ld b16 $r7 D[$r5 + 2]
add b32 $r5 4
cmpu b32 $r4 $r6
- bra c #dispatch_illegal_mthd
+ bra c dispatch_illegal_mthd
add b32 $r7 $r6
cmpu b32 $r4 $r7
- bra c #dispatch_valid_mthd
+ bra c dispatch_valid_mthd
sub b32 $r7 $r6
shl b32 $r7 3
add b32 $r5 $r7
- bra #dispatch_loop
+ bra dispatch_loop
// ensure no bits set in reserved fields, INVALID_BITFIELD
dispatch_valid_mthd:
@@ -324,20 +322,20 @@ dispatch:
ld b32 $r5 D[$r4 + 4]
and $r5 $r3
cmpu b32 $r5 0
- bra ne #dispatch_invalid_bitfield
+ bra ne dispatch_invalid_bitfield
// depending on dispatch flags: execute method, or save data as state
ld b16 $r5 D[$r4 + 0]
ld b16 $r6 D[$r4 + 2]
cmpu b32 $r6 0
- bra ne #dispatch_cmd
+ bra ne dispatch_cmd
st b32 D[$r5] $r3
- bra #dispatch_done
+ bra dispatch_done
dispatch_cmd:
bclr $flags $p1
call $r5
- bra $p1 #dispatch_error
- bra #dispatch_done
+ bra $p1 dispatch_error
+ bra dispatch_done
dispatch_invalid_bitfield:
or $r2 2
@@ -355,7 +353,7 @@ dispatch:
iord $r2 I[$r0 + 0x200]
and $r2 0x40
cmpu b32 $r2 0
- bra ne #hostirq_wait
+ bra ne hostirq_wait
dispatch_done:
mov $r2 0x1d00
@@ -411,10 +409,10 @@ ifdef(`NVA3',
// $r2: hostirq state
// $r3: data
cmd_dma:
- sub b32 $r4 #dispatch_dma
+ sub b32 $r4 dispatch_dma
shr b32 $r4 1
bset $r3 0x1e
- st b32 D[$r4 + #ctx_dma] $r3
+ st b32 D[$r4 + ctx_dma] $r3
add b32 $r4 0x600
shl b32 $r4 6
iowr I[$r4] $r3
@@ -432,7 +430,7 @@ cmd_exec_set_format:
st b32 D[$sp + 0x0c] $r0
// extract cpp, src_ncomp and dst_ncomp from FORMAT
- ld b32 $r4 D[$r0 + #ctx_format]
+ ld b32 $r4 D[$r0 + ctx_format]
extr $r5 $r4 16:17
add b32 $r5 1
extr $r6 $r4 20:21
@@ -450,22 +448,22 @@ cmd_exec_set_format:
clear b32 $r11
bpc_loop:
cmpu b8 $r10 4
- bra nc #cmp_c0
+ bra nc cmp_c0
mulu $r12 $r10 $r5
add b32 $r12 $r11
bset $flags $p2
- bra #bpc_next
+ bra bpc_next
cmp_c0:
- bra ne #cmp_c1
+ bra ne cmp_c1
mov $r12 0x10
add b32 $r12 $r11
- bra #bpc_next
+ bra bpc_next
cmp_c1:
cmpu b8 $r10 6
- bra nc #cmp_zero
+ bra nc cmp_zero
mov $r12 0x14
add b32 $r12 $r11
- bra #bpc_next
+ bra bpc_next
cmp_zero:
mov $r12 0x80
bpc_next:
@@ -473,22 +471,22 @@ cmd_exec_set_format:
add b32 $r8 1
add b32 $r11 1
cmpu b32 $r11 $r5
- bra c #bpc_loop
+ bra c bpc_loop
add b32 $r9 1
cmpu b32 $r9 $r7
- bra c #ncomp_loop
+ bra c ncomp_loop
// SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
mulu $r6 $r5
- st b32 D[$r0 + #ctx_src_cpp] $r6
- ld b32 $r8 D[$r0 + #ctx_xcnt]
+ st b32 D[$r0 + ctx_src_cpp] $r6
+ ld b32 $r8 D[$r0 + ctx_xcnt]
mulu $r6 $r8
- bra $p2 #dst_xcnt
+ bra $p2 dst_xcnt
clear b32 $r6
dst_xcnt:
mulu $r7 $r5
- st b32 D[$r0 + #ctx_dst_cpp] $r7
+ st b32 D[$r0 + ctx_dst_cpp] $r7
mulu $r7 $r8
mov $r5 0x810
@@ -496,10 +494,10 @@ cmd_exec_set_format:
iowr I[$r5 + 0x000] $r6
iowr I[$r5 + 0x100] $r7
add b32 $r5 0x800
- ld b32 $r6 D[$r0 + #ctx_dst_cpp]
+ ld b32 $r6 D[$r0 + ctx_dst_cpp]
sub b32 $r6 1
shl b32 $r6 8
- ld b32 $r7 D[$r0 + #ctx_src_cpp]
+ ld b32 $r7 D[$r0 + ctx_src_cpp]
sub b32 $r7 1
or $r6 $r7
iowr I[$r5 + 0x000] $r6
@@ -513,9 +511,9 @@ cmd_exec_set_format:
ld b32 $r6 D[$sp + 0x0c]
iowr I[$r5 + 0x300] $r6
add b32 $r5 0x400
- ld b32 $r6 D[$r0 + #ctx_swz_const0]
+ ld b32 $r6 D[$r0 + ctx_swz_const0]
iowr I[$r5 + 0x000] $r6
- ld b32 $r6 D[$r0 + #ctx_swz_const1]
+ ld b32 $r6 D[$r0 + ctx_swz_const1]
iowr I[$r5 + 0x100] $r6
add $sp 0x10
ret
@@ -545,7 +543,7 @@ cmd_exec_set_format:
//
cmd_exec_set_surface_tiled:
// translate TILE_MODE into Tp, Th, Td shift values
- ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
+ ld b32 $r7 D[$r5 + ctx_src_tile_mode]
extr $r9 $r7 8:11
extr $r8 $r7 4:7
ifdef(`NVA3',
@@ -555,9 +553,9 @@ ifdef(`NVA3',
)
extr $r7 $r7 0:3
cmp b32 $r7 0xe
- bra ne #xtile64
+ bra ne xtile64
mov $r7 4
- bra #xtileok
+ bra xtileok
xtile64:
xbit $r7 $flags $p2
add b32 $r7 17
@@ -567,8 +565,8 @@ ifdef(`NVA3',
// Op = (x * cpp) & ((1 << Tp) - 1)
// Tx = (x * cpp) >> Tp
- ld b32 $r10 D[$r5 + #ctx_src_xoff]
- ld b32 $r11 D[$r5 + #ctx_src_cpp]
+ ld b32 $r10 D[$r5 + ctx_src_xoff]
+ ld b32 $r11 D[$r5 + ctx_src_cpp]
mulu $r10 $r11
mov $r11 1
shl b32 $r11 $r7
@@ -578,7 +576,7 @@ ifdef(`NVA3',
// Tyo = y & ((1 << Th) - 1)
// Ty = y >> Th
- ld b32 $r13 D[$r5 + #ctx_src_yoff]
+ ld b32 $r13 D[$r5 + ctx_src_yoff]
mov $r14 1
shl b32 $r14 $r8
sub b32 $r14 1
@@ -600,8 +598,8 @@ ifdef(`NVA3',
add b32 $r12 $r11
// nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp)
- ld b32 $r15 D[$r5 + #ctx_src_xsize]
- ld b32 $r11 D[$r5 + #ctx_src_cpp]
+ ld b32 $r15 D[$r5 + ctx_src_xsize]
+ ld b32 $r11 D[$r5 + ctx_src_cpp]
mulu $r15 $r11
mov $r11 1
shl b32 $r11 $r7
@@ -611,7 +609,7 @@ ifdef(`NVA3',
push $r15
// nTy = (h + ((1 << Th) - 1)) >> Th
- ld b32 $r15 D[$r5 + #ctx_src_ysize]
+ ld b32 $r15 D[$r5 + ctx_src_ysize]
mov $r11 1
shl b32 $r11 $r8
sub b32 $r11 1
@@ -631,7 +629,7 @@ ifdef(`NVA3',
// Tz = z >> Td
// Op += Tzo << Tys
// Ts = Tys + Td
- ld b32 $r8 D[$r5 + #ctx_src_zoff]
+ ld b32 $r8 D[$r5 + ctx_src_zoff]
mov $r14 1
shl b32 $r14 $r9
sub b32 $r14 1
@@ -658,8 +656,8 @@ ifdef(`NVA3',
// SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff
// CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
- ld b32 $r7 D[$r5 + #ctx_src_address_low]
- ld b32 $r8 D[$r5 + #ctx_src_address_high]
+ ld b32 $r7 D[$r5 + ctx_src_address_low]
+ ld b32 $r8 D[$r5 + ctx_src_address_high]
add b32 $r10 $r12
add b32 $r7 $r10
adc b32 $r8 0
@@ -679,14 +677,14 @@ cmd_exec_set_surface_linear:
xbit $r6 $flags $p2
add b32 $r6 0x202
shl b32 $r6 8
- ld b32 $r7 D[$r5 + #ctx_src_address_low]
+ ld b32 $r7 D[$r5 + ctx_src_address_low]
iowr I[$r6 + 0x000] $r7
add b32 $r6 0x400
- ld b32 $r7 D[$r5 + #ctx_src_address_high]
+ ld b32 $r7 D[$r5 + ctx_src_address_high]
shl b32 $r7 16
iowr I[$r6 + 0x000] $r7
add b32 $r6 0x400
- ld b32 $r7 D[$r5 + #ctx_src_pitch]
+ ld b32 $r7 D[$r5 + ctx_src_pitch]
iowr I[$r6 + 0x000] $r7
ret
@@ -699,7 +697,7 @@ cmd_exec_wait:
loop:
iord $r1 I[$r0]
and $r1 1
- bra ne #loop
+ bra ne loop
pop $r1
pop $r0
ret
@@ -707,18 +705,18 @@ cmd_exec_wait:
cmd_exec_query:
// if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
xbit $r4 $r3 13
- bra ne #query_counter
- call #cmd_exec_wait
+ bra ne query_counter
+ call cmd_exec_wait
mov $r4 0x80c
shl b32 $r4 6
- ld b32 $r5 D[$r0 + #ctx_query_address_low]
+ ld b32 $r5 D[$r0 + ctx_query_address_low]
add b32 $r5 4
iowr I[$r4 + 0x000] $r5
iowr I[$r4 + 0x100] $r0
mov $r5 0xc
iowr I[$r4 + 0x200] $r5
add b32 $r4 0x400
- ld b32 $r5 D[$r0 + #ctx_query_address_high]
+ ld b32 $r5 D[$r0 + ctx_query_address_high]
shl b32 $r5 16
iowr I[$r4 + 0x000] $r5
add b32 $r4 0x500
@@ -743,16 +741,16 @@ cmd_exec_query:
// write COUNTER
query_counter:
- call #cmd_exec_wait
+ call cmd_exec_wait
mov $r4 0x80c
shl b32 $r4 6
- ld b32 $r5 D[$r0 + #ctx_query_address_low]
+ ld b32 $r5 D[$r0 + ctx_query_address_low]
iowr I[$r4 + 0x000] $r5
iowr I[$r4 + 0x100] $r0
mov $r5 0x4
iowr I[$r4 + 0x200] $r5
add b32 $r4 0x400
- ld b32 $r5 D[$r0 + #ctx_query_address_high]
+ ld b32 $r5 D[$r0 + ctx_query_address_high]
shl b32 $r5 16
iowr I[$r4 + 0x000] $r5
add b32 $r4 0x500
@@ -761,7 +759,7 @@ cmd_exec_query:
mov $r5 0x00001110
sethi $r5 0x13120000
iowr I[$r4 + 0x100] $r5
- ld b32 $r5 D[$r0 + #ctx_query_counter]
+ ld b32 $r5 D[$r0 + ctx_query_counter]
add b32 $r4 0x500
iowr I[$r4 + 0x000] $r5
mov $r5 0x00002601
@@ -789,22 +787,22 @@ cmd_exec_query:
// $r2: hostirq state
// $r3: data
cmd_exec:
- call #cmd_exec_wait
+ call cmd_exec_wait
// if format requested, call function to calculate it, otherwise
// fill in cpp/xcnt for both surfaces as if (cpp == 1)
xbit $r15 $r3 0
- bra e #cmd_exec_no_format
- call #cmd_exec_set_format
+ bra e cmd_exec_no_format
+ call cmd_exec_set_format
mov $r4 0x200
- bra #cmd_exec_init_src_surface
+ bra cmd_exec_init_src_surface
cmd_exec_no_format:
mov $r6 0x810
shl b32 $r6 6
mov $r7 1
- st b32 D[$r0 + #ctx_src_cpp] $r7
- st b32 D[$r0 + #ctx_dst_cpp] $r7
- ld b32 $r7 D[$r0 + #ctx_xcnt]
+ st b32 D[$r0 + ctx_src_cpp] $r7
+ st b32 D[$r0 + ctx_dst_cpp] $r7
+ ld b32 $r7 D[$r0 + ctx_xcnt]
iowr I[$r6 + 0x000] $r7
iowr I[$r6 + 0x100] $r7
clear b32 $r4
@@ -813,28 +811,28 @@ cmd_exec:
bclr $flags $p2
clear b32 $r5
xbit $r15 $r3 4
- bra e #src_tiled
- call #cmd_exec_set_surface_linear
- bra #cmd_exec_init_dst_surface
+ bra e src_tiled
+ call cmd_exec_set_surface_linear
+ bra cmd_exec_init_dst_surface
src_tiled:
- call #cmd_exec_set_surface_tiled
+ call cmd_exec_set_surface_tiled
bset $r4 7
cmd_exec_init_dst_surface:
bset $flags $p2
- mov $r5 #ctx_dst_address_high - #ctx_src_address_high
+ mov $r5 ctx_dst_address_high - ctx_src_address_high
xbit $r15 $r3 8
- bra e #dst_tiled
- call #cmd_exec_set_surface_linear
- bra #cmd_exec_kick
+ bra e dst_tiled
+ call cmd_exec_set_surface_linear
+ bra cmd_exec_kick
dst_tiled:
- call #cmd_exec_set_surface_tiled
+ call cmd_exec_set_surface_tiled
bset $r4 8
cmd_exec_kick:
mov $r5 0x800
shl b32 $r5 6
- ld b32 $r6 D[$r0 + #ctx_ycnt]
+ ld b32 $r6 D[$r0 + ctx_ycnt]
iowr I[$r5 + 0x100] $r6
mov $r6 0x0041
// SRC_TARGET = 1, DST_TARGET = 2
@@ -844,8 +842,8 @@ cmd_exec:
// if requested, queue up a QUERY write after the copy has completed
xbit $r15 $r3 12
- bra e #cmd_exec_done
- call #cmd_exec_query
+ bra e cmd_exec_done
+ call cmd_exec_query
cmd_exec_done:
ret
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
index 1f33fbd..2731de2 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
@@ -152,7 +152,7 @@ uint32_t nva3_pcopy_code[] = {
0xf10010fe,
0xf1040017,
0xf0fff327,
- 0x12d00023,
+ 0x22d00023,
0x0c25f0c0,
0xf40012d0,
0x17f11031,
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 9e636e6..e4b2b9e 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -27,320 +27,178 @@
#include "nouveau_bios.h"
#include "nouveau_pm.h"
-static u32 read_clk(struct drm_device *, int, bool);
-static u32 read_pll(struct drm_device *, int, u32);
-
-static u32
-read_vco(struct drm_device *dev, int clk)
-{
- u32 sctl = nv_rd32(dev, 0x4120 + (clk * 4));
- if ((sctl & 0x00000030) != 0x00000030)
- return read_pll(dev, 0x41, 0x00e820);
- return read_pll(dev, 0x42, 0x00e8a0);
-}
-
-static u32
-read_clk(struct drm_device *dev, int clk, bool ignore_en)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 sctl, sdiv, sclk;
-
- /* refclk for the 0xe8xx plls is a fixed frequency */
- if (clk >= 0x40) {
- if (dev_priv->chipset == 0xaf) {
- /* no joke.. seriously.. sigh.. */
- return nv_rd32(dev, 0x00471c) * 1000;
- }
-
- return dev_priv->crystal;
- }
-
- sctl = nv_rd32(dev, 0x4120 + (clk * 4));
- if (!ignore_en && !(sctl & 0x00000100))
- return 0;
-
- switch (sctl & 0x00003000) {
- case 0x00000000:
- return dev_priv->crystal;
- case 0x00002000:
- if (sctl & 0x00000040)
- return 108000;
- return 100000;
- case 0x00003000:
- sclk = read_vco(dev, clk);
- sdiv = ((sctl & 0x003f0000) >> 16) + 2;
- return (sclk * 2) / sdiv;
- default:
- return 0;
- }
-}
-
-static u32
-read_pll(struct drm_device *dev, int clk, u32 pll)
-{
- u32 ctrl = nv_rd32(dev, pll + 0);
- u32 sclk = 0, P = 1, N = 1, M = 1;
-
- if (!(ctrl & 0x00000008)) {
- if (ctrl & 0x00000001) {
- u32 coef = nv_rd32(dev, pll + 4);
- M = (coef & 0x000000ff) >> 0;
- N = (coef & 0x0000ff00) >> 8;
- P = (coef & 0x003f0000) >> 16;
-
- /* no post-divider on these.. */
- if ((pll & 0x00ff00) == 0x00e800)
- P = 1;
-
- sclk = read_clk(dev, 0x00 + clk, false);
- }
- } else {
- sclk = read_clk(dev, 0x10 + clk, false);
- }
-
- return sclk * N / (M * P);
-}
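/* Worked example for the readback above, on an assumed register
 * value: coef = 0x00022e01 decodes as M = 1, N = 0x2e (46), P = 2,
 * so a 27000kHz reference yields 27000 * 46 / (1 * 2) = 621000kHz.
 * For the 0xe8xx PLLs, P is forced to 1 as noted in the code.
 */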
+/* This is actually a lot more complex than it appears here, but hopefully
+ * this should be able to deal with what the VBIOS leaves for us..
+ *
+ * If not, well, I'll jump off that bridge when I come to it.
+ */
-struct creg {
- u32 clk;
- u32 pll;
+struct nva3_pm_state {
+ enum pll_types type;
+ u32 src0;
+ u32 src1;
+ u32 ctrl;
+ u32 coef;
+ u32 old_pnm;
+ u32 new_pnm;
+ u32 new_div;
};
static int
-calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
+nva3_pm_pll_offset(u32 id)
{
- struct pll_lims limits;
- u32 oclk, sclk, sdiv;
- int P, N, M, diff;
- int ret;
-
- reg->pll = 0;
- reg->clk = 0;
- if (!khz) {
- NV_DEBUG(dev, "no clock for 0x%04x/0x%02x\n", pll, clk);
- return 0;
+ static const u32 pll_map[] = {
+ 0x00, PLL_CORE,
+ 0x01, PLL_SHADER,
+ 0x02, PLL_MEMORY,
+ 0x00, 0x00
+ };
+ const u32 *map = pll_map;
+
+ while (map[1]) {
+ if (id == map[1])
+ return map[0];
+ map += 2;
}
- switch (khz) {
- case 27000:
- reg->clk = 0x00000100;
- return khz;
- case 100000:
- reg->clk = 0x00002100;
- return khz;
- case 108000:
- reg->clk = 0x00002140;
- return khz;
- default:
- sclk = read_vco(dev, clk);
- sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
- /* if the clock has a PLL attached, and we can get a within
- * [-2, 3) MHz of a divider, we'll disable the PLL and use
- * the divider instead.
- *
- * divider can go as low as 2, limited here because NVIDIA
- * and the VBIOS on my NVA8 seem to prefer using the PLL
- * for 810MHz - is there a good reason?
- */
- if (sdiv > 4) {
- oclk = (sclk * 2) / sdiv;
- diff = khz - oclk;
- if (!pll || (diff >= -2000 && diff < 3000)) {
- reg->clk = (((sdiv - 2) << 16) | 0x00003100);
- return oclk;
- }
- }
-
- if (!pll) {
- NV_ERROR(dev, "bad freq %02x: %d %d\n", clk, khz, sclk);
- return -ERANGE;
- }
+ return -ENOENT;
+}
- break;
- }
+int
+nva3_pm_clock_get(struct drm_device *dev, u32 id)
+{
+ u32 src0, src1, ctrl, coef;
+ struct pll_lims pll;
+ int ret, off;
+ int P, N, M;
- ret = get_pll_limits(dev, pll, &limits);
+ ret = get_pll_limits(dev, id, &pll);
if (ret)
return ret;
- limits.refclk = read_clk(dev, clk - 0x10, true);
- if (!limits.refclk)
- return -EINVAL;
-
- ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
- if (ret >= 0) {
- reg->clk = nv_rd32(dev, 0x4120 + (clk * 4));
- reg->pll = (P << 16) | (N << 8) | M;
- }
- return ret;
-}
-
-static void
-prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
-{
- const u32 src0 = 0x004120 + (clk * 4);
- const u32 src1 = 0x004160 + (clk * 4);
- const u32 ctrl = pll + 0;
- const u32 coef = pll + 4;
- u32 cntl;
-
- if (!reg->clk && !reg->pll) {
- NV_DEBUG(dev, "no clock for %02x\n", clk);
- return;
- }
+ off = nva3_pm_pll_offset(id);
+ if (off < 0)
+ return off;
- cntl = nv_rd32(dev, ctrl) & 0xfffffff2;
- if (reg->pll) {
- nv_mask(dev, src0, 0x00000101, 0x00000101);
- nv_wr32(dev, coef, reg->pll);
- nv_wr32(dev, ctrl, cntl | 0x00000015);
- nv_mask(dev, src1, 0x00000100, 0x00000000);
- nv_mask(dev, src1, 0x00000001, 0x00000000);
- } else {
- nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
- nv_wr32(dev, ctrl, cntl | 0x0000001d);
- nv_mask(dev, ctrl, 0x00000001, 0x00000000);
- nv_mask(dev, src0, 0x00000100, 0x00000000);
- nv_mask(dev, src0, 0x00000001, 0x00000000);
- }
-}
+ src0 = nv_rd32(dev, 0x4120 + (off * 4));
+ src1 = nv_rd32(dev, 0x4160 + (off * 4));
+ ctrl = nv_rd32(dev, pll.reg + 0);
+ coef = nv_rd32(dev, pll.reg + 4);
+ NV_DEBUG(dev, "PLL %02x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ id, src0, src1, ctrl, coef);
-static void
-prog_clk(struct drm_device *dev, int clk, struct creg *reg)
-{
- if (!reg->clk) {
- NV_DEBUG(dev, "no clock for %02x\n", clk);
- return;
+ if (ctrl & 0x00000008) {
+ u32 div = ((src1 & 0x003c0000) >> 18) + 1;
+ return (pll.refclk * 2) / div;
}
- nv_mask(dev, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
+ P = (coef & 0x003f0000) >> 16;
+ N = (coef & 0x0000ff00) >> 8;
+ M = (coef & 0x000000ff);
+ return pll.refclk * N / M / P;
}
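/* The restored readback uses the same formula: an assumed coef of
 * 0x00022e01 (P = 2, N = 46, M = 1) on a 27000kHz refclk reports
 * 621000kHz, while the ctrl bit-3 bypass path with a src1 divider
 * field of 3 (div = 4) reports pll.refclk * 2 / 4 instead.
 */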
-int
-nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- perflvl->core = read_pll(dev, 0x00, 0x4200);
- perflvl->shader = read_pll(dev, 0x01, 0x4220);
- perflvl->memory = read_pll(dev, 0x02, 0x4000);
- perflvl->unka0 = read_clk(dev, 0x20, false);
- perflvl->vdec = read_clk(dev, 0x21, false);
- perflvl->daemon = read_clk(dev, 0x25, false);
- perflvl->copy = perflvl->core;
- return 0;
-}
-
-struct nva3_pm_state {
- struct creg nclk;
- struct creg sclk;
- struct creg mclk;
- struct creg vdec;
- struct creg unka0;
-};
-
void *
-nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ u32 id, int khz)
{
- struct nva3_pm_state *info;
- int ret;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
+ struct nva3_pm_state *pll;
+ struct pll_lims limits;
+ int N, M, P, diff;
+ int ret, off;
- ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk);
+ ret = get_pll_limits(dev, id, &limits);
if (ret < 0)
- goto out;
+ return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
- ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk);
- if (ret < 0)
- goto out;
+ off = nva3_pm_pll_offset(id);
+ if (off < 0)
+ return ERR_PTR(-EINVAL);
- ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk);
- if (ret < 0)
- goto out;
- ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0);
- if (ret < 0)
- goto out;
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+ pll->type = id;
+ pll->src0 = 0x004120 + (off * 4);
+ pll->src1 = 0x004160 + (off * 4);
+ pll->ctrl = limits.reg + 0;
+ pll->coef = limits.reg + 4;
+
+ /* If target clock is within [-2, 3) MHz of a divisor, we'll
+ * use that instead of calculating MNP values
+ */
+ pll->new_div = min((limits.refclk * 2) / (khz - 2999), 16);
+ if (pll->new_div) {
+ diff = khz - ((limits.refclk * 2) / pll->new_div);
+ if (diff < -2000 || diff >= 3000)
+ pll->new_div = 0;
+ }
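/* A worked pass of the divider shortcut above, assuming
 * limits.refclk = 100000 and khz = 50000:
 *   new_div = min(200000 / 47001, 16) = 4
 *   diff    = 50000 - 200000 / 4      = 0
 * which lands inside the [-2000, 3000) window, so the PLL is
 * bypassed and only the divider path is programmed later.
 */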
- ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec);
- if (ret < 0)
- goto out;
+ if (!pll->new_div) {
+ ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
+ if (ret < 0)
+ return ERR_PTR(ret);
-out:
- if (ret < 0) {
- kfree(info);
- info = ERR_PTR(ret);
+ pll->new_pnm = (P << 16) | (N << 8) | M;
+ pll->new_div = 2 - 1;
+ } else {
+ pll->new_pnm = 0;
+ pll->new_div--;
}
- return info;
-}
-static bool
-nva3_pm_grcp_idle(void *data)
-{
- struct drm_device *dev = data;
-
- if (!(nv_rd32(dev, 0x400304) & 0x00000001))
- return true;
- if (nv_rd32(dev, 0x400308) == 0x0050001c)
- return true;
- return false;
+ if ((nv_rd32(dev, pll->src1) & 0x00000101) != 0x00000101)
+ pll->old_pnm = nv_rd32(dev, pll->coef);
+ return pll;
}
-int
-nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
+void
+nva3_pm_clock_set(struct drm_device *dev, void *pre_state)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nva3_pm_state *info = pre_state;
- unsigned long flags;
- int ret = -EAGAIN;
+ struct nva3_pm_state *pll = pre_state;
+ u32 ctrl = 0;
- /* prevent any new grctx switches from starting */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_wr32(dev, 0x400324, 0x00000000);
- nv_wr32(dev, 0x400328, 0x0050001c); /* wait flag 0x1c */
- /* wait for any pending grctx switches to complete */
- if (!nv_wait_cb(dev, nva3_pm_grcp_idle, dev)) {
- NV_ERROR(dev, "pm: ctxprog didn't go idle\n");
- goto cleanup;
+ /* For the memory clock, NVIDIA will build a "script" describing
+ * the reclocking process and ask PDAEMON to execute it.
+ */
+ if (pll->type == PLL_MEMORY) {
+ nv_wr32(dev, 0x100210, 0);
+ nv_wr32(dev, 0x1002dc, 1);
+ nv_wr32(dev, 0x004018, 0x00001000);
+ ctrl = 0x18000100;
}
- /* freeze PFIFO */
- nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
- if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) {
- NV_ERROR(dev, "pm: fifo didn't go idle\n");
- goto cleanup;
+
+ if (pll->old_pnm || !pll->new_pnm) {
+ nv_mask(dev, pll->src1, 0x003c0101, 0x00000101 |
+ (pll->new_div << 18));
+ nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl);
+ nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000);
}
- prog_pll(dev, 0x00, 0x004200, &info->nclk);
- prog_pll(dev, 0x01, 0x004220, &info->sclk);
- prog_clk(dev, 0x20, &info->unka0);
- prog_clk(dev, 0x21, &info->vdec);
+ if (pll->new_pnm) {
+ nv_mask(dev, pll->src0, 0x00000101, 0x00000101);
+ nv_wr32(dev, pll->coef, pll->new_pnm);
+ nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl);
+ nv_mask(dev, pll->ctrl, 0x00000010, 0x00000000);
+ nv_mask(dev, pll->ctrl, 0x00020010, 0x00020010);
+ nv_wr32(dev, pll->ctrl, 0x00010015 | ctrl);
+ nv_mask(dev, pll->src1, 0x00000100, 0x00000000);
+ nv_mask(dev, pll->src1, 0x00000001, 0x00000000);
+ if (pll->type == PLL_MEMORY)
+ nv_wr32(dev, 0x4018, 0x10005000);
+ } else {
+ nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000);
+ nv_mask(dev, pll->src0, 0x00000100, 0x00000000);
+ nv_mask(dev, pll->src0, 0x00000001, 0x00000000);
+ if (pll->type == PLL_MEMORY)
+ nv_wr32(dev, 0x4018, 0x1000d000);
+ }
- if (info->mclk.clk || info->mclk.pll) {
- nv_wr32(dev, 0x100210, 0);
- nv_wr32(dev, 0x1002dc, 1);
- nv_wr32(dev, 0x004018, 0x00001000);
- prog_pll(dev, 0x02, 0x004000, &info->mclk);
- if (nv_rd32(dev, 0x4000) & 0x00000008)
- nv_wr32(dev, 0x004018, 0x1000d000);
- else
- nv_wr32(dev, 0x004018, 0x10005000);
+ if (pll->type == PLL_MEMORY) {
nv_wr32(dev, 0x1002dc, 0);
nv_wr32(dev, 0x100210, 0x80000000);
}
- ret = 0;
-
-cleanup:
- /* unfreeze PFIFO */
- nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
- /* restore ctxprog to normal */
- nv_wr32(dev, 0x400324, 0x00000000);
- nv_wr32(dev, 0x400328, 0x0070009c); /* set flag 0x1c */
- /* unblock it if necessary */
- if (nv_rd32(dev, 0x400308) == 0x0050001c)
- nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- kfree(info);
- return ret;
+ kfree(pll);
}
+
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c
index dddf006..208fa7a 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.c
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.c
@@ -48,14 +48,14 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *ctx = NULL;
int ret;
- ret = nouveau_gpuobj_new(dev, chan, 256, 256,
+ ret = nouveau_gpuobj_new(dev, NULL, 256, 256,
NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
NVOBJ_FLAG_ZERO_ALLOC, &ctx);
if (ret)
return ret;
- nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
- nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
+ nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->vinst));
+ nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->vinst));
dev_priv->engine.instmem.flush(dev);
chan->engctx[engine] = ctx;
@@ -127,7 +127,7 @@ nvc0_copy_init(struct drm_device *dev, int engine)
}
static int
-nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
+nvc0_copy_fini(struct drm_device *dev, int engine)
{
struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
index a8d1745..4199038 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
@@ -145,7 +145,7 @@ uint32_t nvc0_pcopy_code[] = {
0xf10010fe,
0xf1040017,
0xf0fff327,
- 0x12d00023,
+ 0x22d00023,
0x0c25f0c0,
0xf40012d0,
0x17f11031,
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 5bf5503..26a9960 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,107 +23,16 @@
*/
#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-
-struct nvc0_fb_priv {
- struct page *r100c10_page;
- dma_addr_t r100c10;
-};
-
-static inline void
-nvc0_mfb_subp_isr(struct drm_device *dev, int unit, int subp)
-{
- u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
- u32 stat = nv_rd32(dev, subp_base + 0x020);
-
- if (stat) {
- NV_INFO(dev, "PMFB%d_SUBP%d: 0x%08x\n", unit, subp, stat);
- nv_wr32(dev, subp_base + 0x020, stat);
- }
-}
-
-static void
-nvc0_mfb_isr(struct drm_device *dev)
-{
- u32 units = nv_rd32(dev, 0x00017c);
- while (units) {
- u32 subp, unit = ffs(units) - 1;
- for (subp = 0; subp < 2; subp++)
- nvc0_mfb_subp_isr(dev, unit, subp);
- units &= ~(1 << unit);
- }
-}
-
-static void
-nvc0_fb_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nvc0_fb_priv *priv = pfb->priv;
-
- nouveau_irq_unregister(dev, 25);
-
- if (priv->r100c10_page) {
- pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->r100c10_page);
- }
- kfree(priv);
- pfb->priv = NULL;
-}
-
-static int
-nvc0_fb_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nvc0_fb_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- pfb->priv = priv;
-
- priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!priv->r100c10_page) {
- nvc0_fb_destroy(dev);
- return -ENOMEM;
- }
-
- priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
- nvc0_fb_destroy(dev);
- return -EFAULT;
- }
-
- nouveau_irq_register(dev, 25, nvc0_mfb_isr);
- return 0;
-}
+#include "nouveau_drv.h"
int
nvc0_fb_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_fb_priv *priv;
- int ret;
-
- if (!dev_priv->engine.fb.priv) {
- ret = nvc0_fb_create(dev);
- if (ret)
- return ret;
- }
- priv = dev_priv->engine.fb.priv;
-
- nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
return 0;
}
void
nvc0_fb_takedown(struct drm_device *dev)
{
- nvc0_fb_destroy(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index a495e48..fa5d4c2 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -159,7 +159,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+ struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
int ret, format;
ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
@@ -203,8 +203,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
OUT_RING (chan, 0x0000902d);
BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
- OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
- OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
+ OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset));
+ OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset));
BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
OUT_RING (chan, 0);
BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
@@ -249,8 +249,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(fb->vma.offset));
- OUT_RING (chan, lower_32_bits(fb->vma.offset));
+ OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
@@ -260,8 +260,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(fb->vma.offset));
- OUT_RING (chan, lower_32_bits(fb->vma.offset));
+ OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
FIRE_RING (chan);
return 0;
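
Both fbcon hunks swap which object provides the 64-bit GPU address, but the ring format is unchanged: the address always goes out as an upper/lower 32-bit pair via upper_32_bits()/lower_32_bits() from <linux/kernel.h>. A standalone illustration of that split, assuming an arbitrary 40-bit offset:

#include <linux/kernel.h>

/* illustration only: how the OUT_RING address pairs above are formed */
static void split_addr(u64 offset, u32 *hi, u32 *lo)
{
	*hi = upper_32_bits(offset);   /* offset >> 32 */
	*lo = lower_32_bits(offset);   /* offset & 0xffffffff */
	/* e.g. offset 0x1234567800 -> hi 0x00000012, lo 0x34567800 */
}
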
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index dcbe0d5..fb4f594 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -210,10 +210,10 @@ nvc0_fifo_unload_context(struct drm_device *dev)
int i;
for (i = 0; i < 128; i++) {
- if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
+ if (!(nv_rd32(dev, 0x003004 + (i * 4)) & 1))
continue;
- nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
+ nv_mask(dev, 0x003004 + (i * 4), 0x00000001, 0x00000000);
nv_wr32(dev, 0x002634, i);
if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
@@ -322,7 +322,7 @@ nvc0_fifo_init(struct drm_device *dev)
}
/* PSUBFIFO[n] */
- for (i = 0; i < priv->spoon_nr; i++) {
+ for (i = 0; i < 3; i++) {
nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
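
Both nvc0_fifo changes are address-arithmetic reverts: the per-channel enable bits move back to a 4-byte stride from 0x003004, and the PSUBFIFO loop is pinned at three units spaced 0x2000 apart. A quick standalone check of the registers those loops now touch (plain C, nothing driver-specific assumed):

#include <stdio.h>

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("chan[%d] 0x%06x  subfifo[%d] intr 0x%06x en 0x%06x\n",
		       i, 0x003004 + i * 4,
		       i, 0x040108 + i * 0x2000, 0x04010c + i * 0x2000);
	/* chan[0] 0x003004  subfifo[0] intr 0x040108 en 0x04010c
	 * chan[1] 0x003008  subfifo[1] intr 0x042108 en 0x04210c
	 * chan[2] 0x00300c  subfifo[2] intr 0x044108 en 0x04410c */
	return 0;
}
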
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 8ee3963..ca6db20 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -23,40 +23,12 @@
*/
#include <linux/firmware.h>
-#include <linux/module.h>
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
-
#include "nvc0_graph.h"
-#include "nvc0_grhub.fuc.h"
-#include "nvc0_grgpc.fuc.h"
-
-static void
-nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
-{
- NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
- nv_rd32(dev, base + 0x400));
- NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
- nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
- nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
- NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
- nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
- nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
-}
-
-static void
-nvc0_graph_ctxctl_debug(struct drm_device *dev)
-{
- u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
- u32 gpc;
-
- nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
- for (gpc = 0; gpc < gpcnr; gpc++)
- nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
-}
static int
nvc0_graph_load_context(struct nouveau_channel *chan)
@@ -100,44 +72,24 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
if (!ctx)
return -ENOMEM;
- if (!nouveau_ctxfw) {
- nv_wr32(dev, 0x409840, 0x80000000);
- nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
- nv_wr32(dev, 0x409504, 0x00000001);
- if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
- NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
- nvc0_graph_ctxctl_debug(dev);
- ret = -EBUSY;
- goto err;
- }
- } else {
- nvc0_graph_load_context(chan);
-
- nv_wo32(grch->grctx, 0x1c, 1);
- nv_wo32(grch->grctx, 0x20, 0);
- nv_wo32(grch->grctx, 0x28, 0);
- nv_wo32(grch->grctx, 0x2c, 0);
- dev_priv->engine.instmem.flush(dev);
- }
+ nvc0_graph_load_context(chan);
+
+ nv_wo32(grch->grctx, 0x1c, 1);
+ nv_wo32(grch->grctx, 0x20, 0);
+ nv_wo32(grch->grctx, 0x28, 0);
+ nv_wo32(grch->grctx, 0x2c, 0);
+ dev_priv->engine.instmem.flush(dev);
ret = nvc0_grctx_generate(chan);
- if (ret)
- goto err;
-
- if (!nouveau_ctxfw) {
- nv_wr32(dev, 0x409840, 0x80000000);
- nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
- nv_wr32(dev, 0x409504, 0x00000002);
- if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
- NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
- nvc0_graph_ctxctl_debug(dev);
- ret = -EBUSY;
- goto err;
- }
- } else {
- ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
- if (ret)
- goto err;
+ if (ret) {
+ kfree(ctx);
+ return ret;
+ }
+
+ ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
+ if (ret) {
+ kfree(ctx);
+ return ret;
}
for (i = 0; i < priv->grctx_size; i += 4)
@@ -145,10 +97,6 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
priv->grctx_vals = ctx;
return 0;
-
-err:
- kfree(ctx);
- return ret;
}
static int
@@ -157,87 +105,64 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
int i = 0, gpc, tp, ret;
+ u32 magic;
- ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
+ ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM,
&grch->unk408004);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM,
&grch->unk40800c);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
+ ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
&grch->unk418810);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM,
&grch->mmio);
if (ret)
return ret;
nv_wo32(grch->mmio, i++ * 4, 0x00408004);
- nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x00408008);
nv_wo32(grch->mmio, i++ * 4, 0x80000018);
nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
- nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x00408010);
nv_wo32(grch->mmio, i++ * 4, 0x80000000);
nv_wo32(grch->mmio, i++ * 4, 0x00418810);
- nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12);
nv_wo32(grch->mmio, i++ * 4, 0x00419848);
- nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);
+ nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12);
nv_wo32(grch->mmio, i++ * 4, 0x00419004);
- nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x00419008);
nv_wo32(grch->mmio, i++ * 4, 0x00000000);
nv_wo32(grch->mmio, i++ * 4, 0x00418808);
- nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
nv_wo32(grch->mmio, i++ * 4, 0x80000018);
- if (dev_priv->chipset != 0xc1) {
- u32 magic = 0x02180000;
- nv_wo32(grch->mmio, i++ * 4, 0x00405830);
- nv_wo32(grch->mmio, i++ * 4, magic);
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
- u32 reg = TP_UNIT(gpc, tp, 0x520);
- nv_wo32(grch->mmio, i++ * 4, reg);
- nv_wo32(grch->mmio, i++ * 4, magic);
- magic += 0x0324;
- }
- }
- } else {
- u32 magic = 0x02180000;
- nv_wo32(grch->mmio, i++ * 4, 0x00405830);
- nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
- nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
- nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
- u32 reg = TP_UNIT(gpc, tp, 0x520);
- nv_wo32(grch->mmio, i++ * 4, reg);
- nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
- magic += 0x0324;
- }
- for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
- u32 reg = TP_UNIT(gpc, tp, 0x544);
- nv_wo32(grch->mmio, i++ * 4, reg);
- nv_wo32(grch->mmio, i++ * 4, magic);
- magic += 0x0324;
- }
+ magic = 0x02180000;
+ nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) {
+ u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
+ nv_wo32(grch->mmio, i++ * 4, reg);
+ nv_wo32(grch->mmio, i++ * 4, magic);
}
}
@@ -261,7 +186,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
return -ENOMEM;
chan->engctx[NVOBJ_ENGINE_GR] = grch;
- ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
+ ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
&grch->grctx);
if (ret)
@@ -272,8 +197,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
if (ret)
goto error;
- nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
- nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
+ nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4);
+ nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst));
pinstmem->flush(dev);
if (!priv->grctx_vals) {
@@ -285,20 +210,15 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
for (i = 0; i < priv->grctx_size; i += 4)
nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
- if (!nouveau_ctxfw) {
- nv_wo32(grctx, 0x00, grch->mmio_nr);
- nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
- } else {
- nv_wo32(grctx, 0xf4, 0);
- nv_wo32(grctx, 0xf8, 0);
- nv_wo32(grctx, 0x10, grch->mmio_nr);
- nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
- nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
- nv_wo32(grctx, 0x1c, 1);
- nv_wo32(grctx, 0x20, 0);
- nv_wo32(grctx, 0x28, 0);
- nv_wo32(grctx, 0x2c, 0);
- }
+ nv_wo32(grctx, 0xf4, 0);
+ nv_wo32(grctx, 0xf8, 0);
+ nv_wo32(grctx, 0x10, grch->mmio_nr);
+ nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
+ nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
+ nv_wo32(grctx, 0x1c, 1);
+ nv_wo32(grctx, 0x20, 0);
+ nv_wo32(grctx, 0x28, 0);
+ nv_wo32(grctx, 0x2c, 0);
pinstmem->flush(dev);
return 0;
@@ -328,7 +248,7 @@ nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
}
static int
-nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
+nvc0_graph_fini(struct drm_device *dev, int engine)
{
return 0;
}
@@ -376,13 +296,10 @@ static void
nvc0_graph_init_gpc_0(struct drm_device *dev)
{
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
u32 data[TP_MAX / 8];
u8 tpnr[GPC_MAX];
int i, gpc, tpc;
- nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
-
/*
* TP ROP UNKVAL(magic_not_rop_nr)
* 450: 4/0/0/0 2 3
@@ -390,6 +307,13 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
* 465: 3/4/4/0 4 7
* 470: 3/3/4/4 5 5
* 480: 3/4/4/4 6 6
+ *
+ * magicgpc918
+ * 450: 00200000 00000000001000000000000000000000
+ * 460: 00124925 00000000000100100100100100100101
+ * 465: 000ba2e9 00000000000010111010001011101001
+ * 470: 00092493 00000000000010010010010010010011
+ * 480: 00088889 00000000000010001000100010001001
*/
memset(data, 0x00, sizeof(data));
@@ -412,11 +336,11 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
priv->tp_nr[gpc]);
nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918);
}
- nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
- nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
+ nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918);
+ nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
}
static void
@@ -495,51 +419,8 @@ nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
static int
nvc0_graph_init_ctxctl(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
u32 r000260;
- int i;
-
- if (!nouveau_ctxfw) {
- /* load HUB microcode */
- r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
- nv_wr32(dev, 0x4091c0, 0x01000000);
- for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
- nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);
-
- nv_wr32(dev, 0x409180, 0x01000000);
- for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(dev, 0x409188, i >> 6);
- nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
- }
-
- /* load GPC microcode */
- nv_wr32(dev, 0x41a1c0, 0x01000000);
- for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
- nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);
-
- nv_wr32(dev, 0x41a180, 0x01000000);
- for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(dev, 0x41a188, i >> 6);
- nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
- }
- nv_wr32(dev, 0x000260, r000260);
-
- /* start HUB ucode running, it'll init the GPCs */
- nv_wr32(dev, 0x409800, dev_priv->chipset);
- nv_wr32(dev, 0x40910c, 0x00000000);
- nv_wr32(dev, 0x409100, 0x00000002);
- if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
- NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
- nvc0_graph_ctxctl_debug(dev);
- return -EBUSY;
- }
-
- priv->grctx_size = nv_rd32(dev, 0x409804);
- return 0;
- }
/* load fuc microcode */
r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
@@ -647,22 +528,6 @@ nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
}
static void
-nvc0_graph_ctxctl_isr(struct drm_device *dev)
-{
- u32 ustat = nv_rd32(dev, 0x409c18);
-
- if (ustat & 0x00000001)
- NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
- if (ustat & 0x00080000)
- NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
- if (ustat & ~0x00080001)
- NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
-
- nvc0_graph_ctxctl_debug(dev);
- nv_wr32(dev, 0x409c20, ustat);
-}
-
-static void
nvc0_graph_isr(struct drm_device *dev)
{
u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
@@ -713,7 +578,11 @@ nvc0_graph_isr(struct drm_device *dev)
}
if (stat & 0x00080000) {
- nvc0_graph_ctxctl_isr(dev);
+ u32 ustat = nv_rd32(dev, 0x409c18);
+
+ NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
+
+ nv_wr32(dev, 0x409c20, ustat);
nv_wr32(dev, 0x400100, 0x00080000);
stat &= ~0x00080000;
}
@@ -726,6 +595,22 @@ nvc0_graph_isr(struct drm_device *dev)
nv_wr32(dev, 0x400500, 0x00010001);
}
+static void
+nvc0_runk140_isr(struct drm_device *dev)
+{
+ u32 units = nv_rd32(dev, 0x00017c) & 0x1f;
+
+ while (units) {
+ u32 unit = ffs(units) - 1;
+ u32 reg = 0x140000 + unit * 0x2000;
+ u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
+ u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
+
+ NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
+ units &= ~(1 << unit);
+ }
+}
+
static int
nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
struct nvc0_graph_fuc *fuc)
@@ -766,14 +651,13 @@ nvc0_graph_destroy(struct drm_device *dev, int engine)
{
struct nvc0_graph_priv *priv = nv_engine(dev, engine);
- if (nouveau_ctxfw) {
- nvc0_graph_destroy_fw(&priv->fuc409c);
- nvc0_graph_destroy_fw(&priv->fuc409d);
- nvc0_graph_destroy_fw(&priv->fuc41ac);
- nvc0_graph_destroy_fw(&priv->fuc41ad);
- }
+ nvc0_graph_destroy_fw(&priv->fuc409c);
+ nvc0_graph_destroy_fw(&priv->fuc409d);
+ nvc0_graph_destroy_fw(&priv->fuc41ac);
+ nvc0_graph_destroy_fw(&priv->fuc41ad);
nouveau_irq_unregister(dev, 12);
+ nouveau_irq_unregister(dev, 25);
nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
@@ -791,10 +675,13 @@ nvc0_graph_create(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_graph_priv *priv;
int ret, gpc, i;
- u32 fermi;
- fermi = nvc0_graph_class(dev);
- if (!fermi) {
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ case 0xc3:
+ case 0xc4:
+ break;
+ default:
NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
return 0;
}
@@ -812,18 +699,17 @@ nvc0_graph_create(struct drm_device *dev)
NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
nouveau_irq_register(dev, 12, nvc0_graph_isr);
+ nouveau_irq_register(dev, 25, nvc0_runk140_isr);
- if (nouveau_ctxfw) {
- NV_INFO(dev, "PGRAPH: using external firmware\n");
- if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
- nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
- nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
- nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
- ret = 0;
- goto error;
- }
+ if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
+ nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
+ nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
+ nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
+ ret = 0;
+ goto error;
}
+
ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
if (ret)
goto error;
@@ -849,34 +735,25 @@ nvc0_graph_create(struct drm_device *dev)
case 0xc0:
if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
priv->magic_not_rop_nr = 0x07;
+ /* filled values up to tp_total, the rest 0 */
+ priv->magicgpc918 = 0x000ba2e9;
} else
if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
priv->magic_not_rop_nr = 0x05;
+ priv->magicgpc918 = 0x00092493;
} else
if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
priv->magic_not_rop_nr = 0x06;
+ priv->magicgpc918 = 0x00088889;
}
break;
case 0xc3: /* 450, 4/0/0/0, 2 */
priv->magic_not_rop_nr = 0x03;
+ priv->magicgpc918 = 0x00200000;
break;
case 0xc4: /* 460, 3/4/0/0, 4 */
priv->magic_not_rop_nr = 0x01;
- break;
- case 0xc1: /* 2/0/0/0, 1 */
- priv->magic_not_rop_nr = 0x01;
- break;
- case 0xc8: /* 4/4/3/4, 5 */
- priv->magic_not_rop_nr = 0x06;
- break;
- case 0xce: /* 4/4/0/0, 4 */
- priv->magic_not_rop_nr = 0x03;
- break;
- case 0xcf: /* 4/0/0/0, 3 */
- priv->magic_not_rop_nr = 0x03;
- break;
- case 0xd9: /* 1/0/0/0, 1 */
- priv->magic_not_rop_nr = 0x01;
+ priv->magicgpc918 = 0x00124925;
break;
}
@@ -884,17 +761,15 @@ nvc0_graph_create(struct drm_device *dev)
NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
priv->tp_nr[3], priv->rop_nr);
- priv->magic_not_rop_nr = 0x00;
+ /* use 0xc3's values... */
+ priv->magic_not_rop_nr = 0x03;
+ priv->magicgpc918 = 0x00200000;
}
NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
- if (fermi >= 0x9197)
- NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
- if (fermi >= 0x9297)
- NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
return 0;
@@ -902,3 +777,20 @@ error:
nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
return ret;
}
+
+MODULE_FIRMWARE("nouveau/nvc0_fuc409c");
+MODULE_FIRMWARE("nouveau/nvc0_fuc409d");
+MODULE_FIRMWARE("nouveau/nvc0_fuc41ac");
+MODULE_FIRMWARE("nouveau/nvc0_fuc41ad");
+MODULE_FIRMWARE("nouveau/nvc3_fuc409c");
+MODULE_FIRMWARE("nouveau/nvc3_fuc409d");
+MODULE_FIRMWARE("nouveau/nvc3_fuc41ac");
+MODULE_FIRMWARE("nouveau/nvc3_fuc41ad");
+MODULE_FIRMWARE("nouveau/nvc4_fuc409c");
+MODULE_FIRMWARE("nouveau/nvc4_fuc409d");
+MODULE_FIRMWARE("nouveau/nvc4_fuc41ac");
+MODULE_FIRMWARE("nouveau/nvc4_fuc41ad");
+MODULE_FIRMWARE("nouveau/fuc409c");
+MODULE_FIRMWARE("nouveau/fuc409d");
+MODULE_FIRMWARE("nouveau/fuc41ac");
+MODULE_FIRMWARE("nouveau/fuc41ad");
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
deleted file mode 100644
index e6b2288..0000000
--- a/drivers/gpu/drm/nouveau/nvc0_graph.fuc
+++ /dev/null
@@ -1,400 +0,0 @@
-/* fuc microcode util functions for nvc0 PGRAPH
- *
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
-define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
-
-ifdef(`include_code', `
-// Error codes
-define(`E_BAD_COMMAND', 0x01)
-define(`E_CMD_OVERFLOW', 0x02)
-
-// Util macros to help with debugging ucode hangs etc
-define(`T_WAIT', 0)
-define(`T_MMCTX', 1)
-define(`T_STRWAIT', 2)
-define(`T_STRINIT', 3)
-define(`T_AUTO', 4)
-define(`T_CHAN', 5)
-define(`T_LOAD', 6)
-define(`T_SAVE', 7)
-define(`T_LCHAN', 8)
-define(`T_LCTXH', 9)
-
-define(`trace_set', `
- mov $r8 0x83c
- shl b32 $r8 6
- clear b32 $r9
- bset $r9 $1
- iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
-')
-
-define(`trace_clr', `
- mov $r8 0x85c
- shl b32 $r8 6
- clear b32 $r9
- bset $r9 $1
- iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
-')
-
-// queue_put - add request to queue
-//
-// In : $r13 queue pointer
-// $r14 command
-// $r15 data
-//
-queue_put:
- // make sure we have space..
- ld b32 $r8 D[$r13 + 0x0] // GET
- ld b32 $r9 D[$r13 + 0x4] // PUT
- xor $r8 8
- cmpu b32 $r8 $r9
- bra ne #queue_put_next
- mov $r15 E_CMD_OVERFLOW
- call #error
- ret
-
- // store cmd/data on queue
- queue_put_next:
- and $r8 $r9 7
- shl b32 $r8 3
- add b32 $r8 $r13
- add b32 $r8 8
- st b32 D[$r8 + 0x0] $r14
- st b32 D[$r8 + 0x4] $r15
-
- // update PUT
- add b32 $r9 1
- and $r9 0xf
- st b32 D[$r13 + 0x4] $r9
- ret
-
-// queue_get - fetch request from queue
-//
-// In : $r13 queue pointer
-//
-// Out: $p1 clear on success (data available)
-// $r14 command
-// $r15 data
-//
-queue_get:
- bset $flags $p1
- ld b32 $r8 D[$r13 + 0x0] // GET
- ld b32 $r9 D[$r13 + 0x4] // PUT
- cmpu b32 $r8 $r9
- bra e #queue_get_done
- // fetch first cmd/data pair
- and $r9 $r8 7
- shl b32 $r9 3
- add b32 $r9 $r13
- add b32 $r9 8
- ld b32 $r14 D[$r9 + 0x0]
- ld b32 $r15 D[$r9 + 0x4]
-
- // update GET
- add b32 $r8 1
- and $r8 0xf
- st b32 D[$r13 + 0x0] $r8
- bclr $flags $p1
-queue_get_done:
- ret
-
-// nv_rd32 - read 32-bit value from nv register
-//
-// In : $r14 register
-// Out: $r15 value
-//
-nv_rd32:
- mov $r11 0x728
- shl b32 $r11 6
- mov b32 $r12 $r14
- bset $r12 31 // MMIO_CTRL_PENDING
- iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
- nv_rd32_wait:
- iord $r12 I[$r11 + 0x000]
- xbit $r12 $r12 31
- bra ne #nv_rd32_wait
- mov $r10 6 // DONE_MMIO_RD
- call #wait_doneo
- iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
- ret
-
-// nv_wr32 - write 32-bit value to nv register
-//
-// In : $r14 register
-// $r15 value
-//
-nv_wr32:
- mov $r11 0x728
- shl b32 $r11 6
- iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
- mov b32 $r12 $r14
- bset $r12 31 // MMIO_CTRL_PENDING
- bset $r12 30 // MMIO_CTRL_WRITE
- iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
- nv_wr32_wait:
- iord $r12 I[$r11 + 0x000]
- xbit $r12 $r12 31
- bra ne #nv_wr32_wait
- ret
-
-// (re)set watchdog timer
-//
-// In : $r15 timeout
-//
-watchdog_reset:
- mov $r8 0x430
- shl b32 $r8 6
- bset $r15 31
- iowr I[$r8 + 0x000] $r15
- ret
-
-// clear watchdog timer
-watchdog_clear:
- mov $r8 0x430
- shl b32 $r8 6
- iowr I[$r8 + 0x000] $r0
- ret
-
-// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
-//
-// In : $r10 bit to wait on
-//
-define(`wait_done', `
-$1:
- trace_set(T_WAIT);
- mov $r8 0x818
- shl b32 $r8 6
- iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit
- wait_done_$1:
- mov $r8 0x400
- shl b32 $r8 6
- iord $r8 I[$r8 + 0x000] // DONE
- xbit $r8 $r8 $r10
- bra $2 #wait_done_$1
- trace_clr(T_WAIT)
- ret
-')
-wait_done(wait_donez, ne)
-wait_done(wait_doneo, e)
-
-// mmctx_size - determine size of a mmio list transfer
-//
-// In : $r14 mmio list head
-// $r15 mmio list tail
-// Out: $r15 transfer size (in bytes)
-//
-mmctx_size:
- clear b32 $r9
- nv_mmctx_size_loop:
- ld b32 $r8 D[$r14]
- shr b32 $r8 26
- add b32 $r8 1
- shl b32 $r8 2
- add b32 $r9 $r8
- add b32 $r14 4
- cmpu b32 $r14 $r15
- bra ne #nv_mmctx_size_loop
- mov b32 $r15 $r9
- ret
-
-// mmctx_xfer - execute a list of mmio transfers
-//
-// In : $r10 flags
-// bit 0: direction (0 = save, 1 = load)
-// bit 1: set if first transfer
-// bit 2: set if last transfer
-// $r11 base
-// $r12 mmio list head
-// $r13 mmio list tail
-// $r14 multi_stride
-// $r15 multi_mask
-//
-mmctx_xfer:
- trace_set(T_MMCTX)
- mov $r8 0x710
- shl b32 $r8 6
- clear b32 $r9
- or $r11 $r11
- bra e #mmctx_base_disabled
- iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
- bset $r9 0 // BASE_EN
- mmctx_base_disabled:
- or $r14 $r14
- bra e #mmctx_multi_disabled
- iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
- iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
- bset $r9 1 // MULTI_EN
- mmctx_multi_disabled:
- add b32 $r8 0x100
-
- xbit $r11 $r10 0
- shl b32 $r11 16 // DIR
- bset $r11 12 // QLIMIT = 0x10
- xbit $r14 $r10 1
- shl b32 $r14 17
- or $r11 $r14 // START_TRIGGER
- iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
-
- // loop over the mmio list, and send requests to the hw
- mmctx_exec_loop:
- // wait for space in mmctx queue
- mmctx_wait_free:
- iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
- and $r14 0x1f
- bra e #mmctx_wait_free
-
- // queue up an entry
- ld b32 $r14 D[$r12]
- or $r14 $r9
- iowr I[$r8 + 0x300] $r14
- add b32 $r12 4
- cmpu b32 $r12 $r13
- bra ne #mmctx_exec_loop
-
- xbit $r11 $r10 2
- bra ne #mmctx_stop
- // wait for queue to empty
- mmctx_fini_wait:
- iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
- and $r11 0x1f
- cmpu b32 $r11 0x10
- bra ne #mmctx_fini_wait
- mov $r10 2 // DONE_MMCTX
- call #wait_donez
- bra #mmctx_done
- mmctx_stop:
- xbit $r11 $r10 0
- shl b32 $r11 16 // DIR
- bset $r11 12 // QLIMIT = 0x10
- bset $r11 18 // STOP_TRIGGER
- iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
- mmctx_stop_wait:
- // wait for STOP_TRIGGER to clear
- iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
- xbit $r11 $r11 18
- bra ne #mmctx_stop_wait
- mmctx_done:
- trace_clr(T_MMCTX)
- ret
-
-// Wait for DONE_STRAND
-//
-strand_wait:
- push $r10
- mov $r10 2
- call #wait_donez
- pop $r10
- ret
-
-// unknown - call before issuing strand commands
-//
-strand_pre:
- mov $r8 0x4afc
- sethi $r8 0x20000
- mov $r9 0xc
- iowr I[$r8] $r9
- call #strand_wait
- ret
-
-// unknown - call after issuing strand commands
-//
-strand_post:
- mov $r8 0x4afc
- sethi $r8 0x20000
- mov $r9 0xd
- iowr I[$r8] $r9
- call #strand_wait
- ret
-
-// Selects strand set?!
-//
-// In: $r14 id
-//
-strand_set:
- mov $r10 0x4ffc
- sethi $r10 0x20000
- sub b32 $r11 $r10 0x500
- mov $r12 0xf
- iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
- mov $r12 0xb
- iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
- call #strand_wait
- iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
- mov $r12 0xa
- iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
- call #strand_wait
- ret
-
-// Initialise strand context data
-//
-// In : $r15 context base
-// Out: $r15 context size (in bytes)
-//
-// Strandset(?) 3 hardcoded currently
-//
-strand_ctx_init:
- trace_set(T_STRINIT)
- call #strand_pre
- mov $r14 3
- call #strand_set
- mov $r10 0x46fc
- sethi $r10 0x20000
- add b32 $r11 $r10 0x400
- iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
- mov $r12 1
- iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
- call #strand_wait
- sub b32 $r12 $r0 1
- iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
- mov $r12 2
- iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
- call #strand_wait
- call #strand_post
-
- // read the size of each strand, poke the context offset of
- // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
- // about it later then.
- mov $r8 0x880
- shl b32 $r8 6
- iord $r9 I[$r8 + 0x000] // STRANDS
- add b32 $r8 0x2200
- shr b32 $r14 $r15 8
- ctx_init_strand_loop:
- iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
- iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE
- iord $r10 I[$r8 + 0x200] // STRAND_SIZE
- shr b32 $r10 6
- add b32 $r10 1
- add b32 $r14 $r10
- add b32 $r8 4
- sub b32 $r9 1
- bra ne #ctx_init_strand_loop
-
- shl b32 $r14 8
- sub b32 $r15 $r14 $r15
- trace_clr(T_STRINIT)
- ret
-')
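
The m4 helpers deleted above pack each mmio-list entry into a single word: mmctx_data(addr, count) expands to (count - 1) << 26 | addr, and mmctx_size walks a list recovering the byte total with shr 26 / add 1 / shl 2 per entry. Decoding one entry from the TPC lists in nvc0_grgpc.fuc below, mmctx_data(0x000644, 20), under that layout:

#include <stdio.h>
#include <stdint.h>

static uint32_t mmctx_bytes(uint32_t desc)
{
	return ((desc >> 26) + 1) << 2;   /* word count back to bytes */
}

int main(void)
{
	/* mmctx_data(0x000644, 20): 20 words starting at 0x000644 */
	uint32_t desc = ((20u - 1) << 26) | 0x000644;

	printf("addr 0x%06x, %u bytes\n", desc & 0x03ffffff,
	       (unsigned)mmctx_bytes(desc));   /* addr 0x000644, 80 bytes */
	return 0;
}
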
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index 91d44ea..f5d184e0 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -57,7 +57,8 @@ struct nvc0_graph_priv {
struct nouveau_gpuobj *unk4188b4;
struct nouveau_gpuobj *unk4188b8;
- u8 magic_not_rop_nr;
+ u8 magic_not_rop_nr;
+ u32 magicgpc918;
};
struct nvc0_graph_chan {
@@ -71,27 +72,4 @@ struct nvc0_graph_chan {
int nvc0_grctx_generate(struct nouveau_channel *);
-/* nvc0_graph.c uses this also to determine supported chipsets */
-static inline u32
-nvc0_graph_class(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- switch (dev_priv->chipset) {
- case 0xc0:
- case 0xc3:
- case 0xc4:
- case 0xce: /* guess, mmio trace shows only 0x9097 state */
- case 0xcf: /* guess, mmio trace shows only 0x9097 state */
- return 0x9097;
- case 0xc1:
- return 0x9197;
- case 0xc8:
- case 0xd9:
- return 0x9297;
- default:
- return 0;
- }
-}
-
#endif
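
The inline nvc0_graph_class() removed here served two roles in the old code: a zero return meant "unsupported chipset", and the returned class number ordered the chipsets so callers could register every 3D class up to it (the "fermi >= 0x9197" / "fermi >= 0x9297" checks dropped from nvc0_graph.c above). A caller-side sketch of that idiom; register_class() is a hypothetical stand-in for NVOBJ_CLASS():

#include <stdint.h>

static void register_class(uint32_t cls)
{
	(void)cls;   /* hypothetical stand-in for NVOBJ_CLASS() */
}

static uint32_t graph_class(int chipset)
{
	switch (chipset) {
	case 0xc0: case 0xc3: case 0xc4:
	case 0xce: case 0xcf:            return 0x9097;
	case 0xc1:                       return 0x9197;
	case 0xc8: case 0xd9:            return 0x9297;
	default:                         return 0;       /* unsupported */
	}
}

static int graph_register(int chipset)
{
	uint32_t fermi = graph_class(chipset);

	if (!fermi)
		return -1;                   /* probe bails out early */
	register_class(0x9097);              /* baseline Fermi 3D */
	if (fermi >= 0x9197)
		register_class(0x9197);      /* NVC1 and later */
	if (fermi >= 0x9297)
		register_class(0x9297);      /* NVC8 and later */
	return 0;
}
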
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index de77842..6df0661 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -45,9 +45,6 @@ nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
static void
nvc0_grctx_generate_9097(struct drm_device *dev)
{
- u32 fermi = nvc0_graph_class(dev);
- u32 mthd;
-
nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
@@ -827,10 +824,134 @@ nvc0_grctx_generate_9097(struct drm_device *dev)
nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
- if (fermi == 0x9097) {
- for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
- nv_mthd(dev, 0x9097, mthd, 0x00000000);
- }
+ nv_mthd(dev, 0x9097, 0x3400, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3404, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3408, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x340c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3410, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3414, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3418, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x341c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3420, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3424, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3428, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x342c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3430, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3434, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3438, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x343c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3440, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3444, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3448, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x344c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3450, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3454, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3458, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x345c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3460, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3464, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3468, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x346c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3470, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3474, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3478, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x347c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3480, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3484, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3488, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x348c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3490, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3494, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3498, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x349c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3500, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3504, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3508, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x350c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3510, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3514, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3518, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x351c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3520, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3524, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3528, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x352c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3530, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3534, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3538, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x353c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3540, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3544, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3548, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x354c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3550, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3554, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3558, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x355c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3560, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3564, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3568, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x356c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3570, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3574, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3578, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x357c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3580, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3584, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3588, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x358c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3590, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3594, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3598, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x359c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35fc, 0x00000000);
nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
@@ -1200,37 +1321,6 @@ nvc0_grctx_generate_9097(struct drm_device *dev)
}
static void
-nvc0_grctx_generate_9197(struct drm_device *dev)
-{
- u32 fermi = nvc0_graph_class(dev);
- u32 mthd;
-
- if (fermi == 0x9197) {
- for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
- nv_mthd(dev, 0x9197, mthd, 0x00000000);
- }
- nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001);
-}
-
-static void
-nvc0_grctx_generate_9297(struct drm_device *dev)
-{
- u32 fermi = nvc0_graph_class(dev);
- u32 mthd;
-
- if (fermi == 0x9297) {
- for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
- nv_mthd(dev, 0x9297, mthd, 0x00000000);
- }
- nv_mthd(dev, 0x9297, 0x036c, 0x00000000);
- nv_mthd(dev, 0x9297, 0x0370, 0x00000000);
- nv_mthd(dev, 0x9297, 0x07a4, 0x00000000);
- nv_mthd(dev, 0x9297, 0x07a8, 0x00000000);
- nv_mthd(dev, 0x9297, 0x0374, 0x00000000);
- nv_mthd(dev, 0x9297, 0x0378, 0x00000020);
-}
-
-static void
nvc0_grctx_generate_902d(struct drm_device *dev)
{
nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
@@ -1268,17 +1358,6 @@ nvc0_grctx_generate_9039(struct drm_device *dev)
static void
nvc0_grctx_generate_90c0(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
-
- for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
- nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
- nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
- nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
- nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
- nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
- nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
- }
nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
@@ -1287,12 +1366,6 @@ nvc0_grctx_generate_90c0(struct drm_device *dev)
nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
- for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
- nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
- nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
- nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
- nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
- }
nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
@@ -1486,22 +1559,9 @@ nvc0_grctx_generate_unk47xx(struct drm_device *dev)
static void
nvc0_grctx_generate_shaders(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (dev_priv->chipset == 0xd9) {
- nv_wr32(dev, 0x405800, 0x0f8000bf);
- nv_wr32(dev, 0x405830, 0x02180218);
- nv_wr32(dev, 0x405834, 0x08000000);
- } else
- if (dev_priv->chipset == 0xc1) {
- nv_wr32(dev, 0x405800, 0x0f8000bf);
- nv_wr32(dev, 0x405830, 0x02180218);
- nv_wr32(dev, 0x405834, 0x00000000);
- } else {
- nv_wr32(dev, 0x405800, 0x078000bf);
- nv_wr32(dev, 0x405830, 0x02180000);
- nv_wr32(dev, 0x405834, 0x00000000);
- }
+ nv_wr32(dev, 0x405800, 0x078000bf);
+ nv_wr32(dev, 0x405830, 0x02180000);
+ nv_wr32(dev, 0x405834, 0x00000000);
nv_wr32(dev, 0x405838, 0x00000000);
nv_wr32(dev, 0x405854, 0x00000000);
nv_wr32(dev, 0x405870, 0x00000001);
@@ -1526,19 +1586,10 @@ nvc0_grctx_generate_unk60xx(struct drm_device *dev)
static void
nvc0_grctx_generate_unk64xx(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
nv_wr32(dev, 0x4064a8, 0x00000000);
nv_wr32(dev, 0x4064ac, 0x00003fff);
nv_wr32(dev, 0x4064b4, 0x00000000);
nv_wr32(dev, 0x4064b8, 0x00000000);
- if (dev_priv->chipset == 0xd9)
- nv_wr32(dev, 0x4064bc, 0x00000000);
- if (dev_priv->chipset == 0xc1 ||
- dev_priv->chipset == 0xd9) {
- nv_wr32(dev, 0x4064c0, 0x80140078);
- nv_wr32(dev, 0x4064c4, 0x0086ffff);
- }
}
static void
@@ -1571,28 +1622,22 @@ static void
nvc0_grctx_generate_rop(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chipset = dev_priv->chipset;
/* ROPC_BROADCAST */
nv_wr32(dev, 0x408800, 0x02802a3c);
nv_wr32(dev, 0x408804, 0x00000040);
- if (chipset == 0xd9) {
- nv_wr32(dev, 0x408808, 0x1043e005);
+ nv_wr32(dev, 0x408808, 0x0003e00d);
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ nv_wr32(dev, 0x408900, 0x0080b801);
+ break;
+ case 0xc3:
+ case 0xc4:
nv_wr32(dev, 0x408900, 0x3080b801);
- nv_wr32(dev, 0x408904, 0x1043e005);
- nv_wr32(dev, 0x408908, 0x00c8102f);
- } else
- if (chipset == 0xc1) {
- nv_wr32(dev, 0x408808, 0x1003e005);
- nv_wr32(dev, 0x408900, 0x3080b801);
- nv_wr32(dev, 0x408904, 0x62000001);
- nv_wr32(dev, 0x408908, 0x00c80929);
- } else {
- nv_wr32(dev, 0x408808, 0x0003e00d);
- nv_wr32(dev, 0x408900, 0x3080b801);
- nv_wr32(dev, 0x408904, 0x02000001);
- nv_wr32(dev, 0x408908, 0x00c80929);
+ break;
}
+ nv_wr32(dev, 0x408904, 0x02000001);
+ nv_wr32(dev, 0x408908, 0x00c80929);
nv_wr32(dev, 0x40890c, 0x00000000);
nv_wr32(dev, 0x408980, 0x0000011d);
}
@@ -1600,8 +1645,6 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
static void
nvc0_grctx_generate_gpc(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chipset = dev_priv->chipset;
int i;
/* GPC_BROADCAST */
@@ -1611,7 +1654,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418408, 0x00000000);
nv_wr32(dev, 0x41840c, 0x00001008);
nv_wr32(dev, 0x418410, 0x0fff0fff);
- nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
+ nv_wr32(dev, 0x418414, 0x00200fff);
nv_wr32(dev, 0x418450, 0x00000000);
nv_wr32(dev, 0x418454, 0x00000000);
nv_wr32(dev, 0x418458, 0x00000000);
@@ -1626,17 +1669,14 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418700, 0x00000002);
nv_wr32(dev, 0x418704, 0x00000080);
nv_wr32(dev, 0x418708, 0x00000000);
- nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
+ nv_wr32(dev, 0x41870c, 0x07c80000);
nv_wr32(dev, 0x418710, 0x00000000);
- nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
+ nv_wr32(dev, 0x418800, 0x0006860a);
nv_wr32(dev, 0x418808, 0x00000000);
nv_wr32(dev, 0x41880c, 0x00000000);
nv_wr32(dev, 0x418810, 0x00000000);
nv_wr32(dev, 0x418828, 0x00008442);
- if (chipset == 0xc1 || chipset == 0xd9)
- nv_wr32(dev, 0x418830, 0x10000001);
- else
- nv_wr32(dev, 0x418830, 0x00000001);
+ nv_wr32(dev, 0x418830, 0x00000001);
nv_wr32(dev, 0x4188d8, 0x00000008);
nv_wr32(dev, 0x4188e0, 0x01000000);
nv_wr32(dev, 0x4188e8, 0x00000000);
@@ -1644,12 +1684,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x4188f0, 0x00000000);
nv_wr32(dev, 0x4188f4, 0x00000000);
nv_wr32(dev, 0x4188f8, 0x00000000);
- if (chipset == 0xd9)
- nv_wr32(dev, 0x4188fc, 0x20100008);
- else if (chipset == 0xc1)
- nv_wr32(dev, 0x4188fc, 0x00100018);
- else
- nv_wr32(dev, 0x4188fc, 0x00100000);
+ nv_wr32(dev, 0x4188fc, 0x00100000);
nv_wr32(dev, 0x41891c, 0x00ff00ff);
nv_wr32(dev, 0x418924, 0x00000000);
nv_wr32(dev, 0x418928, 0x00ffff00);
@@ -1663,7 +1698,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
}
- nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
+ nv_wr32(dev, 0x418b00, 0x00000000);
nv_wr32(dev, 0x418b08, 0x0a418820);
nv_wr32(dev, 0x418b0c, 0x062080e6);
nv_wr32(dev, 0x418b10, 0x020398a4);
@@ -1680,8 +1715,6 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418c24, 0x00000000);
nv_wr32(dev, 0x418c28, 0x00000000);
nv_wr32(dev, 0x418c2c, 0x00000000);
- if (chipset == 0xc1 || chipset == 0xd9)
- nv_wr32(dev, 0x418c6c, 0x00000001);
nv_wr32(dev, 0x418c80, 0x20200004);
nv_wr32(dev, 0x418c8c, 0x00000001);
nv_wr32(dev, 0x419000, 0x00000780);
@@ -1694,16 +1727,10 @@ static void
nvc0_grctx_generate_tp(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chipset = dev_priv->chipset;
/* GPC_BROADCAST.TP_BROADCAST */
- nv_wr32(dev, 0x419818, 0x00000000);
- nv_wr32(dev, 0x41983c, 0x00038bc7);
nv_wr32(dev, 0x419848, 0x00000000);
- if (chipset == 0xc1 || chipset == 0xd9)
- nv_wr32(dev, 0x419864, 0x00000129);
- else
- nv_wr32(dev, 0x419864, 0x0000012a);
+ nv_wr32(dev, 0x419864, 0x0000012a);
nv_wr32(dev, 0x419888, 0x00000000);
nv_wr32(dev, 0x419a00, 0x000001f0);
nv_wr32(dev, 0x419a04, 0x00000001);
@@ -1713,10 +1740,8 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419a14, 0x00000200);
nv_wr32(dev, 0x419a1c, 0x00000000);
nv_wr32(dev, 0x419a20, 0x00000800);
- if (chipset == 0xd9)
- nv_wr32(dev, 0x00419ac4, 0x0017f440);
- else if (chipset != 0xc0 && chipset != 0xc8)
- nv_wr32(dev, 0x00419ac4, 0x0007f440);
+ if (dev_priv->chipset != 0xc0)
+ nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */
nv_wr32(dev, 0x419b00, 0x0a418820);
nv_wr32(dev, 0x419b04, 0x062080e6);
nv_wr32(dev, 0x419b08, 0x020398a4);
@@ -1724,34 +1749,17 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419b10, 0x0a418820);
nv_wr32(dev, 0x419b14, 0x000000e6);
nv_wr32(dev, 0x419bd0, 0x00900103);
- if (chipset == 0xc1 || chipset == 0xd9)
- nv_wr32(dev, 0x419be0, 0x00400001);
- else
- nv_wr32(dev, 0x419be0, 0x00000001);
+ nv_wr32(dev, 0x419be0, 0x00000001);
nv_wr32(dev, 0x419be4, 0x00000000);
- nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
+ nv_wr32(dev, 0x419c00, 0x00000002);
nv_wr32(dev, 0x419c04, 0x00000006);
nv_wr32(dev, 0x419c08, 0x00000002);
nv_wr32(dev, 0x419c20, 0x00000000);
- if (dev_priv->chipset == 0xd9) {
- nv_wr32(dev, 0x419c24, 0x00084210);
- nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
- nv_wr32(dev, 0x419cb0, 0x00020048);
- } else
- if (chipset == 0xce || chipset == 0xcf) {
- nv_wr32(dev, 0x419cb0, 0x00020048);
- } else {
- nv_wr32(dev, 0x419cb0, 0x00060048);
- }
+ nv_wr32(dev, 0x419cbc, 0x28137606);
nv_wr32(dev, 0x419ce8, 0x00000000);
nv_wr32(dev, 0x419cf4, 0x00000183);
- if (chipset == 0xc1 || chipset == 0xd9)
- nv_wr32(dev, 0x419d20, 0x12180000);
- else
- nv_wr32(dev, 0x419d20, 0x02180000);
+ nv_wr32(dev, 0x419d20, 0x02180000);
nv_wr32(dev, 0x419d24, 0x00001fff);
- if (chipset == 0xc1 || chipset == 0xd9)
- nv_wr32(dev, 0x419d44, 0x02180218);
nv_wr32(dev, 0x419e04, 0x00000000);
nv_wr32(dev, 0x419e08, 0x00000000);
nv_wr32(dev, 0x419e0c, 0x00000000);
@@ -1777,11 +1785,11 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419e8c, 0x00000000);
nv_wr32(dev, 0x419e90, 0x00000000);
nv_wr32(dev, 0x419e98, 0x00000000);
- if (chipset != 0xc0 && chipset != 0xc8)
+ if (dev_priv->chipset != 0xc0)
nv_wr32(dev, 0x419ee0, 0x00011110);
nv_wr32(dev, 0x419f50, 0x00000000);
nv_wr32(dev, 0x419f54, 0x00000000);
- if (chipset != 0xc0 && chipset != 0xc8)
+ if (dev_priv->chipset != 0xc0)
nv_wr32(dev, 0x419f58, 0x00000000);
}
@@ -1793,7 +1801,6 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
struct drm_device *dev = chan->dev;
int i, gpc, tp, id;
- u32 fermi = nvc0_graph_class(dev);
u32 r000260, tmp;
r000260 = nv_rd32(dev, 0x000260);
@@ -1850,7 +1857,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_wr32(dev, 0x40587c, 0x00000000);
if (1) {
- u8 tpnr[GPC_MAX], data[TP_MAX];
+ const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 };
+ u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
+ u8 tpnr[GPC_MAX];
+ u8 data[32];
memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
memset(data, 0x1f, sizeof(data));
@@ -1864,7 +1874,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
data[tp] = gpc;
}
- for (i = 0; i < 4; i++)
+ for (i = 0; i < max / 4; i++)
nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
}
@@ -1876,7 +1886,6 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
/* calculate first set of magics */
memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
- gpc = -1;
for (tp = 0; tp < priv->tp_total; tp++) {
do {
gpc = (gpc + 1) % priv->gpc_nr;
@@ -1926,26 +1935,30 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
if (1) {
u32 tp_mask = 0, tp_set = 0;
- u8 tpnr[GPC_MAX], a, b;
+ u8 tpnr[GPC_MAX];
memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
for (gpc = 0; gpc < priv->gpc_nr; gpc++)
tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
- for (i = 0, gpc = -1, b = -1; i < 32; i++) {
- a = (i * (priv->tp_total - 1)) / 32;
- if (a != b) {
- b = a;
- do {
- gpc = (gpc + 1) % priv->gpc_nr;
- } while (!tpnr[gpc]);
- tp = priv->tp_nr[gpc] - tpnr[gpc]--;
+ gpc = -1;
+ for (i = 0, gpc = -1; i < 32; i++) {
+ int ltp = i * (priv->tp_total - 1) / 32;
- tp_set |= 1 << ((gpc * 8) + tp);
- }
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tp = priv->tp_nr[gpc] - tpnr[gpc]--;
+
+ tp_set |= 1 << ((gpc * 8) + tp);
- nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
- nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
+ do {
+ nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
+ tp_set ^= tp_mask;
+ nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
+ tp_set ^= tp_mask;
+ } while (ltp == (++i * (priv->tp_total - 1) / 32));
+ i--;
}
}
@@ -2050,10 +2063,6 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x00000215, 0x00000040);
nv_icmd(dev, 0x00000216, 0x00000040);
nv_icmd(dev, 0x00000217, 0x00000040);
- if (dev_priv->chipset == 0xd9) {
- for (i = 0x0400; i <= 0x0417; i++)
- nv_icmd(dev, i, 0x00000040);
- }
nv_icmd(dev, 0x00000218, 0x0000c080);
nv_icmd(dev, 0x00000219, 0x0000c080);
nv_icmd(dev, 0x0000021a, 0x0000c080);
@@ -2062,10 +2071,6 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x0000021d, 0x0000c080);
nv_icmd(dev, 0x0000021e, 0x0000c080);
nv_icmd(dev, 0x0000021f, 0x0000c080);
- if (dev_priv->chipset == 0xd9) {
- for (i = 0x0440; i <= 0x0457; i++)
- nv_icmd(dev, i, 0x0000c080);
- }
nv_icmd(dev, 0x000000ad, 0x0000013e);
nv_icmd(dev, 0x000000e1, 0x00000010);
nv_icmd(dev, 0x00000290, 0x00000000);
@@ -2628,9 +2633,6 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x0000053f, 0xffff0000);
nv_icmd(dev, 0x00000585, 0x0000003f);
nv_icmd(dev, 0x00000576, 0x00000003);
- if (dev_priv->chipset == 0xc1 ||
- dev_priv->chipset == 0xd9)
- nv_icmd(dev, 0x0000057b, 0x00000059);
nv_icmd(dev, 0x00000586, 0x00000040);
nv_icmd(dev, 0x00000582, 0x00000080);
nv_icmd(dev, 0x00000583, 0x00000080);
@@ -2731,8 +2733,6 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x00000957, 0x00000003);
nv_icmd(dev, 0x0000095e, 0x20164010);
nv_icmd(dev, 0x0000095f, 0x00000020);
- if (dev_priv->chipset == 0xd9)
- nv_icmd(dev, 0x0000097d, 0x00000020);
nv_icmd(dev, 0x00000683, 0x00000006);
nv_icmd(dev, 0x00000685, 0x003fffff);
nv_icmd(dev, 0x00000687, 0x00000c48);
@@ -2865,10 +2865,6 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_wr32(dev, 0x404154, 0x00000400);
nvc0_grctx_generate_9097(dev);
- if (fermi >= 0x9197)
- nvc0_grctx_generate_9197(dev);
- if (fermi >= 0x9297)
- nvc0_grctx_generate_9297(dev);
nvc0_grctx_generate_902d(dev);
nvc0_grctx_generate_9039(dev);
nvc0_grctx_generate_90c0(dev);
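
The long run of nv_mthd(dev, 0x9097, 0x3400..0x35fc, 0x00000000) calls restored earlier in this file is the unrolled form of the loop the newer code used; both versions emit identical zero writes at a 4-byte method stride. A standalone count check of that range:

#include <stdio.h>

int main(void)
{
	int n = 0;

	for (unsigned mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
		n++;   /* one nv_mthd(dev, 0x9097, mthd, 0) per step */
	printf("%d methods, 0x3400..0x35fc\n", n);   /* prints 128 */
	return 0;
}
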
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
deleted file mode 100644
index 15272be..0000000
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+++ /dev/null
@@ -1,539 +0,0 @@
-/* fuc microcode for nvc0 PGRAPH/GPC
- *
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-/* To build:
- * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h
- */
-
-/* TODO
- * - bracket certain functions with scratch writes, useful for debugging
- * - watchdog timer around ctx operations
- */
-
-.section #nvc0_grgpc_data
-include(`nvc0_graph.fuc')
-gpc_id: .b32 0
-gpc_mmio_list_head: .b32 0
-gpc_mmio_list_tail: .b32 0
-
-tpc_count: .b32 0
-tpc_mask: .b32 0
-tpc_mmio_list_head: .b32 0
-tpc_mmio_list_tail: .b32 0
-
-cmd_queue: queue_init
-
-// chipset descriptions
-chipsets:
-.b8 0xc0 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc0_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvc0_tpc_mmio_tail
-.b8 0xc1 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc1_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvc1_tpc_mmio_tail
-.b8 0xc3 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc0_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvc3_tpc_mmio_tail
-.b8 0xc4 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc0_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvc3_tpc_mmio_tail
-.b8 0xc8 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc0_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvc0_tpc_mmio_tail
-.b8 0xce 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc0_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvc3_tpc_mmio_tail
-.b8 0xcf 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc0_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvcf_tpc_mmio_tail
-.b8 0xd9 0 0 0
-.b16 #nvd9_gpc_mmio_head
-.b16 #nvd9_gpc_mmio_tail
-.b16 #nvd9_tpc_mmio_head
-.b16 #nvd9_tpc_mmio_tail
-.b8 0 0 0 0
-
-// GPC mmio lists
-nvc0_gpc_mmio_head:
-mmctx_data(0x000380, 1)
-mmctx_data(0x000400, 6)
-mmctx_data(0x000450, 9)
-mmctx_data(0x000600, 1)
-mmctx_data(0x000684, 1)
-mmctx_data(0x000700, 5)
-mmctx_data(0x000800, 1)
-mmctx_data(0x000808, 3)
-mmctx_data(0x000828, 1)
-mmctx_data(0x000830, 1)
-mmctx_data(0x0008d8, 1)
-mmctx_data(0x0008e0, 1)
-mmctx_data(0x0008e8, 6)
-mmctx_data(0x00091c, 1)
-mmctx_data(0x000924, 3)
-mmctx_data(0x000b00, 1)
-mmctx_data(0x000b08, 6)
-mmctx_data(0x000bb8, 1)
-mmctx_data(0x000c08, 1)
-mmctx_data(0x000c10, 8)
-mmctx_data(0x000c80, 1)
-mmctx_data(0x000c8c, 1)
-mmctx_data(0x001000, 3)
-mmctx_data(0x001014, 1)
-nvc0_gpc_mmio_tail:
-mmctx_data(0x000c6c, 1);
-nvc1_gpc_mmio_tail:
-
-nvd9_gpc_mmio_head:
-mmctx_data(0x000380, 1)
-mmctx_data(0x000400, 2)
-mmctx_data(0x00040c, 3)
-mmctx_data(0x000450, 9)
-mmctx_data(0x000600, 1)
-mmctx_data(0x000684, 1)
-mmctx_data(0x000700, 5)
-mmctx_data(0x000800, 1)
-mmctx_data(0x000808, 3)
-mmctx_data(0x000828, 1)
-mmctx_data(0x000830, 1)
-mmctx_data(0x0008d8, 1)
-mmctx_data(0x0008e0, 1)
-mmctx_data(0x0008e8, 6)
-mmctx_data(0x00091c, 1)
-mmctx_data(0x000924, 3)
-mmctx_data(0x000b00, 1)
-mmctx_data(0x000b08, 6)
-mmctx_data(0x000bb8, 1)
-mmctx_data(0x000c08, 1)
-mmctx_data(0x000c10, 8)
-mmctx_data(0x000c6c, 1)
-mmctx_data(0x000c80, 1)
-mmctx_data(0x000c8c, 1)
-mmctx_data(0x001000, 3)
-mmctx_data(0x001014, 1)
-nvd9_gpc_mmio_tail:
-
-// TPC mmio lists
-nvc0_tpc_mmio_head:
-mmctx_data(0x000018, 1)
-mmctx_data(0x00003c, 1)
-mmctx_data(0x000048, 1)
-mmctx_data(0x000064, 1)
-mmctx_data(0x000088, 1)
-mmctx_data(0x000200, 6)
-mmctx_data(0x00021c, 2)
-mmctx_data(0x000300, 6)
-mmctx_data(0x0003d0, 1)
-mmctx_data(0x0003e0, 2)
-mmctx_data(0x000400, 3)
-mmctx_data(0x000420, 1)
-mmctx_data(0x0004b0, 1)
-mmctx_data(0x0004e8, 1)
-mmctx_data(0x0004f4, 1)
-mmctx_data(0x000520, 2)
-mmctx_data(0x000604, 4)
-mmctx_data(0x000644, 20)
-mmctx_data(0x000698, 1)
-mmctx_data(0x000750, 2)
-nvc0_tpc_mmio_tail:
-mmctx_data(0x000758, 1)
-mmctx_data(0x0002c4, 1)
-mmctx_data(0x0006e0, 1)
-nvcf_tpc_mmio_tail:
-mmctx_data(0x0004bc, 1)
-nvc3_tpc_mmio_tail:
-mmctx_data(0x000544, 1)
-nvc1_tpc_mmio_tail:
-
-nvd9_tpc_mmio_head:
-mmctx_data(0x000018, 1)
-mmctx_data(0x00003c, 1)
-mmctx_data(0x000048, 1)
-mmctx_data(0x000064, 1)
-mmctx_data(0x000088, 1)
-mmctx_data(0x000200, 6)
-mmctx_data(0x00021c, 2)
-mmctx_data(0x0002c4, 1)
-mmctx_data(0x000300, 6)
-mmctx_data(0x0003d0, 1)
-mmctx_data(0x0003e0, 2)
-mmctx_data(0x000400, 3)
-mmctx_data(0x000420, 3)
-mmctx_data(0x0004b0, 1)
-mmctx_data(0x0004e8, 1)
-mmctx_data(0x0004f4, 1)
-mmctx_data(0x000520, 2)
-mmctx_data(0x000544, 1)
-mmctx_data(0x000604, 4)
-mmctx_data(0x000644, 20)
-mmctx_data(0x000698, 1)
-mmctx_data(0x0006e0, 1)
-mmctx_data(0x000750, 3)
-nvd9_tpc_mmio_tail:
-
-.section #nvc0_grgpc_code
-bra #init
-define(`include_code')
-include(`nvc0_graph.fuc')
-
-// reports an exception to the host
-//
-// In: $r15 error code (see nvc0_graph.fuc)
-//
-error:
- push $r14
- mov $r14 -0x67ec // 0x9814
- sethi $r14 0x400000
- call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
- add b32 $r14 0x41c
- mov $r15 1
- call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET
- pop $r14
- ret
-
-// GPC fuc initialisation, executed by triggering ucode start, will
-// fall through to main loop after completion.
-//
-// Input:
-// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
-// CC_SCRATCH[1]: context base
-//
-// Output:
-// CC_SCRATCH[0]:
-// 31:31: set to signal completion
-// CC_SCRATCH[1]:
-// 31:0: GPC context size
-//
-init:
- clear b32 $r0
- mov $sp $r0
-
- // enable fifo access
- mov $r1 0x1200
- mov $r2 2
- iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
-
- // setup i0 handler, and route all interrupts to it
- mov $r1 #ih
- mov $iv0 $r1
- mov $r1 0x400
- iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
-
- // enable fifo interrupt
- mov $r2 4
- iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
-
- // enable interrupts
- bset $flags ie0
-
- // figure out which GPC we are, and how many TPCs we have
- mov $r1 0x608
- shl b32 $r1 6
- iord $r2 I[$r1 + 0x000] // UNITS
- mov $r3 1
- and $r2 0x1f
- shl b32 $r3 $r2
- sub b32 $r3 1
- st b32 D[$r0 + #tpc_count] $r2
- st b32 D[$r0 + #tpc_mask] $r3
- add b32 $r1 0x400
- iord $r2 I[$r1 + 0x000] // MYINDEX
- st b32 D[$r0 + #gpc_id] $r2
-
- // find context data for this chipset
- mov $r2 0x800
- shl b32 $r2 6
- iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
- mov $r1 #chipsets - 12
- init_find_chipset:
- add b32 $r1 12
- ld b32 $r3 D[$r1 + 0x00]
- cmpu b32 $r3 $r2
- bra e #init_context
- cmpu b32 $r3 0
- bra ne #init_find_chipset
- // unknown chipset
- ret
-
- // initialise context base, and size tracking
- init_context:
- mov $r2 0x800
- shl b32 $r2 6
- iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base
- clear b32 $r3 // track GPC context size here
-
- // set mmctx base addresses now so we don't have to do it later,
- // they don't currently ever change
- mov $r4 0x700
- shl b32 $r4 6
- shr b32 $r5 $r2 8
- iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
- iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
-
- // calculate GPC mmio context size, store the chipset-specific
- // mmio list pointers somewhere we can get at them later without
- // re-parsing the chipset list
- clear b32 $r14
- clear b32 $r15
- ld b16 $r14 D[$r1 + 4]
- ld b16 $r15 D[$r1 + 6]
- st b16 D[$r0 + #gpc_mmio_list_head] $r14
- st b16 D[$r0 + #gpc_mmio_list_tail] $r15
- call #mmctx_size
- add b32 $r2 $r15
- add b32 $r3 $r15
-
- // calculate per-TPC mmio context size, store the list pointers
- ld b16 $r14 D[$r1 + 8]
- ld b16 $r15 D[$r1 + 10]
- st b16 D[$r0 + #tpc_mmio_list_head] $r14
- st b16 D[$r0 + #tpc_mmio_list_tail] $r15
- call #mmctx_size
- ld b32 $r14 D[$r0 + #tpc_count]
- mulu $r14 $r15
- add b32 $r2 $r14
- add b32 $r3 $r14
-
- // round up base/size to 256 byte boundary (for strand SWBASE)
- add b32 $r4 0x1300
- shr b32 $r3 2
- iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
- shr b32 $r2 8
- shr b32 $r3 6
- add b32 $r2 1
- add b32 $r3 1
- shl b32 $r2 8
- shl b32 $r3 8
-
- // calculate size of strand context data
- mov b32 $r15 $r2
- call #strand_ctx_init
- add b32 $r3 $r15
-
- // save context size, and tell HUB we're done
- mov $r1 0x800
- shl b32 $r1 6
- iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size
- add b32 $r1 0x800
- clear b32 $r2
- bset $r2 31
- iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
-
-// Main program loop, very simple, sleeps until woken up by the interrupt
-// handler, pulls a command from the queue and executes its handler
-//
-main:
- bset $flags $p0
- sleep $p0
- mov $r13 #cmd_queue
- call #queue_get
- bra $p1 #main
-
- // 0x0000-0x0003 are all context transfers
- cmpu b32 $r14 0x04
- bra nc #main_not_ctx_xfer
- // fetch $flags and mask off $p1/$p2
- mov $r1 $flags
- mov $r2 0x0006
- not b32 $r2
- and $r1 $r2
- // set $p1/$p2 according to transfer type
- shl b32 $r14 1
- or $r1 $r14
- mov $flags $r1
- // transfer context data
- call #ctx_xfer
- bra #main
-
- main_not_ctx_xfer:
- shl b32 $r15 $r14 16
- or $r15 E_BAD_COMMAND
- call #error
- bra #main
-
-// interrupt handler
-ih:
- push $r8
- mov $r8 $flags
- push $r8
- push $r9
- push $r10
- push $r11
- push $r13
- push $r14
- push $r15
-
- // incoming fifo command?
- iord $r10 I[$r0 + 0x200] // INTR
- and $r11 $r10 0x00000004
- bra e #ih_no_fifo
- // queue incoming fifo command for later processing
- mov $r11 0x1900
- mov $r13 #cmd_queue
- iord $r14 I[$r11 + 0x100] // FIFO_CMD
- iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call #queue_put
- add b32 $r11 0x400
- mov $r14 1
- iowr I[$r11 + 0x000] $r14 // FIFO_ACK
-
- // ack, and wake up main()
- ih_no_fifo:
- iowr I[$r0 + 0x100] $r10 // INTR_ACK
-
- pop $r15
- pop $r14
- pop $r13
- pop $r11
- pop $r10
- pop $r9
- pop $r8
- mov $flags $r8
- pop $r8
- bclr $flags $p0
- iret
-
-// Set this GPC's bit in HUB_BAR, used to signal completion of various
-// activities to the HUB fuc
-//
-hub_barrier_done:
- mov $r15 1
- ld b32 $r14 D[$r0 + #gpc_id]
- shl b32 $r15 $r14
- mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
- sethi $r14 0x400000
- call #nv_wr32
- ret
-
-// Disables various things, waits a bit, and re-enables them..
-//
-// Not sure how exactly this helps, perhaps "ENABLE" is not such a
-// good description for the bits we turn off? Anyway, without this,
-// funny things happen.
-//
-ctx_redswitch:
- mov $r14 0x614
- shl b32 $r14 6
- mov $r15 0x020
- iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
- mov $r15 8
- ctx_redswitch_delay:
- sub b32 $r15 1
- bra ne #ctx_redswitch_delay
- mov $r15 0xa20
- iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
- ret
-
-// Transfer GPC context data between GPU and storage area
-//
-// In: $r15 context base address
-// $p1 clear on save, set on load
-// $p2 set if opposite direction done/will be done, so:
-// on save it means: "a load will follow this save"
-// on load it means: "a save preceded this load"
-//
-ctx_xfer:
- // set context base address
- mov $r1 0xa04
- shl b32 $r1 6
-	iowr I[$r1 + 0x000] $r15 // MEM_BASE
- bra not $p1 #ctx_xfer_not_load
- call #ctx_redswitch
- ctx_xfer_not_load:
-
- // strands
- mov $r1 0x4afc
- sethi $r1 0x20000
- mov $r2 0xc
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call #strand_wait
- mov $r2 0x47fc
- sethi $r2 0x20000
- iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
- xbit $r2 $flags $p1
- add b32 $r2 3
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
-
- // mmio context
- xbit $r10 $flags $p1 // direction
- or $r10 2 // first
- mov $r11 0x0000
- sethi $r11 0x500000
- ld b32 $r12 D[$r0 + #gpc_id]
- shl b32 $r12 15
- add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
- ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
- ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
- mov $r14 0 // not multi
- call #mmctx_xfer
-
- // per-TPC mmio context
- xbit $r10 $flags $p1 // direction
- or $r10 4 // last
- mov $r11 0x4000
- sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
- ld b32 $r12 D[$r0 + #gpc_id]
- shl b32 $r12 15
- add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
- ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
- ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
- ld b32 $r15 D[$r0 + #tpc_mask]
- mov $r14 0x800 // stride = 0x800
- call #mmctx_xfer
-
- // wait for strands to finish
- call #strand_wait
-
- // if load, or a save without a load following, do some
- // unknown stuff that's done after finishing a block of
- // strand commands
- bra $p1 #ctx_xfer_post
- bra not $p2 #ctx_xfer_done
- ctx_xfer_post:
- mov $r1 0x4afc
- sethi $r1 0x20000
- mov $r2 0xd
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
- call #strand_wait
-
- // mark completion in HUB's barrier
- ctx_xfer_done:
- call #hub_barrier_done
- ret
-
-.align 256
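
The init sequence above documents a simple mailbox handshake: the host uploads and starts the GPC falcon, then waits for bit 31 of GPCn_CC_SCRATCH[0] and reads the reported context size back from GPCn_CC_SCRATCH[1]. A minimal host-side sketch of that polling loop, using the GPC base the HUB microcode computes (0x502000 + n * 0x8000, scratch registers at +0x800/+0x804) and nouveau's nv_rd32()/nv_wait() helpers; an illustration, not code from this commit:

static int
gpc_wait_ctxctl_init(struct drm_device *dev, int gpc, u32 *ctx_size)
{
	u32 base = 0x502000 + gpc * 0x8000;

	/* the fuc sets CC_SCRATCH[0] bit 31 once init has completed... */
	if (!nv_wait(dev, base + 0x800, 0x80000000, 0x80000000))
		return -EBUSY;

	/* ...and reports its context size in CC_SCRATCH[1] */
	*ctx_size = nv_rd32(dev, base + 0x804);
	return 0;
}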
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
deleted file mode 100644
index a988b8a..0000000
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+++ /dev/null
@@ -1,538 +0,0 @@
-uint32_t nvc0_grgpc_data[] = {
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x000000c0,
- 0x012800c8,
- 0x01e40194,
- 0x000000c1,
- 0x012c00c8,
- 0x01f80194,
- 0x000000c3,
- 0x012800c8,
- 0x01f40194,
- 0x000000c4,
- 0x012800c8,
- 0x01f40194,
- 0x000000c8,
- 0x012800c8,
- 0x01e40194,
- 0x000000ce,
- 0x012800c8,
- 0x01f40194,
- 0x000000cf,
- 0x012800c8,
- 0x01f00194,
- 0x000000d9,
- 0x0194012c,
- 0x025401f8,
- 0x00000000,
- 0x00000380,
- 0x14000400,
- 0x20000450,
- 0x00000600,
- 0x00000684,
- 0x10000700,
- 0x00000800,
- 0x08000808,
- 0x00000828,
- 0x00000830,
- 0x000008d8,
- 0x000008e0,
- 0x140008e8,
- 0x0000091c,
- 0x08000924,
- 0x00000b00,
- 0x14000b08,
- 0x00000bb8,
- 0x00000c08,
- 0x1c000c10,
- 0x00000c80,
- 0x00000c8c,
- 0x08001000,
- 0x00001014,
- 0x00000c6c,
- 0x00000380,
- 0x04000400,
- 0x0800040c,
- 0x20000450,
- 0x00000600,
- 0x00000684,
- 0x10000700,
- 0x00000800,
- 0x08000808,
- 0x00000828,
- 0x00000830,
- 0x000008d8,
- 0x000008e0,
- 0x140008e8,
- 0x0000091c,
- 0x08000924,
- 0x00000b00,
- 0x14000b08,
- 0x00000bb8,
- 0x00000c08,
- 0x1c000c10,
- 0x00000c6c,
- 0x00000c80,
- 0x00000c8c,
- 0x08001000,
- 0x00001014,
- 0x00000018,
- 0x0000003c,
- 0x00000048,
- 0x00000064,
- 0x00000088,
- 0x14000200,
- 0x0400021c,
- 0x14000300,
- 0x000003d0,
- 0x040003e0,
- 0x08000400,
- 0x00000420,
- 0x000004b0,
- 0x000004e8,
- 0x000004f4,
- 0x04000520,
- 0x0c000604,
- 0x4c000644,
- 0x00000698,
- 0x04000750,
- 0x00000758,
- 0x000002c4,
- 0x000006e0,
- 0x000004bc,
- 0x00000544,
- 0x00000018,
- 0x0000003c,
- 0x00000048,
- 0x00000064,
- 0x00000088,
- 0x14000200,
- 0x0400021c,
- 0x000002c4,
- 0x14000300,
- 0x000003d0,
- 0x040003e0,
- 0x08000400,
- 0x08000420,
- 0x000004b0,
- 0x000004e8,
- 0x000004f4,
- 0x04000520,
- 0x00000544,
- 0x0c000604,
- 0x4c000644,
- 0x00000698,
- 0x000006e0,
- 0x08000750,
-};
-
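Comparing this data array with the source above shows how mmctx_data(addr, count) assembles: the register address occupies the low 26 bits and (count - 1) is stored in the bits above it, so mmctx_data(0x000450, 9) becomes 0x20000450. A small standalone decoder for that observed packing:

#include <stdint.h>
#include <stdio.h>

static void decode_mmctx(uint32_t word)
{
	uint32_t addr  = word & 0x03ffffff;	/* low 26 bits: register */
	uint32_t count = (word >> 26) + 1;	/* upper bits: count - 1 */

	printf("mmctx_data(0x%06x, %u)\n", addr, count);
}

int main(void)
{
	decode_mmctx(0x20000450);	/* mmctx_data(0x000450, 9) */
	decode_mmctx(0x14000400);	/* mmctx_data(0x000400, 6) */
	return 0;
}
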
-uint32_t nvc0_grgpc_code[] = {
- 0x03060ef5,
- 0x9800d898,
- 0x86f001d9,
- 0x0489b808,
- 0xf00c1bf4,
- 0x21f502f7,
- 0x00f802ec,
- 0xb60798c4,
- 0x8dbb0384,
- 0x0880b600,
- 0x80008e80,
- 0x90b6018f,
- 0x0f94f001,
- 0xf801d980,
- 0x0131f400,
- 0x9800d898,
- 0x89b801d9,
- 0x210bf404,
- 0xb60789c4,
- 0x9dbb0394,
- 0x0890b600,
- 0x98009e98,
- 0x80b6019f,
- 0x0f84f001,
- 0xf400d880,
- 0x00f80132,
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010321f5,
- 0xf840bfcf,
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
- 0x3c87f100,
- 0x0684b608,
- 0x99f094bd,
- 0x0089d000,
- 0x081887f1,
- 0xd00684b6,
- 0x87f1008a,
- 0x84b60400,
- 0x0088cf06,
- 0xf4888aff,
- 0x87f1f31b,
- 0x84b6085c,
- 0xf094bd06,
- 0x89d00099,
- 0xf100f800,
- 0xb6083c87,
- 0x94bd0684,
- 0xd00099f0,
- 0x87f10089,
- 0x84b60818,
- 0x008ad006,
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
- 0x085c87f1,
- 0xbd0684b6,
- 0x0099f094,
- 0xf80089d0,
- 0x9894bd00,
- 0x85b600e8,
- 0x0180b61a,
- 0xbb0284b6,
- 0xe0b60098,
- 0x04efb804,
- 0xb9eb1bf4,
- 0x00f8029f,
- 0x083c87f1,
- 0xbd0684b6,
- 0x0199f094,
- 0xf10089d0,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
- 0xb70199f0,
- 0xc8010080,
- 0xb4b600ab,
- 0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
- 0x008bcf00,
- 0xf412bbc8,
- 0x87f1fa1b,
- 0x84b6085c,
- 0xf094bd06,
- 0x89d00199,
- 0xf900f800,
- 0x02a7f0a0,
- 0xfcc921f4,
- 0xf100f8a0,
- 0xf04afc87,
- 0x97f00283,
- 0x0089d00c,
- 0x020721f5,
- 0x87f100f8,
- 0x83f04afc,
- 0x0d97f002,
- 0xf50089d0,
- 0xf8020721,
- 0xfca7f100,
- 0x02a3f04f,
- 0x0500aba2,
- 0xd00fc7f0,
- 0xc7f000ac,
- 0x00bcd00b,
- 0x020721f5,
- 0xf000aed0,
- 0xbcd00ac7,
- 0x0721f500,
- 0xf100f802,
- 0xb6083c87,
- 0x94bd0684,
- 0xd00399f0,
- 0x21f50089,
- 0xe7f00213,
- 0x3921f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x0721f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x0721f500,
- 0x2621f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
- 0x8ed008fe,
- 0x408ed000,
- 0xb6808acf,
- 0xa0b606a5,
- 0x00eabb01,
- 0xb60480b6,
- 0x1bf40192,
- 0x08e4b6e8,
- 0xf1f2efbc,
- 0xb6085c87,
- 0x94bd0684,
- 0xd00399f0,
- 0x00f80089,
- 0xe7f1e0f9,
- 0xe3f09814,
- 0x8d21f440,
- 0x041ce0b7,
- 0xf401f7f0,
- 0xe0fc8d21,
- 0x04bd00f8,
- 0xf10004fe,
- 0xf0120017,
- 0x12d00227,
- 0x3e17f100,
- 0x0010fe04,
- 0x040017f1,
- 0xf0c010d0,
- 0x12d00427,
- 0x1031f400,
- 0x060817f1,
- 0xcf0614b6,
- 0x37f00012,
- 0x1f24f001,
- 0xb60432bb,
- 0x02800132,
- 0x04038003,
- 0x040010b7,
- 0x800012cf,
- 0x27f10002,
- 0x24b60800,
- 0x0022cf06,
- 0xb65817f0,
- 0x13980c10,
- 0x0432b800,
- 0xb00b0bf4,
- 0x1bf40034,
- 0xf100f8f1,
- 0xb6080027,
- 0x22cf0624,
- 0xf134bd40,
- 0xb6070047,
- 0x25950644,
- 0x0045d008,
- 0xbd4045d0,
- 0x58f4bde4,
- 0x1f58021e,
- 0x020e4003,
- 0xf5040f40,
- 0xbb013d21,
- 0x3fbb002f,
- 0x041e5800,
- 0x40051f58,
- 0x0f400a0e,
- 0x3d21f50c,
- 0x030e9801,
- 0xbb00effd,
- 0x3ebb002e,
- 0x0040b700,
- 0x0235b613,
- 0xb60043d0,
- 0x35b60825,
- 0x0120b606,
- 0xb60130b6,
- 0x34b60824,
- 0x022fb908,
- 0x026321f5,
- 0xf1003fbb,
- 0xb6080017,
- 0x13d00614,
- 0x0010b740,
- 0xf024bd08,
- 0x12d01f29,
- 0x0031f400,
- 0xf00028f4,
- 0x21f41cd7,
- 0xf401f439,
- 0xf404e4b0,
- 0x81fe1e18,
- 0x0627f001,
- 0x12fd20bd,
- 0x01e4b604,
- 0xfe051efd,
- 0x21f50018,
- 0x0ef404c3,
- 0x10ef94d3,
- 0xf501f5f0,
- 0xf402ec21,
- 0x80f9c60e,
- 0xf90188fe,
- 0xf990f980,
- 0xf9b0f9a0,
- 0xf9e0f9d0,
- 0x800acff0,
- 0xf404abc4,
- 0xb7f11d0b,
- 0xd7f01900,
- 0x40becf1c,
- 0xf400bfcf,
- 0xb0b70421,
- 0xe7f00400,
- 0x00bed001,
- 0xfc400ad0,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
- 0xf001f800,
- 0x0e9801f7,
- 0x04febb00,
- 0x9418e7f1,
- 0xf440e3f0,
- 0x00f88d21,
- 0x0614e7f1,
- 0xf006e4b6,
- 0xefd020f7,
- 0x08f7f000,
- 0xf401f2b6,
- 0xf7f1fd1b,
- 0xefd00a20,
- 0xf100f800,
- 0xb60a0417,
- 0x1fd00614,
- 0x0711f400,
- 0x04a421f5,
- 0x4afc17f1,
- 0xf00213f0,
- 0x12d00c27,
- 0x0721f500,
- 0xfc27f102,
- 0x0223f047,
- 0xf00020d0,
- 0x20b6012c,
- 0x0012d003,
- 0xf001acf0,
- 0xb7f002a5,
- 0x50b3f000,
- 0xb6000c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0xf0020d98,
- 0x21f500e7,
- 0xacf0015c,
- 0x04a5f001,
- 0x4000b7f1,
- 0x9850b3f0,
- 0xc4b6000c,
- 0x00bcbb0f,
- 0x98050c98,
- 0x0f98060d,
- 0x00e7f104,
- 0x5c21f508,
- 0x0721f501,
- 0x0601f402,
- 0xf11412f4,
- 0xf04afc17,
- 0x27f00213,
- 0x0012d00d,
- 0x020721f5,
- 0x048f21f5,
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
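
Arrays like the ones above are what the driver pushes into the falcon before triggering the init code: the data segment through one auto-incrementing port, the code segment through another, with a new 256-byte page selected every 64 words. A hedged sketch of such a loader, with the port offsets (+0x180/0x184/0x188 for code, +0x1c0/0x1c4 for data, relative to the engine base) assumed from nouveau's fuc loaders of this era rather than taken from this diff:

static void
fuc_upload(struct drm_device *dev, u32 base,
	   const u32 *code, int code_words, const u32 *data, int data_words)
{
	int i;

	/* data segment, auto-incrementing write port */
	nv_wr32(dev, base + 0x01c0, 0x01000000);
	for (i = 0; i < data_words; i++)
		nv_wr32(dev, base + 0x01c4, data[i]);

	/* code segment, selecting a new 256-byte page every 64 words */
	nv_wr32(dev, base + 0x0180, 0x01000000);
	for (i = 0; i < code_words; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(dev, base + 0x0188, i >> 6);
		nv_wr32(dev, base + 0x0184, code[i]);
	}
}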
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
deleted file mode 100644
index 98acddb..0000000
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+++ /dev/null
@@ -1,856 +0,0 @@
-/* fuc microcode for nvc0 PGRAPH/HUB
- *
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-/* To build:
- * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
- */
-
-.section #nvc0_grhub_data
-include(`nvc0_graph.fuc')
-gpc_count: .b32 0
-rop_count: .b32 0
-cmd_queue: queue_init
-hub_mmio_list_head: .b32 0
-hub_mmio_list_tail: .b32 0
-
-ctx_current: .b32 0
-
-chipsets:
-.b8 0xc0 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc0_hub_mmio_tail
-.b8 0xc1 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc1_hub_mmio_tail
-.b8 0xc3 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc0_hub_mmio_tail
-.b8 0xc4 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc0_hub_mmio_tail
-.b8 0xc8 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc0_hub_mmio_tail
-.b8 0xce 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc0_hub_mmio_tail
-.b8 0xcf 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc0_hub_mmio_tail
-.b8 0xd9 0 0 0
-.b16 #nvd9_hub_mmio_head
-.b16 #nvd9_hub_mmio_tail
-.b8 0 0 0 0
-
-nvc0_hub_mmio_head:
-mmctx_data(0x17e91c, 2)
-mmctx_data(0x400204, 2)
-mmctx_data(0x404004, 11)
-mmctx_data(0x404044, 1)
-mmctx_data(0x404094, 14)
-mmctx_data(0x4040d0, 7)
-mmctx_data(0x4040f8, 1)
-mmctx_data(0x404130, 3)
-mmctx_data(0x404150, 3)
-mmctx_data(0x404164, 2)
-mmctx_data(0x404174, 3)
-mmctx_data(0x404200, 8)
-mmctx_data(0x404404, 14)
-mmctx_data(0x404460, 4)
-mmctx_data(0x404480, 1)
-mmctx_data(0x404498, 1)
-mmctx_data(0x404604, 4)
-mmctx_data(0x404618, 32)
-mmctx_data(0x404698, 21)
-mmctx_data(0x4046f0, 2)
-mmctx_data(0x404700, 22)
-mmctx_data(0x405800, 1)
-mmctx_data(0x405830, 3)
-mmctx_data(0x405854, 1)
-mmctx_data(0x405870, 4)
-mmctx_data(0x405a00, 2)
-mmctx_data(0x405a18, 1)
-mmctx_data(0x406020, 1)
-mmctx_data(0x406028, 4)
-mmctx_data(0x4064a8, 2)
-mmctx_data(0x4064b4, 2)
-mmctx_data(0x407804, 1)
-mmctx_data(0x40780c, 6)
-mmctx_data(0x4078bc, 1)
-mmctx_data(0x408000, 7)
-mmctx_data(0x408064, 1)
-mmctx_data(0x408800, 3)
-mmctx_data(0x408900, 4)
-mmctx_data(0x408980, 1)
-nvc0_hub_mmio_tail:
-mmctx_data(0x4064c0, 2)
-nvc1_hub_mmio_tail:
-
-nvd9_hub_mmio_head:
-mmctx_data(0x17e91c, 2)
-mmctx_data(0x400204, 2)
-mmctx_data(0x404004, 10)
-mmctx_data(0x404044, 1)
-mmctx_data(0x404094, 14)
-mmctx_data(0x4040d0, 7)
-mmctx_data(0x4040f8, 1)
-mmctx_data(0x404130, 3)
-mmctx_data(0x404150, 3)
-mmctx_data(0x404164, 2)
-mmctx_data(0x404178, 2)
-mmctx_data(0x404200, 8)
-mmctx_data(0x404404, 14)
-mmctx_data(0x404460, 4)
-mmctx_data(0x404480, 1)
-mmctx_data(0x404498, 1)
-mmctx_data(0x404604, 4)
-mmctx_data(0x404618, 32)
-mmctx_data(0x404698, 21)
-mmctx_data(0x4046f0, 2)
-mmctx_data(0x404700, 22)
-mmctx_data(0x405800, 1)
-mmctx_data(0x405830, 3)
-mmctx_data(0x405854, 1)
-mmctx_data(0x405870, 4)
-mmctx_data(0x405a00, 2)
-mmctx_data(0x405a18, 1)
-mmctx_data(0x406020, 1)
-mmctx_data(0x406028, 4)
-mmctx_data(0x4064a8, 2)
-mmctx_data(0x4064b4, 5)
-mmctx_data(0x407804, 1)
-mmctx_data(0x40780c, 6)
-mmctx_data(0x4078bc, 1)
-mmctx_data(0x408000, 7)
-mmctx_data(0x408064, 1)
-mmctx_data(0x408800, 3)
-mmctx_data(0x408900, 4)
-mmctx_data(0x408980, 1)
-nvd9_hub_mmio_tail:
-
-.align 256
-chan_data:
-chan_mmio_count: .b32 0
-chan_mmio_address: .b32 0
-
-.align 256
-xfer_data: .b32 0
-
-.section #nvc0_grhub_code
-bra #init
-define(`include_code')
-include(`nvc0_graph.fuc')
-
-// reports an exception to the host
-//
-// In: $r15 error code (see nvc0_graph.fuc)
-//
-error:
- push $r14
- mov $r14 0x814
- shl b32 $r14 6
- iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code
- mov $r14 0xc1c
- shl b32 $r14 6
- mov $r15 1
- iowr I[$r14 + 0x000] $r15 // INTR_UP_SET
- pop $r14
- ret
-
-// HUB fuc initialisation, executed by triggering ucode start, will
-// fall through to main loop after completion.
-//
-// Input:
-// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
-//
-// Output:
-// CC_SCRATCH[0]:
-// 31:31: set to signal completion
-// CC_SCRATCH[1]:
-// 31:0: total PGRAPH context size
-//
-init:
- clear b32 $r0
- mov $sp $r0
- mov $xdbase $r0
-
- // enable fifo access
- mov $r1 0x1200
- mov $r2 2
- iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
-
- // setup i0 handler, and route all interrupts to it
- mov $r1 #ih
- mov $iv0 $r1
- mov $r1 0x400
- iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
-
- // route HUB_CHANNEL_SWITCH to fuc interrupt 8
- mov $r3 0x404
- shl b32 $r3 6
- mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
- iowr I[$r3 + 0x000] $r2
-
- // not sure what these are, route them because NVIDIA does, and
- // the IRQ handler will signal the host if we ever get one.. we
- // may find out if/why we need to handle these if so..
- //
- mov $r2 0x2004
- iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
- mov $r2 0x200b
- iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
- mov $r2 0x200c
- iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
-
- // enable all INTR_UP interrupts
- mov $r2 0xc24
- shl b32 $r2 6
- not b32 $r3 $r0
- iowr I[$r2] $r3
-
- // enable fifo, ctxsw, 9, 10, 15 interrupts
- mov $r2 -0x78fc // 0x8704
- sethi $r2 0
- iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
-
- // fifo level triggered, rest edge
- sub b32 $r1 0x100
- mov $r2 4
- iowr I[$r1] $r2
-
- // enable interrupts
- bset $flags ie0
-
- // fetch enabled GPC/ROP counts
- mov $r14 -0x69fc // 0x409604
- sethi $r14 0x400000
- call #nv_rd32
- extr $r1 $r15 16:20
- st b32 D[$r0 + #rop_count] $r1
- and $r15 0x1f
- st b32 D[$r0 + #gpc_count] $r15
-
- // set BAR_REQMASK to GPC mask
- mov $r1 1
- shl b32 $r1 $r15
- sub b32 $r1 1
- mov $r2 0x40c
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r1
- iowr I[$r2 + 0x100] $r1
-
- // find context data for this chipset
- mov $r2 0x800
- shl b32 $r2 6
- iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
- mov $r15 #chipsets - 8
- init_find_chipset:
- add b32 $r15 8
- ld b32 $r3 D[$r15 + 0x00]
- cmpu b32 $r3 $r2
- bra e #init_context
- cmpu b32 $r3 0
- bra ne #init_find_chipset
- // unknown chipset
- ret
-
- // context size calculation, reserve first 256 bytes for use by fuc
- init_context:
- mov $r1 256
-
- // calculate size of mmio context data
- ld b16 $r14 D[$r15 + 4]
- ld b16 $r15 D[$r15 + 6]
- sethi $r14 0
- st b32 D[$r0 + #hub_mmio_list_head] $r14
- st b32 D[$r0 + #hub_mmio_list_tail] $r15
- call #mmctx_size
-
- // set mmctx base addresses now so we don't have to do it later,
- // they don't (currently) ever change
- mov $r3 0x700
- shl b32 $r3 6
- shr b32 $r4 $r1 8
- iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
- iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
- add b32 $r3 0x1300
- add b32 $r1 $r15
- shr b32 $r15 2
- iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!?
-
- // strands, base offset needs to be aligned to 256 bytes
- shr b32 $r1 8
- add b32 $r1 1
- shl b32 $r1 8
- mov b32 $r15 $r1
- call #strand_ctx_init
- add b32 $r1 $r15
-
- // initialise each GPC in sequence by passing in the offset of its
- // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
- // has previously been uploaded by the host) running.
- //
- // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
- // when it has completed, and return the size of its context data
- // in GPCn_CC_SCRATCH[1]
- //
- ld b32 $r3 D[$r0 + #gpc_count]
- mov $r4 0x2000
- sethi $r4 0x500000
- init_gpc:
- // setup, and start GPC ucode running
- add b32 $r14 $r4 0x804
- mov b32 $r15 $r1
- call #nv_wr32 // CC_SCRATCH[1] = ctx offset
- add b32 $r14 $r4 0x800
- mov b32 $r15 $r2
- call #nv_wr32 // CC_SCRATCH[0] = chipset
- add b32 $r14 $r4 0x10c
- clear b32 $r15
- call #nv_wr32
- add b32 $r14 $r4 0x104
- call #nv_wr32 // ENTRY
- add b32 $r14 $r4 0x100
- mov $r15 2 // CTRL_START_TRIGGER
- call #nv_wr32 // CTRL
-
- // wait for it to complete, and adjust context size
- add b32 $r14 $r4 0x800
- init_gpc_wait:
- call #nv_rd32
- xbit $r15 $r15 31
- bra e #init_gpc_wait
- add b32 $r14 $r4 0x804
- call #nv_rd32
- add b32 $r1 $r15
-
- // next!
- add b32 $r4 0x8000
- sub b32 $r3 1
- bra ne #init_gpc
-
- // save context size, and tell host we're ready
- mov $r2 0x800
- shl b32 $r2 6
- iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size
- add b32 $r2 0x800
- clear b32 $r1
- bset $r1 31
- iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000
-
-// Main program loop, very simple, sleeps until woken up by the interrupt
-// handler, pulls a command from the queue and executes its handler
-//
-main:
- // sleep until we have something to do
- bset $flags $p0
- sleep $p0
- mov $r13 #cmd_queue
- call #queue_get
- bra $p1 #main
-
- // context switch, requested by GPU?
- cmpu b32 $r14 0x4001
- bra ne #main_not_ctx_switch
- trace_set(T_AUTO)
- mov $r1 0xb00
- shl b32 $r1 6
- iord $r2 I[$r1 + 0x100] // CHAN_NEXT
- iord $r1 I[$r1 + 0x000] // CHAN_CUR
-
- xbit $r3 $r1 31
- bra e #chsw_no_prev
- xbit $r3 $r2 31
- bra e #chsw_prev_no_next
- push $r2
- mov b32 $r2 $r1
- trace_set(T_SAVE)
- bclr $flags $p1
- bset $flags $p2
- call #ctx_xfer
- trace_clr(T_SAVE);
- pop $r2
- trace_set(T_LOAD);
- bset $flags $p1
- call #ctx_xfer
- trace_clr(T_LOAD);
- bra #chsw_done
- chsw_prev_no_next:
- push $r2
- mov b32 $r2 $r1
- bclr $flags $p1
- bclr $flags $p2
- call #ctx_xfer
- pop $r2
- mov $r1 0xb00
- shl b32 $r1 6
- iowr I[$r1] $r2
- bra #chsw_done
- chsw_no_prev:
- xbit $r3 $r2 31
- bra e #chsw_done
- bset $flags $p1
- bclr $flags $p2
- call #ctx_xfer
-
- // ack the context switch request
- chsw_done:
- mov $r1 0xb0c
- shl b32 $r1 6
- mov $r2 1
- iowr I[$r1 + 0x000] $r2 // 0x409b0c
- trace_clr(T_AUTO)
- bra #main
-
- // request to set current channel? (*not* a context switch)
- main_not_ctx_switch:
- cmpu b32 $r14 0x0001
- bra ne #main_not_ctx_chan
- mov b32 $r2 $r15
- call #ctx_chan
- bra #main_done
-
- // request to store current channel context?
- main_not_ctx_chan:
- cmpu b32 $r14 0x0002
- bra ne #main_not_ctx_save
- trace_set(T_SAVE)
- bclr $flags $p1
- bclr $flags $p2
- call #ctx_xfer
- trace_clr(T_SAVE)
- bra #main_done
-
- main_not_ctx_save:
- shl b32 $r15 $r14 16
- or $r15 E_BAD_COMMAND
- call #error
- bra #main
-
- main_done:
- mov $r1 0x820
- shl b32 $r1 6
- clear b32 $r2
- bset $r2 31
- iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
- bra #main
-
-// interrupt handler
-ih:
- push $r8
- mov $r8 $flags
- push $r8
- push $r9
- push $r10
- push $r11
- push $r13
- push $r14
- push $r15
-
- // incoming fifo command?
- iord $r10 I[$r0 + 0x200] // INTR
- and $r11 $r10 0x00000004
- bra e #ih_no_fifo
- // queue incoming fifo command for later processing
- mov $r11 0x1900
- mov $r13 #cmd_queue
- iord $r14 I[$r11 + 0x100] // FIFO_CMD
- iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call #queue_put
- add b32 $r11 0x400
- mov $r14 1
- iowr I[$r11 + 0x000] $r14 // FIFO_ACK
-
- // context switch request?
- ih_no_fifo:
- and $r11 $r10 0x00000100
- bra e #ih_no_ctxsw
- // enqueue a context switch for later processing
- mov $r13 #cmd_queue
- mov $r14 0x4001
- call #queue_put
-
- // anything we didn't handle, bring it to the host's attention
- ih_no_ctxsw:
- mov $r11 0x104
- not b32 $r11
- and $r11 $r10 $r11
- bra e #ih_no_other
- mov $r10 0xc1c
- shl b32 $r10 6
- iowr I[$r10] $r11 // INTR_UP_SET
-
- // ack, and wake up main()
- ih_no_other:
- iowr I[$r0 + 0x100] $r10 // INTR_ACK
-
- pop $r15
- pop $r14
- pop $r13
- pop $r11
- pop $r10
- pop $r9
- pop $r8
- mov $flags $r8
- pop $r8
- bclr $flags $p0
- iret
-
-// Not really sure, but MEM_CMD 7 will hang forever if this isn't done
-ctx_4160s:
- mov $r14 0x4160
- sethi $r14 0x400000
- mov $r15 1
- call #nv_wr32
- ctx_4160s_wait:
- call #nv_rd32
- xbit $r15 $r15 4
- bra e #ctx_4160s_wait
- ret
-
-// Without clearing this again at the end of the xfer, some things cause
-// PGRAPH to hang with STATUS=0x00000007 until it's cleared.. fbcon can
-// still function with it set, however...
-ctx_4160c:
- mov $r14 0x4160
- sethi $r14 0x400000
- clear b32 $r15
- call #nv_wr32
- ret
-
-// Again, not really sure
-//
-// In: $r15 value to set 0x404170 to
-//
-ctx_4170s:
- mov $r14 0x4170
- sethi $r14 0x400000
- or $r15 0x10
- call #nv_wr32
- ret
-
-// Waits for a ctx_4170s() call to complete
-//
-ctx_4170w:
- mov $r14 0x4170
- sethi $r14 0x400000
- call #nv_rd32
- and $r15 0x10
- bra ne #ctx_4170w
- ret
-
-// Disables various things, waits a bit, and re-enables them..
-//
-// Not sure how exactly this helps, perhaps "ENABLE" is not such a
-// good description for the bits we turn off? Anyway, without this,
-// funny things happen.
-//
-ctx_redswitch:
- mov $r14 0x614
- shl b32 $r14 6
- mov $r15 0x270
- iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
- mov $r15 8
- ctx_redswitch_delay:
- sub b32 $r15 1
- bra ne #ctx_redswitch_delay
- mov $r15 0x770
- iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
- ret
-
-// Not a clue what this is for, except that unless the value is 0x10, the
-// strand context is saved (and presumably restored) incorrectly..
-//
-// In: $r15 value to set to (0x00/0x10 are used)
-//
-ctx_86c:
- mov $r14 0x86c
- shl b32 $r14 6
- iowr I[$r14] $r15 // HUB(0x86c) = val
- mov $r14 -0x75ec
- sethi $r14 0x400000
- call #nv_wr32 // ROP(0xa14) = val
- mov $r14 -0x5794
- sethi $r14 0x410000
- call #nv_wr32 // GPC(0x86c) = val
- ret
-
-// ctx_load - loads a channel's ctxctl data, and selects its vm
-//
-// In: $r2 channel address
-//
-ctx_load:
- trace_set(T_CHAN)
-
- // switch to channel, somewhat magic in parts..
- mov $r10 12 // DONE_UNK12
- call #wait_donez
- mov $r1 0xa24
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r0 // 0x409a24
- mov $r3 0xb00
- shl b32 $r3 6
- iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
- mov $r1 0xa0c
- shl b32 $r1 6
- mov $r4 7
- iowr I[$r1 + 0x000] $r2 // MEM_CHAN
- iowr I[$r1 + 0x100] $r4 // MEM_CMD
- ctx_chan_wait_0:
- iord $r4 I[$r1 + 0x100]
- and $r4 0x1f
- bra ne #ctx_chan_wait_0
- iowr I[$r3 + 0x000] $r2 // CHAN_CUR
-
- // load channel header, fetch PGRAPH context pointer
- mov $xtargets $r0
- bclr $r2 31
- shl b32 $r2 4
- add b32 $r2 2
-
- trace_set(T_LCHAN)
- mov $r1 0xa04
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r2 // MEM_BASE
- mov $r1 0xa20
- shl b32 $r1 6
- mov $r2 0x0002
- sethi $r2 0x80000000
- iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
- mov $r1 0x10 // chan + 0x0210
- mov $r2 #xfer_data
- sethi $r2 0x00020000 // 16 bytes
- xdld $r1 $r2
- xdwait
- trace_clr(T_LCHAN)
-
- // update current context
- ld b32 $r1 D[$r0 + #xfer_data + 4]
- shl b32 $r1 24
- ld b32 $r2 D[$r0 + #xfer_data + 0]
- shr b32 $r2 8
- or $r1 $r2
- st b32 D[$r0 + #ctx_current] $r1
-
- // set transfer base to start of context, and fetch context header
- trace_set(T_LCTXH)
- mov $r2 0xa04
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r1 // MEM_BASE
- mov $r2 1
- mov $r1 0xa20
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
- mov $r1 #chan_data
- sethi $r1 0x00060000 // 256 bytes
- xdld $r0 $r1
- xdwait
- trace_clr(T_LCTXH)
-
- trace_clr(T_CHAN)
- ret
-
-// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
-// the active channel for ctxctl, but not actually transfer
-// any context data. intended for use only during initial
-// context construction.
-//
-// In: $r2 channel address
-//
-ctx_chan:
- call #ctx_4160s
- call #ctx_load
- mov $r10 12 // DONE_UNK12
- call #wait_donez
- mov $r1 0xa10
- shl b32 $r1 6
- mov $r2 5
- iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
- ctx_chan_wait:
- iord $r2 I[$r1 + 0x000]
- or $r2 $r2
- bra ne #ctx_chan_wait
- call #ctx_4160c
- ret
-
-// Execute per-context state overrides list
-//
-// Only executed on the first load of a channel. Might want to look into
-// removing this and having the host directly modify the channel's context
-// to change this state... The nouveau DRM already builds this list as
-// it's definitely needed for NVIDIA's driver, so we may as well use it for now
-//
-// Input: $r1 mmio list length
-//
-ctx_mmio_exec:
- // set transfer base to be the mmio list
- ld b32 $r3 D[$r0 + #chan_mmio_address]
- mov $r2 0xa04
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r3 // MEM_BASE
-
- clear b32 $r3
- ctx_mmio_loop:
- // fetch next 256 bytes of mmio list if necessary
- and $r4 $r3 0xff
- bra ne #ctx_mmio_pull
- mov $r5 #xfer_data
- sethi $r5 0x00060000 // 256 bytes
- xdld $r3 $r5
- xdwait
-
- // execute a single list entry
- ctx_mmio_pull:
- ld b32 $r14 D[$r4 + #xfer_data + 0x00]
- ld b32 $r15 D[$r4 + #xfer_data + 0x04]
- call #nv_wr32
-
- // next!
- add b32 $r3 8
- sub b32 $r1 1
- bra ne #ctx_mmio_loop
-
- // set transfer base back to the current context
- ctx_mmio_done:
- ld b32 $r3 D[$r0 + #ctx_current]
- iowr I[$r2 + 0x000] $r3 // MEM_BASE
-
- // disable the mmio list now, we don't need/want to execute it again
- st b32 D[$r0 + #chan_mmio_count] $r0
- mov $r1 #chan_data
- sethi $r1 0x00060000 // 256 bytes
- xdst $r0 $r1
- xdwait
- ret
-
-// Transfer HUB context data between GPU and storage area
-//
-// In: $r2 channel address
-// $p1 clear on save, set on load
-// $p2 set if opposite direction done/will be done, so:
-// on save it means: "a load will follow this save"
-// on load it means: "a save preceded this load"
-//
-ctx_xfer:
- bra not $p1 #ctx_xfer_pre
- bra $p2 #ctx_xfer_pre_load
- ctx_xfer_pre:
- mov $r15 0x10
- call #ctx_86c
- call #ctx_4160s
- bra not $p1 #ctx_xfer_exec
-
- ctx_xfer_pre_load:
- mov $r15 2
- call #ctx_4170s
- call #ctx_4170w
- call #ctx_redswitch
- clear b32 $r15
- call #ctx_4170s
- call #ctx_load
-
- // fetch context pointer, and initiate xfer on all GPCs
- ctx_xfer_exec:
- ld b32 $r1 D[$r0 + #ctx_current]
- mov $r2 0x414
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
- mov $r14 -0x5b00
- sethi $r14 0x410000
- mov b32 $r15 $r1
- call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
- add b32 $r14 4
- xbit $r15 $flags $p1
- xbit $r2 $flags $p2
- shl b32 $r2 1
- or $r15 $r2
- call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
-
- // strands
- mov $r1 0x4afc
- sethi $r1 0x20000
- mov $r2 0xc
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call #strand_wait
- mov $r2 0x47fc
- sethi $r2 0x20000
- iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
- xbit $r2 $flags $p1
- add b32 $r2 3
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
-
- // mmio context
- xbit $r10 $flags $p1 // direction
- or $r10 6 // first, last
- mov $r11 0 // base = 0
- ld b32 $r12 D[$r0 + #hub_mmio_list_head]
- ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
- mov $r14 0 // not multi
- call #mmctx_xfer
-
- // wait for GPCs to all complete
- mov $r10 8 // DONE_BAR
- call #wait_doneo
-
- // wait for strand xfer to complete
- call #strand_wait
-
- // post-op
- bra $p1 #ctx_xfer_post
- mov $r10 12 // DONE_UNK12
- call #wait_donez
- mov $r1 0xa10
- shl b32 $r1 6
- mov $r2 5
- iowr I[$r1] $r2 // MEM_CMD
- ctx_xfer_post_save_wait:
- iord $r2 I[$r1]
- or $r2 $r2
- bra ne #ctx_xfer_post_save_wait
-
- bra $p2 #ctx_xfer_done
- ctx_xfer_post:
- mov $r15 2
- call #ctx_4170s
- clear b32 $r15
- call #ctx_86c
- call #strand_post
- call #ctx_4170w
- clear b32 $r15
- call #ctx_4170s
-
- bra not $p1 #ctx_xfer_no_post_mmio
- ld b32 $r1 D[$r0 + #chan_mmio_count]
- or $r1 $r1
- bra e #ctx_xfer_no_post_mmio
- call #ctx_mmio_exec
-
- ctx_xfer_no_post_mmio:
- call #ctx_4160c
-
- ctx_xfer_done:
- ret
-
-.align 256
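
The chsw_* paths in main() above enumerate the combinations of a valid previous channel (CHAN_CUR bit 31) and a valid next channel (CHAN_NEXT bit 31); $p1 selects load versus save, and $p2 flags that the opposite transfer is also part of the switch. The same decision table, restated as a runnable C sketch with a stub standing in for the microcode's ctx_xfer:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for the fuc routine: channel, load?, paired with opposite xfer? */
static void ctx_xfer(uint32_t chan, bool load, bool paired)
{
	printf("ctx_xfer chan=%08x load=%d paired=%d\n", chan, load, paired);
}

static void chsw(uint32_t cur, uint32_t next)
{
	bool have_prev = cur  & 0x80000000;
	bool have_next = next & 0x80000000;

	if (have_prev && have_next) {
		ctx_xfer(cur,  false, true);	/* save; a load follows */
		ctx_xfer(next, true,  true);	/* load; a save preceded */
	} else if (have_prev) {
		ctx_xfer(cur,  false, false);	/* save only */
	} else if (have_next) {
		ctx_xfer(next, true,  false);	/* load only */
	}
}

int main(void)
{
	chsw(0x80000001, 0x80000002);	/* both valid: save then load */
	return 0;
}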
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
deleted file mode 100644
index c5ed307..0000000
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+++ /dev/null
@@ -1,838 +0,0 @@
-uint32_t nvc0_grhub_data[] = {
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x000000c0,
- 0x013c00a0,
- 0x000000c1,
- 0x014000a0,
- 0x000000c3,
- 0x013c00a0,
- 0x000000c4,
- 0x013c00a0,
- 0x000000c8,
- 0x013c00a0,
- 0x000000ce,
- 0x013c00a0,
- 0x000000cf,
- 0x013c00a0,
- 0x000000d9,
- 0x01dc0140,
- 0x00000000,
- 0x0417e91c,
- 0x04400204,
- 0x28404004,
- 0x00404044,
- 0x34404094,
- 0x184040d0,
- 0x004040f8,
- 0x08404130,
- 0x08404150,
- 0x04404164,
- 0x08404174,
- 0x1c404200,
- 0x34404404,
- 0x0c404460,
- 0x00404480,
- 0x00404498,
- 0x0c404604,
- 0x7c404618,
- 0x50404698,
- 0x044046f0,
- 0x54404700,
- 0x00405800,
- 0x08405830,
- 0x00405854,
- 0x0c405870,
- 0x04405a00,
- 0x00405a18,
- 0x00406020,
- 0x0c406028,
- 0x044064a8,
- 0x044064b4,
- 0x00407804,
- 0x1440780c,
- 0x004078bc,
- 0x18408000,
- 0x00408064,
- 0x08408800,
- 0x0c408900,
- 0x00408980,
- 0x044064c0,
- 0x0417e91c,
- 0x04400204,
- 0x24404004,
- 0x00404044,
- 0x34404094,
- 0x184040d0,
- 0x004040f8,
- 0x08404130,
- 0x08404150,
- 0x04404164,
- 0x04404178,
- 0x1c404200,
- 0x34404404,
- 0x0c404460,
- 0x00404480,
- 0x00404498,
- 0x0c404604,
- 0x7c404618,
- 0x50404698,
- 0x044046f0,
- 0x54404700,
- 0x00405800,
- 0x08405830,
- 0x00405854,
- 0x0c405870,
- 0x04405a00,
- 0x00405a18,
- 0x00406020,
- 0x0c406028,
- 0x044064a8,
- 0x104064b4,
- 0x00407804,
- 0x1440780c,
- 0x004078bc,
- 0x18408000,
- 0x00408064,
- 0x08408800,
- 0x0c408900,
- 0x00408980,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
-
-uint32_t nvc0_grhub_code[] = {
- 0x03090ef5,
- 0x9800d898,
- 0x86f001d9,
- 0x0489b808,
- 0xf00c1bf4,
- 0x21f502f7,
- 0x00f802ec,
- 0xb60798c4,
- 0x8dbb0384,
- 0x0880b600,
- 0x80008e80,
- 0x90b6018f,
- 0x0f94f001,
- 0xf801d980,
- 0x0131f400,
- 0x9800d898,
- 0x89b801d9,
- 0x210bf404,
- 0xb60789c4,
- 0x9dbb0394,
- 0x0890b600,
- 0x98009e98,
- 0x80b6019f,
- 0x0f84f001,
- 0xf400d880,
- 0x00f80132,
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010321f5,
- 0xf840bfcf,
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
- 0x3c87f100,
- 0x0684b608,
- 0x99f094bd,
- 0x0089d000,
- 0x081887f1,
- 0xd00684b6,
- 0x87f1008a,
- 0x84b60400,
- 0x0088cf06,
- 0xf4888aff,
- 0x87f1f31b,
- 0x84b6085c,
- 0xf094bd06,
- 0x89d00099,
- 0xf100f800,
- 0xb6083c87,
- 0x94bd0684,
- 0xd00099f0,
- 0x87f10089,
- 0x84b60818,
- 0x008ad006,
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
- 0x085c87f1,
- 0xbd0684b6,
- 0x0099f094,
- 0xf80089d0,
- 0x9894bd00,
- 0x85b600e8,
- 0x0180b61a,
- 0xbb0284b6,
- 0xe0b60098,
- 0x04efb804,
- 0xb9eb1bf4,
- 0x00f8029f,
- 0x083c87f1,
- 0xbd0684b6,
- 0x0199f094,
- 0xf10089d0,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
- 0xb70199f0,
- 0xc8010080,
- 0xb4b600ab,
- 0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
- 0x008bcf00,
- 0xf412bbc8,
- 0x87f1fa1b,
- 0x84b6085c,
- 0xf094bd06,
- 0x89d00199,
- 0xf900f800,
- 0x02a7f0a0,
- 0xfcc921f4,
- 0xf100f8a0,
- 0xf04afc87,
- 0x97f00283,
- 0x0089d00c,
- 0x020721f5,
- 0x87f100f8,
- 0x83f04afc,
- 0x0d97f002,
- 0xf50089d0,
- 0xf8020721,
- 0xfca7f100,
- 0x02a3f04f,
- 0x0500aba2,
- 0xd00fc7f0,
- 0xc7f000ac,
- 0x00bcd00b,
- 0x020721f5,
- 0xf000aed0,
- 0xbcd00ac7,
- 0x0721f500,
- 0xf100f802,
- 0xb6083c87,
- 0x94bd0684,
- 0xd00399f0,
- 0x21f50089,
- 0xe7f00213,
- 0x3921f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x0721f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x0721f500,
- 0x2621f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
- 0x8ed008fe,
- 0x408ed000,
- 0xb6808acf,
- 0xa0b606a5,
- 0x00eabb01,
- 0xb60480b6,
- 0x1bf40192,
- 0x08e4b6e8,
- 0xf1f2efbc,
- 0xb6085c87,
- 0x94bd0684,
- 0xd00399f0,
- 0x00f80089,
- 0xe7f1e0f9,
- 0xe4b60814,
- 0x00efd006,
- 0x0c1ce7f1,
- 0xf006e4b6,
- 0xefd001f7,
- 0xf8e0fc00,
- 0xfe04bd00,
- 0x07fe0004,
- 0x0017f100,
- 0x0227f012,
- 0xf10012d0,
- 0xfe05b917,
- 0x17f10010,
- 0x10d00400,
- 0x0437f1c0,
- 0x0634b604,
- 0x200327f1,
- 0xf10032d0,
- 0xd0200427,
- 0x27f10132,
- 0x32d0200b,
- 0x0c27f102,
- 0x0732d020,
- 0x0c2427f1,
- 0xb90624b6,
- 0x23d00003,
- 0x0427f100,
- 0x0023f087,
- 0xb70012d0,
- 0xf0010012,
- 0x12d00427,
- 0x1031f400,
- 0x9604e7f1,
- 0xf440e3f0,
- 0xf1c76821,
- 0x01018090,
- 0x801ff4f0,
- 0x17f0000f,
- 0x041fbb01,
- 0xf10112b6,
- 0xb6040c27,
- 0x21d00624,
- 0x4021d000,
- 0x080027f1,
- 0xcf0624b6,
- 0xf7f00022,
- 0x08f0b654,
- 0xb800f398,
- 0x0bf40432,
- 0x0034b00b,
- 0xf8f11bf4,
- 0x0017f100,
- 0x02fe5801,
- 0xf003ff58,
- 0x0e8000e3,
- 0x150f8014,
- 0x013d21f5,
- 0x070037f1,
- 0x950634b6,
- 0x34d00814,
- 0x4034d000,
- 0x130030b7,
- 0xb6001fbb,
- 0x3fd002f5,
- 0x0815b600,
- 0xb60110b6,
- 0x1fb90814,
- 0x6321f502,
- 0x001fbb02,
- 0xf1000398,
- 0xf0200047,
- 0x4ea05043,
- 0x1fb90804,
- 0x8d21f402,
- 0x08004ea0,
- 0xf4022fb9,
- 0x4ea08d21,
- 0xf4bd010c,
- 0xa08d21f4,
- 0xf401044e,
- 0x4ea08d21,
- 0xf7f00100,
- 0x8d21f402,
- 0x08004ea0,
- 0xc86821f4,
- 0x0bf41fff,
- 0x044ea0fa,
- 0x6821f408,
- 0xb7001fbb,
- 0xb6800040,
- 0x1bf40132,
- 0x0027f1b4,
- 0x0624b608,
- 0xb74021d0,
- 0xbd080020,
- 0x1f19f014,
- 0xf40021d0,
- 0x28f40031,
- 0x08d7f000,
- 0xf43921f4,
- 0xe4b1f401,
- 0x1bf54001,
- 0x87f100d1,
- 0x84b6083c,
- 0xf094bd06,
- 0x89d00499,
- 0x0017f100,
- 0x0614b60b,
- 0xcf4012cf,
- 0x13c80011,
- 0x7e0bf41f,
- 0xf41f23c8,
- 0x20f95a0b,
- 0xf10212b9,
- 0xb6083c87,
- 0x94bd0684,
- 0xd00799f0,
- 0x32f40089,
- 0x0231f401,
- 0x082921f5,
- 0x085c87f1,
- 0xbd0684b6,
- 0x0799f094,
- 0xfc0089d0,
- 0x3c87f120,
- 0x0684b608,
- 0x99f094bd,
- 0x0089d006,
- 0xf50131f4,
- 0xf1082921,
- 0xb6085c87,
- 0x94bd0684,
- 0xd00699f0,
- 0x0ef40089,
- 0xb920f931,
- 0x32f40212,
- 0x0232f401,
- 0x082921f5,
- 0x17f120fc,
- 0x14b60b00,
- 0x0012d006,
- 0xc8130ef4,
- 0x0bf41f23,
- 0x0131f40d,
- 0xf50232f4,
- 0xf1082921,
- 0xb60b0c17,
- 0x27f00614,
- 0x0012d001,
- 0x085c87f1,
- 0xbd0684b6,
- 0x0499f094,
- 0xf50089d0,
- 0xb0ff200e,
- 0x1bf401e4,
- 0x02f2b90d,
- 0x07b521f5,
- 0xb0420ef4,
- 0x1bf402e4,
- 0x3c87f12e,
- 0x0684b608,
- 0x99f094bd,
- 0x0089d007,
- 0xf40132f4,
- 0x21f50232,
- 0x87f10829,
- 0x84b6085c,
- 0xf094bd06,
- 0x89d00799,
- 0x110ef400,
- 0xf010ef94,
- 0x21f501f5,
- 0x0ef502ec,
- 0x17f1fed1,
- 0x14b60820,
- 0xf024bd06,
- 0x12d01f29,
- 0xbe0ef500,
- 0xfe80f9fe,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0xc4800acf,
- 0x0bf404ab,
- 0x00b7f11d,
- 0x08d7f019,
- 0xcf40becf,
- 0x21f400bf,
- 0x00b0b704,
- 0x01e7f004,
- 0xe400bed0,
- 0xf40100ab,
- 0xd7f00d0b,
- 0x01e7f108,
- 0x0421f440,
- 0x0104b7f1,
- 0xabffb0bd,
- 0x0d0bf4b4,
- 0x0c1ca7f1,
- 0xd006a4b6,
- 0x0ad000ab,
- 0xfcf0fc40,
- 0xfcd0fce0,
- 0xfca0fcb0,
- 0xfe80fc90,
- 0x80fc0088,
- 0xf80032f4,
- 0x60e7f101,
- 0x40e3f041,
- 0xf401f7f0,
- 0x21f48d21,
- 0x04ffc868,
- 0xf8fa0bf4,
- 0x60e7f100,
- 0x40e3f041,
- 0x21f4f4bd,
- 0xf100f88d,
- 0xf04170e7,
- 0xf5f040e3,
- 0x8d21f410,
- 0xe7f100f8,
- 0xe3f04170,
- 0x6821f440,
- 0xf410f4f0,
- 0x00f8f31b,
- 0x0614e7f1,
- 0xf106e4b6,
- 0xd00270f7,
- 0xf7f000ef,
- 0x01f2b608,
- 0xf1fd1bf4,
- 0xd00770f7,
- 0x00f800ef,
- 0x086ce7f1,
- 0xd006e4b6,
- 0xe7f100ef,
- 0xe3f08a14,
- 0x8d21f440,
- 0xa86ce7f1,
- 0xf441e3f0,
- 0x00f88d21,
- 0x083c87f1,
- 0xbd0684b6,
- 0x0599f094,
- 0xf00089d0,
- 0x21f40ca7,
- 0x2417f1c9,
- 0x0614b60a,
- 0xf10010d0,
- 0xb60b0037,
- 0x32d00634,
- 0x0c17f140,
- 0x0614b60a,
- 0xd00747f0,
- 0x14d00012,
- 0x4014cf40,
- 0xf41f44f0,
- 0x32d0fa1b,
- 0x000bfe00,
- 0xb61f2af0,
- 0x20b60424,
- 0x3c87f102,
- 0x0684b608,
- 0x99f094bd,
- 0x0089d008,
- 0x0a0417f1,
- 0xd00614b6,
- 0x17f10012,
- 0x14b60a20,
- 0x0227f006,
- 0x800023f1,
- 0xf00012d0,
- 0x27f11017,
- 0x23f00300,
- 0x0512fa02,
- 0x87f103f8,
- 0x84b6085c,
- 0xf094bd06,
- 0x89d00899,
- 0xc1019800,
- 0x981814b6,
- 0x25b6c002,
- 0x0512fd08,
- 0xf1160180,
- 0xb6083c87,
- 0x94bd0684,
- 0xd00999f0,
- 0x27f10089,
- 0x24b60a04,
- 0x0021d006,
- 0xf10127f0,
- 0xb60a2017,
- 0x12d00614,
- 0x0017f100,
- 0x0613f002,
- 0xf80501fa,
- 0x5c87f103,
- 0x0684b608,
- 0x99f094bd,
- 0x0089d009,
- 0x085c87f1,
- 0xbd0684b6,
- 0x0599f094,
- 0xf80089d0,
- 0x3121f500,
- 0xb821f506,
- 0x0ca7f006,
- 0xf1c921f4,
- 0xb60a1017,
- 0x27f00614,
- 0x0012d005,
- 0xfd0012cf,
- 0x1bf40522,
- 0x4921f5fa,
- 0x9800f806,
- 0x27f18103,
- 0x24b60a04,
- 0x0023d006,
- 0x34c434bd,
- 0x0f1bf4ff,
- 0x030057f1,
- 0xfa0653f0,
- 0x03f80535,
- 0x98c04e98,
- 0x21f4c14f,
- 0x0830b68d,
- 0xf40112b6,
- 0x0398df1b,
- 0x0023d016,
- 0xf1800080,
- 0xf0020017,
- 0x01fa0613,
- 0xf803f806,
- 0x0611f400,
- 0xf01102f4,
- 0x21f510f7,
- 0x21f50698,
- 0x11f40631,
- 0x02f7f01c,
- 0x065721f5,
- 0x066621f5,
- 0x067821f5,
- 0x21f5f4bd,
- 0x21f50657,
- 0x019806b8,
- 0x1427f116,
- 0x0624b604,
- 0xf10020d0,
- 0xf0a500e7,
- 0x1fb941e3,
- 0x8d21f402,
- 0xf004e0b6,
- 0x2cf001fc,
- 0x0124b602,
- 0xf405f2fd,
- 0x17f18d21,
- 0x13f04afc,
- 0x0c27f002,
- 0xf50012d0,
- 0xf1020721,
- 0xf047fc27,
- 0x20d00223,
- 0x012cf000,
- 0xd00320b6,
- 0xacf00012,
- 0x06a5f001,
- 0x9800b7f0,
- 0x0d98140c,
- 0x00e7f015,
- 0x015c21f5,
- 0xf508a7f0,
- 0xf5010321,
- 0xf4020721,
- 0xa7f02201,
- 0xc921f40c,
- 0x0a1017f1,
- 0xf00614b6,
- 0x12d00527,
- 0x0012cf00,
- 0xf40522fd,
- 0x02f4fa1b,
- 0x02f7f032,
- 0x065721f5,
- 0x21f5f4bd,
- 0x21f50698,
- 0x21f50226,
- 0xf4bd0666,
- 0x065721f5,
- 0x981011f4,
- 0x11fd8001,
- 0x070bf405,
- 0x07df21f5,
- 0x064921f5,
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index b701c43..82357d2 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -32,6 +32,7 @@ struct nvc0_instmem_priv {
struct nouveau_channel *bar1;
struct nouveau_gpuobj *bar3_pgd;
struct nouveau_channel *bar3;
+ struct nouveau_gpuobj *chan_pgd;
};
int
@@ -180,11 +181,17 @@ nvc0_instmem_init(struct drm_device *dev)
goto error;
/* channel vm */
- ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
- &dev_priv->chan_vm);
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm);
if (ret)
goto error;
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
+ if (ret)
+ goto error;
+
+ nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
+ nouveau_vm_ref(NULL, &vm, NULL);
+
nvc0_instmem_resume(dev);
return 0;
error:
@@ -204,7 +211,8 @@ nvc0_instmem_takedown(struct drm_device *dev)
nv_wr32(dev, 0x1704, 0x00000000);
nv_wr32(dev, 0x1714, 0x00000000);
- nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
+ nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->chan_pgd);
nvc0_channel_del(&priv->bar1);
nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
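
The instmem hunks above thread a dedicated page-directory object through the channel VM's lifetime; the calls must stay symmetric or either the VM or the pgd leaks. Pulled out of the diff context into one place, the pairing looks like this (same names as in the hunks):

/* init: create the VM, install it against its pgd, drop the local handle */
ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm);
ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
nouveau_vm_ref(NULL, &vm, NULL);

/* takedown: drop the VM with the same pgd, then release the pgd itself */
nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd);
nouveau_gpuobj_ref(NULL, &priv->chan_pgd);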
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
deleted file mode 100644
index ce65f81..0000000
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_bios.h"
-#include "nouveau_pm.h"
-
-static u32 read_div(struct drm_device *, int, u32, u32);
-static u32 read_pll(struct drm_device *, u32);
-
-static u32
-read_vco(struct drm_device *dev, u32 dsrc)
-{
- u32 ssrc = nv_rd32(dev, dsrc);
- if (!(ssrc & 0x00000100))
- return read_pll(dev, 0x00e800);
- return read_pll(dev, 0x00e820);
-}
-
-static u32
-read_pll(struct drm_device *dev, u32 pll)
-{
- u32 ctrl = nv_rd32(dev, pll + 0);
- u32 coef = nv_rd32(dev, pll + 4);
- u32 P = (coef & 0x003f0000) >> 16;
- u32 N = (coef & 0x0000ff00) >> 8;
- u32 M = (coef & 0x000000ff) >> 0;
- u32 sclk, doff;
-
- if (!(ctrl & 0x00000001))
- return 0;
-
- switch (pll & 0xfff000) {
- case 0x00e000:
- sclk = 27000;
- P = 1;
- break;
- case 0x137000:
- doff = (pll - 0x137000) / 0x20;
- sclk = read_div(dev, doff, 0x137120, 0x137140);
- break;
- case 0x132000:
- switch (pll) {
- case 0x132000:
- sclk = read_pll(dev, 0x132020);
- break;
- case 0x132020:
- sclk = read_div(dev, 0, 0x137320, 0x137330);
- break;
- default:
- return 0;
- }
- break;
- default:
- return 0;
- }
-
- return sclk * N / M / P;
-}
-
-static u32
-read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
-{
- u32 ssrc = nv_rd32(dev, dsrc + (doff * 4));
- u32 sctl = nv_rd32(dev, dctl + (doff * 4));
-
- switch (ssrc & 0x00000003) {
- case 0:
- if ((ssrc & 0x00030000) != 0x00030000)
- return 27000;
- return 108000;
- case 2:
- return 100000;
- case 3:
- if (sctl & 0x80000000) {
- u32 sclk = read_vco(dev, dsrc + (doff * 4));
- u32 sdiv = (sctl & 0x0000003f) + 2;
- return (sclk * 2) / sdiv;
- }
-
- return read_vco(dev, dsrc + (doff * 4));
- default:
- return 0;
- }
-}
-
-static u32
-read_mem(struct drm_device *dev)
-{
- u32 ssel = nv_rd32(dev, 0x1373f0);
- if (ssel & 0x00000001)
- return read_div(dev, 0, 0x137300, 0x137310);
- return read_pll(dev, 0x132000);
-}
-
-static u32
-read_clk(struct drm_device *dev, int clk)
-{
- u32 sctl = nv_rd32(dev, 0x137250 + (clk * 4));
- u32 ssel = nv_rd32(dev, 0x137100);
- u32 sclk, sdiv;
-
- if (ssel & (1 << clk)) {
- if (clk < 7)
- sclk = read_pll(dev, 0x137000 + (clk * 0x20));
- else
- sclk = read_pll(dev, 0x1370e0);
- sdiv = ((sctl & 0x00003f00) >> 8) + 2;
- } else {
- sclk = read_div(dev, clk, 0x137160, 0x1371d0);
- sdiv = ((sctl & 0x0000003f) >> 0) + 2;
- }
-
- if (sctl & 0x80000000)
- return (sclk * 2) / sdiv;
- return sclk;
-}
-
-int
-nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- perflvl->shader = read_clk(dev, 0x00);
- perflvl->core = perflvl->shader / 2;
- perflvl->memory = read_mem(dev);
- perflvl->rop = read_clk(dev, 0x01);
- perflvl->hub07 = read_clk(dev, 0x02);
- perflvl->hub06 = read_clk(dev, 0x07);
- perflvl->hub01 = read_clk(dev, 0x08);
- perflvl->copy = read_clk(dev, 0x09);
- perflvl->daemon = read_clk(dev, 0x0c);
- perflvl->vdec = read_clk(dev, 0x0e);
- return 0;
-}
-
-struct nvc0_pm_clock {
- u32 freq;
- u32 ssel;
- u32 mdiv;
- u32 dsrc;
- u32 ddiv;
- u32 coef;
-};
-
-struct nvc0_pm_state {
- struct nvc0_pm_clock eng[16];
-};
-
-static u32
-calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
-{
- u32 div = min((ref * 2) / freq, (u32)65);
- if (div < 2)
- div = 2;
-
- *ddiv = div - 2;
- return (ref * 2) / div;
-}
-
-static u32
-calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
-{
- u32 sclk;
-
- /* use one of the fixed frequencies if possible */
- *ddiv = 0x00000000;
- switch (freq) {
- case 27000:
- case 108000:
- *dsrc = 0x00000000;
- if (freq == 108000)
- *dsrc |= 0x00030000;
- return freq;
- case 100000:
- *dsrc = 0x00000002;
- return freq;
- default:
- *dsrc = 0x00000003;
- break;
- }
-
- /* otherwise, calculate the closest divider */
- sclk = read_vco(dev, clk);
- if (clk < 7)
- sclk = calc_div(dev, clk, sclk, freq, ddiv);
- return sclk;
-}
-
-static u32
-calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
-{
- struct pll_lims limits;
- int N, M, P, ret;
-
- ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits);
- if (ret)
- return 0;
-
- limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
- if (!limits.refclk)
- return 0;
-
- ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
- if (ret <= 0)
- return 0;
-
- *coef = (P << 16) | (N << 8) | M;
- return ret;
-}
-
-/* A (likely rather simplified and incomplete) view of the clock tree
- *
- * Key:
- *
- * S: source select
- * D: divider
- * P: pll
- * F: switch
- *
- * Engine clocks:
- *
- * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
- * (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
- *
- * Not all registers exist for all clocks. For example: clocks >= 8 don't
- * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
- * they have the divider at 1371d0, though the source selection at 137160
- * still exists. You must use the divider at 137250 for these instead.
- *
- * Memory clock:
- *
- * TBD, read_mem() above is likely very wrong...
- *
- */
-
-static int
-calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
-{
- u32 src0, div0, div1D, div1P = 0;
- u32 clk0, clk1 = 0;
-
- /* invalid clock domain */
- if (!freq)
- return 0;
-
- /* first possible path, using only dividers */
- clk0 = calc_src(dev, clk, freq, &src0, &div0);
- clk0 = calc_div(dev, clk, clk0, freq, &div1D);
-
- /* see if we can get any closer using PLLs */
- if (clk0 != freq && (0x00004387 & (1 << clk))) {
- if (clk < 7)
- clk1 = calc_pll(dev, clk, freq, &info->coef);
- else
- clk1 = read_pll(dev, 0x1370e0);
- clk1 = calc_div(dev, clk, clk1, freq, &div1P);
- }
-
- /* select the method which gets closest to target freq */
- if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
- info->dsrc = src0;
- if (div0) {
- info->ddiv |= 0x80000000;
- info->ddiv |= div0 << 8;
- info->ddiv |= div0;
- }
- if (div1D) {
- info->mdiv |= 0x80000000;
- info->mdiv |= div1D;
- }
- info->ssel = 0;
- info->freq = clk0;
- } else {
- if (div1P) {
- info->mdiv |= 0x80000000;
- info->mdiv |= div1P << 8;
- }
- info->ssel = (1 << clk);
- info->freq = clk1;
- }
-
- return 0;
-}
-
-void *
-nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_pm_state *info;
- int ret;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
-
-	/* NFI why this is still in the performance table; the ROPCs appear
- * to get their clock from clock 2 ("hub07", actually hub05 on this
- * chip, but, anyway...) as well. nvatiming confirms hub05 and ROP
- * are always the same freq with the binary driver even when the
- * performance table says they should differ.
- */
- if (dev_priv->chipset == 0xd9)
- perflvl->rop = 0;
-
- if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
- (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
- (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
- (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
- (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
- (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
- (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
- (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
- kfree(info);
- return ERR_PTR(ret);
- }
-
- return info;
-}
-
-static void
-prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
-{
- /* program dividers at 137160/1371d0 first */
- if (clk < 7 && !info->ssel) {
- nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
- nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc);
- }
-
- /* switch clock to non-pll mode */
- nv_mask(dev, 0x137100, (1 << clk), 0x00000000);
- nv_wait(dev, 0x137100, (1 << clk), 0x00000000);
-
- /* reprogram pll */
- if (clk < 7) {
- /* make sure it's disabled first... */
- u32 base = 0x137000 + (clk * 0x20);
- u32 ctrl = nv_rd32(dev, base + 0x00);
- if (ctrl & 0x00000001) {
- nv_mask(dev, base + 0x00, 0x00000004, 0x00000000);
- nv_mask(dev, base + 0x00, 0x00000001, 0x00000000);
- }
- /* program it to new values, if necessary */
- if (info->ssel) {
- nv_wr32(dev, base + 0x04, info->coef);
- nv_mask(dev, base + 0x00, 0x00000001, 0x00000001);
- nv_wait(dev, base + 0x00, 0x00020000, 0x00020000);
- nv_mask(dev, base + 0x00, 0x00020004, 0x00000004);
- }
- }
-
- /* select pll/non-pll mode, and program final clock divider */
- nv_mask(dev, 0x137100, (1 << clk), info->ssel);
- nv_wait(dev, 0x137100, (1 << clk), info->ssel);
- nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
-}
-
-int
-nvc0_pm_clocks_set(struct drm_device *dev, void *data)
-{
- struct nvc0_pm_state *info = data;
- int i;
-
- for (i = 0; i < 16; i++) {
- if (!info->eng[i].freq)
- continue;
- prog_clk(dev, i, &info->eng[i]);
- }
-
- kfree(info);
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index 30d2bd5..a179e6c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -77,11 +77,9 @@ void
nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
- u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
-
pte <<= 3;
while (cnt--) {
- u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target);
+ u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
@@ -107,11 +105,7 @@ nvc0_vm_flush(struct nouveau_vm *vm)
struct drm_device *dev = vm->dev;
struct nouveau_vm_pgd *vpgd;
unsigned long flags;
- u32 engine;
-
- engine = 1;
- if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
- engine |= 4;
+ u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
pinstmem->flush(vm->dev);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index a7eef89..67c6ec6 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -43,7 +43,7 @@ static const u8 types[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
- 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
+ 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
@@ -61,7 +61,9 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
u32 type, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
struct nouveau_mem *mem;
int ret;
@@ -103,58 +105,9 @@ int
nvc0_vram_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 parts = nv_rd32(dev, 0x022438);
- u32 pmask = nv_rd32(dev, 0x022554);
- u32 bsize = nv_rd32(dev, 0x10f20c);
- u32 offset, length;
- bool uniform = true;
- int ret, part;
-
- NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
- NV_DEBUG(dev, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
- dev_priv->vram_type = nouveau_mem_vbios_type(dev);
- dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x10f200) & 0x00000004);
-
- /* read amount of vram attached to each memory controller */
- for (part = 0; part < parts; part++) {
- if (!(pmask & (1 << part))) {
- u32 psize = nv_rd32(dev, 0x11020c + (part * 0x1000));
- if (psize != bsize) {
- if (psize < bsize)
- bsize = psize;
- uniform = false;
- }
-
- NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
- dev_priv->vram_size += (u64)psize << 20;
- }
- }
-
- /* if all controllers have the same amount attached, there's no holes */
- if (uniform) {
- offset = rsvd_head;
- length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
- return nouveau_mm_init(&vram->mm, offset, length, 1);
- }
-
- /* otherwise, address lowest common amount from 0GiB */
- ret = nouveau_mm_init(&vram->mm, rsvd_head, (bsize << 8) * parts, 1);
- if (ret)
- return ret;
-
- /* and the rest starting from (8GiB + common_size) */
- offset = (0x0200000000ULL >> 12) + (bsize << 8);
- length = (dev_priv->vram_size >> 12) - (bsize << 8) - rsvd_tail;
-
- ret = nouveau_mm_init(&vram->mm, offset, length, 0);
- if (ret) {
- nouveau_mm_fini(&vram->mm);
- return ret;
- }
+ dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
+ dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
+ dev_priv->vram_rblock_size = 4096;
return 0;
}
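
The replacement probe is much blunter than the per-partition walk it deletes: it assumes a uniform layout and multiplies one partition's size by the partition count. Assuming, as the removed code's use of 0x10f20c suggests, that the register holds MiB per partition and that 0x121c74 is a partition count (both assumptions here), the arithmetic is:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t per_part_mib = 256; /* assumed 0x10f20c readback */
	uint64_t parts = 4;          /* assumed 0x121c74 readback */
	uint64_t vram_size = (per_part_mib << 20) * parts;
	printf("%llu MiB total\n",
	       (unsigned long long)(vram_size >> 20)); /* 1024 MiB */
	return 0;
}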
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
deleted file mode 100644
index dfb8a95..0000000
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ /dev/null
@@ -1,2098 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/dma-mapping.h>
-
-#include "drmP.h"
-#include "drm_crtc_helper.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_dma.h"
-#include "nouveau_fb.h"
-#include "nv50_display.h"
-
-#define EVO_DMA_NR 9
-
-#define EVO_MASTER (0x00)
-#define EVO_FLIP(c) (0x01 + (c))
-#define EVO_OVLY(c) (0x05 + (c))
-#define EVO_OIMM(c) (0x09 + (c))
-#define EVO_CURS(c) (0x0d + (c))
-
-/* offsets in shared sync bo of various structures */
-#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
-#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
-
-struct evo {
- int idx;
- dma_addr_t handle;
- u32 *ptr;
- struct {
- u32 offset;
- u16 value;
- } sem;
-};
-
-struct nvd0_display {
- struct nouveau_gpuobj *mem;
- struct nouveau_bo *sync;
- struct evo evo[9];
-
- struct tasklet_struct tasklet;
- u32 modeset;
-};
-
-static struct nvd0_display *
-nvd0_display(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- return dev_priv->engine.display.priv;
-}
-
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-/******************************************************************************
- * EVO channel helpers
- *****************************************************************************/
-static inline int
-evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
-{
- int ret = 0;
- nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
- nv_wr32(dev, 0x610704 + (id * 0x10), data);
- nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
- if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
- ret = -EBUSY;
- nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
- return ret;
-}
-
-static u32 *
-evo_wait(struct drm_device *dev, int id, int nr)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4;
-
- if (put + nr >= (PAGE_SIZE / 4)) {
- disp->evo[id].ptr[put] = 0x20000000;
-
- nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000);
- if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
- NV_ERROR(dev, "evo %d dma stalled\n", id);
- return NULL;
- }
-
- put = 0;
- }
-
- if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
- NV_INFO(dev, "Evo%d: %p START\n", id, disp->evo[id].ptr + put);
-
- return disp->evo[id].ptr + put;
-}
-
-static void
-evo_kick(u32 *push, struct drm_device *dev, int id)
-{
- struct nvd0_display *disp = nvd0_display(dev);
-
- if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) {
- u32 curp = nv_rd32(dev, 0x640000 + (id * 0x1000)) >> 2;
- u32 *cur = disp->evo[id].ptr + curp;
-
- while (cur < push)
- NV_INFO(dev, "Evo%d: 0x%08x\n", id, *cur++);
- NV_INFO(dev, "Evo%d: %p KICK!\n", id, push);
- }
-
- nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
-}
-
-#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
-#define evo_data(p,d) *((p)++) = (d)
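
Reading the two macros above: a method header packs the data-word count into bits 18 and up and the method offset into the low bits, and each evo_data() just appends a payload word. A quick standalone check of what common calls in this file emit:

#include <stdio.h>

int main(void)
{
	unsigned hdr1 = (1u << 18) | 0x0080; /* evo_mthd(p, 0x0080, 1) */
	unsigned hdr4 = (4u << 18) | 0x0088; /* evo_mthd(p, 0x0088, 4) */
	printf("0x%08x 0x%08x\n", hdr1, hdr4); /* 0x00040080 0x00100088 */
	return 0;
}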
-
-static int
-evo_init_dma(struct drm_device *dev, int ch)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- u32 flags;
-
- flags = 0x00000000;
- if (ch == EVO_MASTER)
- flags |= 0x01000000;
-
- nv_wr32(dev, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
- nv_wr32(dev, 0x610498 + (ch * 0x0010), 0x00010000);
- nv_wr32(dev, 0x61049c + (ch * 0x0010), 0x00000001);
- nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
- nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000);
- nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
- if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
- NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
- nv_rd32(dev, 0x610490 + (ch * 0x0010)));
- return -EBUSY;
- }
-
- nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
- nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
- return 0;
-}
-
-static void
-evo_fini_dma(struct drm_device *dev, int ch)
-{
- if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000010))
- return;
-
- nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
- nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
- nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
- nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
- nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static inline void
-evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
-{
- nv_wr32(dev, 0x640000 + (ch * 0x1000) + mthd, data);
-}
-
-static int
-evo_init_pio(struct drm_device *dev, int ch)
-{
- nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000001);
- if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
- NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
- nv_rd32(dev, 0x610490 + (ch * 0x0010)));
- return -EBUSY;
- }
-
- nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
- nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
- return 0;
-}
-
-static void
-evo_fini_pio(struct drm_device *dev, int ch)
-{
- if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000001))
- return;
-
- nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
- nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
- nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
- nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
- nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static bool
-evo_sync_wait(void *data)
-{
- return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
-}
-
-static int
-evo_sync(struct drm_device *dev, int ch)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- u32 *push = evo_wait(dev, ch, 8);
- if (push) {
- nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000 | EVO_MAST_NTFY);
- evo_mthd(push, 0x0080, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, ch);
- if (nv_wait_cb(dev, evo_sync_wait, disp->sync))
- return 0;
- }
-
- return -EBUSY;
-}
-
-/******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
-{
- return nvd0_display(dev)->sync;
-}
-
-void
-nvd0_display_flip_stop(struct drm_crtc *crtc)
-{
- struct nvd0_display *disp = nvd0_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
- u32 *push;
-
- push = evo_wait(crtc->dev, evo->idx, 8);
- if (push) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0094, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, evo->idx);
- }
-}
-
-int
-nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan, u32 swap_interval)
-{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nvd0_display *disp = nvd0_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
- u64 offset;
- u32 *push;
- int ret;
-
- evo_sync(crtc->dev, EVO_MASTER);
-
- swap_interval <<= 4;
- if (swap_interval == 0)
- swap_interval |= 0x100;
-
- push = evo_wait(crtc->dev, evo->idx, 128);
- if (unlikely(push == NULL))
- return -EBUSY;
-
- /* synchronise with the rendering channel, if necessary */
- if (likely(chan)) {
- ret = RING_SPACE(chan, 10);
- if (ret)
- return ret;
-
- offset = chan->dispc_vma[nv_crtc->index].offset;
- offset += evo->sem.offset;
-
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | evo->sem.value);
- OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset ^ 0x10));
- OUT_RING (chan, 0x74b1e000);
- OUT_RING (chan, 0x1001);
- FIRE_RING (chan);
- } else {
- nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
- 0xf00d0000 | evo->sem.value);
- evo_sync(crtc->dev, EVO_MASTER);
- }
-
- /* queue the flip */
- evo_mthd(push, 0x0100, 1);
- evo_data(push, 0xfffe0000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, swap_interval);
- if (!(swap_interval & 0x00000100)) {
- evo_mthd(push, 0x00e0, 1);
- evo_data(push, 0x40000000);
- }
- evo_mthd(push, 0x0088, 4);
- evo_data(push, evo->sem.offset);
- evo_data(push, 0xf00d0000 | evo->sem.value);
- evo_data(push, 0x74b1e000);
- evo_data(push, NvEvoSync);
- evo_mthd(push, 0x00a0, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, nv_fb->r_dma);
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0400, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, evo->idx);
-
- evo->sem.offset ^= 0x10;
- evo->sem.value++;
- return 0;
-}
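
The tail of flip_next() is the interesting part: each CRTC owns two 16-byte semaphore slots in the shared sync buffer (EVO_FLIP_SEM0/SEM1 above, 0x10 apart), and the code ping-pongs between them with an incrementing payload, apparently so a new flip can be queued while the previous one is still in flight. A sketch of just that bookkeeping, with a made-up iteration count:

#include <stdio.h>

int main(void)
{
	unsigned offset = 0x00, value = 0;
	for (int flip = 0; flip < 4; flip++) {
		printf("flip %d: sem @ +0x%02x payload 0x%08x\n",
		       flip, offset, 0xf00d0000u | value);
		offset ^= 0x10; /* swap SEM0 <-> SEM1 */
		value++;        /* next flip waits on a fresh value */
	}
	return 0;
}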
-
-/******************************************************************************
- * CRTC
- *****************************************************************************/
-static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- u32 *push, mode = 0x00;
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
-
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
- }
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
- evo_data(push, mode);
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, dev, EVO_MASTER);
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
- struct drm_device *dev = nv_crtc->base.dev;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct nouveau_connector *nv_connector;
- int mode = DRM_MODE_SCALE_NONE;
- u32 oX, oY, *push;
-
- /* start off at the resolution we programmed the crtc for; this
- * effectively handles NONE/FULL scaling
- */
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode)
- mode = nv_connector->scaling_mode;
-
- if (mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
-
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
-
- /* add overscan compensation if necessary; this keeps the aspect
- * ratio the same as the backend mode unless the user overrides it
- * by setting both the hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- nv_connector->edid &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
-
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- }
-
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
- }
-
- push = evo_wait(dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
- evo_kick(push, dev, EVO_MASTER);
- if (update) {
- nvd0_display_flip_stop(crtc);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
- }
- }
-
- return 0;
-}
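
The underscan arithmetic above works in 19-bit fixed point: aspect = (oY << 19) / oX, and the rounded multiply ((oX * aspect) + (aspect / 2)) >> 19 recomputes the height. On made-up numbers (a 1920x1080 mode with a 32-pixel hborder and no vborder):

#include <stdio.h>

int main(void)
{
	unsigned oX = 1920, oY = 1080, bX = 32;
	unsigned aspect = (oY << 19) / oX;         /* 294912 = 0.5625 */
	oX -= bX * 2;                              /* 1856 */
	oY = ((oX * aspect) + (aspect / 2)) >> 19; /* 1044, rounded */
	printf("borders give %ux%u\n", oX, oY);
	return 0;
}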
-
-static int
-nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
- int x, int y, bool update)
-{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
- u32 *push;
-
- push = evo_wait(fb->dev, EVO_MASTER, 16);
- if (push) {
- evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_data(push, nvfb->r_dma);
- evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (y << 16) | x);
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, fb->dev, EVO_MASTER);
- }
-
- nv_crtc->fb.tile_flags = nvfb->r_dma;
- return 0;
-}
-
-static void
-nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- u32 *push = evo_wait(dev, EVO_MASTER, 16);
- if (push) {
- if (show) {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
- } else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x05000000);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- }
-
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
-
- evo_kick(push, dev, EVO_MASTER);
- }
-}
-
-static void
-nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nvd0_crtc_prepare(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 *push;
-
- nvd0_display_flip_stop(crtc);
-
- push = evo_wait(crtc->dev, EVO_MASTER, 2);
- if (push) {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x03000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nvd0_crtc_cursor_show(nv_crtc, false, false);
-}
-
-static void
-nvd0_crtc_commit(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 *push;
-
- push = evo_wait(crtc->dev, EVO_MASTER, 32);
- if (push) {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nv_crtc->fb.tile_flags);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
- evo_data(push, 0x83000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
- evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0xffffff00);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-}
-
-static bool
-nvd0_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static int
-nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
-{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
- int ret;
-
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (old_fb) {
- nvfb = nouveau_framebuffer(old_fb);
- nouveau_bo_unpin(nvfb->nvbo);
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_connector *nv_connector;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1;
- u32 *push;
- int ret;
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
- }
-
- ret = nvd0_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
-
- push = evo_wait(crtc->dev, EVO_MASTER, 64);
- if (push) {
- evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000); /* ??? */
- evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
- evo_data(push, mode->clock * 1000);
- evo_data(push, 0x00200000); /* ??? */
- evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- nvd0_crtc_set_dither(nv_crtc, false);
- nvd0_crtc_set_scale(nv_crtc, false);
- nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
- return 0;
-}
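
To see what those blanking formulas produce, plug in the standard CEA 1920x1080@60 timings (hdisplay/hsync_start/hsync_end/htotal = 1920/2008/2052/2200, vertical 1080/1084/1089/1125; progressive, so ilace = vscan = 1). A standalone check, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned hd = 1920, hss = 2008, hse = 2052, ht = 2200;
	unsigned vd = 1080, vss = 1084, vse = 1089, vt = 1125;

	unsigned hsynce  = hse - hss - 1;    /* 43 */
	unsigned hbackp  = ht - hse;         /* 148 */
	unsigned hblanke = hsynce + hbackp;  /* 191 */
	unsigned hfrontp = hss - hd;         /* 88 */
	unsigned hblanks = ht - hfrontp - 1; /* 2111 */

	unsigned vsynce  = vse - vss - 1;    /* 4 */
	unsigned vbackp  = vt - vse;         /* 36 */
	unsigned vblanke = vsynce + vbackp;  /* 40 */
	unsigned vfrontp = vss - vd;         /* 4 */
	unsigned vblanks = vt - vfrontp - 1; /* 1120 */

	printf("h: sync %u blanke %u blanks %u frontp %u\n",
	       hsynce, hblanke, hblanks, hfrontp);
	printf("v: sync %u blanke %u blanks %u frontp %u\n",
	       vsynce, vblanke, vblanks, vfrontp);
	return 0;
}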
-
-static int
-nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
-
- if (!crtc->fb) {
- NV_DEBUG_KMS(crtc->dev, "No FB bound\n");
- return 0;
- }
-
- ret = nvd0_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
-
- nvd0_display_flip_stop(crtc);
- nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y,
- enum mode_set_atomic state)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nvd0_display_flip_stop(crtc);
- nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
- return 0;
-}
-
-static void
-nvd0_crtc_lut_load(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
- int i;
-
- for (i = 0; i < 256; i++) {
- writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
- writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
- writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
- }
-}
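
The LUT layout implied by the loop above: one 0x20-byte entry per index, with 16-bit red/green/blue words at offsets +0/+2/+4, each component stored as 0x6000 plus the top 14 bits of the 16-bit value. For a full-scale component:

#include <stdio.h>

int main(void)
{
	unsigned i = 255, component = 0xffff;
	unsigned word = 0x6000 + (component >> 2); /* 0x9fff */
	printf("entry %u at +0x%04x, word 0x%04x\n", i, i * 0x20, word);
	return 0;
}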
-
-static int
-nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- bool visible = (handle != 0);
- int i, ret = 0;
-
- if (visible) {
- if (width != 64 || height != 64)
- return -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, handle);
- if (unlikely(!gem))
- return -ENOENT;
- nvbo = nouveau_gem_object(gem);
-
- ret = nouveau_bo_map(nvbo);
- if (ret == 0) {
- for (i = 0; i < 64 * 64; i++) {
- u32 v = nouveau_bo_rd32(nvbo, i);
- nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
- }
- nouveau_bo_unmap(nvbo);
- }
-
- drm_gem_object_unreference_unlocked(gem);
- }
-
- if (visible != nv_crtc->cursor.visible) {
- nvd0_crtc_cursor_show(nv_crtc, visible, true);
- nv_crtc->cursor.visible = visible;
- }
-
- return ret;
-}
-
-static int
-nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ch = EVO_CURS(nv_crtc->index);
-
- evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x);
- evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
- return 0;
-}
-
-static void
-nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 end = min(start + size, (u32)256); /* clamp to the 256-entry LUT */
- u32 i;
-
- for (i = start; i < end; i++) {
- nv_crtc->lut.r[i] = r[i];
- nv_crtc->lut.g[i] = g[i];
- nv_crtc->lut.b[i] = b[i];
- }
-
- nvd0_crtc_lut_load(crtc);
-}
-
-static void
-nvd0_crtc_destroy(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- nouveau_bo_unmap(nv_crtc->lut.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- drm_crtc_cleanup(crtc);
- kfree(crtc);
-}
-
-static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
- .dpms = nvd0_crtc_dpms,
- .prepare = nvd0_crtc_prepare,
- .commit = nvd0_crtc_commit,
- .mode_fixup = nvd0_crtc_mode_fixup,
- .mode_set = nvd0_crtc_mode_set,
- .mode_set_base = nvd0_crtc_mode_set_base,
- .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
- .load_lut = nvd0_crtc_lut_load,
-};
-
-static const struct drm_crtc_funcs nvd0_crtc_func = {
- .cursor_set = nvd0_crtc_cursor_set,
- .cursor_move = nvd0_crtc_cursor_move,
- .gamma_set = nvd0_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = nvd0_crtc_destroy,
- .page_flip = nouveau_crtc_page_flip,
-};
-
-static void
-nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-}
-
-static void
-nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-}
-
-static int
-nvd0_crtc_create(struct drm_device *dev, int index)
-{
- struct nouveau_crtc *nv_crtc;
- struct drm_crtc *crtc;
- int ret, i;
-
- nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
- if (!nv_crtc)
- return -ENOMEM;
-
- nv_crtc->index = index;
- nv_crtc->set_dither = nvd0_crtc_set_dither;
- nv_crtc->set_scale = nvd0_crtc_set_scale;
- nv_crtc->cursor.set_offset = nvd0_cursor_set_offset;
- nv_crtc->cursor.set_pos = nvd0_cursor_set_pos;
- for (i = 0; i < 256; i++) {
- nv_crtc->lut.r[i] = i << 8;
- nv_crtc->lut.g[i] = i << 8;
- nv_crtc->lut.b[i] = i << 8;
- }
-
- crtc = &nv_crtc->base;
- drm_crtc_init(dev, crtc, &nvd0_crtc_func);
- drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
- drm_mode_crtc_set_gamma_size(crtc, 256);
-
- ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- }
-
- if (ret)
- goto out;
-
- ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->lut.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->lut.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- }
-
- if (ret)
- goto out;
-
- nvd0_crtc_lut_load(crtc);
-
-out:
- if (ret)
- nvd0_crtc_destroy(crtc);
- return ret;
-}
-
-/******************************************************************************
- * DAC
- *****************************************************************************/
-static void
-nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- int or = nv_encoder->or;
- u32 dpms_ctrl;
-
- dpms_ctrl = 0x80000000;
- if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000001;
- if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000004;
-
- nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_mask(dev, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
- nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-}
-
-static bool
-nvd0_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
- return true;
-}
-
-static void
-nvd0_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- u32 syncs, magic, *push;
-
- syncs = 0x00000001;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- magic = 0x31ec6000 | (nv_crtc->index << 25);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
- push = evo_wait(encoder->dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs);
- evo_data(push, magic);
- evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2);
- evo_data(push, 1 << nv_crtc->index);
- evo_data(push, 0x00ff);
- evo_kick(push, encoder->dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nvd0_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = NULL;
- }
-}
-
-static enum drm_connector_status
-nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- enum drm_connector_status status = connector_status_disconnected;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- int or = nv_encoder->or;
- u32 load;
-
- nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00100000);
- udelay(9500);
- nv_wr32(dev, 0x61a00c + (or * 0x800), 0x80000000);
-
- load = nv_rd32(dev, 0x61a00c + (or * 0x800));
- if ((load & 0x38000000) == 0x38000000)
- status = connector_status_connected;
-
- nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00000000);
- return status;
-}
-
-static void
-nvd0_dac_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
- .dpms = nvd0_dac_dpms,
- .mode_fixup = nvd0_dac_mode_fixup,
- .prepare = nvd0_dac_disconnect,
- .commit = nvd0_dac_commit,
- .mode_set = nvd0_dac_mode_set,
- .disable = nvd0_dac_disconnect,
- .get_crtc = nvd0_display_crtc_get,
- .detect = nvd0_dac_detect
-};
-
-static const struct drm_encoder_funcs nvd0_dac_func = {
- .destroy = nvd0_dac_destroy,
-};
-
-static int
-nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
-{
- struct drm_device *dev = connector->dev;
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
- nv_encoder->or = ffs(dcbe->or) - 1;
-
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
- drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
-/******************************************************************************
- * Audio
- *****************************************************************************/
-static void
-nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
- struct drm_device *dev = encoder->dev;
- int i, or = nv_encoder->or * 0x30;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_monitor_audio(nv_connector->edid))
- return;
-
- nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000001);
-
- drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
- if (nv_connector->base.eld[0]) {
- u8 *eld = nv_connector->base.eld;
-
- for (i = 0; i < eld[2] * 4; i++)
- nv_wr32(dev, 0x10ec00 + or, (i << 8) | eld[i]);
- for (i = eld[2] * 4; i < 0x60; i++)
- nv_wr32(dev, 0x10ec00 + or, (i << 8) | 0x00);
-
- nv_mask(dev, 0x10ec10 + or, 0x80000002, 0x80000002);
- }
-}
-
-static void
-nvd0_audio_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- int or = nv_encoder->or * 0x30;
-
- nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000000);
-}
-
-/******************************************************************************
- * HDMI
- *****************************************************************************/
-static void
-nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- struct drm_device *dev = encoder->dev;
- int head = nv_crtc->index * 0x800;
- u32 rekey = 56; /* constant used by both the binary driver and tegra */
- u32 max_ac_packet;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_hdmi_monitor(nv_connector->edid))
- return;
-
- max_ac_packet = mode->htotal - mode->hdisplay;
- max_ac_packet -= rekey;
- max_ac_packet -= 18; /* constant from tegra */
- max_ac_packet /= 32;
-
- /* AVI InfoFrame */
- nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
- nv_wr32(dev, 0x61671c + head, 0x000d0282);
- nv_wr32(dev, 0x616720 + head, 0x0000006f);
- nv_wr32(dev, 0x616724 + head, 0x00000000);
- nv_wr32(dev, 0x616728 + head, 0x00000000);
- nv_wr32(dev, 0x61672c + head, 0x00000000);
- nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000001);
-
- /* ??? InfoFrame? */
- nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
- nv_wr32(dev, 0x6167ac + head, 0x00000010);
- nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000001);
-
- /* HDMI_CTRL */
- nv_mask(dev, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
- max_ac_packet << 16);
-
- /* Purpose unknown, but audio doesn't work without it. */
- nv_mask(dev, 0x616548 + head, 0x00000070, 0x00000000);
-
- nvd0_audio_mode_set(encoder, mode);
-}
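
The max_ac_packet value above is just the horizontal blanking minus the rekey and an 18-cycle constant, in units of 32. For the standard CEA 1080p horizontal timing (htotal 2200):

#include <stdio.h>

int main(void)
{
	unsigned htotal = 2200, hdisplay = 1920, rekey = 56;
	unsigned max_ac = (htotal - hdisplay - rekey - 18) / 32;
	printf("max_ac_packet = %u\n", max_ac); /* 6 */
	return 0;
}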
-
-static void
-nvd0_hdmi_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- struct drm_device *dev = encoder->dev;
- int head = nv_crtc->index * 0x800;
-
- nvd0_audio_disconnect(encoder);
-
- nv_mask(dev, 0x616798 + head, 0x40000000, 0x00000000);
- nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
- nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
-}
-
-/******************************************************************************
- * SOR
- *****************************************************************************/
-static inline u32
-nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane)
-{
- static const u8 nvd0[] = { 16, 8, 0, 24 };
- return nvd0[lane];
-}
-
-static void
-nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern)
-{
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- nv_mask(dev, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
-}
-
-static void
-nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
- u8 lane, u8 swing, u8 preem)
-{
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
- u32 mask = 0x000000ff << shift;
- u8 *table, *entry, *config = NULL;
-
- switch (swing) {
- case 0: preem += 0; break;
- case 1: preem += 4; break;
- case 2: preem += 7; break;
- case 3: preem += 9; break;
- }
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (table) {
- if (table[0] == 0x30) {
- config = entry + table[4];
- config += table[5] * preem;
- }
- }
-
- if (!config) {
- NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- nv_mask(dev, 0x61c118 + loff, mask, config[1] << shift);
- nv_mask(dev, 0x61c120 + loff, mask, config[2] << shift);
- nv_mask(dev, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
- nv_mask(dev, 0x61c13c + loff, 0x00000000, 0x00000000);
-}
-
-static void
-nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
- int link_nr, u32 link_bw, bool enhframe)
-{
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & ~0x001f4000;
- u32 clksor = nv_rd32(dev, 0x612300 + soff) & ~0x007c0000;
- u32 script = 0x0000, lane_mask = 0;
- u8 *table, *entry;
- int i;
-
- link_bw /= 27000;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (table) {
- if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
- else entry = NULL;
-
- while (entry) {
- if (entry[0] >= link_bw)
- break;
- entry += 3;
- }
-
- nouveau_bios_run_init_table(dev, script, dcb, crtc);
- }
-
- clksor |= link_bw << 18;
- dpctrl |= ((1 << link_nr) - 1) << 16;
- if (enhframe)
- dpctrl |= 0x00004000;
-
- for (i = 0; i < link_nr; i++)
- lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
-
- nv_wr32(dev, 0x612300 + soff, clksor);
- nv_wr32(dev, 0x61c10c + loff, dpctrl);
- nv_mask(dev, 0x61c130 + loff, 0x0000000f, lane_mask);
-}
-
-static void
-nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_entry *dcb,
- u32 *link_nr, u32 *link_bw)
-{
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & 0x000f0000;
- u32 clksor = nv_rd32(dev, 0x612300 + soff);
-
- if (dpctrl > 0x00030000) *link_nr = 4;
- else if (dpctrl > 0x00010000) *link_nr = 2;
- else *link_nr = 1;
-
- *link_bw = (clksor & 0x007c0000) >> 18;
- *link_bw *= 27000;
-}
-
-static void
-nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_entry *dcb,
- u32 crtc, u32 datarate)
-{
- const u32 symbol = 100000;
- const u32 TU = 64;
- u32 link_nr, link_bw;
- u64 ratio, value;
-
- nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);
-
- ratio = datarate;
- ratio *= symbol;
- do_div(ratio, link_nr * link_bw);
-
- value = (symbol - ratio) * TU;
- value *= ratio;
- do_div(value, symbol);
- do_div(value, symbol);
-
- value += 5;
- value |= 0x08000000;
-
- nv_wr32(dev, 0x616610 + (crtc * 0x800), value);
-}
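
Worked through on assumed link parameters: a 1080p60 stream at 24bpp gives datarate = 148500 * 24 / 8 = 445500 (that is how nvd0_sor_mode_set() below fills dp.datarate), and 4 lanes at 2.7Gbps read back as link_bw = 270000. Then:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t symbol = 100000, TU = 64;
	uint64_t datarate = 445500, link_nr = 4, link_bw = 270000;

	uint64_t ratio = datarate * symbol / (link_nr * link_bw); /* 41250 */
	uint64_t value = (symbol - ratio) * TU * ratio / symbol / symbol;
	value += 5;          /* 15 + 5 = 20 */
	value |= 0x08000000; /* 0x08000014 */
	printf("0x%08llx\n", (unsigned long long)value);
	return 0;
}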
-
-static void
-nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct drm_encoder *partner;
- int or = nv_encoder->or;
- u32 dpms_ctrl;
-
- nv_encoder->last_dpms = mode;
-
- list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
-
- if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
- continue;
-
- if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->dcb->or) {
- if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
- return;
- break;
- }
- }
-
- dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
- dpms_ctrl |= 0x80000000;
-
- nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
- nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
-
- if (nv_encoder->dcb->type == OUTPUT_DP) {
- struct dp_train_func func = {
- .link_set = nvd0_sor_dp_link_set,
- .train_set = nvd0_sor_dp_train_set,
- .train_adj = nvd0_sor_dp_train_adj
- };
-
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
- }
-}
-
-static bool
-nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
- return true;
-}
-
-static void
-nvd0_sor_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nvd0_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nvd0_hdmi_disconnect(encoder);
-
- nv_encoder->crtc = NULL;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
- }
-}
-
-static void
-nvd0_sor_prepare(struct drm_encoder *encoder)
-{
- nvd0_sor_disconnect(encoder);
- if (nouveau_encoder(encoder)->dcb->type == OUTPUT_DP)
- evo_sync(encoder->dev, EVO_MASTER);
-}
-
-static void
-nvd0_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- struct nvbios *bios = &dev_priv->vbios;
- u32 mode_ctrl = (1 << nv_crtc->index);
- u32 syncs, magic, *push;
- u32 or_config;
-
- syncs = 0x00000001;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- magic = 0x31ec6000 | (nv_crtc->index << 25);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- switch (nv_encoder->dcb->type) {
- case OUTPUT_TMDS:
- if (nv_encoder->dcb->sorconf.link & 1) {
- if (mode->clock < 165000)
- mode_ctrl |= 0x00000100;
- else
- mode_ctrl |= 0x00000500;
- } else {
- mode_ctrl |= 0x00000200;
- }
-
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- if (mode->clock >= 165000)
- or_config |= 0x0100;
-
- nvd0_hdmi_mode_set(encoder, mode);
- break;
- case OUTPUT_LVDS:
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- if (bios->fp_no_ddc) {
- if (bios->fp.dual_link)
- or_config |= 0x0100;
- if (bios->fp.if_is_24bit)
- or_config |= 0x0200;
- } else {
- if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
- if (((u8 *)nv_connector->edid)[121] == 2)
- or_config |= 0x0100;
- } else
- if (mode->clock >= bios->fp.duallink_transition_clk) {
- or_config |= 0x0100;
- }
-
- if (or_config & 0x0100) {
- if (bios->fp.strapless_is_24bit & 2)
- or_config |= 0x0200;
- } else {
- if (bios->fp.strapless_is_24bit & 1)
- or_config |= 0x0200;
- }
-
- if (nv_connector->base.display_info.bpc == 8)
- or_config |= 0x0200;
-
- }
- break;
- case OUTPUT_DP:
- if (nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
- syncs |= 0x00000140;
- } else {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
- syncs |= 0x00000180;
- }
-
- if (nv_encoder->dcb->sorconf.link & 1)
- mode_ctrl |= 0x00000800;
- else
- mode_ctrl |= 0x00000900;
-
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- break;
- default:
- BUG_ON(1);
- break;
- }
-
- nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
- if (nv_encoder->dcb->type == OUTPUT_DP) {
- nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
- nv_encoder->dp.datarate);
- }
-
- push = evo_wait(dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs);
- evo_data(push, magic);
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2);
- evo_data(push, mode_ctrl);
- evo_data(push, or_config);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_sor_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
- .dpms = nvd0_sor_dpms,
- .mode_fixup = nvd0_sor_mode_fixup,
- .prepare = nvd0_sor_prepare,
- .commit = nvd0_sor_commit,
- .mode_set = nvd0_sor_mode_set,
- .disable = nvd0_sor_disconnect,
- .get_crtc = nvd0_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nvd0_sor_func = {
- .destroy = nvd0_sor_destroy,
-};
-
-static int
-nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe)
-{
- struct drm_device *dev = connector->dev;
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
- nv_encoder->or = ffs(dcbe->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
-/******************************************************************************
- * IRQ
- *****************************************************************************/
-static struct dcb_entry *
-lookup_dcb(struct drm_device *dev, int id, u32 mc)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int type, or, i, link = -1;
-
- if (id < 4) {
- type = OUTPUT_ANALOG;
- or = id;
- } else {
- switch (mc & 0x00000f00) {
- case 0x00000000: link = 0; type = OUTPUT_LVDS; break;
- case 0x00000100: link = 0; type = OUTPUT_TMDS; break;
- case 0x00000200: link = 1; type = OUTPUT_TMDS; break;
- case 0x00000500: link = 0; type = OUTPUT_TMDS; break;
- case 0x00000800: link = 0; type = OUTPUT_DP; break;
- case 0x00000900: link = 1; type = OUTPUT_DP; break;
- default:
- NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc);
- return NULL;
- }
-
- or = id - 4;
- }
-
- for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
- struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
- if (dcb->type == type && (dcb->or & (1 << or)) &&
- (link < 0 || link == !(dcb->sorconf.link & 1)))
- return dcb;
- }
-
- NV_ERROR(dev, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
- return NULL;
-}
-
-static void
-nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct dcb_entry *dcb;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20));
- if (!(mcc & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcc);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
- }
-
- nv_wr32(dev, 0x6101d4, 0x00000000);
- nv_wr32(dev, 0x6109d4, 0x00000000);
- nv_wr32(dev, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct dcb_entry *dcb;
- u32 or, tmp, pclk;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20));
- if (!(mcc & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcc);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
- }
-
- pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000;
- if (mask & 0x00010000) {
- nv50_crtc_set_clock(dev, crtc, pclk);
- }
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20));
- u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20));
- if (!(mcp & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcp);
- if (!dcb)
- continue;
- or = ffs(dcb->or) - 1;
-
- nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
-
- nv_wr32(dev, 0x612200 + (crtc * 0x800), 0x00000000);
- switch (dcb->type) {
- case OUTPUT_ANALOG:
- nv_wr32(dev, 0x612280 + (or * 0x800), 0x00000000);
- break;
- case OUTPUT_TMDS:
- case OUTPUT_LVDS:
- case OUTPUT_DP:
- if (cfg & 0x00000100)
- tmp = 0x00000101;
- else
- tmp = 0x00000000;
-
- nv_mask(dev, 0x612300 + (or * 0x800), 0x00000707, tmp);
- break;
- default:
- break;
- }
-
- break;
- }
-
- nv_wr32(dev, 0x6101d4, 0x00000000);
- nv_wr32(dev, 0x6109d4, 0x00000000);
- nv_wr32(dev, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct dcb_entry *dcb;
- int pclk, i;
-
- pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20));
- u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20));
- if (!(mcp & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcp);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
- }
-
- nv_wr32(dev, 0x6101d4, 0x00000000);
- nv_wr32(dev, 0x6109d4, 0x00000000);
- nv_wr32(dev, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_bh(unsigned long data)
-{
- struct drm_device *dev = (struct drm_device *)data;
- struct nvd0_display *disp = nvd0_display(dev);
- u32 mask = 0, crtc = ~0;
- int i;
-
- if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
- NV_INFO(dev, "PDISP: modeset req %d\n", disp->modeset);
- NV_INFO(dev, " STAT: 0x%08x 0x%08x 0x%08x\n",
- nv_rd32(dev, 0x6101d0),
- nv_rd32(dev, 0x6101d4), nv_rd32(dev, 0x6109d4));
- for (i = 0; i < 8; i++) {
- NV_INFO(dev, " %s%d: 0x%08x 0x%08x\n",
- i < 4 ? "DAC" : "SOR", i,
- nv_rd32(dev, 0x640180 + (i * 0x20)),
- nv_rd32(dev, 0x660180 + (i * 0x20)));
- }
- }
-
- while (!mask && ++crtc < dev->mode_config.num_crtc)
- mask = nv_rd32(dev, 0x6101d4 + (crtc * 0x800));
-
- if (disp->modeset & 0x00000001)
- nvd0_display_unk1_handler(dev, crtc, mask);
- if (disp->modeset & 0x00000002)
- nvd0_display_unk2_handler(dev, crtc, mask);
- if (disp->modeset & 0x00000004)
- nvd0_display_unk4_handler(dev, crtc, mask);
-}
-
-static void
-nvd0_display_intr(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- u32 intr = nv_rd32(dev, 0x610088);
- int i;
-
- if (intr & 0x00000001) {
- u32 stat = nv_rd32(dev, 0x61008c);
- nv_wr32(dev, 0x61008c, stat);
- intr &= ~0x00000001;
- }
-
- if (intr & 0x00000002) {
- u32 stat = nv_rd32(dev, 0x61009c);
- int chid = ffs(stat) - 1;
- if (chid >= 0) {
- u32 mthd = nv_rd32(dev, 0x6101f0 + (chid * 12));
- u32 data = nv_rd32(dev, 0x6101f4 + (chid * 12));
- u32 unkn = nv_rd32(dev, 0x6101f8 + (chid * 12));
-
- NV_INFO(dev, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
- "0x%08x 0x%08x\n",
- chid, (mthd & 0x0000ffc), data, mthd, unkn);
- nv_wr32(dev, 0x61009c, (1 << chid));
- nv_wr32(dev, 0x6101f0 + (chid * 12), 0x90000000);
- }
-
- intr &= ~0x00000002;
- }
-
- if (intr & 0x00100000) {
- u32 stat = nv_rd32(dev, 0x6100ac);
-
- if (stat & 0x00000007) {
- disp->modeset = stat;
- tasklet_schedule(&disp->tasklet);
-
- nv_wr32(dev, 0x6100ac, (stat & 0x00000007));
- stat &= ~0x00000007;
- }
-
- if (stat) {
- NV_INFO(dev, "PDISP: unknown intr24 0x%08x\n", stat);
- nv_wr32(dev, 0x6100ac, stat);
- }
-
- intr &= ~0x00100000;
- }
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- u32 mask = 0x01000000 << i;
- if (intr & mask) {
- u32 stat = nv_rd32(dev, 0x6100bc + (i * 0x800));
- nv_wr32(dev, 0x6100bc + (i * 0x800), stat);
- intr &= ~mask;
- }
- }
-
- if (intr)
- NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr);
-}
-
-/******************************************************************************
- * Init
- *****************************************************************************/
-void
-nvd0_display_fini(struct drm_device *dev)
-{
- int i;
-
- /* fini cursors + overlays + flips */
- for (i = 1; i >= 0; i--) {
- evo_fini_pio(dev, EVO_CURS(i));
- evo_fini_pio(dev, EVO_OIMM(i));
- evo_fini_dma(dev, EVO_OVLY(i));
- evo_fini_dma(dev, EVO_FLIP(i));
- }
-
- /* fini master */
- evo_fini_dma(dev, EVO_MASTER);
-}
-
-int
-nvd0_display_init(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- int ret, i;
- u32 *push;
-
- if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
- nv_wr32(dev, 0x6100ac, 0x00000100);
- nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000);
- if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) {
- NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n",
- nv_rd32(dev, 0x6194e8));
- return -EBUSY;
- }
- }
-
- /* nfi what these are exactly, i do know that SOR_MODE_CTRL won't
- * work at all unless you do the SOR part below.
- */
- for (i = 0; i < 3; i++) {
- u32 dac = nv_rd32(dev, 0x61a000 + (i * 0x800));
- nv_wr32(dev, 0x6101c0 + (i * 0x800), dac);
- }
-
- for (i = 0; i < 4; i++) {
- u32 sor = nv_rd32(dev, 0x61c000 + (i * 0x800));
- nv_wr32(dev, 0x6301c4 + (i * 0x800), sor);
- }
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800));
- u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800));
- u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800));
- nv_wr32(dev, 0x6101b4 + (i * 0x800), crtc0);
- nv_wr32(dev, 0x6101b8 + (i * 0x800), crtc1);
- nv_wr32(dev, 0x6101bc + (i * 0x800), crtc2);
- }
-
- /* point at our hash table / objects, enable interrupts */
- nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9);
- nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
-
- /* init master */
- ret = evo_init_dma(dev, EVO_MASTER);
- if (ret)
- goto error;
-
- /* init flips + overlays + cursors */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
- (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
- (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
- (ret = evo_init_pio(dev, EVO_CURS(i))))
- goto error;
- }
-
- push = evo_wait(dev, EVO_MASTER, 32);
- if (!push) {
- ret = -EBUSY;
- goto error;
- }
- evo_mthd(push, 0x0088, 1);
- evo_data(push, NvEvoSync);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000);
- evo_mthd(push, 0x008c, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
-
-error:
- if (ret)
- nvd0_display_fini(dev);
- return ret;
-}
-
-void
-nvd0_display_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvd0_display *disp = nvd0_display(dev);
- struct pci_dev *pdev = dev->pdev;
- int i;
-
- for (i = 0; i < EVO_DMA_NR; i++) {
- struct evo *evo = &disp->evo[i];
- pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
- }
-
- nouveau_gpuobj_ref(NULL, &disp->mem);
- nouveau_bo_unmap(disp->sync);
- nouveau_bo_ref(NULL, &disp->sync);
- nouveau_irq_unregister(dev, 26);
-
- dev_priv->engine.display.priv = NULL;
- kfree(disp);
-}
-
-int
-nvd0_display_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct dcb_table *dcb = &dev_priv->vbios.dcb;
- struct drm_connector *connector, *tmp;
- struct pci_dev *pdev = dev->pdev;
- struct nvd0_display *disp;
- struct dcb_entry *dcbe;
- int crtcs, ret, i;
-
- disp = kzalloc(sizeof(*disp), GFP_KERNEL);
- if (!disp)
- return -ENOMEM;
- dev_priv->engine.display.priv = disp;
-
- /* create crtc objects to represent the hw heads */
- crtcs = nv_rd32(dev, 0x022448);
- for (i = 0; i < crtcs; i++) {
- ret = nvd0_crtc_create(dev, i);
- if (ret)
- goto out;
- }
-
- /* create encoder/connector objects based on VBIOS DCB table */
- for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
- connector = nouveau_connector_create(dev, dcbe->connector);
- if (IS_ERR(connector))
- continue;
-
- if (dcbe->location != DCB_LOC_ON_CHIP) {
- NV_WARN(dev, "skipping off-chip encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
- }
-
- switch (dcbe->type) {
- case OUTPUT_TMDS:
- case OUTPUT_LVDS:
- case OUTPUT_DP:
- nvd0_sor_create(connector, dcbe);
- break;
- case OUTPUT_ANALOG:
- nvd0_dac_create(connector, dcbe);
- break;
- default:
- NV_WARN(dev, "skipping unsupported encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
- }
- }
-
- /* cull any connectors we created that don't have an encoder */
- list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
- if (connector->encoder_ids[0])
- continue;
-
- NV_WARN(dev, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
- connector->funcs->destroy(connector);
- }
-
- /* setup interrupt handling */
- tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
- nouveau_irq_register(dev, 26, nvd0_display_intr);
-
- /* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &disp->sync);
- if (!ret) {
- ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(disp->sync);
- if (ret)
- nouveau_bo_ref(NULL, &disp->sync);
- }
-
- if (ret)
- goto out;
-
- /* hash table and dma objects for the memory areas we care about */
- ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
- NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
- if (ret)
- goto out;
-
- /* create evo dma channels */
- for (i = 0; i < EVO_DMA_NR; i++) {
- struct evo *evo = &disp->evo[i];
- u64 offset = disp->sync->bo.offset;
- u32 dmao = 0x1000 + (i * 0x100);
- u32 hash = 0x0000 + (i * 0x040);
-
- evo->idx = i;
- evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
- evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
- if (!evo->ptr) {
- ret = -ENOMEM;
- goto out;
- }
-
- nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
- nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
- nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
- nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
- nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
- nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
- ((dmao + 0x00) << 9));
-
- nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
- nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
- nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
- nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
- ((dmao + 0x20) << 9));
-
- nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
- nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
- nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
- nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
- ((dmao + 0x40) << 9));
-
- nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
- nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
- nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
- nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
- ((dmao + 0x60) << 9));
- }
-
- pinstmem->flush(dev);
-
-out:
- if (ret)
- nvd0_display_destroy(dev);
- return ret;
-}
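
The hash-table writes in the create path above pack three fields into each 32-bit entry: a valid bit, the owning EVO channel, and the instance offset of the DMA object the entry names. A minimal sketch of that packing, assuming the layout implied by the nv_wo32() calls (valid at bit 0, object offset from bit 9, channel id from bit 27):

#include <stdint.h>

/* Hypothetical helper mirroring the hash writes above:
 * bit 0       - entry valid
 * bits 9..26  - offset of the DMA object within disp->mem
 * bits 27..30 - EVO channel that owns the object
 */
static uint32_t evo_hash_entry(int chid, uint32_t dmao)
{
	return 0x00000001 | ((uint32_t)chid << 27) | (dmao << 9);
}

With chid 0 and dmao 0x1000 this yields 0x00200001, matching what the loop stores for the first channel's NvEvoSync object.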
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index bcac90b..570e190 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -32,7 +32,6 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include "drmP.h"
#include "drm.h"
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 88718fa..b9e8efd 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -29,8 +29,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include <linux/module.h>
-
#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
@@ -42,20 +40,6 @@ static struct pci_device_id pciidlist[] = {
r128_PCI_IDS
};
-static const struct file_operations r128_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = r128_compat_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -74,7 +58,21 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = r128_ioctls,
.dma_ioctl = r128_cce_buffers,
- .fops = &r128_driver_fops,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = r128_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+ },
+
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -85,7 +83,6 @@ static struct drm_driver driver = {
int r128_driver_load(struct drm_device *dev, unsigned long flags)
{
- pci_set_master(dev->pdev);
return drm_vblank_init(dev, 1);
}
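
The load hunk above also drops the explicit pci_set_master() call, leaving bus mastering in whatever state the core or firmware set up. A minimal sketch of the newer pattern being removed, assuming the usual DRM load-callback signature:

#include <linux/pci.h>
#include "drmP.h"

/* Sketch of the pattern the hunk deletes: enable PCI bus mastering
 * explicitly before setting up vblank support for the single CRTC.
 */
static int example_driver_load(struct drm_device *dev, unsigned long flags)
{
	pci_set_master(dev->pdev);
	return drm_vblank_init(dev, 1);
}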
diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore
new file mode 100644
index 0000000..403eb3a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/.gitignore
@@ -0,0 +1,3 @@
+mkregtable
+*_reg_safe.h
+
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 8410415..3896ef8 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -5,7 +5,6 @@
ccflags-y := -Iinclude/drm
hostprogs-y := mkregtable
-clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
quiet_cmd_mkregtable = MKREGTABLE $@
cmd_mkregtable = $(obj)/mkregtable $< > $@
@@ -70,8 +69,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
- radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
- radeon_semaphore.o radeon_sa.o atombios_i2c.o
+ radeon_trace_points.o ni.o cayman_blit_shaders.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
@@ -79,4 +77,4 @@ radeon-$(CONFIG_ACPI) += radeon_acpi.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
-CFLAGS_radeon_trace_points.o := -I$(src)
+CFLAGS_radeon_trace_points.o := -I$(src)
\ No newline at end of file
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index daa1e34..a1df7d7 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -665,8 +665,6 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
SDEBUG(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
udelay(count);
- else if (!drm_can_sleep())
- mdelay(count);
else
msleep(count);
}
@@ -1261,9 +1259,6 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
char name[512];
int i;
- if (!ctx)
- return NULL;
-
ctx->card = card;
ctx->bios = bios;
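
The second atom.c hunk removes the allocation-failure guard, so a failed kzalloc() of the context would be dereferenced on the very next line. The guarded shape being deleted, as a minimal sketch (the surrounding atom_parse() body and radeon types are assumed):

#include <linux/slab.h>

/* Sketch of the guard removed above; card and bios stand in for
 * atom_parse()'s real parameters.
 */
static struct atom_context *example_parse(struct card_info *card, void *bios)
{
	struct atom_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)		/* the check being dropped */
		return NULL;	/* report out-of-memory to the caller */

	ctx->card = card;
	ctx->bios = bios;
	return ctx;
}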
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 72672ea..071ded1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -355,12 +355,15 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
+static void atombios_disable_ss(struct drm_crtc *crtc)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
u32 ss_cntl;
if (ASIC_IS_DCE4(rdev)) {
- switch (pll_id) {
+ switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
@@ -376,7 +379,7 @@ static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
return;
}
} else if (ASIC_IS_AVIVO(rdev)) {
- switch (pll_id) {
+ switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
ss_cntl &= ~1;
@@ -403,11 +406,13 @@ union atom_enable_ss {
ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
};
-static void atombios_crtc_program_ss(struct radeon_device *rdev,
+static void atombios_crtc_program_ss(struct drm_crtc *crtc,
int enable,
int pll_id,
struct radeon_atom_ss *ss)
{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
@@ -461,7 +466,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
return;
}
args.v2.ucEnable = enable;
- if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
+ if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v2.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE3(rdev)) {
args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
@@ -474,7 +479,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
} else if (ASIC_IS_AVIVO(rdev)) {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
- atombios_disable_ss(rdev, pll_id);
+ atombios_disable_ss(crtc);
return;
}
args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
@@ -486,7 +491,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
} else {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
- atombios_disable_ss(rdev, pll_id);
+ atombios_disable_ss(crtc);
return;
}
args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
@@ -518,7 +523,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
int encoder_mode = 0;
u32 dp_clock = mode->clock;
int bpc = 8;
- bool is_duallink = false;
/* reset the pll flags */
pll->flags = 0;
@@ -550,12 +554,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
- if (connector && connector->display_info.bpc)
+ if (connector)
bpc = connector->display_info.bpc;
encoder_mode = atombios_get_encoder_mode(encoder);
- is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
- (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+ radeon_encoder_is_dp_bridge(encoder)) {
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
@@ -635,29 +638,44 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (ss_enabled && ss->percentage)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_SS_ENABLE;
- if (ENCODER_MODE_IS_DP(encoder_mode)) {
- args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_COHERENT_MODE;
- /* 16200 or 27000 */
- args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
- } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) ||
+ radeon_encoder_is_dp_bridge(encoder)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
- /* deep color support */
- args.v3.sInput.usPixelClock =
- cpu_to_le16((mode->clock * bpc / 8) / 10);
- if (dig->coherent_mode)
+ if (encoder_mode == ATOM_ENCODER_MODE_DP) {
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
- if (is_duallink)
+ /* 16200 or 27000 */
+ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+ } else {
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ /* deep color support */
+ args.v3.sInput.usPixelClock =
+ cpu_to_le16((mode->clock * bpc / 8) / 10);
+ }
+ if (dig->coherent_mode)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ if (mode->clock > 165000)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_DUAL_LINK;
+ }
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (encoder_mode == ATOM_ENCODER_MODE_DP) {
args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_DUAL_LINK;
+ DISPPLL_CONFIG_COHERENT_MODE;
+ /* 16200 or 27000 */
+ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+ } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
+ if (mode->clock > 165000)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_DUAL_LINK;
+ }
}
- if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
- ENCODER_OBJECT_ID_NONE)
- args.v3.sInput.ucExtTransmitterID =
- radeon_encoder_get_dp_bridge_encoder_id(encoder);
- else
+ if (radeon_encoder_is_dp_bridge(encoder)) {
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+ struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
+ args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id;
+ } else
args.v3.sInput.ucExtTransmitterID = 0;
atom_execute_table(rdev->mode_info.atom_context,
@@ -699,9 +717,11 @@ union set_pixel_clock {
/* on DCE5, make sure the voltage is high enough to support the
* required disp clk.
*/
-static void atombios_crtc_set_dcpll(struct radeon_device *rdev,
+static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
u32 dispclk)
{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
u8 frev, crev;
int index;
union set_pixel_clock args;
@@ -744,7 +764,7 @@ static void atombios_crtc_set_dcpll(struct radeon_device *rdev,
}
static void atombios_crtc_program_pll(struct drm_crtc *crtc,
- u32 crtc_id,
+ int crtc_id,
int pll_id,
u32 encoder_mode,
u32 encoder_id,
@@ -831,7 +851,8 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v5.ucPpll = pll_id;
break;
case 6:
- args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
+ args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id;
+ args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10);
args.v6.ucRefDiv = ref_div;
args.v6.usFbDiv = cpu_to_le16(fb_div);
args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
@@ -925,7 +946,6 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
bpc = connector->display_info.bpc;
switch (encoder_mode) {
- case ATOM_ENCODER_MODE_DP_MST:
case ATOM_ENCODER_MODE_DP:
/* DP/eDP */
dp_clock = dig_connector->dp_clock / 10;
@@ -991,7 +1011,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
- atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
+ atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
@@ -1014,7 +1034,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
ss.step = step_size;
}
- atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
+ atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
}
}
@@ -1031,7 +1051,6 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
struct radeon_bo *rbo;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
- unsigned bankw, bankh, mtaspect, tile_split;
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
u32 tmp, viewport_w, viewport_h;
int r;
@@ -1103,33 +1122,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- if (tiling_flags & RADEON_TILING_MACRO) {
- if (rdev->family >= CHIP_CAYMAN)
- tmp = rdev->config.cayman.tile_config;
- else
- tmp = rdev->config.evergreen.tile_config;
-
- switch ((tmp & 0xf0) >> 4) {
- case 0: /* 4 banks */
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
- break;
- case 1: /* 8 banks */
- default:
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
- break;
- case 2: /* 16 banks */
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
- break;
- }
-
+ if (tiling_flags & RADEON_TILING_MACRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-
- evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
- fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
- fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
- fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
- fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
- } else if (tiling_flags & RADEON_TILING_MICRO)
+ else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
switch (radeon_crtc->crtc_id) {
@@ -1173,7 +1168,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
@@ -1342,7 +1337,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
@@ -1456,7 +1451,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
* PPLL/DCPLL programming and only program the DP DTO for the
* crtc virtual pixel clock.
*/
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+ if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
return ATOM_PPLL_INVALID;
}
@@ -1483,24 +1478,6 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
}
-void radeon_atom_dcpll_init(struct radeon_device *rdev)
-{
- /* always set DCPLL */
- if (ASIC_IS_DCE4(rdev)) {
- struct radeon_atom_ss ss;
- bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
- ASIC_INTERNAL_SS_ON_DCPLL,
- rdev->clock.default_dispclk);
- if (ss_enabled)
- atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss);
- /* XXX: DCE5, make sure voltage, dispclk is high enough */
- atombios_crtc_set_dcpll(rdev, rdev->clock.default_dispclk);
- if (ss_enabled)
- atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss);
- }
-
-}
-
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -1522,6 +1499,19 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
}
}
+ /* always set DCPLL */
+ if (ASIC_IS_DCE4(rdev)) {
+ struct radeon_atom_ss ss;
+ bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_SS_ON_DCPLL,
+ rdev->clock.default_dispclk);
+ if (ss_enabled)
+ atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
+ /* XXX: DCE5, make sure voltage, dispclk is high enough */
+ atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
+ if (ss_enabled)
+ atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
+ }
atombios_crtc_set_pll(crtc, adjusted_mode);
if (ASIC_IS_DCE4(rdev))
@@ -1547,6 +1537,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
+
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
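
One hunk above trades the structured SetPixelClock v6 fields for a single packed word; both encodings carry the same CRTC id and 10 kHz pixel clock. A sketch of the packed form, assuming the CRTC id occupies the top byte and the clock the low 24 bits, as the shift in the hunk implies:

#include <stdint.h>

/* Assumed packed ulDispEngClkFreq layout used by the newer code:
 * bits 24..31 - CRTC id, bits 0..23 - pixel clock in 10 kHz units.
 */
static uint32_t pack_disp_eng_clk(uint8_t crtc_id, uint32_t clock_khz)
{
	return ((uint32_t)crtc_id << 24) | (clock_khz / 10);
}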
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index aee9a39..efc2b21 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -483,8 +483,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
int lane_num, max_pix_clock;
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_NUTMEG)
+ if (radeon_connector_encoder_is_dp_bridge(connector))
return 270000;
lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
@@ -550,8 +549,8 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
return false;
}
-int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
- struct drm_connector *connector)
+static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -559,33 +558,24 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
if (!ASIC_IS_DCE4(rdev))
- return panel_mode;
+ return;
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_NUTMEG)
+ if (radeon_connector_encoder_is_dp_bridge(connector))
panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
- else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_TRAVIS) {
- u8 id[6];
- int i;
- for (i = 0; i < 6; i++)
- id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i);
- if (id[0] == 0x73 &&
- id[1] == 0x69 &&
- id[2] == 0x76 &&
- id[3] == 0x61 &&
- id[4] == 0x72 &&
- id[5] == 0x54)
- panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
- else
- panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
- } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
}
- return panel_mode;
+ atombios_dig_encoder_setup(encoder,
+ ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+ panel_mode);
+
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+ (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+ radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
+ }
}
void radeon_dp_set_link_config(struct drm_connector *connector,
@@ -722,8 +712,6 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u8 tmp;
/* power up the sink */
@@ -739,15 +727,11 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_DOWNSPREAD_CTRL, 0);
- if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
- (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
- radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
- }
+ radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector);
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
- if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
- dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+ if (dp_info->dpcd[0] >= 0x11)
tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
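
The final hunk above relaxes the enhanced-framing test to the DPCD revision alone; the code being removed also required the sink to advertise the capability bit. A minimal sketch of the stricter check, using the standard DPCD offsets (DP_DPCD_REV 0x000, DP_MAX_LANE_COUNT 0x002):

#include <linux/types.h>

#define DP_DPCD_REV		0x000
#define DP_MAX_LANE_COUNT	0x002
#define DP_ENHANCED_FRAME_CAP	(1 << 7)

/* Enhanced framing is requested only when the sink is DPCD 1.1+ and
 * explicitly advertises support (the dual condition reverted above).
 */
static bool dp_enhanced_frame_ok(const u8 dpcd[])
{
	return dpcd[DP_DPCD_REV] >= 0x11 &&
	       (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
}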
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
deleted file mode 100644
index b88c460..0000000
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ /dev/null
@@ -1,2359 +0,0 @@
-/*
- * Copyright 2007-11 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie
- * Alex Deucher
- */
-#include "drmP.h"
-#include "drm_crtc_helper.h"
-#include "radeon_drm.h"
-#include "radeon.h"
-#include "atom.h"
-
-extern int atom_debug;
-
-/* evil but including atombios.h is much worse */
-bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
- struct drm_display_mode *mode);
-
-
-static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- return true;
- default:
- return false;
- }
-}
-
-static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- /* set the active encoder to connector routing */
- radeon_encoder_set_active_device(encoder);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- /* hw bug */
- if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
- && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
- adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
-
- /* get the native mode for LVDS */
- if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
- radeon_panel_mode_fixup(encoder, adjusted_mode);
-
- /* get the native mode for TV */
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
- struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
- if (tv_dac) {
- if (tv_dac->tv_std == TV_STD_NTSC ||
- tv_dac->tv_std == TV_STD_NTSC_J ||
- tv_dac->tv_std == TV_STD_PAL_M)
- radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
- else
- radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
- }
- }
-
- if (ASIC_IS_DCE3(rdev) &&
- ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
- (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- radeon_dp_set_link_config(connector, mode);
- }
-
- return true;
-}
-
-static void
-atombios_dac_setup(struct drm_encoder *encoder, int action)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DAC_ENCODER_CONTROL_PS_ALLOCATION args;
- int index = 0;
- struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
-
- memset(&args, 0, sizeof(args));
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
- break;
- }
-
- args.ucAction = action;
-
- if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
- args.ucDacStandard = ATOM_DAC1_PS2;
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.ucDacStandard = ATOM_DAC1_CV;
- else {
- switch (dac_info->tv_std) {
- case TV_STD_PAL:
- case TV_STD_PAL_M:
- case TV_STD_SCART_PAL:
- case TV_STD_SECAM:
- case TV_STD_PAL_CN:
- args.ucDacStandard = ATOM_DAC1_PAL;
- break;
- case TV_STD_NTSC:
- case TV_STD_NTSC_J:
- case TV_STD_PAL_60:
- default:
- args.ucDacStandard = ATOM_DAC1_NTSC;
- break;
- }
- }
- args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-}
-
-static void
-atombios_tv_setup(struct drm_encoder *encoder, int action)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- TV_ENCODER_CONTROL_PS_ALLOCATION args;
- int index = 0;
- struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
-
- memset(&args, 0, sizeof(args));
-
- index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
-
- args.sTVEncoder.ucAction = action;
-
- if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
- else {
- switch (dac_info->tv_std) {
- case TV_STD_NTSC:
- args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
- break;
- case TV_STD_PAL:
- args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
- break;
- case TV_STD_PAL_M:
- args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
- break;
- case TV_STD_PAL_60:
- args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
- break;
- case TV_STD_NTSC_J:
- args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
- break;
- case TV_STD_SCART_PAL:
- args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
- break;
- case TV_STD_SECAM:
- args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
- break;
- case TV_STD_PAL_CN:
- args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
- break;
- default:
- args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
- break;
- }
- }
-
- args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-}
-
-union dvo_encoder_control {
- ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
- DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
- DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
-};
-
-void
-atombios_dvo_setup(struct drm_encoder *encoder, int action)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- union dvo_encoder_control args;
- int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
- uint8_t frev, crev;
-
- memset(&args, 0, sizeof(args));
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- switch (frev) {
- case 1:
- switch (crev) {
- case 1:
- /* R4xx, R5xx */
- args.ext_tmds.sXTmdsEncoder.ucEnable = action;
-
- if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
-
- args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
- break;
- case 2:
- /* RS600/690/740 */
- args.dvo.sDVOEncoder.ucAction = action;
- args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- /* DFP1, CRT1, TV1 depending on the type of port */
- args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
-
- if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
- break;
- case 3:
- /* R6xx */
- args.dvo_v3.ucAction = action;
- args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- args.dvo_v3.ucDVOConfig = 0; /* XXX */
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- break;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- break;
- }
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-union lvds_encoder_control {
- LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
- LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
-};
-
-void
-atombios_digital_setup(struct drm_encoder *encoder, int action)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- union lvds_encoder_control args;
- int index = 0;
- int hdmi_detected = 0;
- uint8_t frev, crev;
-
- if (!dig)
- return;
-
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
- hdmi_detected = 1;
-
- memset(&args, 0, sizeof(args));
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
- break;
- }
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- switch (frev) {
- case 1:
- case 2:
- switch (crev) {
- case 1:
- args.v1.ucMisc = 0;
- args.v1.ucAction = action;
- if (hdmi_detected)
- args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
- args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
- args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
- args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
- } else {
- if (dig->linkb)
- args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
- if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- /*if (pScrn->rgbBits == 8) */
- args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
- }
- break;
- case 2:
- case 3:
- args.v2.ucMisc = 0;
- args.v2.ucAction = action;
- if (crev == 3) {
- if (dig->coherent_mode)
- args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
- }
- if (hdmi_detected)
- args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
- args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- args.v2.ucTruncate = 0;
- args.v2.ucSpatial = 0;
- args.v2.ucTemporal = 0;
- args.v2.ucFRC = 0;
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
- args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
- args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
- if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
- args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
- }
- if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
- args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
- if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
- args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
- if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
- args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
- }
- } else {
- if (dig->linkb)
- args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
- if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- break;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- break;
- }
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-int
-atombios_get_encoder_mode(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
-
- /* dp bridges are always DP */
- if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
- return ATOM_ENCODER_MODE_DP;
-
- /* DVO is always DVO */
- if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
- return ATOM_ENCODER_MODE_DVO;
-
- connector = radeon_get_connector_for_encoder(encoder);
- /* if we don't have an active device yet, just use one of
- * the connectors tied to the encoder.
- */
- if (!connector)
- connector = radeon_get_connector_for_encoder_init(encoder);
- radeon_connector = to_radeon_connector(connector);
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
- return ATOM_ENCODER_MODE_HDMI;
- else if (radeon_connector->use_digital)
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_CRT;
- break;
- case DRM_MODE_CONNECTOR_DVID:
- case DRM_MODE_CONNECTOR_HDMIA:
- default:
- if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
- return ATOM_ENCODER_MODE_HDMI;
- else
- return ATOM_ENCODER_MODE_DVI;
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- return ATOM_ENCODER_MODE_LVDS;
- break;
- case DRM_MODE_CONNECTOR_DisplayPort:
- dig_connector = radeon_connector->con_priv;
- if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
- return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
- return ATOM_ENCODER_MODE_HDMI;
- else
- return ATOM_ENCODER_MODE_DVI;
- break;
- case DRM_MODE_CONNECTOR_eDP:
- return ATOM_ENCODER_MODE_DP;
- case DRM_MODE_CONNECTOR_DVIA:
- case DRM_MODE_CONNECTOR_VGA:
- return ATOM_ENCODER_MODE_CRT;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_9PinDIN:
- /* fix me */
- return ATOM_ENCODER_MODE_TV;
- /*return ATOM_ENCODER_MODE_CV;*/
- break;
- }
-}
-
-/*
- * DIG Encoder/Transmitter Setup
- *
- * DCE 3.0/3.1
- * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
- * Supports up to 3 digital outputs
- * - 2 DIG encoder blocks.
- * DIG1 can drive UNIPHY link A or link B
- * DIG2 can drive UNIPHY link B or LVTMA
- *
- * DCE 3.2
- * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
- * Supports up to 5 digital outputs
- * - 2 DIG encoder blocks.
- * DIG1/2 can drive UNIPHY0/1/2 link A or link B
- *
- * DCE 4.0/5.0
- * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
- * Supports up to 6 digital outputs
- * - 6 DIG encoder blocks.
- * - DIG to PHY mapping is hardcoded
- * DIG1 drives UNIPHY0 link A, A+B
- * DIG2 drives UNIPHY0 link B
- * DIG3 drives UNIPHY1 link A, A+B
- * DIG4 drives UNIPHY1 link B
- * DIG5 drives UNIPHY2 link A, A+B
- * DIG6 drives UNIPHY2 link B
- *
- * DCE 4.1
- * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
- * Supports up to 6 digital outputs
- * - 2 DIG encoder blocks.
- * DIG1/2 can drive UNIPHY0/1/2 link A or link B
- *
- * Routing
- * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
- * Examples:
- * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
- * crtc1 -> dig1 -> UNIPHY0 link B -> DP
- * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
- * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
- */
-
-union dig_encoder_control {
- DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
- DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
- DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
- DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
-};
-
-void
-atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- union dig_encoder_control args;
- int index = 0;
- uint8_t frev, crev;
- int dp_clock = 0;
- int dp_lane_count = 0;
- int hpd_id = RADEON_HPD_NONE;
- int bpc = 8;
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
-
- dp_clock = dig_connector->dp_clock;
- dp_lane_count = dig_connector->dp_lane_count;
- hpd_id = radeon_connector->hpd.hpd;
- bpc = connector->display_info.bpc;
- }
-
- /* no dig encoder assigned */
- if (dig->dig_encoder == -1)
- return;
-
- memset(&args, 0, sizeof(args));
-
- if (ASIC_IS_DCE4(rdev))
- index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
- else {
- if (dig->dig_encoder)
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
- }
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- switch (frev) {
- case 1:
- switch (crev) {
- case 1:
- args.v1.ucAction = action;
- args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
- args.v3.ucPanelMode = panel_mode;
- else
- args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
- args.v1.ucLaneNum = dp_lane_count;
- else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v1.ucLaneNum = 8;
- else
- args.v1.ucLaneNum = 4;
-
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
- break;
- }
- if (dig->linkb)
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
- else
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
- break;
- case 2:
- case 3:
- args.v3.ucAction = action;
- args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
- args.v3.ucPanelMode = panel_mode;
- else
- args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
- args.v3.ucLaneNum = dp_lane_count;
- else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v3.ucLaneNum = 8;
- else
- args.v3.ucLaneNum = 4;
-
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
- args.v3.acConfig.ucDigSel = dig->dig_encoder;
- switch (bpc) {
- case 0:
- args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
- break;
- case 6:
- args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
- break;
- case 8:
- default:
- args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
- break;
- case 10:
- args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
- break;
- case 12:
- args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
- break;
- case 16:
- args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
- break;
- }
- break;
- case 4:
- args.v4.ucAction = action;
- args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
- args.v4.ucPanelMode = panel_mode;
- else
- args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
- args.v4.ucLaneNum = dp_lane_count;
- else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v4.ucLaneNum = 8;
- else
- args.v4.ucLaneNum = 4;
-
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) {
- if (dp_clock == 270000)
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
- else if (dp_clock == 540000)
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
- }
- args.v4.acConfig.ucDigSel = dig->dig_encoder;
- switch (bpc) {
- case 0:
- args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
- break;
- case 6:
- args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
- break;
- case 8:
- default:
- args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
- break;
- case 10:
- args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
- break;
- case 12:
- args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
- break;
- case 16:
- args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
- break;
- }
- if (hpd_id == RADEON_HPD_NONE)
- args.v4.ucHPD_ID = 0;
- else
- args.v4.ucHPD_ID = hpd_id + 1;
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- break;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- break;
- }
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-}
-
-union dig_transmitter_control {
- DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
- DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
- DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
- DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
-};
-
-void
-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector;
- union dig_transmitter_control args;
- int index = 0;
- uint8_t frev, crev;
- bool is_dp = false;
- int pll_id = 0;
- int dp_clock = 0;
- int dp_lane_count = 0;
- int connector_object_id = 0;
- int igp_lane_info = 0;
- int dig_encoder = dig->dig_encoder;
-
- if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- connector = radeon_get_connector_for_encoder_init(encoder);
- /* just needed to avoid bailing in the encoder check. the encoder
- * isn't used for init
- */
- dig_encoder = 0;
- } else
- connector = radeon_get_connector_for_encoder(encoder);
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
-
- dp_clock = dig_connector->dp_clock;
- dp_lane_count = dig_connector->dp_lane_count;
- connector_object_id =
- (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- igp_lane_info = dig_connector->igp_lane_info;
- }
-
- if (encoder->crtc) {
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- pll_id = radeon_crtc->pll_id;
- }
-
- /* no dig encoder assigned */
- if (dig_encoder == -1)
- return;
-
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)))
- is_dp = true;
-
- memset(&args, 0, sizeof(args));
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
- break;
- }
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- switch (frev) {
- case 1:
- switch (crev) {
- case 1:
- args.v1.ucAction = action;
- if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- args.v1.usInitInfo = cpu_to_le16(connector_object_id);
- } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
- args.v1.asMode.ucLaneSel = lane_num;
- args.v1.asMode.ucLaneSet = lane_set;
- } else {
- if (is_dp)
- args.v1.usPixelClock =
- cpu_to_le16(dp_clock / 10);
- else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
- else
- args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- }
-
- args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
-
- if (dig_encoder)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
-
- if ((rdev->flags & RADEON_IS_IGP) &&
- (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
- if (is_dp ||
- !radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) {
- if (igp_lane_info & 0x1)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else if (igp_lane_info & 0x2)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
- else if (igp_lane_info & 0x4)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
- else if (igp_lane_info & 0x8)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
- } else {
- if (igp_lane_info & 0x3)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
- else if (igp_lane_info & 0xc)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
- }
- }
-
- if (dig->linkb)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
-
- if (is_dp)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
- else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
- if (dig->coherent_mode)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
- if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
- }
- break;
- case 2:
- args.v2.ucAction = action;
- if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- args.v2.usInitInfo = cpu_to_le16(connector_object_id);
- } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
- args.v2.asMode.ucLaneSel = lane_num;
- args.v2.asMode.ucLaneSet = lane_set;
- } else {
- if (is_dp)
- args.v2.usPixelClock =
- cpu_to_le16(dp_clock / 10);
- else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
- else
- args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- }
-
- args.v2.acConfig.ucEncoderSel = dig_encoder;
- if (dig->linkb)
- args.v2.acConfig.ucLinkSel = 1;
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.v2.acConfig.ucTransmitterSel = 0;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- args.v2.acConfig.ucTransmitterSel = 1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- args.v2.acConfig.ucTransmitterSel = 2;
- break;
- }
-
- if (is_dp) {
- args.v2.acConfig.fCoherentMode = 1;
- args.v2.acConfig.fDPConnector = 1;
- } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
- if (dig->coherent_mode)
- args.v2.acConfig.fCoherentMode = 1;
- if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v2.acConfig.fDualLinkConnector = 1;
- }
- break;
- case 3:
- args.v3.ucAction = action;
- if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- args.v3.usInitInfo = cpu_to_le16(connector_object_id);
- } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
- args.v3.asMode.ucLaneSel = lane_num;
- args.v3.asMode.ucLaneSet = lane_set;
- } else {
- if (is_dp)
- args.v3.usPixelClock =
- cpu_to_le16(dp_clock / 10);
- else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
- else
- args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- }
-
- if (is_dp)
- args.v3.ucLaneNum = dp_lane_count;
- else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v3.ucLaneNum = 8;
- else
- args.v3.ucLaneNum = 4;
-
- if (dig->linkb)
- args.v3.acConfig.ucLinkSel = 1;
- if (dig_encoder & 1)
- args.v3.acConfig.ucEncoderSel = 1;
-
- /* Select the PLL for the PHY
- * DP PHY should be clocked from external src if there is
- * one.
- */
- /* On DCE4, if there is an external clock, it generates the DP ref clock */
- if (is_dp && rdev->clock.dp_extclk)
- args.v3.acConfig.ucRefClkSource = 2; /* external src */
- else
- args.v3.acConfig.ucRefClkSource = pll_id;
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.v3.acConfig.ucTransmitterSel = 0;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- args.v3.acConfig.ucTransmitterSel = 1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- args.v3.acConfig.ucTransmitterSel = 2;
- break;
- }
-
- if (is_dp)
- args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
- else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
- if (dig->coherent_mode)
- args.v3.acConfig.fCoherentMode = 1;
- if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v3.acConfig.fDualLinkConnector = 1;
- }
- break;
- case 4:
- args.v4.ucAction = action;
- if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- args.v4.usInitInfo = cpu_to_le16(connector_object_id);
- } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
- args.v4.asMode.ucLaneSel = lane_num;
- args.v4.asMode.ucLaneSet = lane_set;
- } else {
- if (is_dp)
- args.v4.usPixelClock =
- cpu_to_le16(dp_clock / 10);
- else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
- else
- args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- }
-
- if (is_dp)
- args.v4.ucLaneNum = dp_lane_count;
- else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v4.ucLaneNum = 8;
- else
- args.v4.ucLaneNum = 4;
-
- if (dig->linkb)
- args.v4.acConfig.ucLinkSel = 1;
- if (dig_encoder & 1)
- args.v4.acConfig.ucEncoderSel = 1;
-
- /* Select the PLL for the PHY
- * DP PHY should be clocked from external src if there is
- * one.
- */
-	/* On DCE5, the DCPLL usually generates the DP ref clock */
- if (is_dp) {
- if (rdev->clock.dp_extclk)
- args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
- else
- args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
- } else
- args.v4.acConfig.ucRefClkSource = pll_id;
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.v4.acConfig.ucTransmitterSel = 0;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- args.v4.acConfig.ucTransmitterSel = 1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- args.v4.acConfig.ucTransmitterSel = 2;
- break;
- }
-
- if (is_dp)
- args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
- else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
- if (dig->coherent_mode)
- args.v4.acConfig.fCoherentMode = 1;
- if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v4.acConfig.fDualLinkConnector = 1;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- break;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- break;
- }
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
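
The removed function above is a typical AtomBIOS versioned-table call: one zeroed argument buffer is viewed through a union of per-revision layouts (v1-v4) and filled according to the frev/crev pair the BIOS reports, with unknown versions rejected. A standalone sketch of that dispatch pattern follows; the struct layouts and field names here are invented for illustration, not the real AtomBIOS ones.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct xmit_v1 { uint8_t ucAction; uint16_t usPixelClock; };            /* invented layout */
struct xmit_v2 { uint8_t ucAction; uint16_t usPixelClock; uint8_t ucLinkSel; };

union xmit_args {
	struct xmit_v1 v1;
	struct xmit_v2 v2;
};

static void transmitter_setup(int frev, int crev, uint8_t action)
{
	union xmit_args args;

	memset(&args, 0, sizeof(args));	/* zero-init, as the driver does */

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
			args.v1.ucAction = action;
			break;
		case 2:
			args.v2.ucAction = action;
			args.v2.ucLinkSel = 1;	/* e.g. link B */
			break;
		default:
			fprintf(stderr, "Unknown table version %d, %d\n", frev, crev);
			return;
		}
		break;
	default:
		fprintf(stderr, "Unknown table version %d, %d\n", frev, crev);
		return;
	}
	printf("filled args for frev=%d crev=%d\n", frev, crev);
}

int main(void)
{
	transmitter_setup(1, 2, 0x01);
	return 0;
}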
-
-bool
-atombios_set_edp_panel_power(struct drm_connector *connector, int action)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct drm_device *dev = radeon_connector->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- union dig_transmitter_control args;
- int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
- uint8_t frev, crev;
-
- if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
- goto done;
-
- if (!ASIC_IS_DCE4(rdev))
- goto done;
-
- if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
- (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
- goto done;
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- goto done;
-
- memset(&args, 0, sizeof(args));
-
- args.v1.ucAction = action;
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- /* wait for the panel to power up */
- if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
- int i;
-
- for (i = 0; i < 300; i++) {
- if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
- return true;
- mdelay(1);
- }
- return false;
- }
-done:
- return true;
-}
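
atombios_set_edp_panel_power() polls the hot-plug line once per millisecond for up to 300 ms after powering the panel on, returning false on timeout. A minimal user-space sketch of that bounded-poll idiom; panel_hpd_sense() and sleep_ms() are hypothetical stand-ins for radeon_hpd_sense() and mdelay(), not kernel APIs.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool panel_hpd_sense(void)
{
	static int calls;
	return ++calls > 50;	/* stub: pretend HPD asserts after ~50 polls */
}

static void sleep_ms(long ms)
{
	struct timespec ts = { 0, ms * 1000000L };
	nanosleep(&ts, NULL);
}

static bool wait_for_panel(int timeout_ms)
{
	for (int i = 0; i < timeout_ms; i++) {
		if (panel_hpd_sense())
			return true;	/* panel came up */
		sleep_ms(1);
	}
	return false;			/* give up after timeout_ms polls */
}

int main(void)
{
	printf("panel %s\n", wait_for_panel(300) ? "powered up" : "timed out");
	return 0;
}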
-
-union external_encoder_control {
- EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
- EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
-};
-
-static void
-atombios_external_encoder_setup(struct drm_encoder *encoder,
- struct drm_encoder *ext_encoder,
- int action)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
- union external_encoder_control args;
- struct drm_connector *connector;
- int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
- u8 frev, crev;
- int dp_clock = 0;
- int dp_lane_count = 0;
- int connector_object_id = 0;
- u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- int bpc = 8;
-
- if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
- connector = radeon_get_connector_for_encoder_init(encoder);
- else
- connector = radeon_get_connector_for_encoder(encoder);
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
-
- dp_clock = dig_connector->dp_clock;
- dp_lane_count = dig_connector->dp_lane_count;
- connector_object_id =
- (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- bpc = connector->display_info.bpc;
- }
-
- memset(&args, 0, sizeof(args));
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- switch (frev) {
- case 1:
- /* no params on frev 1 */
- break;
- case 2:
- switch (crev) {
- case 1:
- case 2:
- args.v1.sDigEncoder.ucAction = action;
- args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
- if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
- if (dp_clock == 270000)
- args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
- args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
- } else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v1.sDigEncoder.ucLaneNum = 8;
- else
- args.v1.sDigEncoder.ucLaneNum = 4;
- break;
- case 3:
- args.v3.sExtEncoder.ucAction = action;
- if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
- args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
- else
- args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
- if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
- if (dp_clock == 270000)
- args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
- else if (dp_clock == 540000)
- args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
- args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
- } else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
- args.v3.sExtEncoder.ucLaneNum = 8;
- else
- args.v3.sExtEncoder.ucLaneNum = 4;
- switch (ext_enum) {
- case GRAPH_OBJECT_ENUM_ID1:
- args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
- break;
- case GRAPH_OBJECT_ENUM_ID2:
- args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
- break;
- case GRAPH_OBJECT_ENUM_ID3:
- args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
- break;
- }
- switch (bpc) {
- case 0:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
- break;
- case 6:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
- break;
- case 8:
- default:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
- break;
- case 10:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
- break;
- case 12:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
- break;
- case 16:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
- break;
- }
- break;
- default:
- DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
- return;
- }
- break;
- default:
- DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
- return;
- }
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-static void
-atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- ENABLE_YUV_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
- uint32_t temp, reg;
-
- memset(&args, 0, sizeof(args));
-
- if (rdev->family >= CHIP_R600)
- reg = R600_BIOS_3_SCRATCH;
- else
- reg = RADEON_BIOS_3_SCRATCH;
-
- /* XXX: fix up scratch reg handling */
- temp = RREG32(reg);
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- WREG32(reg, (ATOM_S3_TV1_ACTIVE |
- (radeon_crtc->crtc_id << 18)));
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
- else
- WREG32(reg, 0);
-
- if (enable)
- args.ucEnable = ATOM_ENABLE;
- args.ucCRTC = radeon_crtc->crtc_id;
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- WREG32(reg, temp);
-}
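
atombios_yuv_setup() wraps the table call in a save/override/restore sequence: the scratch register is read, temporarily overwritten with routing information the BIOS table consumes, then restored afterwards. A toy model of that sequence, with plain variables in place of RREG32()/WREG32() and a made-up routing bit:

#include <stdint.h>
#include <stdio.h>

#define S3_TV1_ACTIVE	0x00000004u	/* placeholder, not the real ATOM_S3_* bit */

static uint32_t scratch;		/* stands in for BIOS_3_SCRATCH */

static uint32_t rreg32(void) { return scratch; }
static void wreg32(uint32_t v) { scratch = v; }

static void execute_table(void)
{
	/* the BIOS table reads the scratch word to pick its CRTC */
	printf("table sees scratch = 0x%08x\n", rreg32());
}

int main(void)
{
	uint32_t saved = rreg32();		/* 1. save current contents */

	wreg32(S3_TV1_ACTIVE | (1u << 18));	/* 2. advertise TV1 on crtc 1 */
	execute_table();			/* 3. table consumes the hint */
	wreg32(saved);				/* 4. restore the old value */
	return 0;
}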
-
-static void
-radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
- int index = 0;
-
- memset(&args, 0, sizeof(args));
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
- break;
- default:
- return;
- }
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- args.ucAction = ATOM_ENABLE;
- /* workaround for DVOOutputControl on some RS690 systems */
- if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
- u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
- WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- WREG32(RADEON_BIOS_3_SCRATCH, reg);
- } else
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- args.ucAction = ATOM_LCD_BLON;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- }
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- args.ucAction = ATOM_DISABLE;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- args.ucAction = ATOM_LCD_BLOFF;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- }
- break;
- }
-}
-
-static void
-radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- struct radeon_connector *radeon_connector = NULL;
- struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
-
- if (connector) {
- radeon_connector = to_radeon_connector(connector);
- radeon_dig_connector = radeon_connector->con_priv;
- }
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- /* some early dce3.2 boards have a bug in their transmitter control table */
- if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
- ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- else
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- atombios_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_ON);
- radeon_dig_connector->edp_on = true;
- }
- radeon_dp_link_train(encoder, connector);
- if (ASIC_IS_DCE4(rdev))
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
- }
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- else
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
- if (ASIC_IS_DCE4(rdev))
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- atombios_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_OFF);
- radeon_dig_connector->edp_on = false;
- }
- }
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
- break;
- }
-}
-
-static void
-radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
- struct drm_encoder *ext_encoder,
- int mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- default:
- if (ASIC_IS_DCE41(rdev)) {
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
- } else
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- if (ASIC_IS_DCE41(rdev)) {
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
- } else
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
- break;
- }
-}
-
-static void
-radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
-
- DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
- radeon_encoder->encoder_id, mode, radeon_encoder->devices,
- radeon_encoder->active_device);
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- radeon_atom_encoder_dpms_avivo(encoder, mode);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- radeon_atom_encoder_dpms_dig(encoder, mode);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- if (ASIC_IS_DCE5(rdev)) {
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- atombios_dvo_setup(encoder, ATOM_ENABLE);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- atombios_dvo_setup(encoder, ATOM_DISABLE);
- break;
- }
- } else if (ASIC_IS_DCE3(rdev))
- radeon_atom_encoder_dpms_dig(encoder, mode);
- else
- radeon_atom_encoder_dpms_avivo(encoder, mode);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- if (ASIC_IS_DCE5(rdev)) {
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- atombios_dac_setup(encoder, ATOM_ENABLE);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- atombios_dac_setup(encoder, ATOM_DISABLE);
- break;
- }
- } else
- radeon_atom_encoder_dpms_avivo(encoder, mode);
- break;
- default:
- return;
- }
-
- if (ext_encoder)
- radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);
-
-	radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
-}
-
-union crtc_source_param {
- SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
- SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
-};
-
-static void
-atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- union crtc_source_param args;
- int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
- uint8_t frev, crev;
- struct radeon_encoder_atom_dig *dig;
-
- memset(&args, 0, sizeof(args));
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- switch (frev) {
- case 1:
- switch (crev) {
- case 1:
- default:
- if (ASIC_IS_AVIVO(rdev))
- args.v1.ucCRTC = radeon_crtc->crtc_id;
- else {
- if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
- args.v1.ucCRTC = radeon_crtc->crtc_id;
- } else {
- args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
- }
- }
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
- args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
- else
- args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
- else
- args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
- else
- args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
- break;
- }
- break;
- case 2:
- args.v2.ucCRTC = radeon_crtc->crtc_id;
- if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
- args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
- else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
- args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
- else
- args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
- } else
- args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- dig = radeon_encoder->enc_priv;
- switch (dig->dig_encoder) {
- case 0:
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- break;
- case 1:
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- break;
- case 2:
- args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
- break;
- case 3:
- args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
- break;
- case 4:
- args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
- break;
- case 5:
- args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
- break;
- }
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
- break;
- }
- break;
- }
- break;
- default:
- DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
- return;
- }
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- /* update scratch regs with new routing */
- radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
-}
-
-static void
-atombios_apply_encoder_quirks(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
-
- /* Funky macbooks */
- if ((dev->pdev->device == 0x71C5) &&
- (dev->pdev->subsystem_vendor == 0x106b) &&
- (dev->pdev->subsystem_device == 0x0080)) {
- if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
- uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
-
- lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
- lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
-
- WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
- }
- }
-
-	/* setting the scaler clears this on some chips */
- if (ASIC_IS_AVIVO(rdev) &&
- (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
- if (ASIC_IS_DCE4(rdev)) {
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
- EVERGREEN_INTERLEAVE_EN);
- else
- WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
- } else {
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
- AVIVO_D1MODE_INTERLEAVE_EN);
- else
- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
- }
- }
-}
-
-static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *test_encoder;
- struct radeon_encoder_atom_dig *dig;
- uint32_t dig_enc_in_use = 0;
-
- /* DCE4/5 */
- if (ASIC_IS_DCE4(rdev)) {
- dig = radeon_encoder->enc_priv;
- if (ASIC_IS_DCE41(rdev)) {
- /* ontario follows DCE4 */
- if (rdev->family == CHIP_PALM) {
- if (dig->linkb)
- return 1;
- else
- return 0;
- } else
- /* llano follows DCE3.2 */
- return radeon_crtc->crtc_id;
- } else {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return 1;
- else
- return 0;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return 3;
- else
- return 2;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return 5;
- else
- return 4;
- break;
- }
- }
- }
-
-	/* on DCE3.2 an encoder can drive any block, so just use the crtc id */
- if (ASIC_IS_DCE32(rdev)) {
- return radeon_crtc->crtc_id;
- }
-
- /* on DCE3 - LVTMA can only be driven by DIGB */
- list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
- struct radeon_encoder *radeon_test_encoder;
-
- if (encoder == test_encoder)
- continue;
-
- if (!radeon_encoder_is_digital(test_encoder))
- continue;
-
- radeon_test_encoder = to_radeon_encoder(test_encoder);
- dig = radeon_test_encoder->enc_priv;
-
- if (dig->dig_encoder >= 0)
- dig_enc_in_use |= (1 << dig->dig_encoder);
- }
-
- if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
- if (dig_enc_in_use & 0x2)
- DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
- return 1;
- }
- if (!(dig_enc_in_use & 1))
- return 0;
- return 1;
-}
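
The DIG picker above collects a bitmask of encoder blocks already claimed by peer encoders and hands out the lowest free one, falling back to stealing when everything is busy. A simplified standalone model of that allocation; the two-block limit mirrors the pre-DCE3.2 case handled at the end of the function.

#include <stdio.h>

#define NUM_DIG_BLOCKS 2	/* pre-DCE3.2 chips expose two DIG blocks */

static int pick_dig_encoder(const int *peer_dig, int peers)
{
	unsigned int in_use = 0;
	int block;

	for (int i = 0; i < peers; i++)
		if (peer_dig[i] >= 0)		/* -1 means unassigned */
			in_use |= 1u << peer_dig[i];

	for (block = 0; block < NUM_DIG_BLOCKS; block++)
		if (!(in_use & (1u << block)))
			return block;		/* first free block */

	return NUM_DIG_BLOCKS - 1;		/* all busy: steal the last */
}

int main(void)
{
	int peers[] = { 0, -1, -1 };		/* one peer already owns DIG1 */

	printf("picked DIG%d\n", pick_dig_encoder(peers, 3) + 1);
	return 0;
}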
-
-/* This only needs to be called once at startup */
-void
-radeon_atom_encoder_init(struct radeon_device *rdev)
-{
- struct drm_device *dev = rdev->ddev;
- struct drm_encoder *encoder;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
- break;
- default:
- break;
- }
-
- if (ext_encoder && ASIC_IS_DCE41(rdev))
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
- }
-}
-
-static void
-radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
-
- radeon_encoder->pixel_clock = adjusted_mode->clock;
-
- if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
- if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
- atombios_yuv_setup(encoder, true);
- else
- atombios_yuv_setup(encoder, false);
- }
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
- if (!connector)
- dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
- else
- dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
-
- /* setup and enable the encoder */
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
- atombios_dig_encoder_setup(encoder,
- ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
- dig->panel_mode);
- } else if (ASIC_IS_DCE4(rdev)) {
- /* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- /* setup and enable the encoder */
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
-
- /* enable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- } else {
- /* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
-
- /* setup and enable the encoder and transmitter */
- atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- }
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- atombios_dvo_setup(encoder, ATOM_ENABLE);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- atombios_dac_setup(encoder, ATOM_ENABLE);
- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- atombios_tv_setup(encoder, ATOM_ENABLE);
- else
- atombios_tv_setup(encoder, ATOM_DISABLE);
- }
- break;
- }
-
- if (ext_encoder) {
- if (ASIC_IS_DCE41(rdev))
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
- else
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
- }
-
- atombios_apply_encoder_quirks(encoder, adjusted_mode);
-
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- r600_hdmi_enable(encoder);
- r600_hdmi_setmode(encoder, adjusted_mode);
- }
-}
-
-static bool
-atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-
- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
- ATOM_DEVICE_CV_SUPPORT |
- ATOM_DEVICE_CRT_SUPPORT)) {
- DAC_LOAD_DETECTION_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
- uint8_t frev, crev;
-
- memset(&args, 0, sizeof(args));
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return false;
-
- args.sDacload.ucMisc = 0;
-
- if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
- (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
- args.sDacload.ucDacType = ATOM_DAC_A;
- else
- args.sDacload.ucDacType = ATOM_DAC_B;
-
- if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
- args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
- else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
- args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
- else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
- args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
- if (crev >= 3)
- args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
- } else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
- args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
- if (crev >= 3)
- args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
- }
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- return true;
- } else
- return false;
-}
-
-static enum drm_connector_status
-radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- uint32_t bios_0_scratch;
-
- if (!atombios_dac_load_detect(encoder, connector)) {
-		DRM_DEBUG_KMS("detect returned false\n");
- return connector_status_unknown;
- }
-
- if (rdev->family >= CHIP_R600)
- bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
- else
- bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
-
- DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
- if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
- if (bios_0_scratch & ATOM_S0_CRT1_MASK)
- return connector_status_connected;
- }
- if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
- if (bios_0_scratch & ATOM_S0_CRT2_MASK)
- return connector_status_connected;
- }
- if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
- if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
- return connector_status_connected;
- }
- if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
- if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
- return connector_status_connected; /* CTV */
- else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
- return connector_status_connected; /* STV */
- }
- return connector_status_disconnected;
-}
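
Both detect paths decode BIOS scratch register 0 the same way: load detection runs first, then each supported device checks its status bits in the scratch word. A sketch of that decode with placeholder masks; the real ATOM_S0_* values live in the AtomBIOS headers and differ from these.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define S0_CRT1_MASK 0x00000003u	/* placeholder masks */
#define S0_TV1_MASK  0x000000f0u

static bool device_connected(uint32_t bios_0_scratch, uint32_t mask)
{
	return (bios_0_scratch & mask) != 0;
}

int main(void)
{
	uint32_t bios_0_scratch = 0x00000002;	/* pretend CRT1 pulled a load */

	printf("CRT1: %s\n", device_connected(bios_0_scratch, S0_CRT1_MASK) ?
	       "connected" : "disconnected");
	printf("TV1:  %s\n", device_connected(bios_0_scratch, S0_TV1_MASK) ?
	       "connected" : "disconnected");
	return 0;
}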
-
-static enum drm_connector_status
-radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
- u32 bios_0_scratch;
-
- if (!ASIC_IS_DCE4(rdev))
- return connector_status_unknown;
-
- if (!ext_encoder)
- return connector_status_unknown;
-
- if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
- return connector_status_unknown;
-
- /* load detect on the dp bridge */
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
-
- bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
-
- DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
- if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
- if (bios_0_scratch & ATOM_S0_CRT1_MASK)
- return connector_status_connected;
- }
- if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
- if (bios_0_scratch & ATOM_S0_CRT2_MASK)
- return connector_status_connected;
- }
- if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
- if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
- return connector_status_connected;
- }
- if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
- if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
- return connector_status_connected; /* CTV */
- else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
- return connector_status_connected; /* STV */
- }
- return connector_status_disconnected;
-}
-
-void
-radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
-{
- struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
-
- if (ext_encoder)
- /* ddc_setup on the dp bridge */
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
-
-}
-
-static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
- if ((radeon_encoder->active_device &
- (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
- (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
- ENCODER_OBJECT_ID_NONE)) {
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (dig)
- dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
- }
-
- radeon_atom_output_lock(encoder, true);
- radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-
- /* select the clock/data port if it uses a router */
- if (radeon_connector->router.cd_valid)
- radeon_router_select_cd_port(radeon_connector);
-
- /* turn eDP panel on for mode set */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- atombios_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_ON);
- }
-
- /* this is needed for the pll/ss setup to work correctly in some cases */
- atombios_set_encoder_crtc_source(encoder);
-}
-
-static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
-{
- radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
- radeon_atom_output_lock(encoder, false);
-}
-
-static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig;
-
- /* check for pre-DCE3 cards with shared encoders;
- * can't really use the links individually, so don't disable
- * the encoder if it's in use by another connector
- */
- if (!ASIC_IS_DCE3(rdev)) {
- struct drm_encoder *other_encoder;
- struct radeon_encoder *other_radeon_encoder;
-
- list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
- other_radeon_encoder = to_radeon_encoder(other_encoder);
- if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
- drm_helper_encoder_in_use(other_encoder))
- goto disable_done;
- }
- }
-
- radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- if (ASIC_IS_DCE4(rdev))
- /* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- else {
- /* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
- }
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- atombios_dvo_setup(encoder, ATOM_DISABLE);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- atombios_dac_setup(encoder, ATOM_DISABLE);
- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- atombios_tv_setup(encoder, ATOM_DISABLE);
- break;
- }
-
-disable_done:
- if (radeon_encoder_is_digital(encoder)) {
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
- r600_hdmi_disable(encoder);
- dig = radeon_encoder->enc_priv;
- dig->dig_encoder = -1;
- }
- radeon_encoder->active_device = 0;
-}
-
-/* these are handled by the primary encoders */
-static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
-{
-
-}
-
-static void radeon_atom_ext_commit(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-radeon_atom_ext_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void radeon_atom_ext_disable(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
-{
-
-}
-
-static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
- .dpms = radeon_atom_ext_dpms,
- .mode_fixup = radeon_atom_ext_mode_fixup,
- .prepare = radeon_atom_ext_prepare,
- .mode_set = radeon_atom_ext_mode_set,
- .commit = radeon_atom_ext_commit,
- .disable = radeon_atom_ext_disable,
- /* no detect for TMDS/LVDS yet */
-};
-
-static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
- .dpms = radeon_atom_encoder_dpms,
- .mode_fixup = radeon_atom_mode_fixup,
- .prepare = radeon_atom_encoder_prepare,
- .mode_set = radeon_atom_encoder_mode_set,
- .commit = radeon_atom_encoder_commit,
- .disable = radeon_atom_encoder_disable,
- .detect = radeon_atom_dig_detect,
-};
-
-static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
- .dpms = radeon_atom_encoder_dpms,
- .mode_fixup = radeon_atom_mode_fixup,
- .prepare = radeon_atom_encoder_prepare,
- .mode_set = radeon_atom_encoder_mode_set,
- .commit = radeon_atom_encoder_commit,
- .detect = radeon_atom_dac_detect,
-};
-
-void radeon_enc_destroy(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- kfree(radeon_encoder->enc_priv);
- drm_encoder_cleanup(encoder);
- kfree(radeon_encoder);
-}
-
-static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
- .destroy = radeon_enc_destroy,
-};
-
-struct radeon_encoder_atom_dac *
-radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
-{
- struct drm_device *dev = radeon_encoder->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
-
- if (!dac)
- return NULL;
-
- dac->tv_std = radeon_atombios_get_tv_info(rdev);
- return dac;
-}
-
-struct radeon_encoder_atom_dig *
-radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
-{
- int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
-
- if (!dig)
- return NULL;
-
- /* coherent mode by default */
- dig->coherent_mode = true;
- dig->dig_encoder = -1;
-
- if (encoder_enum == 2)
- dig->linkb = true;
- else
- dig->linkb = false;
-
- return dig;
-}
-
-void
-radeon_add_atom_encoder(struct drm_device *dev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
-{
- struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder;
- struct radeon_encoder *radeon_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->encoder_enum == encoder_enum) {
- radeon_encoder->devices |= supported_device;
- return;
- }
- }
-
- /* add a new one */
- radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
- if (!radeon_encoder)
- return;
-
- encoder = &radeon_encoder->base;
- switch (rdev->num_crtc) {
- case 1:
- encoder->possible_crtcs = 0x1;
- break;
- case 2:
- default:
- encoder->possible_crtcs = 0x3;
- break;
- case 4:
- encoder->possible_crtcs = 0xf;
- break;
- case 6:
- encoder->possible_crtcs = 0x3f;
- break;
- }
-
- radeon_encoder->enc_priv = NULL;
-
- radeon_encoder->encoder_enum = encoder_enum;
- radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- radeon_encoder->devices = supported_device;
- radeon_encoder->rmx_type = RMX_OFF;
- radeon_encoder->underscan_type = UNDERSCAN_OFF;
- radeon_encoder->is_ext_encoder = false;
- radeon_encoder->caps = caps;
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- radeon_encoder->rmx_type = RMX_FULL;
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
- radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
- } else {
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
- radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
- }
- drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
- radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
- drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
- radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
- drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- radeon_encoder->rmx_type = RMX_FULL;
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
- radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
- } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
- radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
- } else {
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
- radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
- }
- drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_SI170B:
- case ENCODER_OBJECT_ID_CH7303:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
- case ENCODER_OBJECT_ID_TITFP513:
- case ENCODER_OBJECT_ID_VT1623:
- case ENCODER_OBJECT_ID_HDMI_SI1930:
- case ENCODER_OBJECT_ID_TRAVIS:
- case ENCODER_OBJECT_ID_NUTMEG:
- /* these are handled by the primary encoders */
- radeon_encoder->is_ext_encoder = true;
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
- else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
- else
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
- break;
- }
-}
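
The possible_crtcs switch in radeon_add_atom_encoder() encodes one bit per CRTC (1 -> 0x1, 2 -> 0x3, 4 -> 0xf, 6 -> 0x3f). Assuming num_crtc stays well below 32, the same mask can be computed directly, as this sketch shows:

#include <stdint.h>
#include <stdio.h>

static uint32_t possible_crtcs_mask(unsigned int num_crtc)
{
	return (1u << num_crtc) - 1;	/* 1->0x1, 2->0x3, 4->0xf, 6->0x3f */
}

int main(void)
{
	unsigned int counts[] = { 1, 2, 4, 6 };

	for (int i = 0; i < 4; i++)
		printf("num_crtc=%u -> possible_crtcs=0x%x\n",
		       counts[i], possible_crtcs_mask(counts[i]));
	return 0;
}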
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
deleted file mode 100644
index 44d87b6..0000000
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright 2011 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Alex Deucher
- *
- */
-#include "drmP.h"
-#include "radeon_drm.h"
-#include "radeon.h"
-#include "atom.h"
-
-#define TARGET_HW_I2C_CLOCK 50
-
-/* these are a limitation of ProcessI2cChannelTransaction, not the hw */
-#define ATOM_MAX_HW_I2C_WRITE 2
-#define ATOM_MAX_HW_I2C_READ 255
-
-static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
- u8 slave_addr, u8 flags,
- u8 *buf, u8 num)
-{
- struct drm_device *dev = chan->dev;
- struct radeon_device *rdev = dev->dev_private;
- PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
- unsigned char *base;
- u16 out;
-
- memset(&args, 0, sizeof(args));
-
- base = (unsigned char *)rdev->mode_info.atom_context->scratch;
-
- if (flags & HW_I2C_WRITE) {
- if (num > ATOM_MAX_HW_I2C_WRITE) {
- DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
- return -EINVAL;
- }
- memcpy(&out, buf, num);
- args.lpI2CDataOut = cpu_to_le16(out);
- } else {
- if (num > ATOM_MAX_HW_I2C_READ) {
- DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
- return -EINVAL;
- }
- }
-
- args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
- args.ucRegIndex = 0;
- args.ucTransBytes = num;
- args.ucSlaveAddr = slave_addr << 1;
- args.ucLineNumber = chan->rec.i2c_id;
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- /* error */
- if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("hw_i2c error\n");
- return -EIO;
- }
-
- if (!(flags & HW_I2C_WRITE))
- memcpy(buf, base, num);
-
- return 0;
-}
-
-int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg *msgs, int num)
-{
- struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
- struct i2c_msg *p;
- int i, remaining, current_count, buffer_offset, max_bytes, ret;
- u8 buf = 0, flags;
-
- /* check for bus probe */
- p = &msgs[0];
- if ((num == 1) && (p->len == 0)) {
- ret = radeon_process_i2c_ch(i2c,
- p->addr, HW_I2C_WRITE,
- &buf, 1);
- if (ret)
- return ret;
- else
- return num;
- }
-
- for (i = 0; i < num; i++) {
- p = &msgs[i];
- remaining = p->len;
- buffer_offset = 0;
-		/* max_bytes is a limitation of ProcessI2cChannelTransaction, not the hw */
- if (p->flags & I2C_M_RD) {
- max_bytes = ATOM_MAX_HW_I2C_READ;
- flags = HW_I2C_READ;
- } else {
- max_bytes = ATOM_MAX_HW_I2C_WRITE;
- flags = HW_I2C_WRITE;
- }
- while (remaining) {
- if (remaining > max_bytes)
- current_count = max_bytes;
- else
- current_count = remaining;
- ret = radeon_process_i2c_ch(i2c,
- p->addr, flags,
- &p->buf[buffer_offset], current_count);
- if (ret)
- return ret;
- remaining -= current_count;
- buffer_offset += current_count;
- }
- }
-
- return num;
-}
-
-u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
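
The transfer loop in the removed radeon_atom_hw_i2c_xfer() splits each message into chunks no larger than the table limits (2-byte writes, 255-byte reads) and aborts on the first failed chunk. A user-space sketch of that chunking loop; process_chunk() is a hypothetical stand-in for radeon_process_i2c_ch().

#include <stdio.h>

#define MAX_WRITE 2	/* same limits as ATOM_MAX_HW_I2C_WRITE/READ above */
#define MAX_READ  255

static int process_chunk(const char *buf, int len)
{
	printf("xfer %d byte(s) starting at '%c'\n", len, buf[0]);
	return 0;	/* 0 == success, nonzero == error, as in the driver */
}

static int chunked_xfer(const char *buf, int len, int is_read)
{
	int max = is_read ? MAX_READ : MAX_WRITE;
	int offset = 0;

	while (len) {
		int count = len > max ? max : len;
		int ret = process_chunk(buf + offset, count);

		if (ret)
			return ret;	/* abort on the first failed chunk */
		len -= count;
		offset += count;
	}
	return 0;
}

int main(void)
{
	return chunked_xfer("hello", 5, 0);	/* write path: 2 + 2 + 1 bytes */
}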
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 04a5d83..87ff586 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -39,40 +39,7 @@
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
-void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
-extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
- int ring, u32 cp_int_cntl);
-
-void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
- unsigned *bankh, unsigned *mtaspect,
- unsigned *tile_split)
-{
- *bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
- *bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
- *mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
- *tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
- switch (*bankw) {
- default:
- case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
- case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
- case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
- case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
- }
- switch (*bankh) {
- default:
- case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
- case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
- case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
- case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
- }
- switch (*mtaspect) {
- default:
- case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
- case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
- case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
- case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
- }
-}
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
@@ -99,25 +66,6 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
}
}
-void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
-{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
- int i;
-
- if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)
- break;
- udelay(1);
- }
- }
-}
-
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
/* enable the pflip int */
@@ -214,57 +162,6 @@ int sumo_get_temp(struct radeon_device *rdev)
return actual_temp * 1000;
}
-void sumo_pm_init_profile(struct radeon_device *rdev)
-{
- int idx;
-
- /* default */
- rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
- rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
- rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
-
- /* low,mid sh/mh */
- if (rdev->flags & RADEON_IS_MOBILITY)
- idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
- else
- idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
-
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
-
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
-
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
-
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
-
- /* high sh/mh */
- idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
- rdev->pm.power_state[idx].num_clock_modes - 1;
-
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
- rdev->pm.power_state[idx].num_clock_modes - 1;
-}
-
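
The removed sumo_pm_init_profile() fills one table entry per power profile (default, then low/mid/high for single- and multi-head), each pairing a power-state index with a clock-mode index; mobility parts source the low/mid entries from the battery state, everything else from the performance state, and the high entries additionally select the state's top clock mode. The shape of that table fill, with the struct and slot names as illustrative stand-ins for the radeon ones (the real table tracks dpms-off and dpms-on indices separately; this sketch collapses them):

    enum { PROF_DEFAULT, PROF_LOW_SH, PROF_LOW_MH, PROF_MID_SH,
           PROF_MID_MH, PROF_HIGH_SH, PROF_HIGH_MH, PROF_MAX };

    struct pm_profile { int ps_idx; int cm_idx; };

    static void init_profiles(struct pm_profile prof[PROF_MAX],
                              int default_ps, int lowmid_ps,
                              int perf_ps, int perf_top_cm)
    {
            int i;

            prof[PROF_DEFAULT].ps_idx = default_ps;
            prof[PROF_DEFAULT].cm_idx = 0;
            /* low/mid: battery state on mobility parts, else performance */
            for (i = PROF_LOW_SH; i <= PROF_MID_MH; i++) {
                    prof[i].ps_idx = lowmid_ps;
                    prof[i].cm_idx = 0;
            }
            /* high: performance state at its highest clock mode */
            for (i = PROF_HIGH_SH; i <= PROF_HIGH_MH; i++) {
                    prof[i].ps_idx = perf_ps;
                    prof[i].cm_idx = perf_top_cm;
            }
    }
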
void evergreen_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -904,7 +801,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
!evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
!evergreen_check_latency_hiding(&wm) ||
(rdev->disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
+ DRM_INFO("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
@@ -1029,7 +926,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
u32 tmp;
int r;
- if (rdev->gart.robj == NULL) {
+ if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -1076,9 +973,6 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
evergreen_pcie_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(rdev->mc.gtt_size >> 20),
- (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -1086,6 +980,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
+ int r;
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
@@ -1105,7 +1000,14 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
- radeon_gart_table_vram_unpin(rdev);
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
}
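
This hunk reverts the radeon_gart_table_vram_unpin() helper back to its open-coded body. The pattern is worth noting because it recurs for the blit shader object later in the patch: a pinned, kmapped buffer object must be reserved before it may be unmapped and unpinned, and if the reserve fails the teardown is simply skipped. As a sketch, using the same radeon_bo calls that appear in the hunk:

    static void unpin_mapped_bo(struct radeon_bo *bo)
    {
            int r;

            if (bo == NULL)
                    return;
            r = radeon_bo_reserve(bo, false);       /* take the bo lock */
            if (r == 0) {
                    radeon_bo_kunmap(bo);           /* drop CPU mapping first */
                    radeon_bo_unpin(bo);            /* then release the pin */
                    radeon_bo_unreserve(bo);
            }
    }
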
void evergreen_pcie_gart_fini(struct radeon_device *rdev)
@@ -1304,7 +1206,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
- WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
if (rdev->flags & RADEON_IS_IGP) {
tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
@@ -1340,20 +1242,18 @@ void evergreen_mc_program(struct radeon_device *rdev)
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
-
/* set to DX10/11 mode */
- radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(ring, 1);
+ radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(rdev, 1);
/* FIXME: implement */
- radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(ring, ib->length_dw);
+ radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(rdev, ib->length_dw);
}
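
evergreen_ring_ib_execute() points the CP at an indirect buffer with three payload dwords after the PACKET3_INDIRECT_BUFFER header: the low 32 bits of the 40-bit GPU address (bottom two bits masked off, with a swap field OR'd in on big-endian kernels), the top 8 address bits, and the IB length in dwords. A standalone sketch of that address split:

    #include <stdint.h>

    /* Pack the 3-dword INDIRECT_BUFFER payload used above. */
    static void pack_ib_payload(uint64_t gpu_addr, uint32_t len_dw,
                                uint32_t out[3])
    {
            out[0] = (uint32_t)(gpu_addr & 0xFFFFFFFC); /* low bits, dword aligned */
            out[1] = (uint32_t)(gpu_addr >> 32) & 0xFF; /* address bits 39:32 */
            out[2] = len_dw;                            /* IB size in dwords */
    }
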
@@ -1391,73 +1291,71 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
static int evergreen_cp_start(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
uint32_t cp_me;
- r = radeon_ring_lock(rdev, ring, 7);
+ r = radeon_ring_lock(rdev, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(ring, 0x1);
- radeon_ring_write(ring, 0x0);
- radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
- radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(rdev, 0x1);
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+ radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_unlock_commit(rdev);
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
- r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
+ r = radeon_ring_lock(rdev, evergreen_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* setup clear context state */
- radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
for (i = 0; i < evergreen_default_size; i++)
- radeon_ring_write(ring, evergreen_default_state[i]);
+ radeon_ring_write(rdev, evergreen_default_state[i]);
- radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
/* set clear context state */
- radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(rdev, 0);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(ring, 0xc0026f00);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(rdev, 0xc0026f00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
/* Clear consts */
- radeon_ring_write(ring, 0xc0036f00);
- radeon_ring_write(ring, 0x00000bc4);
- radeon_ring_write(ring, 0xffffffff);
- radeon_ring_write(ring, 0xffffffff);
- radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(rdev, 0xc0036f00);
+ radeon_ring_write(rdev, 0x00000bc4);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(ring, 0xc0026900);
- radeon_ring_write(ring, 0x00000316);
- radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- radeon_ring_write(ring, 0x00000010); /* */
+ radeon_ring_write(rdev, 0xc0026900);
+ radeon_ring_write(rdev, 0x00000316);
+ radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(rdev, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev);
return 0;
}
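
evergreen_cp_start() illustrates the ring discipline used throughout this patch: radeon_ring_lock() reserves an exact dword count (7 for ME_INITIALIZE, evergreen_default_size + 19 for the clear-state run), each radeon_ring_write() consumes one reserved dword, and radeon_ring_unlock_commit() publishes the new write pointer to the GPU. A toy model of that contract (types and helpers here are assumptions, not the driver's):

    #include <assert.h>
    #include <stdint.h>

    struct toy_ring { uint32_t buf[256]; unsigned wptr, reserved; };

    static void ring_lock(struct toy_ring *r, unsigned ndw)
    {
            r->reserved = ndw;          /* caller promises at most ndw writes */
    }

    static void ring_write(struct toy_ring *r, uint32_t v)
    {
            assert(r->reserved > 0);    /* writing past the reservation is a bug */
            r->buf[r->wptr++ % 256] = v;
            r->reserved--;
    }

    static void ring_unlock_commit(struct toy_ring *r)
    {
            /* the driver pads any unused dwords and then writes CP_RB_WPTR
             * so the CP starts fetching the new packets */
            r->reserved = 0;
    }
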
int evergreen_cp_resume(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
@@ -1475,14 +1373,13 @@ int evergreen_cp_resume(struct radeon_device *rdev)
RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size / 8);
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_SEM_WAIT_TIMER, 0x0);
- WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1490,11 +1387,14 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- ring->wptr = 0;
- WREG32(CP_RB_WPTR, ring->wptr);
+ rdev->cp.wptr = 0;
+ WREG32(CP_RB_WPTR, rdev->cp.wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+ RB_RPTR_SWAP(2) |
+#endif
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -1509,16 +1409,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+ WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
- ring->rptr = RREG32(CP_RB_RPTR);
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
evergreen_cp_start(rdev);
- ring->ready = true;
- r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+ rdev->cp.ready = true;
+ r = radeon_ring_test(rdev);
if (r) {
- ring->ready = false;
+ rdev->cp.ready = false;
return r;
}
return 0;
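
evergreen_cp_resume() programs CP_RB_CNTL with drm_order(ring_size / 8): drm_order(n) is the ceiling base-2 logarithm, so the register receives the ring size as a power-of-two exponent in 8-byte units. A self-contained equivalent:

    #include <assert.h>

    /* Equivalent of drm_order(): smallest o with (1 << o) >= size. */
    static int order_of(unsigned long size)
    {
            int o = 0;

            while ((1UL << o) < size)
                    o++;
            return o;
    }

    int main(void)
    {
            /* e.g. the 1 MiB GFX ring set up in evergreen_init() */
            assert(order_of((1024 * 1024) / 8) == 17);  /* 2^17 * 8 bytes */
            return 0;
    }
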
@@ -2116,7 +2016,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
- rdev->config.evergreen.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
@@ -2387,7 +2286,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
u32 srbm_status;
u32 grbm_status;
@@ -2400,19 +2299,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, ring);
+ r100_gpu_lockup_update(lockup, &rdev->cp);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
+ r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
}
- ring->rptr = RREG32(CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
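
The lockup test above has two arms: if GRBM reports the GUI idle, the tracker is re-baselined and no lockup is declared; otherwise two PACKET2 NOPs are committed to force CP activity and r100_gpu_cp_is_lockup() checks whether the read pointer has advanced since the previous call. The underlying heuristic, sketched with a hypothetical tracker struct:

    #include <stdbool.h>
    #include <stdint.h>

    /* Lockup is declared when the CP read pointer stops moving for
     * longer than a threshold; any progress re-baselines the timer. */
    struct lockup_track { uint32_t last_rptr; uint64_t last_change_ms; };

    static bool cp_is_lockup(struct lockup_track *t, uint32_t rptr,
                             uint64_t now_ms, uint64_t timeout_ms)
    {
            if (rptr != t->last_rptr) {
                    t->last_rptr = rptr;
                    t->last_change_ms = now_ms;
                    return false;
            }
            return (now_ms - t->last_change_ms) > timeout_ms;
    }
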
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2504,13 +2403,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
- if (rdev->family >= CHIP_CAYMAN) {
- cayman_cp_int_cntl_setup(rdev, 0,
- CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
- cayman_cp_int_cntl_setup(rdev, 1, 0);
- cayman_cp_int_cntl_setup(rdev, 2, 0);
- } else
- WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2555,7 +2448,6 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
int evergreen_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
- u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
@@ -2580,28 +2472,11 @@ int evergreen_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
- if (rdev->family >= CHIP_CAYMAN) {
- /* enable CP interrupts on all rings */
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
- DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
- cp_int_cntl |= TIME_STAMP_INT_ENABLE;
- }
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
- DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
- cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
- }
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
- DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
- cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
- }
- } else {
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
- DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
- cp_int_cntl |= RB_INT_ENABLE;
- cp_int_cntl |= TIME_STAMP_INT_ENABLE;
- }
+ if (rdev->irq.sw_int) {
+ DRM_DEBUG("evergreen_irq_set: sw int\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
-
if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2661,12 +2536,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
}
- if (rdev->family >= CHIP_CAYMAN) {
- cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
- cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
- cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
- } else
- WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -2701,7 +2571,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
return 0;
}
-static void evergreen_irq_ack(struct radeon_device *rdev)
+static inline void evergreen_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
@@ -2812,7 +2682,7 @@ void evergreen_irq_suspend(struct radeon_device *rdev)
r600_rlc_stop(rdev);
}
-static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
@@ -2858,9 +2728,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
return IRQ_NONE;
}
restart_ih:
- /* Order reading of wptr vs. reading of IH ring data */
- rmb();
-
/* display interrupts */
evergreen_irq_ack(rdev);
@@ -3081,24 +2948,11 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
- radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_process(rdev);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
- if (rdev->family >= CHIP_CAYMAN) {
- switch (src_data) {
- case 0:
- radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
- break;
- case 1:
- radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
- break;
- case 2:
- radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
- break;
- }
- } else
- radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_process(rdev);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
@@ -3128,11 +2982,11 @@ restart_ih:
static int evergreen_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
- evergreen_pcie_gen2_enable(rdev);
+ if (!ASIC_IS_DCE5(rdev))
+ evergreen_pcie_gen2_enable(rdev);
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
@@ -3157,10 +3011,6 @@ static int evergreen_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
@@ -3173,8 +3023,8 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_blit_init(rdev);
if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
+ evergreen_blit_fini(rdev);
+ rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
@@ -3183,12 +3033,6 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -3198,9 +3042,7 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
- r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
r = evergreen_cp_load_microcode(rdev);
@@ -3210,23 +3052,6 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
- return r;
- }
-
- r = r600_audio_init(rdev);
- if (r) {
- DRM_ERROR("radeon: audio init failed\n");
- return r;
- }
-
return 0;
}
@@ -3246,11 +3071,15 @@ int evergreen_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
DRM_ERROR("evergreen startup failed on resume\n");
- rdev->accel_working = false;
+ return r;
+ }
+
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
@@ -3260,18 +3089,45 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ int r;
- r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
- radeon_ib_pool_suspend(rdev);
- r600_blit_suspend(rdev);
r700_cp_stop(rdev);
- ring->ready = false;
+ rdev->cp.ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
+ /* unpin shaders bo */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+
+ return 0;
+}
+
+int evergreen_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence *fence)
+{
+ int r;
+
+ mutex_lock(&rdev->r600_blit.mutex);
+ rdev->r600_blit.vb_ib = NULL;
+ r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
+ if (r) {
+ if (rdev->r600_blit.vb_ib)
+ radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+ mutex_unlock(&rdev->r600_blit.mutex);
+ return r;
+ }
+ evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
+ evergreen_blit_done_copy(rdev, fence);
+ mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
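
evergreen_copy_blit() is the copy entry point that evergreen_startup() falls back from when blitter init fails: it serializes on r600_blit.mutex, reserves ring space for the whole transfer in prepare_copy, emits the rectangles, and fences the result in done_copy; note that on a prepare failure the partially acquired vertex-buffer IB is freed before the mutex is dropped. That unwind shape, generically (types and helpers are illustrative, not driver API):

    struct blit_ctx { struct mutex lock; void *scratch; };

    static int guarded_copy(struct blit_ctx *ctx)
    {
            int r;

            mutex_lock(&ctx->lock);
            ctx->scratch = NULL;
            r = prepare(ctx);               /* may allocate ctx->scratch */
            if (r) {
                    if (ctx->scratch)       /* free the partial allocation */
                            release(ctx->scratch);
                    mutex_unlock(&ctx->lock);
                    return r;
            }
            do_copy(ctx);
            finish(ctx);                    /* emit fence, commit the ring */
            mutex_unlock(&ctx->lock);
            return 0;
    }
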
@@ -3345,8 +3201,8 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3355,24 +3211,29 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = evergreen_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ }
/* Don't start up if the MC ucode is missing on BTC parts.
* The default clocks and voltages before the MC ucode
@@ -3390,17 +3251,14 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
- r600_audio_fini(rdev);
- r600_blit_fini(rdev);
+ evergreen_blit_fini(rdev);
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
- r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
@@ -3409,7 +3267,7 @@ void evergreen_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
-void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, speed_cntl;
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 4e83fdc..2eb2518 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -32,14 +32,23 @@
#include "evergreend.h"
#include "evergreen_blit_shaders.h"
#include "cayman_blit_shaders.h"
-#include "radeon_blit_common.h"
+
+#define DI_PT_RECTLIST 0x11
+#define DI_INDEX_SIZE_16_BIT 0x0
+#define DI_SRC_SEL_AUTO_INDEX 0x2
+
+#define FMT_8 0x1
+#define FMT_5_6_5 0x8
+#define FMT_8_8_8_8 0x1a
+#define COLOR_8 0x1
+#define COLOR_5_6_5 0x8
+#define COLOR_8_8_8_8 0x1a
/* emits 17 */
static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
@@ -47,29 +56,27 @@ set_render_target(struct radeon_device *rdev, int format,
if (h < 8)
h = 8;
- cb_color_info = CB_FORMAT(format) |
- CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
- CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
- radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, pitch);
- radeon_ring_write(ring, slice);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, cb_color_info);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+ radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, pitch);
+ radeon_ring_write(rdev, slice);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, cb_color_info);
+ radeon_ring_write(rdev, (1 << 4));
+ radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
}
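
The cb_color_info written above was rebuilt from raw shifts; comparing the removed and added lines, (format << 2) corresponds to CB_FORMAT(), (1 << 8) to CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1) and (1 << 24) to CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM). A standalone check that the two spellings agree, with the field macros reconstructed from that correspondence (an inference from this hunk, not copied from evergreend.h):

    #include <assert.h>
    #include <stdint.h>

    #define CB_FORMAT(x)         ((x) << 2)
    #define CB_ARRAY_MODE(x)     ((x) << 8)
    #define CB_SOURCE_FORMAT(x)  ((x) << 24)
    #define ARRAY_1D_TILED_THIN1 1
    #define CB_SF_EXPORT_NORM    1

    int main(void)
    {
            uint32_t format = 0x1a;  /* COLOR_8_8_8_8, per the defines above */
            uint32_t named  = CB_FORMAT(format) |
                              CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
                              CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
            uint32_t raw    = (format << 2) | (1 << 24) | (1 << 8);

            assert(named == raw);
            return 0;
    }
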
/* emits 5dw */
@@ -78,7 +85,6 @@ cp_set_surface_sync(struct radeon_device *rdev,
u32 sync_type, u32 size,
u64 mc_addr)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
if (size == 0xffffffff)
@@ -86,45 +92,35 @@ cp_set_surface_sync(struct radeon_device *rdev,
else
cp_coher_size = ((size + 255) >> 8);
- if (rdev->family >= CHIP_CAYMAN) {
- /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
- * to the RB directly. For IBs, the CP programs this as part of the
- * surface_sync packet.
- */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
- }
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, sync_type);
- radeon_ring_write(ring, cp_coher_size);
- radeon_ring_write(ring, mc_addr >> 8);
- radeon_ring_write(ring, 10); /* poll interval */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(rdev, sync_type);
+ radeon_ring_write(rdev, cp_coher_size);
+ radeon_ring_write(rdev, mc_addr >> 8);
+ radeon_ring_write(rdev, 10); /* poll interval */
}
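
cp_set_surface_sync() expresses the flush range in 256-byte units: ((size + 255) >> 8) is ceil(size / 256), with 0xffffffff special-cased to the whole-aperture encoding. The rounding, spelled out:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t coher_size_units(uint32_t size_bytes)
    {
            return (size_bytes + 255) >> 8;   /* ceil(size / 256) */
    }

    int main(void)
    {
            assert(coher_size_units(1)   == 1);  /* any byte touches a unit */
            assert(coher_size_units(256) == 1);  /* exact multiple: no pad */
            assert(coher_size_units(257) == 2);
            return 0;
    }
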
/* emits 11dw + 1 surface sync = 16dw */
static void
set_shaders(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
- radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, 2);
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+ radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, 2);
+ radeon_ring_write(rdev, 0);
/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
- radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, 1);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 2);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+ radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, 1);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 2);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -134,31 +130,26 @@ set_shaders(struct radeon_device *rdev)
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
/* high addr, stride */
- sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
- SQ_VTXC_STRIDE(16);
+ sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
+ sq_vtx_constant_word2 |= (2 << 30);
#endif
/* xyzw swizzles */
- sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
- SQ_VTCX_SEL_Y(SQ_SEL_Y) |
- SQ_VTCX_SEL_Z(SQ_SEL_Z) |
- SQ_VTCX_SEL_W(SQ_SEL_W);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(ring, 0x580);
- radeon_ring_write(ring, gpu_addr & 0xffffffff);
- radeon_ring_write(ring, 48 - 1); /* size */
- radeon_ring_write(ring, sq_vtx_constant_word2);
- radeon_ring_write(ring, sq_vtx_constant_word3);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+ sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(rdev, 0x580);
+ radeon_ring_write(rdev, gpu_addr & 0xffffffff);
+ radeon_ring_write(rdev, 48 - 1); /* size */
+ radeon_ring_write(rdev, sq_vtx_constant_word2);
+ radeon_ring_write(rdev, sq_vtx_constant_word3);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
if ((rdev->family == CHIP_CEDAR) ||
(rdev->family == CHIP_PALM) ||
@@ -177,42 +168,33 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
static void
set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
- u64 gpu_addr, u32 size)
+ u64 gpu_addr)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_tex_resource_word0, sq_tex_resource_word1;
u32 sq_tex_resource_word4, sq_tex_resource_word7;
if (h < 1)
h = 1;
- sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
+ sq_tex_resource_word0 = (1 << 0); /* 2D */
sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
((w - 1) << 18));
- sq_tex_resource_word1 = ((h - 1) << 0) |
- TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
/* xyzw swizzles */
- sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
- TEX_DST_SEL_Y(SQ_SEL_Y) |
- TEX_DST_SEL_Z(SQ_SEL_Z) |
- TEX_DST_SEL_W(SQ_SEL_W);
-
- sq_tex_resource_word7 = format |
- S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
-
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, size, gpu_addr);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_tex_resource_word0);
- radeon_ring_write(ring, sq_tex_resource_word1);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, sq_tex_resource_word4);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_tex_resource_word7);
+ sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
+
+ sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, sq_tex_resource_word0);
+ radeon_ring_write(rdev, sq_tex_resource_word1);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, sq_tex_resource_word4);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, sq_tex_resource_word7);
}
/* emits 12 */
@@ -220,7 +202,6 @@ static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
/* work around some hw bugs */
if (x2 == 0)
x1 = 1;
@@ -231,44 +212,43 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
x2 = 2;
}
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
}
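
set_scissors() packs each corner with x in the low 16 bits and y in the high 16; the extra (1 << 31) on the generic and window top-left dwords is a control bit carried over unchanged from the newer code (its register-level meaning is not spelled out in this file). The coordinate packing alone:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t pack_xy(uint32_t x, uint32_t y)
    {
            return (x & 0xffff) | (y << 16);
    }

    int main(void)
    {
            /* e.g. a 1920x1080 scissor: TL = (0,0), BR = (1920,1080) */
            assert(pack_xy(0, 0) == 0);
            assert(pack_xy(1920, 1080) == ((1080u << 16) | 1920u));
            return 0;
    }
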
/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, DI_PT_RECTLIST);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, DI_PT_RECTLIST);
- radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
+ radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);
- radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(ring, 1);
+ radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
+ radeon_ring_write(rdev, 1);
- radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(ring, 3);
- radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
+ radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+ radeon_ring_write(rdev, 3);
+ radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
}
@@ -276,7 +256,6 @@ draw_auto(struct radeon_device *rdev)
static void
set_default_state(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
@@ -290,8 +269,8 @@ set_default_state(struct radeon_device *rdev)
int dwords;
/* set clear context state */
- radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(rdev, 0);
if (rdev->family < CHIP_CAYMAN) {
switch (rdev->family) {
@@ -548,63 +527,88 @@ set_default_state(struct radeon_device *rdev)
NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
/* disable dyn gprs */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, 0);
/* setup LDS */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0x10001000);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, 0x10001000);
/* SQ config */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
- radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, sq_config);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_thread_resource_mgmt);
- radeon_ring_write(ring, sq_thread_resource_mgmt_2);
- radeon_ring_write(ring, sq_stack_resource_mgmt_1);
- radeon_ring_write(ring, sq_stack_resource_mgmt_2);
- radeon_ring_write(ring, sq_stack_resource_mgmt_3);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+ radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, sq_config);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, sq_thread_resource_mgmt);
+ radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
}
/* CONTEXT_CONTROL */
- radeon_ring_write(ring, 0xc0012800);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(rdev, 0xc0012800);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(ring, 0xc0026f00);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(rdev, 0xc0026f00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
/* SET_SAMPLER */
- radeon_ring_write(ring, 0xc0036e00);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000012);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(rdev, 0xc0036e00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000012);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
/* set to DX10/11 mode */
- radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(ring, 1);
+ radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(rdev, 1);
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
- radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(ring, dwords);
+ radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
+ radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
+ radeon_ring_write(rdev, dwords);
}
+static inline uint32_t i2f(uint32_t input)
+{
+ u32 result, i, exponent, fraction;
+
+ if ((input & 0x3fff) == 0)
+ result = 0; /* 0 is a special case */
+ else {
+ exponent = 140; /* exponent biased by 127 */
+ fraction = (input & 0x3fff) << 10; /* cheat and only
+ handle numbers below 2^14 */
+ for (i = 0; i < 14; i++) {
+ if (fraction & 0x800000)
+ break;
+ else {
+ fraction = fraction << 1; /* keep
+ shifting left until top bit = 1 */
+ exponent = exponent - 1;
+ }
+ }
+ result = exponent << 23 | (fraction & 0x7fffff); /* mask
+ off top bit; assumed 1 */
+ }
+ return result;
+}
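
i2f() builds an IEEE-754 single-precision bit pattern from a small unsigned integer without touching the FPU (kernel code may not use floating point): the masked 14-bit input is placed below bit 23, shifted left until the implicit leading 1 reaches bit 23, and the biased exponent, starting at 140 (= 127 + 13), is decremented once per shift. A user-space cross-check against the hardware encoding, assuming i2f() is made non-static and compiled alongside:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    uint32_t i2f(uint32_t input);      /* the function above */

    int main(void)
    {
            uint32_t v, bits;
            float f;

            for (v = 0; v < 0x4000; v++) {     /* the full supported range */
                    f = (float)v;
                    memcpy(&bits, &f, sizeof(bits));
                    assert(i2f(v) == bits);    /* bit-exact, including v == 0 */
            }
            return 0;
    }
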
+
int evergreen_blit_init(struct radeon_device *rdev)
{
u32 obj_size;
@@ -613,26 +617,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
- rdev->r600_blit.primitives.set_render_target = set_render_target;
- rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
- rdev->r600_blit.primitives.set_shaders = set_shaders;
- rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
- rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
- rdev->r600_blit.primitives.set_scissors = set_scissors;
- rdev->r600_blit.primitives.draw_auto = draw_auto;
- rdev->r600_blit.primitives.set_default_state = set_default_state;
-
- rdev->r600_blit.ring_size_common = 55; /* shaders + def state */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
- rdev->r600_blit.ring_size_common += 5; /* done copy */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
-
- rdev->r600_blit.ring_size_per_loop = 74;
- if (rdev->family >= CHIP_CAYMAN)
- rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
-
- rdev->r600_blit.max_dim = 16384;
-
/* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
goto done;
@@ -728,3 +712,277 @@ done:
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
+
+void evergreen_blit_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->r600_blit.shader_obj == NULL)
+ return;
+ /* If we can't reserve the bo, unref should be enough to destroy
+ * it when it becomes idle.
+ */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (!r) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+ radeon_bo_unref(&rdev->r600_blit.shader_obj);
+}
+
+static int evergreen_vb_ib_get(struct radeon_device *rdev)
+{
+ int r;
+ r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
+ if (r) {
+ DRM_ERROR("failed to get IB for vertex buffer\n");
+ return r;
+ }
+
+ rdev->r600_blit.vb_total = 64*1024;
+ rdev->r600_blit.vb_used = 0;
+ return 0;
+}
+
+static void evergreen_vb_ib_put(struct radeon_device *rdev)
+{
+ radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
+ radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+}
+
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
+{
+ int r;
+ int ring_size, line_size;
+ int max_size;
+ /* loops of emits + fence emit possible */
+ int dwords_per_loop = 74, num_loops;
+
+ r = evergreen_vb_ib_get(rdev);
+ if (r)
+ return r;
+
+ /* 8 bpp vs 32 bpp for xfer unit */
+ if (size_bytes & 3)
+ line_size = 8192;
+ else
+ line_size = 8192 * 4;
+
+ max_size = 8192 * line_size;
+
+ /* major loops cover the max size transfer */
+ num_loops = ((size_bytes + max_size) / max_size);
+ /* minor loops cover the extra non aligned bits */
+ num_loops += ((size_bytes % line_size) ? 1 : 0);
+ /* calculate number of loops correctly */
+ ring_size = num_loops * dwords_per_loop;
+ /* set default + shaders */
+ ring_size += 55; /* shaders + def state */
+ ring_size += 10; /* fence emit for VB IB */
+ ring_size += 5; /* done copy */
+ ring_size += 10; /* fence emit for done copy */
+ r = radeon_ring_lock(rdev, ring_size);
+ if (r)
+ return r;
+
+ set_default_state(rdev); /* 36 */
+ set_shaders(rdev); /* 16 */
+ return 0;
+}
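
The dword budget above is conservative: 74 dwords per rectangle loop plus 55 for default state and shaders, 10 for each of the two fence emits, and 5 for the done-copy packet. Note that ((size_bytes + max_size) / max_size) rounds the major loop count up and over-counts by one when size_bytes is an exact multiple of max_size, which merely over-reserves ring space. Worked through for a 1 MiB aligned copy:

    #include <assert.h>

    int main(void)
    {
            int size_bytes = 1024 * 1024;         /* 32-bit aligned transfer */
            int line_size  = 8192 * 4;            /* bytes per row at 32 bpp */
            int max_size   = 8192 * line_size;    /* one full-height pass */
            int num_loops, ring_size;

            num_loops  = (size_bytes + max_size) / max_size;  /* major passes */
            num_loops += (size_bytes % line_size) ? 1 : 0;    /* unaligned tail */
            ring_size  = num_loops * 74 + 55 + 10 + 5 + 10;
            assert(num_loops == 1 && ring_size == 154);
            return 0;
    }
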
+
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
+{
+ int r;
+
+ if (rdev->r600_blit.vb_ib)
+ evergreen_vb_ib_put(rdev);
+
+ if (fence)
+ r = radeon_fence_emit(rdev, fence);
+
+ radeon_ring_unlock_commit(rdev);
+}
+
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes)
+{
+ int max_bytes;
+ u64 vb_gpu_addr;
+ u32 *vb;
+
+ DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
+ size_bytes, rdev->r600_blit.vb_used);
+ vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
+ if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
+ max_bytes = 8192;
+
+ while (size_bytes) {
+ int cur_size = size_bytes;
+ int src_x = src_gpu_addr & 255;
+ int dst_x = dst_gpu_addr & 255;
+ int h = 1;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
+
+ if (!src_x && !dst_x) {
+ h = (cur_size / max_bytes);
+ if (h > 8192)
+ h = 8192;
+ if (h == 0)
+ h = 1;
+ else
+ cur_size = max_bytes;
+ } else {
+ if (cur_size > max_bytes)
+ cur_size = max_bytes;
+ if (cur_size > (max_bytes - dst_x))
+ cur_size = (max_bytes - dst_x);
+ if (cur_size > (max_bytes - src_x))
+ cur_size = (max_bytes - src_x);
+ }
+
+ if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
+ WARN_ON(1);
+ }
+
+ vb[0] = i2f(dst_x);
+ vb[1] = 0;
+ vb[2] = i2f(src_x);
+ vb[3] = 0;
+
+ vb[4] = i2f(dst_x);
+ vb[5] = i2f(h);
+ vb[6] = i2f(src_x);
+ vb[7] = i2f(h);
+
+ vb[8] = i2f(dst_x + cur_size);
+ vb[9] = i2f(h);
+ vb[10] = i2f(src_x + cur_size);
+ vb[11] = i2f(h);
+
+ /* src 10 */
+ set_tex_resource(rdev, FMT_8,
+ src_x + cur_size, h, src_x + cur_size,
+ src_gpu_addr);
+
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+
+ /* dst 17 */
+ set_render_target(rdev, COLOR_8,
+ dst_x + cur_size, h,
+ dst_gpu_addr);
+
+ /* scissors 12 */
+ set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
+
+ /* 15 */
+ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
+ set_vtx_resource(rdev, vb_gpu_addr);
+
+ /* draw 10 */
+ draw_auto(rdev);
+
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+ cur_size * h, dst_gpu_addr);
+
+ vb += 12;
+ rdev->r600_blit.vb_used += 12 * 4;
+
+ src_gpu_addr += cur_size * h;
+ dst_gpu_addr += cur_size * h;
+ size_bytes -= cur_size * h;
+ }
+ } else {
+ max_bytes = 8192 * 4;
+
+ while (size_bytes) {
+ int cur_size = size_bytes;
+ int src_x = (src_gpu_addr & 255);
+ int dst_x = (dst_gpu_addr & 255);
+ int h = 1;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
+
+ if (!src_x && !dst_x) {
+ h = (cur_size / max_bytes);
+ if (h > 8192)
+ h = 8192;
+ if (h == 0)
+ h = 1;
+ else
+ cur_size = max_bytes;
+ } else {
+ if (cur_size > max_bytes)
+ cur_size = max_bytes;
+ if (cur_size > (max_bytes - dst_x))
+ cur_size = (max_bytes - dst_x);
+ if (cur_size > (max_bytes - src_x))
+ cur_size = (max_bytes - src_x);
+ }
+
+ if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
+ WARN_ON(1);
+ }
+
+ vb[0] = i2f(dst_x / 4);
+ vb[1] = 0;
+ vb[2] = i2f(src_x / 4);
+ vb[3] = 0;
+
+ vb[4] = i2f(dst_x / 4);
+ vb[5] = i2f(h);
+ vb[6] = i2f(src_x / 4);
+ vb[7] = i2f(h);
+
+ vb[8] = i2f((dst_x + cur_size) / 4);
+ vb[9] = i2f(h);
+ vb[10] = i2f((src_x + cur_size) / 4);
+ vb[11] = i2f(h);
+
+ /* src 10 */
+ set_tex_resource(rdev, FMT_8_8_8_8,
+ (src_x + cur_size) / 4,
+ h, (src_x + cur_size) / 4,
+ src_gpu_addr);
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+ /* dst 17 */
+ set_render_target(rdev, COLOR_8_8_8_8,
+ (dst_x + cur_size) / 4, h,
+ dst_gpu_addr);
+
+ /* scissors 12 */
+ set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
+
+ /* Vertex buffer setup 15 */
+ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
+ set_vtx_resource(rdev, vb_gpu_addr);
+
+ /* draw 10 */
+ draw_auto(rdev);
+
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+ cur_size * h, dst_gpu_addr);
+
+ /* 74 ring dwords per loop */
+ vb += 12;
+ rdev->r600_blit.vb_used += 12 * 4;
+
+ src_gpu_addr += cur_size * h;
+ dst_gpu_addr += cur_size * h;
+ size_bytes -= cur_size * h;
+ }
+ }
+}
+
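
Both arms of evergreen_kms_blit_copy() reduce a linear copy to textured rectangles: each GPU address is split into a 256-byte-aligned base plus an x offset, unaligned transfers use the 8-bit formats (max_bytes = 8192 per row) and dword-aligned ones the 32-bit formats (8192 * 4), and every rectangle consumes 12 vertex floats (48 bytes) from the VB IB. The per-iteration sizing logic, isolated into a standalone helper:

    #include <stdint.h>

    struct blit_rect { int dst_x, src_x, h, cur_size; };

    /* One sizing step from the loops above: split addr into base + x,
     * then take a tall full-width rectangle or a clamped single row. */
    static struct blit_rect size_rect(uint64_t src, uint64_t dst,
                                      int size_bytes, int max_bytes)
    {
            struct blit_rect r = { (int)(dst & 255), (int)(src & 255),
                                   1, size_bytes };

            if (!r.src_x && !r.dst_x) {            /* both row-aligned */
                    r.h = r.cur_size / max_bytes;
                    if (r.h > 8192)
                            r.h = 8192;
                    if (r.h == 0)
                            r.h = 1;
                    else
                            r.cur_size = max_bytes;
            } else {                               /* single row, clamped */
                    if (r.cur_size > max_bytes)
                            r.cur_size = max_bytes;
                    if (r.cur_size > max_bytes - r.dst_x)
                            r.cur_size = max_bytes - r.dst_x;
                    if (r.cur_size > max_bytes - r.src_x)
                            r.cur_size = max_bytes - r.src_x;
            }
            return r;
    }
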
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index a58b37a..23d3641 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -31,9 +31,6 @@
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
-#define MAX(a,b) (((a)>(b))?(a):(b))
-#define MIN(a,b) (((a)<(b))?(a):(b))
-
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@@ -41,72 +38,45 @@ struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
u32 npipes;
- u32 row_size;
/* value we track */
- u32 nsamples; /* unused */
+ u32 nsamples;
+ u32 cb_color_base_last[12];
struct radeon_bo *cb_color_bo[12];
u32 cb_color_bo_offset[12];
- struct radeon_bo *cb_color_fmask_bo[8]; /* unused */
- struct radeon_bo *cb_color_cmask_bo[8]; /* unused */
+ struct radeon_bo *cb_color_fmask_bo[8];
+ struct radeon_bo *cb_color_cmask_bo[8];
u32 cb_color_info[12];
u32 cb_color_view[12];
+ u32 cb_color_pitch_idx[12];
+ u32 cb_color_slice_idx[12];
+ u32 cb_color_dim_idx[12];
+ u32 cb_color_dim[12];
u32 cb_color_pitch[12];
u32 cb_color_slice[12];
- u32 cb_color_attrib[12];
- u32 cb_color_cmask_slice[8];/* unused */
- u32 cb_color_fmask_slice[8];/* unused */
+ u32 cb_color_cmask_slice[8];
+ u32 cb_color_fmask_slice[8];
u32 cb_target_mask;
- u32 cb_shader_mask; /* unused */
+ u32 cb_shader_mask;
u32 vgt_strmout_config;
u32 vgt_strmout_buffer_config;
- struct radeon_bo *vgt_strmout_bo[4];
- u32 vgt_strmout_bo_offset[4];
- u32 vgt_strmout_size[4];
u32 db_depth_control;
u32 db_depth_view;
- u32 db_depth_slice;
u32 db_depth_size;
+ u32 db_depth_size_idx;
u32 db_z_info;
+ u32 db_z_idx;
u32 db_z_read_offset;
u32 db_z_write_offset;
struct radeon_bo *db_z_read_bo;
struct radeon_bo *db_z_write_bo;
u32 db_s_info;
+ u32 db_s_idx;
u32 db_s_read_offset;
u32 db_s_write_offset;
struct radeon_bo *db_s_read_bo;
struct radeon_bo *db_s_write_bo;
- bool sx_misc_kill_all_prims;
- bool cb_dirty;
- bool db_dirty;
- bool streamout_dirty;
};
-static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
-{
- if (tiling_flags & RADEON_TILING_MACRO)
- return ARRAY_2D_TILED_THIN1;
- else if (tiling_flags & RADEON_TILING_MICRO)
- return ARRAY_1D_TILED_THIN1;
- else
- return ARRAY_LINEAR_GENERAL;
-}
-
-static u32 evergreen_cs_get_num_banks(u32 nbanks)
-{
- switch (nbanks) {
- case 2:
- return ADDR_SURF_2_BANK;
- case 4:
- return ADDR_SURF_4_BANK;
- case 8:
- default:
- return ADDR_SURF_8_BANK;
- case 16:
- return ADDR_SURF_16_BANK;
- }
-}
-
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
int i;
@@ -119,745 +89,56 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
}
for (i = 0; i < 12; i++) {
+ track->cb_color_base_last[i] = 0;
track->cb_color_bo[i] = NULL;
track->cb_color_bo_offset[i] = 0xFFFFFFFF;
track->cb_color_info[i] = 0;
- track->cb_color_view[i] = 0xFFFFFFFF;
+ track->cb_color_view[i] = 0;
+ track->cb_color_pitch_idx[i] = 0;
+ track->cb_color_slice_idx[i] = 0;
+ track->cb_color_dim[i] = 0;
track->cb_color_pitch[i] = 0;
track->cb_color_slice[i] = 0;
+ track->cb_color_dim[i] = 0;
}
track->cb_target_mask = 0xFFFFFFFF;
track->cb_shader_mask = 0xFFFFFFFF;
- track->cb_dirty = true;
track->db_depth_view = 0xFFFFC000;
track->db_depth_size = 0xFFFFFFFF;
+ track->db_depth_size_idx = 0;
track->db_depth_control = 0xFFFFFFFF;
track->db_z_info = 0xFFFFFFFF;
+ track->db_z_idx = 0xFFFFFFFF;
track->db_z_read_offset = 0xFFFFFFFF;
track->db_z_write_offset = 0xFFFFFFFF;
track->db_z_read_bo = NULL;
track->db_z_write_bo = NULL;
track->db_s_info = 0xFFFFFFFF;
+ track->db_s_idx = 0xFFFFFFFF;
track->db_s_read_offset = 0xFFFFFFFF;
track->db_s_write_offset = 0xFFFFFFFF;
track->db_s_read_bo = NULL;
track->db_s_write_bo = NULL;
- track->db_dirty = true;
-
- for (i = 0; i < 4; i++) {
- track->vgt_strmout_size[i] = 0;
- track->vgt_strmout_bo[i] = NULL;
- track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
- }
- track->streamout_dirty = true;
- track->sx_misc_kill_all_prims = false;
-}
-
-struct eg_surface {
- /* value gathered from cs */
- unsigned nbx;
- unsigned nby;
- unsigned format;
- unsigned mode;
- unsigned nbanks;
- unsigned bankw;
- unsigned bankh;
- unsigned tsplit;
- unsigned mtilea;
- unsigned nsamples;
- /* output value */
- unsigned bpe;
- unsigned layer_size;
- unsigned palign;
- unsigned halign;
- unsigned long base_align;
-};
-
-static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
- struct eg_surface *surf,
- const char *prefix)
-{
- surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
- surf->base_align = surf->bpe;
- surf->palign = 1;
- surf->halign = 1;
- return 0;
-}
-
-static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
- struct eg_surface *surf,
- const char *prefix)
-{
- struct evergreen_cs_track *track = p->track;
- unsigned palign;
-
- palign = MAX(64, track->group_size / surf->bpe);
- surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
- surf->base_align = track->group_size;
- surf->palign = palign;
- surf->halign = 1;
- if (surf->nbx & (palign - 1)) {
- if (prefix) {
- dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
- __func__, __LINE__, prefix, surf->nbx, palign);
- }
- return -EINVAL;
- }
- return 0;
-}
-
-static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
- struct eg_surface *surf,
- const char *prefix)
-{
- struct evergreen_cs_track *track = p->track;
- unsigned palign;
-
- palign = track->group_size / (8 * surf->bpe * surf->nsamples);
- palign = MAX(8, palign);
- surf->layer_size = surf->nbx * surf->nby * surf->bpe;
- surf->base_align = track->group_size;
- surf->palign = palign;
- surf->halign = 8;
- if ((surf->nbx & (palign - 1))) {
- if (prefix) {
- dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
- __func__, __LINE__, prefix, surf->nbx, palign,
- track->group_size, surf->bpe, surf->nsamples);
- }
- return -EINVAL;
- }
- if ((surf->nby & (8 - 1))) {
- if (prefix) {
- dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
- __func__, __LINE__, prefix, surf->nby);
- }
- return -EINVAL;
- }
- return 0;
-}
-
-static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
- struct eg_surface *surf,
- const char *prefix)
-{
- struct evergreen_cs_track *track = p->track;
- unsigned palign, halign, tileb, slice_pt;
-
- tileb = 64 * surf->bpe * surf->nsamples;
- palign = track->group_size / (8 * surf->bpe * surf->nsamples);
- palign = MAX(8, palign);
- slice_pt = 1;
- if (tileb > surf->tsplit) {
- slice_pt = tileb / surf->tsplit;
- }
- tileb = tileb / slice_pt;
- /* macro tile width & height */
- palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
- halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
- surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt;
- surf->base_align = (palign / 8) * (halign / 8) * tileb;
- surf->palign = palign;
- surf->halign = halign;
-
- if ((surf->nbx & (palign - 1))) {
- if (prefix) {
- dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
- __func__, __LINE__, prefix, surf->nbx, palign);
- }
- return -EINVAL;
- }
- if ((surf->nby & (halign - 1))) {
- if (prefix) {
- dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
- __func__, __LINE__, prefix, surf->nby, halign);
- }
- return -EINVAL;
- }
-
- return 0;
-}
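
The removed evergreen_surface_check_2d() computes the macro-tile footprint: pitch alignment is 8 * bankw * npipes * mtilea texels, height alignment is 8 * bankh * nbanks / mtilea, and the base alignment is one macro tile's worth of 8x8 micro tiles. Worked through for illustrative parameters (not taken from any particular ASIC): npipes = 4, nbanks = 8, bankw = bankh = 1, mtilea = 2, 32-bit texels, one sample, 256-byte tile split:

    #include <assert.h>

    int main(void)
    {
            unsigned bpe = 4, nsamples = 1, npipes = 4, nbanks = 8;
            unsigned bankw = 1, bankh = 1, mtilea = 2, tsplit = 256;
            unsigned tileb, slice_pt, palign, halign;
            unsigned long base_align;

            tileb = 64 * bpe * nsamples;              /* bytes per 8x8 tile */
            slice_pt = (tileb > tsplit) ? tileb / tsplit : 1;
            tileb /= slice_pt;
            palign = (8 * bankw * npipes) * mtilea;   /* macro tile width */
            halign = (8 * bankh * nbanks) / mtilea;   /* macro tile height */
            base_align = (palign / 8) * (halign / 8) * tileb;

            assert(palign == 64 && halign == 32 && base_align == 8192);
            return 0;
    }
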
-
-static int evergreen_surface_check(struct radeon_cs_parser *p,
- struct eg_surface *surf,
- const char *prefix)
-{
- /* some common value computed here */
- surf->bpe = r600_fmt_get_blocksize(surf->format);
-
- switch (surf->mode) {
- case ARRAY_LINEAR_GENERAL:
- return evergreen_surface_check_linear(p, surf, prefix);
- case ARRAY_LINEAR_ALIGNED:
- return evergreen_surface_check_linear_aligned(p, surf, prefix);
- case ARRAY_1D_TILED_THIN1:
- return evergreen_surface_check_1d(p, surf, prefix);
- case ARRAY_2D_TILED_THIN1:
- return evergreen_surface_check_2d(p, surf, prefix);
- default:
- dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
- __func__, __LINE__, prefix, surf->mode);
- return -EINVAL;
- }
- return -EINVAL;
-}
-
-static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
- struct eg_surface *surf,
- const char *prefix)
-{
- switch (surf->mode) {
- case ARRAY_2D_TILED_THIN1:
- break;
- case ARRAY_LINEAR_GENERAL:
- case ARRAY_LINEAR_ALIGNED:
- case ARRAY_1D_TILED_THIN1:
- return 0;
- default:
- dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
- __func__, __LINE__, prefix, surf->mode);
- return -EINVAL;
- }
-
- switch (surf->nbanks) {
- case 0: surf->nbanks = 2; break;
- case 1: surf->nbanks = 4; break;
- case 2: surf->nbanks = 8; break;
- case 3: surf->nbanks = 16; break;
- default:
- dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
- __func__, __LINE__, prefix, surf->nbanks);
- return -EINVAL;
- }
- switch (surf->bankw) {
- case 0: surf->bankw = 1; break;
- case 1: surf->bankw = 2; break;
- case 2: surf->bankw = 4; break;
- case 3: surf->bankw = 8; break;
- default:
- dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
- __func__, __LINE__, prefix, surf->bankw);
- return -EINVAL;
- }
- switch (surf->bankh) {
- case 0: surf->bankh = 1; break;
- case 1: surf->bankh = 2; break;
- case 2: surf->bankh = 4; break;
- case 3: surf->bankh = 8; break;
- default:
- dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
- __func__, __LINE__, prefix, surf->bankh);
- return -EINVAL;
- }
- switch (surf->mtilea) {
- case 0: surf->mtilea = 1; break;
- case 1: surf->mtilea = 2; break;
- case 2: surf->mtilea = 4; break;
- case 3: surf->mtilea = 8; break;
- default:
- dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
- __func__, __LINE__, prefix, surf->mtilea);
- return -EINVAL;
- }
- switch (surf->tsplit) {
- case 0: surf->tsplit = 64; break;
- case 1: surf->tsplit = 128; break;
- case 2: surf->tsplit = 256; break;
- case 3: surf->tsplit = 512; break;
- case 4: surf->tsplit = 1024; break;
- case 5: surf->tsplit = 2048; break;
- case 6: surf->tsplit = 4096; break;
- default:
- dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
- __func__, __LINE__, prefix, surf->tsplit);
- return -EINVAL;
- }
- return 0;
}
-static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
+static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
- struct evergreen_cs_track *track = p->track;
- struct eg_surface surf;
- unsigned pitch, slice, mslice;
- unsigned long offset;
- int r;
-
- mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
- pitch = track->cb_color_pitch[id];
- slice = track->cb_color_slice[id];
- surf.nbx = (pitch + 1) * 8;
- surf.nby = ((slice + 1) * 64) / surf.nbx;
- surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
- surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
- surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
- surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
- surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
- surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
- surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
- surf.nsamples = 1;
-
- if (!r600_fmt_is_valid_color(surf.format)) {
- dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
- __func__, __LINE__, surf.format,
- id, track->cb_color_info[id]);
- return -EINVAL;
- }
-
- r = evergreen_surface_value_conv_check(p, &surf, "cb");
- if (r) {
- return r;
- }
-
- r = evergreen_surface_check(p, &surf, "cb");
- if (r) {
- dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
- __func__, __LINE__, id, track->cb_color_pitch[id],
- track->cb_color_slice[id], track->cb_color_attrib[id],
- track->cb_color_info[id]);
- return r;
- }
-
- offset = track->cb_color_bo_offset[id] << 8;
- if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
- __func__, __LINE__, id, offset, surf.base_align);
- return -EINVAL;
- }
-
- offset += surf.layer_size * mslice;
- if (offset > radeon_bo_size(track->cb_color_bo[id])) {
- dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
- "offset %d, max layer %d, bo size %ld, slice %d)\n",
- __func__, __LINE__, id, surf.layer_size,
- track->cb_color_bo_offset[id] << 8, mslice,
- radeon_bo_size(track->cb_color_bo[id]), slice);
- dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
- __func__, __LINE__, surf.nbx, surf.nby,
- surf.mode, surf.bpe, surf.nsamples,
- surf.bankw, surf.bankh,
- surf.tsplit, surf.mtilea);
- return -EINVAL;
- }
-
- return 0;
-}
-
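
evergreen_cs_track_validate_cb() above and the stencil/depth variants below all repeat one bounds idiom: the register holds the base in 256-byte units, the base must be aligned to the surface's power-of-two base_align, and every slice must fit inside the backing buffer object. A condensed sketch, where bo and reg_offset stand in for the per-target state:

	/* Sketch of the recurring bounds check used by the validators. */
	unsigned long offset = (unsigned long)reg_offset << 8; /* 256B units */

	if (offset & (surf.base_align - 1))	/* power-of-two alignment */
		return -EINVAL;
	offset += (unsigned long)surf.layer_size * mslice;
	if (offset > radeon_bo_size(bo))	/* all slices must fit */
		return -EINVAL;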
-static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
-{
- struct evergreen_cs_track *track = p->track;
- struct eg_surface surf;
- unsigned pitch, slice, mslice;
- unsigned long offset;
- int r;
-
- mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
- pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
- slice = track->db_depth_slice;
- surf.nbx = (pitch + 1) * 8;
- surf.nby = ((slice + 1) * 64) / surf.nbx;
- surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
- surf.format = G_028044_FORMAT(track->db_s_info);
- surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
- surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
- surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
- surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
- surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
- surf.nsamples = 1;
-
- if (surf.format != 1) {
- dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
- __func__, __LINE__, surf.format);
- return -EINVAL;
- }
- /* replace by color format so we can use same code */
- surf.format = V_028C70_COLOR_8;
-
- r = evergreen_surface_value_conv_check(p, &surf, "stencil");
- if (r) {
- return r;
- }
-
- r = evergreen_surface_check(p, &surf, NULL);
- if (r) {
- /* old userspace doesn't compute proper depth/stencil alignment;
- * check the alignment against a bigger bytes-per-element value and
- * only report an error if that alignment is wrong too.
- */
- surf.format = V_028C70_COLOR_8_8_8_8;
- r = evergreen_surface_check(p, &surf, "stencil");
- if (r) {
- dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
- __func__, __LINE__, track->db_depth_size,
- track->db_depth_slice, track->db_s_info, track->db_z_info);
- }
- return r;
- }
-
- offset = track->db_s_read_offset << 8;
- if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
- __func__, __LINE__, offset, surf.base_align);
- return -EINVAL;
- }
- offset += surf.layer_size * mslice;
- if (offset > radeon_bo_size(track->db_s_read_bo)) {
- dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
- "offset %ld, max layer %d, bo size %ld)\n",
- __func__, __LINE__, surf.layer_size,
- (unsigned long)track->db_s_read_offset << 8, mslice,
- radeon_bo_size(track->db_s_read_bo));
- dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
- __func__, __LINE__, track->db_depth_size,
- track->db_depth_slice, track->db_s_info, track->db_z_info);
- return -EINVAL;
- }
-
- offset = track->db_s_write_offset << 8;
- if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
- __func__, __LINE__, offset, surf.base_align);
- return -EINVAL;
- }
- offset += surf.layer_size * mslice;
- if (offset > radeon_bo_size(track->db_s_write_bo)) {
- dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
- "offset %ld, max layer %d, bo size %ld)\n",
- __func__, __LINE__, surf.layer_size,
- (unsigned long)track->db_s_write_offset << 8, mslice,
- radeon_bo_size(track->db_s_write_bo));
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
-{
- struct evergreen_cs_track *track = p->track;
- struct eg_surface surf;
- unsigned pitch, slice, mslice;
- unsigned long offset;
- int r;
-
- mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
- pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
- slice = track->db_depth_slice;
- surf.nbx = (pitch + 1) * 8;
- surf.nby = ((slice + 1) * 64) / surf.nbx;
- surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
- surf.format = G_028040_FORMAT(track->db_z_info);
- surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
- surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
- surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
- surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
- surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
- surf.nsamples = 1;
-
- switch (surf.format) {
- case V_028040_Z_16:
- surf.format = V_028C70_COLOR_16;
- break;
- case V_028040_Z_24:
- case V_028040_Z_32_FLOAT:
- surf.format = V_028C70_COLOR_8_8_8_8;
- break;
- default:
- dev_warn(p->dev, "%s:%d depth invalid format %d\n",
- __func__, __LINE__, surf.format);
- return -EINVAL;
- }
-
- r = evergreen_surface_value_conv_check(p, &surf, "depth");
- if (r) {
- dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
- __func__, __LINE__, track->db_depth_size,
- track->db_depth_slice, track->db_z_info);
- return r;
- }
-
- r = evergreen_surface_check(p, &surf, "depth");
- if (r) {
- dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
- __func__, __LINE__, track->db_depth_size,
- track->db_depth_slice, track->db_z_info);
- return r;
- }
-
- offset = track->db_z_read_offset << 8;
- if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
- __func__, __LINE__, offset, surf.base_align);
- return -EINVAL;
- }
- offset += surf.layer_size * mslice;
- if (offset > radeon_bo_size(track->db_z_read_bo)) {
- dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
- "offset %ld, max layer %d, bo size %ld)\n",
- __func__, __LINE__, surf.layer_size,
- (unsigned long)track->db_z_read_offset << 8, mslice,
- radeon_bo_size(track->db_z_read_bo));
- return -EINVAL;
- }
-
- offset = track->db_z_write_offset << 8;
- if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
- __func__, __LINE__, offset, surf.base_align);
- return -EINVAL;
- }
- offset += surf.layer_size * mslice;
- if (offset > radeon_bo_size(track->db_z_write_bo)) {
- dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
- "offset %ld, max layer %d, bo size %ld)\n",
- __func__, __LINE__, surf.layer_size,
- (unsigned long)track->db_z_write_offset << 8, mslice,
- radeon_bo_size(track->db_z_write_bo));
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
- struct radeon_bo *texture,
- struct radeon_bo *mipmap,
- unsigned idx)
-{
- struct eg_surface surf;
- unsigned long toffset, moffset;
- unsigned dim, llevel, mslice, width, height, depth, i;
- u32 texdw[8];
- int r;
-
- texdw[0] = radeon_get_ib_value(p, idx + 0);
- texdw[1] = radeon_get_ib_value(p, idx + 1);
- texdw[2] = radeon_get_ib_value(p, idx + 2);
- texdw[3] = radeon_get_ib_value(p, idx + 3);
- texdw[4] = radeon_get_ib_value(p, idx + 4);
- texdw[5] = radeon_get_ib_value(p, idx + 5);
- texdw[6] = radeon_get_ib_value(p, idx + 6);
- texdw[7] = radeon_get_ib_value(p, idx + 7);
- dim = G_030000_DIM(texdw[0]);
- llevel = G_030014_LAST_LEVEL(texdw[5]);
- mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
- width = G_030000_TEX_WIDTH(texdw[0]) + 1;
- height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
- depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
- surf.format = G_03001C_DATA_FORMAT(texdw[7]);
- surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
- surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
- surf.nby = r600_fmt_get_nblocksy(surf.format, height);
- surf.mode = G_030004_ARRAY_MODE(texdw[1]);
- surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
- surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
- surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
- surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
- surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
- surf.nsamples = 1;
- toffset = texdw[2] << 8;
- moffset = texdw[3] << 8;
-
- if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
- dev_warn(p->dev, "%s:%d texture invalid format %d\n",
- __func__, __LINE__, surf.format);
- return -EINVAL;
- }
- switch (dim) {
- case V_030000_SQ_TEX_DIM_1D:
- case V_030000_SQ_TEX_DIM_2D:
- case V_030000_SQ_TEX_DIM_CUBEMAP:
- case V_030000_SQ_TEX_DIM_1D_ARRAY:
- case V_030000_SQ_TEX_DIM_2D_ARRAY:
- depth = 1;
- case V_030000_SQ_TEX_DIM_3D:
- break;
- default:
- dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
- __func__, __LINE__, dim);
- return -EINVAL;
- }
-
- r = evergreen_surface_value_conv_check(p, &surf, "texture");
- if (r) {
- return r;
- }
-
- /* align height */
- evergreen_surface_check(p, &surf, NULL);
- surf.nby = ALIGN(surf.nby, surf.halign);
-
- r = evergreen_surface_check(p, &surf, "texture");
- if (r) {
- dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
- __func__, __LINE__, texdw[0], texdw[1], texdw[4],
- texdw[5], texdw[6], texdw[7]);
- return r;
- }
-
- /* check texture size */
- if (toffset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
- __func__, __LINE__, toffset, surf.base_align);
- return -EINVAL;
- }
- if (moffset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
- __func__, __LINE__, moffset, surf.base_align);
- return -EINVAL;
- }
- if (dim == SQ_TEX_DIM_3D) {
- toffset += surf.layer_size * depth;
- } else {
- toffset += surf.layer_size * mslice;
- }
- if (toffset > radeon_bo_size(texture)) {
- dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
- "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
- __func__, __LINE__, surf.layer_size,
- (unsigned long)texdw[2] << 8, mslice,
- depth, radeon_bo_size(texture),
- surf.nbx, surf.nby);
- return -EINVAL;
- }
-
- /* check mipmap size */
- for (i = 1; i <= llevel; i++) {
- unsigned w, h, d;
-
- w = r600_mip_minify(width, i);
- h = r600_mip_minify(height, i);
- d = r600_mip_minify(depth, i);
- surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
- surf.nby = r600_fmt_get_nblocksy(surf.format, h);
-
- switch (surf.mode) {
- case ARRAY_2D_TILED_THIN1:
- if (surf.nbx < surf.palign || surf.nby < surf.halign) {
- surf.mode = ARRAY_1D_TILED_THIN1;
- }
- /* recompute alignment */
- evergreen_surface_check(p, &surf, NULL);
- break;
- case ARRAY_LINEAR_GENERAL:
- case ARRAY_LINEAR_ALIGNED:
- case ARRAY_1D_TILED_THIN1:
- break;
- default:
- dev_warn(p->dev, "%s:%d invalid array mode %d\n",
- __func__, __LINE__, surf.mode);
- return -EINVAL;
- }
- surf.nbx = ALIGN(surf.nbx, surf.palign);
- surf.nby = ALIGN(surf.nby, surf.halign);
-
- r = evergreen_surface_check(p, &surf, "mipmap");
- if (r) {
- return r;
- }
-
- if (dim == SQ_TEX_DIM_3D) {
- moffset += surf.layer_size * d;
- } else {
- moffset += surf.layer_size * mslice;
- }
- if (moffset > radeon_bo_size(mipmap)) {
- dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
- "offset %ld, coffset %ld, max layer %d, depth %d, "
- "bo size %ld) level0 (%d %d %d)\n",
- __func__, __LINE__, i, surf.layer_size,
- (unsigned long)texdw[3] << 8, moffset, mslice,
- d, radeon_bo_size(mipmap),
- width, height, depth);
- dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
- __func__, __LINE__, surf.nbx, surf.nby,
- surf.mode, surf.bpe, surf.nsamples,
- surf.bankw, surf.bankh,
- surf.tsplit, surf.mtilea);
- return -EINVAL;
- }
- }
-
+ /* XXX fill in */
return 0;
}
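
The removed mipmap loop walks levels 1..last_level, shrinking width, height and depth at each step and re-checking the surface (demoting 2D tiling to 1D when a level becomes too small for the macro tile). The minify step is assumed to halve a dimension per level with a floor of one, matching the r600_mip_minify() helper it calls:

/* Sketch: per-level minification as used by the removed loop; assumed
 * to match the r600_mip_minify() helper referenced there. */
static unsigned mip_minify(unsigned size, unsigned level)
{
	unsigned val = size >> level;	/* halve once per mip level */

	return val ? val : 1;		/* never below one texel/block */
}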
static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
struct evergreen_cs_track *track = p->track;
- unsigned tmp, i;
- int r;
- unsigned buffer_mask = 0;
- /* check streamout */
- if (track->streamout_dirty && track->vgt_strmout_config) {
- for (i = 0; i < 4; i++) {
- if (track->vgt_strmout_config & (1 << i)) {
- buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
- }
- }
-
- for (i = 0; i < 4; i++) {
- if (buffer_mask & (1 << i)) {
- if (track->vgt_strmout_bo[i]) {
- u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
- (u64)track->vgt_strmout_size[i];
- if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
- DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
- i, offset,
- radeon_bo_size(track->vgt_strmout_bo[i]));
- return -EINVAL;
- }
- } else {
- dev_warn(p->dev, "No buffer for streamout %d\n", i);
- return -EINVAL;
- }
- }
- }
- track->streamout_dirty = false;
- }
-
- if (track->sx_misc_kill_all_prims)
- return 0;
-
- /* check that we have a cb for each enabled target
- */
- if (track->cb_dirty) {
- tmp = track->cb_target_mask;
- for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
- /* at least one component is enabled */
- if (track->cb_color_bo[i] == NULL) {
- dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
- __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
- return -EINVAL;
- }
- /* check cb */
- r = evergreen_cs_track_validate_cb(p, i);
- if (r) {
- return r;
- }
- }
- }
- track->cb_dirty = false;
- }
-
- if (track->db_dirty) {
- /* Check stencil buffer */
- if (G_028800_STENCIL_ENABLE(track->db_depth_control)) {
- r = evergreen_cs_track_validate_stencil(p);
- if (r)
- return r;
- }
- /* Check depth buffer */
- if (G_028800_Z_WRITE_ENABLE(track->db_depth_control)) {
- r = evergreen_cs_track_validate_depth(p);
- if (r)
- return r;
- }
- track->db_dirty = false;
+ /* we don't support stream out buffer yet */
+ if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
+ dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
+ return -EINVAL;
}
+ /* XXX fill in */
return 0;
}
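
In the removed streamout check, VGT_STRMOUT_CONFIG enables up to four streams and VGT_STRMOUT_BUFFER_CONFIG carries a 4-bit buffer mask per stream; the masks of all enabled streams are ORed together before each referenced buffer is validated. The accumulation, extracted from the removed code:

	/* Which streamout buffers are actually in use? */
	unsigned buffer_mask = 0;
	unsigned i;

	for (i = 0; i < 4; i++) {
		if (track->vgt_strmout_config & (1 << i))	/* stream i on */
			buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
	}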
@@ -955,6 +236,28 @@ static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
}
/**
+ * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
+ * @parser: parser structure holding parsing context.
+ *
+ * Check whether the next packet is a relocation packet3 (a PACKET3_NOP
+ * carrying a relocation). Returns 1 if it is, 0 otherwise.
+ **/
+static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return 0;
+ }
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ return 0;
+ }
+ return 1;
+}
+
+/**
* evergreen_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
*
@@ -1111,7 +414,7 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
 * if the register is safe. If the register is not flagged as safe, this
 * function will test it against a list of registers needing special handling.
*/
-static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
struct radeon_cs_reloc *reloc;
@@ -1125,7 +428,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
i = (reg >> 7);
- if (i >= last_reg) {
+ if (i > last_reg) {
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
@@ -1189,7 +492,6 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case DB_DEPTH_CONTROL:
track->db_depth_control = radeon_get_ib_value(p, idx);
- track->db_dirty = true;
break;
case CAYMAN_DB_EQAA:
if (p->rdev->family < CHIP_CAYMAN) {
@@ -1206,48 +508,32 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
break;
case DB_Z_INFO:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
track->db_z_info = radeon_get_ib_value(p, idx);
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx] &= ~Z_ARRAY_MODE(0xf);
- track->db_z_info &= ~Z_ARRAY_MODE(0xf);
- ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
- track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- unsigned bankw, bankh, mtaspect, tile_split;
-
- evergreen_tiling_fields(reloc->lobj.tiling_flags,
- &bankw, &bankh, &mtaspect,
- &tile_split);
- ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
- ib[idx] |= DB_TILE_SPLIT(tile_split) |
- DB_BANK_WIDTH(bankw) |
- DB_BANK_HEIGHT(bankh) |
- DB_MACRO_TILE_ASPECT(mtaspect);
- }
+ ib[idx] &= ~Z_ARRAY_MODE(0xf);
+ track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else {
+ ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
}
- track->db_dirty = true;
break;
case DB_STENCIL_INFO:
track->db_s_info = radeon_get_ib_value(p, idx);
- track->db_dirty = true;
break;
case DB_DEPTH_VIEW:
track->db_depth_view = radeon_get_ib_value(p, idx);
- track->db_dirty = true;
break;
case DB_DEPTH_SIZE:
track->db_depth_size = radeon_get_ib_value(p, idx);
- track->db_dirty = true;
- break;
- case R_02805C_DB_DEPTH_SLICE:
- track->db_depth_slice = radeon_get_ib_value(p, idx);
- track->db_dirty = true;
+ track->db_depth_size_idx = idx;
break;
case DB_Z_READ_BASE:
r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -1259,7 +545,6 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_z_read_offset = radeon_get_ib_value(p, idx);
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_z_read_bo = reloc->robj;
- track->db_dirty = true;
break;
case DB_Z_WRITE_BASE:
r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -1271,7 +556,6 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_z_write_offset = radeon_get_ib_value(p, idx);
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_z_write_bo = reloc->robj;
- track->db_dirty = true;
break;
case DB_STENCIL_READ_BASE:
r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -1283,7 +567,6 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_s_read_offset = radeon_get_ib_value(p, idx);
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_s_read_bo = reloc->robj;
- track->db_dirty = true;
break;
case DB_STENCIL_WRITE_BASE:
r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -1295,56 +578,18 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_s_write_offset = radeon_get_ib_value(p, idx);
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_s_write_bo = reloc->robj;
- track->db_dirty = true;
break;
case VGT_STRMOUT_CONFIG:
track->vgt_strmout_config = radeon_get_ib_value(p, idx);
- track->streamout_dirty = true;
break;
case VGT_STRMOUT_BUFFER_CONFIG:
track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
- track->streamout_dirty = true;
break;
- case VGT_STRMOUT_BUFFER_BASE_0:
- case VGT_STRMOUT_BUFFER_BASE_1:
- case VGT_STRMOUT_BUFFER_BASE_2:
- case VGT_STRMOUT_BUFFER_BASE_3:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
- track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- track->vgt_strmout_bo[tmp] = reloc->robj;
- track->streamout_dirty = true;
- break;
- case VGT_STRMOUT_BUFFER_SIZE_0:
- case VGT_STRMOUT_BUFFER_SIZE_1:
- case VGT_STRMOUT_BUFFER_SIZE_2:
- case VGT_STRMOUT_BUFFER_SIZE_3:
- tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
- /* size in register is DWs, convert to bytes */
- track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
- track->streamout_dirty = true;
- break;
- case CP_COHER_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
case CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
- track->cb_dirty = true;
break;
case CB_SHADER_MASK:
track->cb_shader_mask = radeon_get_ib_value(p, idx);
- track->cb_dirty = true;
break;
case PA_SC_AA_CONFIG:
if (p->rdev->family >= CHIP_CAYMAN) {
@@ -1374,7 +619,6 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_VIEW:
tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
- track->cb_dirty = true;
break;
case CB_COLOR8_VIEW:
case CB_COLOR9_VIEW:
@@ -1382,7 +626,6 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_VIEW:
tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
- track->cb_dirty = true;
break;
case CB_COLOR0_INFO:
case CB_COLOR1_INFO:
@@ -1392,37 +635,41 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_INFO:
case CB_COLOR6_INFO:
case CB_COLOR7_INFO:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
}
- track->cb_dirty = true;
break;
case CB_COLOR8_INFO:
case CB_COLOR9_INFO:
case CB_COLOR10_INFO:
case CB_COLOR11_INFO:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
}
- track->cb_dirty = true;
break;
case CB_COLOR0_PITCH:
case CB_COLOR1_PITCH:
@@ -1434,7 +681,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_PITCH:
tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
- track->cb_dirty = true;
+ track->cb_color_pitch_idx[tmp] = idx;
break;
case CB_COLOR8_PITCH:
case CB_COLOR9_PITCH:
@@ -1442,7 +689,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_PITCH:
tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
- track->cb_dirty = true;
+ track->cb_color_pitch_idx[tmp] = idx;
break;
case CB_COLOR0_SLICE:
case CB_COLOR1_SLICE:
@@ -1454,7 +701,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_SLICE:
tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
- track->cb_dirty = true;
+ track->cb_color_slice_idx[tmp] = idx;
break;
case CB_COLOR8_SLICE:
case CB_COLOR9_SLICE:
@@ -1462,7 +709,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_SLICE:
tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
- track->cb_dirty = true;
+ track->cb_color_slice_idx[tmp] = idx;
break;
case CB_COLOR0_ATTRIB:
case CB_COLOR1_ATTRIB:
@@ -1472,57 +719,30 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_ATTRIB:
case CB_COLOR6_ATTRIB:
case CB_COLOR7_ATTRIB:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- unsigned bankw, bankh, mtaspect, tile_split;
-
- evergreen_tiling_fields(reloc->lobj.tiling_flags,
- &bankw, &bankh, &mtaspect,
- &tile_split);
- ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
- ib[idx] |= CB_TILE_SPLIT(tile_split) |
- CB_BANK_WIDTH(bankw) |
- CB_BANK_HEIGHT(bankh) |
- CB_MACRO_TILE_ASPECT(mtaspect);
- }
- }
- tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
- track->cb_color_attrib[tmp] = ib[idx];
- track->cb_dirty = true;
- break;
case CB_COLOR8_ATTRIB:
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- unsigned bankw, bankh, mtaspect, tile_split;
-
- evergreen_tiling_fields(reloc->lobj.tiling_flags,
- &bankw, &bankh, &mtaspect,
- &tile_split);
- ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
- ib[idx] |= CB_TILE_SPLIT(tile_split) |
- CB_BANK_WIDTH(bankw) |
- CB_BANK_HEIGHT(bankh) |
- CB_MACRO_TILE_ASPECT(mtaspect);
- }
- }
- tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
- track->cb_color_attrib[tmp] = ib[idx];
- track->cb_dirty = true;
+ break;
+ case CB_COLOR0_DIM:
+ case CB_COLOR1_DIM:
+ case CB_COLOR2_DIM:
+ case CB_COLOR3_DIM:
+ case CB_COLOR4_DIM:
+ case CB_COLOR5_DIM:
+ case CB_COLOR6_DIM:
+ case CB_COLOR7_DIM:
+ tmp = (reg - CB_COLOR0_DIM) / 0x3c;
+ track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_dim_idx[tmp] = idx;
+ break;
+ case CB_COLOR8_DIM:
+ case CB_COLOR9_DIM:
+ case CB_COLOR10_DIM:
+ case CB_COLOR11_DIM:
+ tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
+ track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_dim_idx[tmp] = idx;
break;
case CB_COLOR0_FMASK:
case CB_COLOR1_FMASK:
@@ -1597,8 +817,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = (reg - CB_COLOR0_BASE) / 0x3c;
track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
- track->cb_dirty = true;
break;
case CB_COLOR8_BASE:
case CB_COLOR9_BASE:
@@ -1613,8 +833,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
- track->cb_dirty = true;
break;
case CB_IMMED0_BASE:
case CB_IMMED1_BASE:
@@ -1636,6 +856,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_PGM_START_PS:
case SQ_PGM_START_HS:
case SQ_PGM_START_LS:
+ case GDS_ADDR_BASE:
case SQ_CONST_MEM_BASE:
case SQ_ALU_CONST_CACHE_GS_0:
case SQ_ALU_CONST_CACHE_GS_1:
@@ -1725,37 +946,6 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
- case SX_MEMORY_EXPORT_BASE:
- if (p->rdev->family >= CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONFIG_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONFIG_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- break;
- case CAYMAN_SX_SCATTER_EXPORT_BASE:
- if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- break;
- case SX_MISC:
- track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
- break;
default:
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
@@ -1763,30 +953,22 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return 0;
}
-static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+/**
+ * evergreen_check_texture_resource() - check that a texture resource is valid
+ * @p: parser structure holding parsing context
+ * @idx: index into the cs buffer
+ * @texture: texture's bo structure
+ * @mipmap: mipmap's bo structure
+ *
+ * This function will check that the resource has valid fields and that
+ * the texture and mipmap bo objects are big enough to cover this resource.
+ */
+static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+ struct radeon_bo *texture,
+ struct radeon_bo *mipmap)
{
- u32 last_reg, m, i;
-
- if (p->rdev->family >= CHIP_CAYMAN)
- last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
- else
- last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
-
- i = (reg >> 7);
- if (i >= last_reg) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
- return false;
- }
- m = 1 << ((reg >> 2) & 31);
- if (p->rdev->family >= CHIP_CAYMAN) {
- if (!(cayman_reg_safe_bm[i] & m))
- return true;
- } else {
- if (!(evergreen_reg_safe_bm[i] & m))
- return true;
- }
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
- return false;
+ /* XXX fill in */
+ return 0;
}
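
The removed evergreen_is_safe_reg() consults a bitmap with one bit per register dword: 32 registers per bitmap word, so the word index is reg >> 7 and the bit is (reg >> 2) & 31, with a clear bit meaning the register needs no special handling. A sketch of the lookup, assuming the non-Cayman evergreen_reg_safe_bm[] table from this file:

	/* Sketch: safe-register bitmap lookup from the removed helper. */
	u32 word = reg >> 7;			/* (reg >> 2) / 32 */
	u32 bit = 1 << ((reg >> 2) & 31);	/* bit within the word */
	bool safe = !(evergreen_reg_safe_bm[word] & bit);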
static int evergreen_packet3_check(struct radeon_cs_parser *p,
@@ -1811,8 +993,6 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
{
int pred_op;
int tmp;
- uint64_t offset;
-
if (pkt->count != 1) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -1836,12 +1016,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
- (idx_value & 0xfffffff0) +
- ((u64)(tmp & 0xff) << 32);
-
- ib[idx + 0] = offset;
- ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
}
break;
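
The removed variants in this and the following draw/index cases first assemble the full 40-bit GPU address, then split it back into the command stream: the low 32 bits go into one dword and bits 39:32 into the low byte of the next. A sketch of that pattern, with addr standing for the sum of reloc->lobj.gpu_offset and the user-supplied offset:

	/* Sketch: 40-bit address split used by the removed code paths. */
	uint64_t addr = reloc->lobj.gpu_offset + user_offset;

	ib[idx + 0] = lower_32_bits(addr);		/* low dword */
	ib[idx + 1] = upper_32_bits(addr) & 0xff;	/* bits 39:32 */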
case PACKET3_CONTEXT_CONTROL:
@@ -1869,9 +1045,6 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
break;
case PACKET3_INDEX_BASE:
- {
- uint64_t offset;
-
if (pkt->count != 1) {
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
@@ -1881,24 +1054,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
}
-
- offset = reloc->lobj.gpu_offset +
- idx_value +
- ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
-
- ib[idx+0] = offset;
- ib[idx+1] = upper_32_bits(offset) & 0xff;
-
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
r = evergreen_cs_track_check(p);
if (r) {
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
- }
case PACKET3_DRAW_INDEX:
- {
- uint64_t offset;
if (pkt->count != 3) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -1908,25 +1072,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
-
- offset = reloc->lobj.gpu_offset +
- idx_value +
- ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
-
- ib[idx+0] = offset;
- ib[idx+1] = upper_32_bits(offset) & 0xff;
-
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
r = evergreen_cs_track_check(p);
if (r) {
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
- }
case PACKET3_DRAW_INDEX_2:
- {
- uint64_t offset;
-
if (pkt->count != 4) {
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
@@ -1936,21 +1090,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
}
-
- offset = reloc->lobj.gpu_offset +
- radeon_get_ib_value(p, idx+1) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
-
- ib[idx+1] = offset;
- ib[idx+2] = upper_32_bits(offset) & 0xff;
-
+ ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
r = evergreen_cs_track_check(p);
if (r) {
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
- }
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
DRM_ERROR("bad DRAW_INDEX_AUTO\n");
@@ -2006,34 +1153,6 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return r;
}
break;
- case PACKET3_DISPATCH_DIRECT:
- if (pkt->count != 3) {
- DRM_ERROR("bad DISPATCH_DIRECT\n");
- return -EINVAL;
- }
- r = evergreen_cs_track_check(p);
- if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
- return r;
- }
- break;
- case PACKET3_DISPATCH_INDIRECT:
- if (pkt->count != 1) {
- DRM_ERROR("bad DISPATCH_INDIRECT\n");
- return -EINVAL;
- }
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad DISPATCH_INDIRECT\n");
- return -EINVAL;
- }
- ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- r = evergreen_cs_track_check(p);
- if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
- return r;
- }
- break;
case PACKET3_WAIT_REG_MEM:
if (pkt->count != 5) {
DRM_ERROR("bad WAIT_REG_MEM\n");
@@ -2041,20 +1160,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
/* bit 4 is reg (0) or mem (1) */
if (idx_value & 0x10) {
- uint64_t offset;
-
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
}
-
- offset = reloc->lobj.gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
-
- ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
- ib[idx+2] = upper_32_bits(offset) & 0xff;
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
}
break;
case PACKET3_SURFACE_SYNC:
@@ -2079,25 +1191,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
if (pkt->count) {
- uint64_t offset;
-
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
-
- ib[idx+1] = offset & 0xfffffff8;
- ib[idx+2] = upper_32_bits(offset) & 0xff;
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
}
break;
case PACKET3_EVENT_WRITE_EOP:
- {
- uint64_t offset;
-
if (pkt->count != 4) {
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
@@ -2107,19 +1210,10 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
-
- offset = reloc->lobj.gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
-
- ib[idx+1] = offset & 0xfffffffc;
- ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
break;
- }
case PACKET3_EVENT_WRITE_EOS:
- {
- uint64_t offset;
-
if (pkt->count != 3) {
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
@@ -2129,15 +1223,9 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
-
- offset = reloc->lobj.gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
-
- ib[idx+1] = offset & 0xfffffffc;
- ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
break;
- }
case PACKET3_SET_CONFIG_REG:
start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
end_reg = 4 * pkt->count + start_reg - 4;
@@ -2185,7 +1273,6 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
for (i = 0; i < (pkt->count / 8); i++) {
struct radeon_bo *texture, *mipmap;
- u32 toffset, moffset;
u32 size, offset;
switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
@@ -2196,42 +1283,26 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- ib[idx+1+(i*8)+1] |=
- TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- unsigned bankw, bankh, mtaspect, tile_split;
-
- evergreen_tiling_fields(reloc->lobj.tiling_flags,
- &bankw, &bankh, &mtaspect,
- &tile_split);
- ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
- ib[idx+1+(i*8)+7] |=
- TEX_BANK_WIDTH(bankw) |
- TEX_BANK_HEIGHT(bankh) |
- MACRO_TILE_ASPECT(mtaspect) |
- TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
- }
- }
+ ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
texture = reloc->robj;
- toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
/* tex mip base */
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
- moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
mipmap = reloc->robj;
- r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
+ r = evergreen_check_texture_resource(p, idx+1+(i*8),
+ texture, mipmap);
if (r)
return r;
- ib[idx+1+(i*8)+2] += toffset;
- ib[idx+1+(i*8)+3] += moffset;
break;
case SQ_TEX_VTX_VALID_BUFFER:
- {
- uint64_t offset64;
/* vtx base */
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -2243,15 +1314,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
/* force size to size of the buffer */
dev_warn(p->dev, "vbo resource seems too big for the bo\n");
- ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
+ ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
}
-
- offset64 = reloc->lobj.gpu_offset + offset;
- ib[idx+1+(i*8)+0] = offset64;
- ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
- (upper_32_bits(offset64) & 0xff);
+ ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
+ ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
break;
- }
case SQ_TEX_VTX_INVALID_TEXTURE:
case SQ_TEX_VTX_INVALID_BUFFER:
default:
@@ -2307,104 +1374,6 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
break;
- case PACKET3_STRMOUT_BUFFER_UPDATE:
- if (pkt->count != 4) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
- return -EINVAL;
- }
- /* Updating memory at DST_ADDRESS. */
- if (idx_value & 0x1) {
- u64 offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
- return -EINVAL;
- }
- offset = radeon_get_ib_value(p, idx+1);
- offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
- if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
- return -EINVAL;
- }
- offset += reloc->lobj.gpu_offset;
- ib[idx+1] = offset;
- ib[idx+2] = upper_32_bits(offset) & 0xff;
- }
- /* Reading data from SRC_ADDRESS. */
- if (((idx_value >> 1) & 0x3) == 2) {
- u64 offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
- return -EINVAL;
- }
- offset = radeon_get_ib_value(p, idx+3);
- offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
- return -EINVAL;
- }
- offset += reloc->lobj.gpu_offset;
- ib[idx+3] = offset;
- ib[idx+4] = upper_32_bits(offset) & 0xff;
- }
- break;
- case PACKET3_COPY_DW:
- if (pkt->count != 4) {
- DRM_ERROR("bad COPY_DW (invalid count)\n");
- return -EINVAL;
- }
- if (idx_value & 0x1) {
- u64 offset;
- /* SRC is memory. */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad COPY_DW (missing src reloc)\n");
- return -EINVAL;
- }
- offset = radeon_get_ib_value(p, idx+1);
- offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
- if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
- return -EINVAL;
- }
- offset += reloc->lobj.gpu_offset;
- ib[idx+1] = offset;
- ib[idx+2] = upper_32_bits(offset) & 0xff;
- } else {
- /* SRC is a reg. */
- reg = radeon_get_ib_value(p, idx+1) << 2;
- if (!evergreen_is_safe_reg(p, reg, idx+1))
- return -EINVAL;
- }
- if (idx_value & 0x2) {
- u64 offset;
- /* DST is memory. */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
- return -EINVAL;
- }
- offset = radeon_get_ib_value(p, idx+3);
- offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
- return -EINVAL;
- }
- offset += reloc->lobj.gpu_offset;
- ib[idx+3] = offset;
- ib[idx+4] = upper_32_bits(offset) & 0xff;
- } else {
- /* DST is a reg. */
- reg = radeon_get_ib_value(p, idx+3) << 2;
- if (!evergreen_is_safe_reg(p, reg, idx+3))
- return -EINVAL;
- }
- break;
case PACKET3_NOP:
break;
default:
@@ -2418,7 +1387,6 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_packet pkt;
struct evergreen_cs_track *track;
- u32 tmp;
int r;
if (p->track == NULL) {
@@ -2427,63 +1395,9 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
if (track == NULL)
return -ENOMEM;
evergreen_cs_track_init(track);
- if (p->rdev->family >= CHIP_CAYMAN)
- tmp = p->rdev->config.cayman.tile_config;
- else
- tmp = p->rdev->config.evergreen.tile_config;
-
- switch (tmp & 0xf) {
- case 0:
- track->npipes = 1;
- break;
- case 1:
- default:
- track->npipes = 2;
- break;
- case 2:
- track->npipes = 4;
- break;
- case 3:
- track->npipes = 8;
- break;
- }
-
- switch ((tmp & 0xf0) >> 4) {
- case 0:
- track->nbanks = 4;
- break;
- case 1:
- default:
- track->nbanks = 8;
- break;
- case 2:
- track->nbanks = 16;
- break;
- }
-
- switch ((tmp & 0xf00) >> 8) {
- case 0:
- track->group_size = 256;
- break;
- case 1:
- default:
- track->group_size = 512;
- break;
- }
-
- switch ((tmp & 0xf000) >> 12) {
- case 0:
- track->row_size = 1;
- break;
- case 1:
- default:
- track->row_size = 2;
- break;
- case 2:
- track->row_size = 4;
- break;
- }
-
+ track->npipes = p->rdev->config.evergreen.tiling_npipes;
+ track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
+ track->group_size = p->rdev->config.evergreen.tiling_group_size;
p->track = track;
}
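
The removed block decodes the packed tile_config word field by field; setting aside the default clamping in the switches, each mapping is again a shift. A sketch, assuming in-range field values:

	/* Sketch: shift form of the removed tile_config decode. */
	track->npipes     = 1   << (tmp & 0xf);		/* 0..3 -> 1,2,4,8 */
	track->nbanks     = 4   << ((tmp >> 4) & 0xf);	/* 0..2 -> 4,8,16 */
	track->group_size = 256 << ((tmp >> 8) & 0xf);	/* 0..1 -> 256,512 */
	track->row_size   = 1   << ((tmp >> 12) & 0xf);	/* 0..2 -> 1,2,4 */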
do {
@@ -2526,242 +1440,3 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
-/* vm parser */
-static bool evergreen_vm_reg_valid(u32 reg)
-{
- /* context regs are fine */
- if (reg >= 0x28000)
- return true;
-
- /* check config regs */
- switch (reg) {
- case GRBM_GFX_INDEX:
- case VGT_VTX_VECT_EJECT_REG:
- case VGT_CACHE_INVALIDATION:
- case VGT_GS_VERTEX_REUSE:
- case VGT_PRIMITIVE_TYPE:
- case VGT_INDEX_TYPE:
- case VGT_NUM_INDICES:
- case VGT_NUM_INSTANCES:
- case VGT_COMPUTE_DIM_X:
- case VGT_COMPUTE_DIM_Y:
- case VGT_COMPUTE_DIM_Z:
- case VGT_COMPUTE_START_X:
- case VGT_COMPUTE_START_Y:
- case VGT_COMPUTE_START_Z:
- case VGT_COMPUTE_INDEX:
- case VGT_COMPUTE_THREAD_GROUP_SIZE:
- case VGT_HS_OFFCHIP_PARAM:
- case PA_CL_ENHANCE:
- case PA_SU_LINE_STIPPLE_VALUE:
- case PA_SC_LINE_STIPPLE_STATE:
- case PA_SC_ENHANCE:
- case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
- case SQ_DYN_GPR_SIMD_LOCK_EN:
- case SQ_CONFIG:
- case SQ_GPR_RESOURCE_MGMT_1:
- case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
- case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
- case SQ_CONST_MEM_BASE:
- case SQ_STATIC_THREAD_MGMT_1:
- case SQ_STATIC_THREAD_MGMT_2:
- case SQ_STATIC_THREAD_MGMT_3:
- case SPI_CONFIG_CNTL:
- case SPI_CONFIG_CNTL_1:
- case TA_CNTL_AUX:
- case DB_DEBUG:
- case DB_DEBUG2:
- case DB_DEBUG3:
- case DB_DEBUG4:
- case DB_WATERMARKS:
- case TD_PS_BORDER_COLOR_INDEX:
- case TD_PS_BORDER_COLOR_RED:
- case TD_PS_BORDER_COLOR_GREEN:
- case TD_PS_BORDER_COLOR_BLUE:
- case TD_PS_BORDER_COLOR_ALPHA:
- case TD_VS_BORDER_COLOR_INDEX:
- case TD_VS_BORDER_COLOR_RED:
- case TD_VS_BORDER_COLOR_GREEN:
- case TD_VS_BORDER_COLOR_BLUE:
- case TD_VS_BORDER_COLOR_ALPHA:
- case TD_GS_BORDER_COLOR_INDEX:
- case TD_GS_BORDER_COLOR_RED:
- case TD_GS_BORDER_COLOR_GREEN:
- case TD_GS_BORDER_COLOR_BLUE:
- case TD_GS_BORDER_COLOR_ALPHA:
- case TD_HS_BORDER_COLOR_INDEX:
- case TD_HS_BORDER_COLOR_RED:
- case TD_HS_BORDER_COLOR_GREEN:
- case TD_HS_BORDER_COLOR_BLUE:
- case TD_HS_BORDER_COLOR_ALPHA:
- case TD_LS_BORDER_COLOR_INDEX:
- case TD_LS_BORDER_COLOR_RED:
- case TD_LS_BORDER_COLOR_GREEN:
- case TD_LS_BORDER_COLOR_BLUE:
- case TD_LS_BORDER_COLOR_ALPHA:
- case TD_CS_BORDER_COLOR_INDEX:
- case TD_CS_BORDER_COLOR_RED:
- case TD_CS_BORDER_COLOR_GREEN:
- case TD_CS_BORDER_COLOR_BLUE:
- case TD_CS_BORDER_COLOR_ALPHA:
- case SQ_ESGS_RING_SIZE:
- case SQ_GSVS_RING_SIZE:
- case SQ_ESTMP_RING_SIZE:
- case SQ_GSTMP_RING_SIZE:
- case SQ_HSTMP_RING_SIZE:
- case SQ_LSTMP_RING_SIZE:
- case SQ_PSTMP_RING_SIZE:
- case SQ_VSTMP_RING_SIZE:
- case SQ_ESGS_RING_ITEMSIZE:
- case SQ_ESTMP_RING_ITEMSIZE:
- case SQ_GSTMP_RING_ITEMSIZE:
- case SQ_GSVS_RING_ITEMSIZE:
- case SQ_GS_VERT_ITEMSIZE:
- case SQ_GS_VERT_ITEMSIZE_1:
- case SQ_GS_VERT_ITEMSIZE_2:
- case SQ_GS_VERT_ITEMSIZE_3:
- case SQ_GSVS_RING_OFFSET_1:
- case SQ_GSVS_RING_OFFSET_2:
- case SQ_GSVS_RING_OFFSET_3:
- case SQ_HSTMP_RING_ITEMSIZE:
- case SQ_LSTMP_RING_ITEMSIZE:
- case SQ_PSTMP_RING_ITEMSIZE:
- case SQ_VSTMP_RING_ITEMSIZE:
- case VGT_TF_RING_SIZE:
- case SQ_ESGS_RING_BASE:
- case SQ_GSVS_RING_BASE:
- case SQ_ESTMP_RING_BASE:
- case SQ_GSTMP_RING_BASE:
- case SQ_HSTMP_RING_BASE:
- case SQ_LSTMP_RING_BASE:
- case SQ_PSTMP_RING_BASE:
- case SQ_VSTMP_RING_BASE:
- case CAYMAN_VGT_OFFCHIP_LDS_BASE:
- case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
- return true;
- default:
- return false;
- }
-}
-
-static int evergreen_vm_packet3_check(struct radeon_device *rdev,
- u32 *ib, struct radeon_cs_packet *pkt)
-{
- u32 idx = pkt->idx + 1;
- u32 idx_value = ib[idx];
- u32 start_reg, end_reg, reg, i;
-
- switch (pkt->opcode) {
- case PACKET3_NOP:
- case PACKET3_SET_BASE:
- case PACKET3_CLEAR_STATE:
- case PACKET3_INDEX_BUFFER_SIZE:
- case PACKET3_DISPATCH_DIRECT:
- case PACKET3_DISPATCH_INDIRECT:
- case PACKET3_MODE_CONTROL:
- case PACKET3_SET_PREDICATION:
- case PACKET3_COND_EXEC:
- case PACKET3_PRED_EXEC:
- case PACKET3_DRAW_INDIRECT:
- case PACKET3_DRAW_INDEX_INDIRECT:
- case PACKET3_INDEX_BASE:
- case PACKET3_DRAW_INDEX_2:
- case PACKET3_CONTEXT_CONTROL:
- case PACKET3_DRAW_INDEX_OFFSET:
- case PACKET3_INDEX_TYPE:
- case PACKET3_DRAW_INDEX:
- case PACKET3_DRAW_INDEX_AUTO:
- case PACKET3_DRAW_INDEX_IMMD:
- case PACKET3_NUM_INSTANCES:
- case PACKET3_DRAW_INDEX_MULTI_AUTO:
- case PACKET3_STRMOUT_BUFFER_UPDATE:
- case PACKET3_DRAW_INDEX_OFFSET_2:
- case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
- case PACKET3_MPEG_INDEX:
- case PACKET3_WAIT_REG_MEM:
- case PACKET3_MEM_WRITE:
- case PACKET3_SURFACE_SYNC:
- case PACKET3_EVENT_WRITE:
- case PACKET3_EVENT_WRITE_EOP:
- case PACKET3_EVENT_WRITE_EOS:
- case PACKET3_SET_CONTEXT_REG:
- case PACKET3_SET_BOOL_CONST:
- case PACKET3_SET_LOOP_CONST:
- case PACKET3_SET_RESOURCE:
- case PACKET3_SET_SAMPLER:
- case PACKET3_SET_CTL_CONST:
- case PACKET3_SET_RESOURCE_OFFSET:
- case PACKET3_SET_CONTEXT_REG_INDIRECT:
- case PACKET3_SET_RESOURCE_INDIRECT:
- case CAYMAN_PACKET3_DEALLOC_STATE:
- break;
- case PACKET3_COND_WRITE:
- if (idx_value & 0x100) {
- reg = ib[idx + 5] * 4;
- if (!evergreen_vm_reg_valid(reg))
- return -EINVAL;
- }
- break;
- case PACKET3_COPY_DW:
- if (idx_value & 0x2) {
- reg = ib[idx + 3] * 4;
- if (!evergreen_vm_reg_valid(reg))
- return -EINVAL;
- }
- break;
- case PACKET3_SET_CONFIG_REG:
- start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
- end_reg = 4 * pkt->count + start_reg - 4;
- if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
- (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
- (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
- return -EINVAL;
- }
- for (i = 0; i < pkt->count; i++) {
- reg = start_reg + (4 * i);
- if (!evergreen_vm_reg_valid(reg))
- return -EINVAL;
- }
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- int ret = 0;
- u32 idx = 0;
- struct radeon_cs_packet pkt;
-
- do {
- pkt.idx = idx;
- pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
- pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
- pkt.one_reg_wr = 0;
- switch (pkt.type) {
- case PACKET_TYPE0:
- dev_err(rdev->dev, "Packet0 not allowed!\n");
- ret = -EINVAL;
- break;
- case PACKET_TYPE2:
- idx += 1;
- break;
- case PACKET_TYPE3:
- pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
- ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
- idx += pkt.count + 2;
- break;
- default:
- dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
- ret = -EINVAL;
- break;
- }
- if (ret)
- break;
- } while (idx < ib->length_dw);
-
- return ret;
-}
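
The removed evergreen_ib_parse() walks the indirect buffer by decoding each packet header: the type sits in the top two bits, the payload count in bits 29:16, and for type-3 packets the opcode in bits 15:8. A sketch of the decode the CP_PACKET_GET_* macros are assumed to perform:

	/* Sketch: CP packet header fields as consumed by the removed parser. */
	u32 header = ib->ptr[idx];
	u32 type   = (header >> 30) & 0x3;	/* PACKET_TYPE0/2/3 */
	u32 count  = (header >> 16) & 0x3fff;	/* payload dwords minus one */
	u32 opcode = (header >> 8) & 0xff;	/* type-3 packets only */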
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 96c10b3..c781c92 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -35,14 +35,6 @@
#define EVERGREEN_P1PLL_SS_CNTL 0x414
#define EVERGREEN_P2PLL_SS_CNTL 0x454
# define EVERGREEN_PxPLL_SS_EN (1 << 12)
-
-#define EVERGREEN_AUDIO_PLL1_MUL 0x5b0
-#define EVERGREEN_AUDIO_PLL1_DIV 0x5b4
-#define EVERGREEN_AUDIO_PLL1_UNK 0x5bc
-
-#define EVERGREEN_AUDIO_ENABLE 0x5e78
-#define EVERGREEN_AUDIO_VENDOR_ID 0x5ec0
-
/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
#define EVERGREEN_GRPH_ENABLE 0x6800
#define EVERGREEN_GRPH_CONTROL 0x6804
@@ -50,17 +42,6 @@
# define EVERGREEN_GRPH_DEPTH_8BPP 0
# define EVERGREEN_GRPH_DEPTH_16BPP 1
# define EVERGREEN_GRPH_DEPTH_32BPP 2
-# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-# define EVERGREEN_ADDR_SURF_2_BANK 0
-# define EVERGREEN_ADDR_SURF_4_BANK 1
-# define EVERGREEN_ADDR_SURF_8_BANK 2
-# define EVERGREEN_ADDR_SURF_16_BANK 3
-# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
-# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
-# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
-# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
-# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define EVERGREEN_GRPH_FORMAT_INDEXED 0
@@ -80,24 +61,6 @@
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
-# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
-# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
-# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
-# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
-# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
-# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
-# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
-# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
-# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
-# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
-# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
-# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
-# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
-# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
-# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
@@ -219,7 +182,6 @@
# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define EVERGREEN_CRTC_STATUS 0x6e8c
-# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
@@ -229,9 +191,4 @@
#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
-/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
-#define EVERGREEN_HDMI_BASE 0x7030
-
-#define EVERGREEN_HDMI_CONFIG_OFFSET 0xf0
-
#endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index d83fd9c..ab670c3 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -77,7 +77,6 @@
#define CONFIG_MEMSIZE 0x5428
-#define CP_COHER_BASE 0x85F8
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
@@ -109,7 +108,6 @@
#define CP_RB_WPTR_ADDR_HI 0xC11C
#define CP_RB_WPTR_DELAY 0x8704
#define CP_SEM_WAIT_TIMER 0x85BC
-#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
#define CP_DEBUG 0xC1FC
@@ -245,7 +243,6 @@
#define PA_CL_ENHANCE 0x8A14
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
-#define PA_SC_ENHANCE 0x8BF0
#define PA_SC_AA_CONFIG 0x28C04
#define MSAA_NUM_SAMPLES_SHIFT 0
#define MSAA_NUM_SAMPLES_MASK 0x3
@@ -323,8 +320,6 @@
#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
#define NUM_HS_GPRS(x) ((x) << 0)
#define NUM_LS_GPRS(x) ((x) << 16)
-#define SQ_GLOBAL_GPR_RESOURCE_MGMT_1 0x8C10
-#define SQ_GLOBAL_GPR_RESOURCE_MGMT_2 0x8C14
#define SQ_THREAD_RESOURCE_MGMT 0x8C18
#define NUM_PS_THREADS(x) ((x) << 0)
#define NUM_VS_THREADS(x) ((x) << 8)
@@ -343,10 +338,6 @@
#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
-#define SQ_DYN_GPR_SIMD_LOCK_EN 0x8D94
-#define SQ_STATIC_THREAD_MGMT_1 0x8E20
-#define SQ_STATIC_THREAD_MGMT_2 0x8E24
-#define SQ_STATIC_THREAD_MGMT_3 0x8E28
#define SQ_LDS_RESOURCE_MGMT 0x8E2C
#define SQ_MS_FIFO_SIZES 0x8CF0
@@ -361,7 +352,6 @@
#define COLOR_BUFFER_SIZE(x) ((x) << 0)
#define POSITION_BUFFER_SIZE(x) ((x) << 8)
#define SMX_BUFFER_SIZE(x) ((x) << 16)
-#define SX_MEMORY_EXPORT_BASE 0x9010
#define SX_MISC 0x28350
#define CB_PERF_CTR0_SEL_0 0x9A20
@@ -705,7 +695,6 @@
#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
-#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
@@ -783,8 +772,6 @@
#define SQ_TEX_VTX_VALID_TEXTURE 0x2
#define SQ_TEX_VTX_VALID_BUFFER 0x3
-#define VGT_VTX_VECT_EJECT_REG 0x88b0
-
#define SQ_CONST_MEM_BASE 0x8df8
#define SQ_ESGS_RING_BASE 0x8c40
@@ -909,152 +896,19 @@
#define PA_SC_SCREEN_SCISSOR_TL 0x28030
#define PA_SC_GENERIC_SCISSOR_TL 0x28240
#define PA_SC_WINDOW_SCISSOR_TL 0x28204
-
#define VGT_PRIMITIVE_TYPE 0x8958
-#define VGT_INDEX_TYPE 0x895C
-
-#define VGT_NUM_INDICES 0x8970
-
-#define VGT_COMPUTE_DIM_X 0x8990
-#define VGT_COMPUTE_DIM_Y 0x8994
-#define VGT_COMPUTE_DIM_Z 0x8998
-#define VGT_COMPUTE_START_X 0x899C
-#define VGT_COMPUTE_START_Y 0x89A0
-#define VGT_COMPUTE_START_Z 0x89A4
-#define VGT_COMPUTE_INDEX 0x89A8
-#define VGT_COMPUTE_THREAD_GROUP_SIZE 0x89AC
-#define VGT_HS_OFFCHIP_PARAM 0x89B0
-
-#define DB_DEBUG 0x9830
-#define DB_DEBUG2 0x9834
-#define DB_DEBUG3 0x9838
-#define DB_DEBUG4 0x983C
-#define DB_WATERMARKS 0x9854
+
#define DB_DEPTH_CONTROL 0x28800
-#define R_028800_DB_DEPTH_CONTROL 0x028800
-#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
-#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
-#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
-#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
-#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
-#define C_028800_Z_ENABLE 0xFFFFFFFD
-#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
-#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
-#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
-#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
-#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
-#define C_028800_ZFUNC 0xFFFFFF8F
-#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
-#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
-#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
-#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
-#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
-#define C_028800_STENCILFUNC 0xFFFFF8FF
-#define V_028800_STENCILFUNC_NEVER 0x00000000
-#define V_028800_STENCILFUNC_LESS 0x00000001
-#define V_028800_STENCILFUNC_EQUAL 0x00000002
-#define V_028800_STENCILFUNC_LEQUAL 0x00000003
-#define V_028800_STENCILFUNC_GREATER 0x00000004
-#define V_028800_STENCILFUNC_NOTEQUAL 0x00000005
-#define V_028800_STENCILFUNC_GEQUAL 0x00000006
-#define V_028800_STENCILFUNC_ALWAYS 0x00000007
-#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
-#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
-#define C_028800_STENCILFAIL 0xFFFFC7FF
-#define V_028800_STENCIL_KEEP 0x00000000
-#define V_028800_STENCIL_ZERO 0x00000001
-#define V_028800_STENCIL_REPLACE 0x00000002
-#define V_028800_STENCIL_INCR 0x00000003
-#define V_028800_STENCIL_DECR 0x00000004
-#define V_028800_STENCIL_INVERT 0x00000005
-#define V_028800_STENCIL_INCR_WRAP 0x00000006
-#define V_028800_STENCIL_DECR_WRAP 0x00000007
-#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
-#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
-#define C_028800_STENCILZPASS 0xFFFE3FFF
-#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
-#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
-#define C_028800_STENCILZFAIL 0xFFF1FFFF
-#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
-#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
-#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
-#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
-#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
-#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
-#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
-#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
-#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
-#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
-#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
-#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
#define DB_DEPTH_VIEW 0x28008
-#define R_028008_DB_DEPTH_VIEW 0x00028008
-#define S_028008_SLICE_START(x) (((x) & 0x7FF) << 0)
-#define G_028008_SLICE_START(x) (((x) >> 0) & 0x7FF)
-#define C_028008_SLICE_START 0xFFFFF800
-#define S_028008_SLICE_MAX(x) (((x) & 0x7FF) << 13)
-#define G_028008_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
-#define C_028008_SLICE_MAX 0xFF001FFF
#define DB_HTILE_DATA_BASE 0x28014
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
-# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
-# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
-# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
-# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
-# define DB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24)
-#define R_028040_DB_Z_INFO 0x028040
-#define S_028040_FORMAT(x) (((x) & 0x3) << 0)
-#define G_028040_FORMAT(x) (((x) >> 0) & 0x3)
-#define C_028040_FORMAT 0xFFFFFFFC
-#define V_028040_Z_INVALID 0x00000000
-#define V_028040_Z_16 0x00000001
-#define V_028040_Z_24 0x00000002
-#define V_028040_Z_32_FLOAT 0x00000003
-#define S_028040_ARRAY_MODE(x) (((x) & 0xF) << 4)
-#define G_028040_ARRAY_MODE(x) (((x) >> 4) & 0xF)
-#define C_028040_ARRAY_MODE 0xFFFFFF0F
-#define S_028040_READ_SIZE(x) (((x) & 0x1) << 28)
-#define G_028040_READ_SIZE(x) (((x) >> 28) & 0x1)
-#define C_028040_READ_SIZE 0xEFFFFFFF
-#define S_028040_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 29)
-#define G_028040_TILE_SURFACE_ENABLE(x) (((x) >> 29) & 0x1)
-#define C_028040_TILE_SURFACE_ENABLE 0xDFFFFFFF
-#define S_028040_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
-#define G_028040_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
-#define C_028040_ZRANGE_PRECISION 0x7FFFFFFF
-#define S_028040_TILE_SPLIT(x) (((x) & 0x7) << 8)
-#define G_028040_TILE_SPLIT(x) (((x) >> 8) & 0x7)
-#define S_028040_NUM_BANKS(x) (((x) & 0x3) << 12)
-#define G_028040_NUM_BANKS(x) (((x) >> 12) & 0x3)
-#define S_028040_BANK_WIDTH(x) (((x) & 0x3) << 16)
-#define G_028040_BANK_WIDTH(x) (((x) >> 16) & 0x3)
-#define S_028040_BANK_HEIGHT(x) (((x) & 0x3) << 20)
-#define G_028040_BANK_HEIGHT(x) (((x) >> 20) & 0x3)
-#define S_028040_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24)
-#define G_028040_MACRO_TILE_ASPECT(x) (((x) >> 24) & 0x3)
#define DB_STENCIL_INFO 0x28044
-#define R_028044_DB_STENCIL_INFO 0x028044
-#define S_028044_FORMAT(x) (((x) & 0x1) << 0)
-#define G_028044_FORMAT(x) (((x) >> 0) & 0x1)
-#define C_028044_FORMAT 0xFFFFFFFE
-#define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7)
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
#define DB_Z_WRITE_BASE 0x28050
#define DB_STENCIL_WRITE_BASE 0x28054
#define DB_DEPTH_SIZE 0x28058
-#define R_028058_DB_DEPTH_SIZE 0x028058
-#define S_028058_PITCH_TILE_MAX(x) (((x) & 0x7FF) << 0)
-#define G_028058_PITCH_TILE_MAX(x) (((x) >> 0) & 0x7FF)
-#define C_028058_PITCH_TILE_MAX 0xFFFFF800
-#define S_028058_HEIGHT_TILE_MAX(x) (((x) & 0x7FF) << 11)
-#define G_028058_HEIGHT_TILE_MAX(x) (((x) >> 11) & 0x7FF)
-#define C_028058_HEIGHT_TILE_MAX 0xFFC007FF
-#define R_02805C_DB_DEPTH_SLICE 0x02805C
-#define S_02805C_SLICE_TILE_MAX(x) (((x) & 0x3FFFFF) << 0)
-#define G_02805C_SLICE_TILE_MAX(x) (((x) >> 0) & 0x3FFFFF)
-#define C_02805C_SLICE_TILE_MAX 0xFFC00000
#define SQ_PGM_START_PS 0x28840
#define SQ_PGM_START_VS 0x2885c
@@ -1064,14 +918,6 @@
#define SQ_PGM_START_HS 0x288b8
#define SQ_PGM_START_LS 0x288d0
-#define VGT_STRMOUT_BUFFER_BASE_0 0x28AD8
-#define VGT_STRMOUT_BUFFER_BASE_1 0x28AE8
-#define VGT_STRMOUT_BUFFER_BASE_2 0x28AF8
-#define VGT_STRMOUT_BUFFER_BASE_3 0x28B08
-#define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0
-#define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0
-#define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0
-#define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00
#define VGT_STRMOUT_CONFIG 0x28b94
#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98
@@ -1098,163 +944,13 @@
#define CB_COLOR0_PITCH 0x28c64
#define CB_COLOR0_SLICE 0x28c68
#define CB_COLOR0_VIEW 0x28c6c
-#define R_028C6C_CB_COLOR0_VIEW 0x00028C6C
-#define S_028C6C_SLICE_START(x) (((x) & 0x7FF) << 0)
-#define G_028C6C_SLICE_START(x) (((x) >> 0) & 0x7FF)
-#define C_028C6C_SLICE_START 0xFFFFF800
-#define S_028C6C_SLICE_MAX(x) (((x) & 0x7FF) << 13)
-#define G_028C6C_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
-#define C_028C6C_SLICE_MAX 0xFF001FFF
-#define R_028C70_CB_COLOR0_INFO 0x028C70
-#define S_028C70_ENDIAN(x) (((x) & 0x3) << 0)
-#define G_028C70_ENDIAN(x) (((x) >> 0) & 0x3)
-#define C_028C70_ENDIAN 0xFFFFFFFC
-#define S_028C70_FORMAT(x) (((x) & 0x3F) << 2)
-#define G_028C70_FORMAT(x) (((x) >> 2) & 0x3F)
-#define C_028C70_FORMAT 0xFFFFFF03
-#define V_028C70_COLOR_INVALID 0x00000000
-#define V_028C70_COLOR_8 0x00000001
-#define V_028C70_COLOR_4_4 0x00000002
-#define V_028C70_COLOR_3_3_2 0x00000003
-#define V_028C70_COLOR_16 0x00000005
-#define V_028C70_COLOR_16_FLOAT 0x00000006
-#define V_028C70_COLOR_8_8 0x00000007
-#define V_028C70_COLOR_5_6_5 0x00000008
-#define V_028C70_COLOR_6_5_5 0x00000009
-#define V_028C70_COLOR_1_5_5_5 0x0000000A
-#define V_028C70_COLOR_4_4_4_4 0x0000000B
-#define V_028C70_COLOR_5_5_5_1 0x0000000C
-#define V_028C70_COLOR_32 0x0000000D
-#define V_028C70_COLOR_32_FLOAT 0x0000000E
-#define V_028C70_COLOR_16_16 0x0000000F
-#define V_028C70_COLOR_16_16_FLOAT 0x00000010
-#define V_028C70_COLOR_8_24 0x00000011
-#define V_028C70_COLOR_8_24_FLOAT 0x00000012
-#define V_028C70_COLOR_24_8 0x00000013
-#define V_028C70_COLOR_24_8_FLOAT 0x00000014
-#define V_028C70_COLOR_10_11_11 0x00000015
-#define V_028C70_COLOR_10_11_11_FLOAT 0x00000016
-#define V_028C70_COLOR_11_11_10 0x00000017
-#define V_028C70_COLOR_11_11_10_FLOAT 0x00000018
-#define V_028C70_COLOR_2_10_10_10 0x00000019
-#define V_028C70_COLOR_8_8_8_8 0x0000001A
-#define V_028C70_COLOR_10_10_10_2 0x0000001B
-#define V_028C70_COLOR_X24_8_32_FLOAT 0x0000001C
-#define V_028C70_COLOR_32_32 0x0000001D
-#define V_028C70_COLOR_32_32_FLOAT 0x0000001E
-#define V_028C70_COLOR_16_16_16_16 0x0000001F
-#define V_028C70_COLOR_16_16_16_16_FLOAT 0x00000020
-#define V_028C70_COLOR_32_32_32_32 0x00000022
-#define V_028C70_COLOR_32_32_32_32_FLOAT 0x00000023
-#define V_028C70_COLOR_32_32_32_FLOAT 0x00000030
-#define S_028C70_ARRAY_MODE(x) (((x) & 0xF) << 8)
-#define G_028C70_ARRAY_MODE(x) (((x) >> 8) & 0xF)
-#define C_028C70_ARRAY_MODE 0xFFFFF0FF
-#define V_028C70_ARRAY_LINEAR_GENERAL 0x00000000
-#define V_028C70_ARRAY_LINEAR_ALIGNED 0x00000001
-#define V_028C70_ARRAY_1D_TILED_THIN1 0x00000002
-#define V_028C70_ARRAY_2D_TILED_THIN1 0x00000004
-#define S_028C70_NUMBER_TYPE(x) (((x) & 0x7) << 12)
-#define G_028C70_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
-#define C_028C70_NUMBER_TYPE 0xFFFF8FFF
-#define V_028C70_NUMBER_UNORM 0x00000000
-#define V_028C70_NUMBER_SNORM 0x00000001
-#define V_028C70_NUMBER_USCALED 0x00000002
-#define V_028C70_NUMBER_SSCALED 0x00000003
-#define V_028C70_NUMBER_UINT 0x00000004
-#define V_028C70_NUMBER_SINT 0x00000005
-#define V_028C70_NUMBER_SRGB 0x00000006
-#define V_028C70_NUMBER_FLOAT 0x00000007
-#define S_028C70_COMP_SWAP(x) (((x) & 0x3) << 15)
-#define G_028C70_COMP_SWAP(x) (((x) >> 15) & 0x3)
-#define C_028C70_COMP_SWAP 0xFFFE7FFF
-#define V_028C70_SWAP_STD 0x00000000
-#define V_028C70_SWAP_ALT 0x00000001
-#define V_028C70_SWAP_STD_REV 0x00000002
-#define V_028C70_SWAP_ALT_REV 0x00000003
-#define S_028C70_FAST_CLEAR(x) (((x) & 0x1) << 17)
-#define G_028C70_FAST_CLEAR(x) (((x) >> 17) & 0x1)
-#define C_028C70_FAST_CLEAR 0xFFFDFFFF
-#define S_028C70_COMPRESSION(x) (((x) & 0x3) << 18)
-#define G_028C70_COMPRESSION(x) (((x) >> 18) & 0x3)
-#define C_028C70_COMPRESSION 0xFFF3FFFF
-#define S_028C70_BLEND_CLAMP(x) (((x) & 0x1) << 19)
-#define G_028C70_BLEND_CLAMP(x) (((x) >> 19) & 0x1)
-#define C_028C70_BLEND_CLAMP 0xFFF7FFFF
-#define S_028C70_BLEND_BYPASS(x) (((x) & 0x1) << 20)
-#define G_028C70_BLEND_BYPASS(x) (((x) >> 20) & 0x1)
-#define C_028C70_BLEND_BYPASS 0xFFEFFFFF
-#define S_028C70_SIMPLE_FLOAT(x) (((x) & 0x1) << 21)
-#define G_028C70_SIMPLE_FLOAT(x) (((x) >> 21) & 0x1)
-#define C_028C70_SIMPLE_FLOAT 0xFFDFFFFF
-#define S_028C70_ROUND_MODE(x) (((x) & 0x1) << 22)
-#define G_028C70_ROUND_MODE(x) (((x) >> 22) & 0x1)
-#define C_028C70_ROUND_MODE 0xFFBFFFFF
-#define S_028C70_TILE_COMPACT(x) (((x) & 0x1) << 23)
-#define G_028C70_TILE_COMPACT(x) (((x) >> 23) & 0x1)
-#define C_028C70_TILE_COMPACT 0xFF7FFFFF
-#define S_028C70_SOURCE_FORMAT(x) (((x) & 0x3) << 24)
-#define G_028C70_SOURCE_FORMAT(x) (((x) >> 24) & 0x3)
-#define C_028C70_SOURCE_FORMAT 0xFCFFFFFF
-#define V_028C70_EXPORT_4C_32BPC 0x0
-#define V_028C70_EXPORT_4C_16BPC 0x1
-#define V_028C70_EXPORT_2C_32BPC 0x2 /* Do not use */
-#define S_028C70_RAT(x) (((x) & 0x1) << 26)
-#define G_028C70_RAT(x) (((x) >> 26) & 0x1)
-#define C_028C70_RAT 0xFBFFFFFF
-#define S_028C70_RESOURCE_TYPE(x) (((x) & 0x7) << 27)
-#define G_028C70_RESOURCE_TYPE(x) (((x) >> 27) & 0x7)
-#define C_028C70_RESOURCE_TYPE 0xC7FFFFFF
-
#define CB_COLOR0_INFO 0x28c70
-# define CB_FORMAT(x) ((x) << 2)
# define CB_ARRAY_MODE(x) ((x) << 8)
# define ARRAY_LINEAR_GENERAL 0
# define ARRAY_LINEAR_ALIGNED 1
# define ARRAY_1D_TILED_THIN1 2
# define ARRAY_2D_TILED_THIN1 4
-# define CB_SOURCE_FORMAT(x) ((x) << 24)
-# define CB_SF_EXPORT_FULL 0
-# define CB_SF_EXPORT_NORM 1
-#define R_028C74_CB_COLOR0_ATTRIB 0x028C74
-#define S_028C74_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 4)
-#define G_028C74_NON_DISP_TILING_ORDER(x) (((x) >> 4) & 0x1)
-#define C_028C74_NON_DISP_TILING_ORDER 0xFFFFFFEF
-#define S_028C74_TILE_SPLIT(x) (((x) & 0xf) << 5)
-#define G_028C74_TILE_SPLIT(x) (((x) >> 5) & 0xf)
-#define S_028C74_NUM_BANKS(x) (((x) & 0x3) << 10)
-#define G_028C74_NUM_BANKS(x) (((x) >> 10) & 0x3)
-#define S_028C74_BANK_WIDTH(x) (((x) & 0x3) << 13)
-#define G_028C74_BANK_WIDTH(x) (((x) >> 13) & 0x3)
-#define S_028C74_BANK_HEIGHT(x) (((x) & 0x3) << 16)
-#define G_028C74_BANK_HEIGHT(x) (((x) >> 16) & 0x3)
-#define S_028C74_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19)
-#define G_028C74_MACRO_TILE_ASPECT(x) (((x) >> 19) & 0x3)
#define CB_COLOR0_ATTRIB 0x28c74
-# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
-# define ADDR_SURF_TILE_SPLIT_64B 0
-# define ADDR_SURF_TILE_SPLIT_128B 1
-# define ADDR_SURF_TILE_SPLIT_256B 2
-# define ADDR_SURF_TILE_SPLIT_512B 3
-# define ADDR_SURF_TILE_SPLIT_1KB 4
-# define ADDR_SURF_TILE_SPLIT_2KB 5
-# define ADDR_SURF_TILE_SPLIT_4KB 6
-# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
-# define ADDR_SURF_2_BANK 0
-# define ADDR_SURF_4_BANK 1
-# define ADDR_SURF_8_BANK 2
-# define ADDR_SURF_16_BANK 3
-# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
-# define ADDR_SURF_BANK_WIDTH_1 0
-# define ADDR_SURF_BANK_WIDTH_2 1
-# define ADDR_SURF_BANK_WIDTH_4 2
-# define ADDR_SURF_BANK_WIDTH_8 3
-# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
-# define ADDR_SURF_BANK_HEIGHT_1 0
-# define ADDR_SURF_BANK_HEIGHT_2 1
-# define ADDR_SURF_BANK_HEIGHT_4 2
-# define ADDR_SURF_BANK_HEIGHT_8 3
-# define CB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19)
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
@@ -1415,232 +1111,22 @@
#define CB_COLOR7_CLEAR_WORD3 0x28e3c
#define SQ_TEX_RESOURCE_WORD0_0 0x30000
-# define TEX_DIM(x) ((x) << 0)
-# define SQ_TEX_DIM_1D 0
-# define SQ_TEX_DIM_2D 1
-# define SQ_TEX_DIM_3D 2
-# define SQ_TEX_DIM_CUBEMAP 3
-# define SQ_TEX_DIM_1D_ARRAY 4
-# define SQ_TEX_DIM_2D_ARRAY 5
-# define SQ_TEX_DIM_2D_MSAA 6
-# define SQ_TEX_DIM_2D_ARRAY_MSAA 7
#define SQ_TEX_RESOURCE_WORD1_0 0x30004
# define TEX_ARRAY_MODE(x) ((x) << 28)
#define SQ_TEX_RESOURCE_WORD2_0 0x30008
#define SQ_TEX_RESOURCE_WORD3_0 0x3000C
#define SQ_TEX_RESOURCE_WORD4_0 0x30010
-# define TEX_DST_SEL_X(x) ((x) << 16)
-# define TEX_DST_SEL_Y(x) ((x) << 19)
-# define TEX_DST_SEL_Z(x) ((x) << 22)
-# define TEX_DST_SEL_W(x) ((x) << 25)
-# define SQ_SEL_X 0
-# define SQ_SEL_Y 1
-# define SQ_SEL_Z 2
-# define SQ_SEL_W 3
-# define SQ_SEL_0 4
-# define SQ_SEL_1 5
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
-# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
-# define MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6)
-# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
-# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
-# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
-#define R_030000_SQ_TEX_RESOURCE_WORD0_0 0x030000
-#define S_030000_DIM(x) (((x) & 0x7) << 0)
-#define G_030000_DIM(x) (((x) >> 0) & 0x7)
-#define C_030000_DIM 0xFFFFFFF8
-#define V_030000_SQ_TEX_DIM_1D 0x00000000
-#define V_030000_SQ_TEX_DIM_2D 0x00000001
-#define V_030000_SQ_TEX_DIM_3D 0x00000002
-#define V_030000_SQ_TEX_DIM_CUBEMAP 0x00000003
-#define V_030000_SQ_TEX_DIM_1D_ARRAY 0x00000004
-#define V_030000_SQ_TEX_DIM_2D_ARRAY 0x00000005
-#define V_030000_SQ_TEX_DIM_2D_MSAA 0x00000006
-#define V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
-#define S_030000_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 5)
-#define G_030000_NON_DISP_TILING_ORDER(x) (((x) >> 5) & 0x1)
-#define C_030000_NON_DISP_TILING_ORDER 0xFFFFFFDF
-#define S_030000_PITCH(x) (((x) & 0xFFF) << 6)
-#define G_030000_PITCH(x) (((x) >> 6) & 0xFFF)
-#define C_030000_PITCH 0xFFFC003F
-#define S_030000_TEX_WIDTH(x) (((x) & 0x3FFF) << 18)
-#define G_030000_TEX_WIDTH(x) (((x) >> 18) & 0x3FFF)
-#define C_030000_TEX_WIDTH 0x0003FFFF
-#define R_030004_SQ_TEX_RESOURCE_WORD1_0 0x030004
-#define S_030004_TEX_HEIGHT(x) (((x) & 0x3FFF) << 0)
-#define G_030004_TEX_HEIGHT(x) (((x) >> 0) & 0x3FFF)
-#define C_030004_TEX_HEIGHT 0xFFFFC000
-#define S_030004_TEX_DEPTH(x) (((x) & 0x1FFF) << 14)
-#define G_030004_TEX_DEPTH(x) (((x) >> 14) & 0x1FFF)
-#define C_030004_TEX_DEPTH 0xF8003FFF
-#define S_030004_ARRAY_MODE(x) (((x) & 0xF) << 28)
-#define G_030004_ARRAY_MODE(x) (((x) >> 28) & 0xF)
-#define C_030004_ARRAY_MODE 0x0FFFFFFF
-#define R_030008_SQ_TEX_RESOURCE_WORD2_0 0x030008
-#define S_030008_BASE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
-#define G_030008_BASE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
-#define C_030008_BASE_ADDRESS 0x00000000
-#define R_03000C_SQ_TEX_RESOURCE_WORD3_0 0x03000C
-#define S_03000C_MIP_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
-#define G_03000C_MIP_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
-#define C_03000C_MIP_ADDRESS 0x00000000
-#define R_030010_SQ_TEX_RESOURCE_WORD4_0 0x030010
-#define S_030010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
-#define G_030010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
-#define C_030010_FORMAT_COMP_X 0xFFFFFFFC
-#define V_030010_SQ_FORMAT_COMP_UNSIGNED 0x00000000
-#define V_030010_SQ_FORMAT_COMP_SIGNED 0x00000001
-#define V_030010_SQ_FORMAT_COMP_UNSIGNED_BIASED 0x00000002
-#define S_030010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
-#define G_030010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
-#define C_030010_FORMAT_COMP_Y 0xFFFFFFF3
-#define S_030010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
-#define G_030010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
-#define C_030010_FORMAT_COMP_Z 0xFFFFFFCF
-#define S_030010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
-#define G_030010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
-#define C_030010_FORMAT_COMP_W 0xFFFFFF3F
-#define S_030010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
-#define G_030010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
-#define C_030010_NUM_FORMAT_ALL 0xFFFFFCFF
-#define V_030010_SQ_NUM_FORMAT_NORM 0x00000000
-#define V_030010_SQ_NUM_FORMAT_INT 0x00000001
-#define V_030010_SQ_NUM_FORMAT_SCALED 0x00000002
-#define S_030010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
-#define G_030010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
-#define C_030010_SRF_MODE_ALL 0xFFFFFBFF
-#define V_030010_SRF_MODE_ZERO_CLAMP_MINUS_ONE 0x00000000
-#define V_030010_SRF_MODE_NO_ZERO 0x00000001
-#define S_030010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
-#define G_030010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
-#define C_030010_FORCE_DEGAMMA 0xFFFFF7FF
-#define S_030010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
-#define G_030010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
-#define C_030010_ENDIAN_SWAP 0xFFFFCFFF
-#define S_030010_DST_SEL_X(x) (((x) & 0x7) << 16)
-#define G_030010_DST_SEL_X(x) (((x) >> 16) & 0x7)
-#define C_030010_DST_SEL_X 0xFFF8FFFF
-#define V_030010_SQ_SEL_X 0x00000000
-#define V_030010_SQ_SEL_Y 0x00000001
-#define V_030010_SQ_SEL_Z 0x00000002
-#define V_030010_SQ_SEL_W 0x00000003
-#define V_030010_SQ_SEL_0 0x00000004
-#define V_030010_SQ_SEL_1 0x00000005
-#define S_030010_DST_SEL_Y(x) (((x) & 0x7) << 19)
-#define G_030010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
-#define C_030010_DST_SEL_Y 0xFFC7FFFF
-#define S_030010_DST_SEL_Z(x) (((x) & 0x7) << 22)
-#define G_030010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
-#define C_030010_DST_SEL_Z 0xFE3FFFFF
-#define S_030010_DST_SEL_W(x) (((x) & 0x7) << 25)
-#define G_030010_DST_SEL_W(x) (((x) >> 25) & 0x7)
-#define C_030010_DST_SEL_W 0xF1FFFFFF
-#define S_030010_BASE_LEVEL(x) (((x) & 0xF) << 28)
-#define G_030010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
-#define C_030010_BASE_LEVEL 0x0FFFFFFF
-#define R_030014_SQ_TEX_RESOURCE_WORD5_0 0x030014
-#define S_030014_LAST_LEVEL(x) (((x) & 0xF) << 0)
-#define G_030014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
-#define C_030014_LAST_LEVEL 0xFFFFFFF0
-#define S_030014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
-#define G_030014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
-#define C_030014_BASE_ARRAY 0xFFFE000F
-#define S_030014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
-#define G_030014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
-#define C_030014_LAST_ARRAY 0xC001FFFF
-#define R_030018_SQ_TEX_RESOURCE_WORD6_0 0x030018
-#define S_030018_MAX_ANISO(x) (((x) & 0x7) << 0)
-#define G_030018_MAX_ANISO(x) (((x) >> 0) & 0x7)
-#define C_030018_MAX_ANISO 0xFFFFFFF8
-#define S_030018_PERF_MODULATION(x) (((x) & 0x7) << 3)
-#define G_030018_PERF_MODULATION(x) (((x) >> 3) & 0x7)
-#define C_030018_PERF_MODULATION 0xFFFFFFC7
-#define S_030018_INTERLACED(x) (((x) & 0x1) << 6)
-#define G_030018_INTERLACED(x) (((x) >> 6) & 0x1)
-#define C_030018_INTERLACED 0xFFFFFFBF
-#define S_030018_TILE_SPLIT(x) (((x) & 0x7) << 29)
-#define G_030018_TILE_SPLIT(x) (((x) >> 29) & 0x7)
-#define R_03001C_SQ_TEX_RESOURCE_WORD7_0 0x03001C
-#define S_03001C_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6)
-#define G_03001C_MACRO_TILE_ASPECT(x) (((x) >> 6) & 0x3)
-#define S_03001C_BANK_WIDTH(x) (((x) & 0x3) << 8)
-#define G_03001C_BANK_WIDTH(x) (((x) >> 8) & 0x3)
-#define S_03001C_BANK_HEIGHT(x) (((x) & 0x3) << 10)
-#define G_03001C_BANK_HEIGHT(x) (((x) >> 10) & 0x3)
-#define S_03001C_NUM_BANKS(x) (((x) & 0x3) << 16)
-#define G_03001C_NUM_BANKS(x) (((x) >> 16) & 0x3)
-#define S_03001C_TYPE(x) (((x) & 0x3) << 30)
-#define G_03001C_TYPE(x) (((x) >> 30) & 0x3)
-#define C_03001C_TYPE 0x3FFFFFFF
-#define V_03001C_SQ_TEX_VTX_INVALID_TEXTURE 0x00000000
-#define V_03001C_SQ_TEX_VTX_INVALID_BUFFER 0x00000001
-#define V_03001C_SQ_TEX_VTX_VALID_TEXTURE 0x00000002
-#define V_03001C_SQ_TEX_VTX_VALID_BUFFER 0x00000003
-#define S_03001C_DATA_FORMAT(x) (((x) & 0x3F) << 0)
-#define G_03001C_DATA_FORMAT(x) (((x) >> 0) & 0x3F)
-#define C_03001C_DATA_FORMAT 0xFFFFFFC0
-
-#define SQ_VTX_CONSTANT_WORD0_0 0x30000
-#define SQ_VTX_CONSTANT_WORD1_0 0x30004
-#define SQ_VTX_CONSTANT_WORD2_0 0x30008
-# define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0)
-# define SQ_VTXC_STRIDE(x) ((x) << 8)
-# define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30)
-# define SQ_ENDIAN_NONE 0
-# define SQ_ENDIAN_8IN16 1
-# define SQ_ENDIAN_8IN32 2
-#define SQ_VTX_CONSTANT_WORD3_0 0x3000C
-# define SQ_VTCX_SEL_X(x) ((x) << 3)
-# define SQ_VTCX_SEL_Y(x) ((x) << 6)
-# define SQ_VTCX_SEL_Z(x) ((x) << 9)
-# define SQ_VTCX_SEL_W(x) ((x) << 12)
-#define SQ_VTX_CONSTANT_WORD4_0 0x30010
-#define SQ_VTX_CONSTANT_WORD5_0 0x30014
-#define SQ_VTX_CONSTANT_WORD6_0 0x30018
-#define SQ_VTX_CONSTANT_WORD7_0 0x3001c
-
-#define TD_PS_BORDER_COLOR_INDEX 0xA400
-#define TD_PS_BORDER_COLOR_RED 0xA404
-#define TD_PS_BORDER_COLOR_GREEN 0xA408
-#define TD_PS_BORDER_COLOR_BLUE 0xA40C
-#define TD_PS_BORDER_COLOR_ALPHA 0xA410
-#define TD_VS_BORDER_COLOR_INDEX 0xA414
-#define TD_VS_BORDER_COLOR_RED 0xA418
-#define TD_VS_BORDER_COLOR_GREEN 0xA41C
-#define TD_VS_BORDER_COLOR_BLUE 0xA420
-#define TD_VS_BORDER_COLOR_ALPHA 0xA424
-#define TD_GS_BORDER_COLOR_INDEX 0xA428
-#define TD_GS_BORDER_COLOR_RED 0xA42C
-#define TD_GS_BORDER_COLOR_GREEN 0xA430
-#define TD_GS_BORDER_COLOR_BLUE 0xA434
-#define TD_GS_BORDER_COLOR_ALPHA 0xA438
-#define TD_HS_BORDER_COLOR_INDEX 0xA43C
-#define TD_HS_BORDER_COLOR_RED 0xA440
-#define TD_HS_BORDER_COLOR_GREEN 0xA444
-#define TD_HS_BORDER_COLOR_BLUE 0xA448
-#define TD_HS_BORDER_COLOR_ALPHA 0xA44C
-#define TD_LS_BORDER_COLOR_INDEX 0xA450
-#define TD_LS_BORDER_COLOR_RED 0xA454
-#define TD_LS_BORDER_COLOR_GREEN 0xA458
-#define TD_LS_BORDER_COLOR_BLUE 0xA45C
-#define TD_LS_BORDER_COLOR_ALPHA 0xA460
-#define TD_CS_BORDER_COLOR_INDEX 0xA464
-#define TD_CS_BORDER_COLOR_RED 0xA468
-#define TD_CS_BORDER_COLOR_GREEN 0xA46C
-#define TD_CS_BORDER_COLOR_BLUE 0xA470
-#define TD_CS_BORDER_COLOR_ALPHA 0xA474
/* cayman 3D regs */
-#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B4
-#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS 0x8E48
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0
#define CAYMAN_DB_EQAA 0x28804
#define CAYMAN_DB_DEPTH_INFO 0x2803C
#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
#define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0
#define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7
-#define CAYMAN_SX_SCATTER_EXPORT_BASE 0x28358
/* cayman packet3 addition */
#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
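The large blocks deleted above follow radeon's S_/G_/C_ naming scheme: for each register field there is a setter S_*() that masks and shifts a value into place, a getter G_*() that extracts it, and a clear-mask C_* that ANDs the field to zero for read-modify-write updates. A minimal user-space sketch of the idiom, using the DB_DEPTH_CONTROL STENCILFUNC triplet removed above:

#include <stdint.h>
#include <stdio.h>

/* One S_/G_/C_ triplet, copied from the lines removed above. */
#define S_028800_STENCILFUNC(x)      (((x) & 0x7) << 8)
#define G_028800_STENCILFUNC(x)      (((x) >> 8) & 0x7)
#define C_028800_STENCILFUNC         0xFFFFF8FF
#define V_028800_STENCILFUNC_ALWAYS  0x00000007

int main(void)
{
	uint32_t db_depth_control = 0x00000312;   /* example register value */

	/* Read-modify-write: clear the old field, then OR in the new value. */
	db_depth_control &= C_028800_STENCILFUNC;
	db_depth_control |= S_028800_STENCILFUNC(V_028800_STENCILFUNC_ALWAYS);

	printf("STENCILFUNC = %u\n", G_028800_STENCILFUNC(db_depth_control));
	return 0;
}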
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 160799c..0c460c4 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -24,7 +24,6 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
@@ -41,7 +40,6 @@ extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
-extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
@@ -262,11 +260,8 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
/* wait for training to complete */
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
- break;
- udelay(1);
- }
+ while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
+ udelay(10);
if (running)
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
@@ -811,7 +806,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
- rdev->config.cayman.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
@@ -934,9 +928,9 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
- int i, r;
+ int r;
- if (rdev->gart.robj == NULL) {
+ if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -945,12 +939,9 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
return r;
radeon_gart_restore(rdev);
/* Setup TLB control */
- WREG32(MC_VM_MX_L1_TLB_CNTL,
- (0xA << 7) |
- ENABLE_L1_TLB |
+ WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
- ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
@@ -970,37 +961,19 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT0_CNTL2, 0);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
-
- WREG32(0x15D4, 0);
- WREG32(0x15D8, 0);
- WREG32(0x15DC, 0);
-
- /* empty context1-7 */
- for (i = 1; i < 8; i++) {
- WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
- rdev->gart.table_addr >> 12);
- }
-
- /* enable context1-7 */
- WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
- (u32)(rdev->dummy_page.addr >> 12));
+ /* disable context1-7 */
WREG32(VM_CONTEXT1_CNTL2, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
- WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
cayman_pcie_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(rdev->mc.gtt_size >> 20),
- (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
+ int r;
+
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
@@ -1016,7 +989,14 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev)
WREG32(VM_L2_CNTL2, 0);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
L2_CACHE_BIGK_FRAGMENT_SIZE(6));
- radeon_gart_table_vram_unpin(rdev);
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
}
void cayman_pcie_gart_fini(struct radeon_device *rdev)
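The reserve/kunmap/unpin/unreserve sequence restored in cayman_pcie_gart_disable() above is the usual teardown order for a pinned, kernel-mapped radeon BO: the object must be reserved before it may be touched, and the mapping is dropped before the pin. A compile-alone sketch of the same sequence, with stub helpers (simplified signatures) standing in for the real radeon_bo_* functions:

#include <stddef.h>

struct radeon_bo;   /* opaque here; the real type lives in radeon.h */

/* Stubs so the sketch builds outside the kernel; they mirror the driver
 * helpers used in the hunk above in name only. */
static int  radeon_bo_reserve(struct radeon_bo *bo, int no_wait) { (void)bo; (void)no_wait; return 0; }
static void radeon_bo_kunmap(struct radeon_bo *bo)    { (void)bo; }
static void radeon_bo_unpin(struct radeon_bo *bo)     { (void)bo; }
static void radeon_bo_unreserve(struct radeon_bo *bo) { (void)bo; }

static void teardown_table(struct radeon_bo *robj)
{
	if (!robj)
		return;
	/* Reserve first; only on success may the BO be unmapped and unpinned. */
	if (radeon_bo_reserve(robj, 0) == 0) {
		radeon_bo_kunmap(robj);
		radeon_bo_unpin(robj);
		radeon_bo_unreserve(robj);
	}
}

int main(void)
{
	teardown_table(NULL);   /* a missing table is tolerated, as above */
	return 0;
}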
@@ -1026,69 +1006,9 @@ void cayman_pcie_gart_fini(struct radeon_device *rdev)
radeon_gart_fini(rdev);
}
-void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
- int ring, u32 cp_int_cntl)
-{
- u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
-
- WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
- WREG32(CP_INT_CNTL, cp_int_cntl);
-}
-
/*
* CP.
*/
-void cayman_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
-
- /* flush read cache over gart for this vmid */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
- radeon_ring_write(ring, 0xFFFFFFFF);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 10); /* poll interval */
- /* EVENT_WRITE_EOP - flush caches, send int */
- radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
- radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
- radeon_ring_write(ring, addr & 0xffffffff);
- radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
- radeon_ring_write(ring, fence->seq);
- radeon_ring_write(ring, 0);
-}
-
-void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
-
- /* set to DX10/11 mode */
- radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(ring, 1);
- radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
- (2 << 0) |
-#endif
- (ib->gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
-
- /* flush read cache over gart for this vmid */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, ib->vm_id);
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
- radeon_ring_write(ring, 0xFFFFFFFF);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 10); /* poll interval */
-}
-
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
@@ -1129,64 +1049,63 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
static int cayman_cp_start(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
- r = radeon_ring_lock(rdev, ring, 7);
+ r = radeon_ring_lock(rdev, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(ring, 0x1);
- radeon_ring_write(ring, 0x0);
- radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
- radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(rdev, 0x1);
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
+ radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_unlock_commit(rdev);
cayman_cp_enable(rdev, true);
- r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
+ r = radeon_ring_lock(rdev, cayman_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* setup clear context state */
- radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
for (i = 0; i < cayman_default_size; i++)
- radeon_ring_write(ring, cayman_default_state[i]);
+ radeon_ring_write(rdev, cayman_default_state[i]);
- radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
/* set clear context state */
- radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(rdev, 0);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(ring, 0xc0026f00);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(rdev, 0xc0026f00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
/* Clear consts */
- radeon_ring_write(ring, 0xc0036f00);
- radeon_ring_write(ring, 0x00000bc4);
- radeon_ring_write(ring, 0xffffffff);
- radeon_ring_write(ring, 0xffffffff);
- radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(rdev, 0xc0036f00);
+ radeon_ring_write(rdev, 0x00000bc4);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(ring, 0xc0026900);
- radeon_ring_write(ring, 0x00000316);
- radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- radeon_ring_write(ring, 0x00000010); /* */
+ radeon_ring_write(rdev, 0xc0026900);
+ radeon_ring_write(rdev, 0x00000316);
+ radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(rdev, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev);
/* XXX init other rings */
@@ -1196,12 +1115,11 @@ static int cayman_cp_start(struct radeon_device *rdev)
static void cayman_cp_fini(struct radeon_device *rdev)
{
cayman_cp_enable(rdev, false);
- radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_fini(rdev);
}
int cayman_cp_resume(struct radeon_device *rdev)
{
- struct radeon_ring *ring;
u32 tmp;
u32 rb_bufsz;
int r;
@@ -1218,8 +1136,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
RREG32(GRBM_SOFT_RESET);
- WREG32(CP_SEM_WAIT_TIMER, 0x0);
- WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1228,8 +1145,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
- ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1238,8 +1154,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
- ring->wptr = 0;
- WREG32(CP_RB0_WPTR, ring->wptr);
+ rdev->cp.wptr = 0;
+ WREG32(CP_RB0_WPTR, rdev->cp.wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1256,14 +1172,13 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB0_CNTL, tmp);
- WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+ WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
- ring->rptr = RREG32(CP_RB0_RPTR);
+ rdev->cp.rptr = RREG32(CP_RB0_RPTR);
/* ring1 - compute only */
/* Set ring buffer size */
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
+ rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1272,8 +1187,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
- ring->wptr = 0;
- WREG32(CP_RB1_WPTR, ring->wptr);
+ rdev->cp1.wptr = 0;
+ WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1282,14 +1197,13 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB1_CNTL, tmp);
- WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+ WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
- ring->rptr = RREG32(CP_RB1_RPTR);
+ rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
/* ring2 - compute only */
/* Set ring buffer size */
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
+ rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1298,8 +1212,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
- ring->wptr = 0;
- WREG32(CP_RB2_WPTR, ring->wptr);
+ rdev->cp2.wptr = 0;
+ WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1308,28 +1222,28 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB2_CNTL, tmp);
- WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+ WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
- ring->rptr = RREG32(CP_RB2_RPTR);
+ rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
/* start the rings */
cayman_cp_start(rdev);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
- rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ rdev->cp.ready = true;
+ rdev->cp1.ready = true;
+ rdev->cp2.ready = true;
/* this only tests cp0 */
- r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = radeon_ring_test(rdev);
if (r) {
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ rdev->cp.ready = false;
+ rdev->cp1.ready = false;
+ rdev->cp2.ready = false;
return r;
}
return 0;
}
-bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+bool cayman_gpu_is_lockup(struct radeon_device *rdev)
{
u32 srbm_status;
u32 grbm_status;
@@ -1342,20 +1256,20 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, ring);
+ r100_gpu_lockup_update(lockup, &rdev->cp);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
+ r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
}
/* XXX deal with CP0,1,2 */
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
@@ -1375,15 +1289,6 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(0x14F8));
- dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(0x14D8));
- dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(0x14FC));
- dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(0x14DC));
-
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
@@ -1414,7 +1319,6 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
(void)RREG32(GRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
-
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
@@ -1434,12 +1338,8 @@ int cayman_asic_reset(struct radeon_device *rdev)
static int cayman_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- /* enable pcie gen2 link */
- evergreen_pcie_gen2_enable(rdev);
-
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
if (r) {
@@ -1453,10 +1353,6 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
evergreen_mc_program(rdev);
r = cayman_pcie_gart_enable(rdev);
if (r)
@@ -1465,8 +1361,8 @@ static int cayman_startup(struct radeon_device *rdev)
r = evergreen_blit_init(rdev);
if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
+ evergreen_blit_fini(rdev);
+ rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
@@ -1475,24 +1371,6 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
- r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
- r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1502,9 +1380,7 @@ static int cayman_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
- r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- CP_RB0_RPTR, CP_RB0_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
r = cayman_cp_load_microcode(rdev);
@@ -1514,21 +1390,6 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
- return r;
- }
-
- r = radeon_vm_manager_start(rdev);
- if (r)
- return r;
-
return 0;
}
@@ -1543,27 +1404,40 @@ int cayman_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- rdev->accel_working = true;
r = cayman_startup(rdev);
if (r) {
DRM_ERROR("cayman startup failed on resume\n");
- rdev->accel_working = false;
return r;
}
+
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ return r;
+ }
+
return r;
+
}
int cayman_suspend(struct radeon_device *rdev)
{
+ int r;
+
/* FIXME: we should wait for ring to be empty */
- radeon_ib_pool_suspend(rdev);
- radeon_vm_manager_suspend(rdev);
- r600_blit_suspend(rdev);
cayman_cp_enable(rdev, false);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->cp.ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
+
+ /* unpin shaders bo */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+
return 0;
}
@@ -1575,7 +1449,6 @@ int cayman_suspend(struct radeon_device *rdev)
*/
int cayman_init(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* This doesn't do much */
@@ -1628,8 +1501,8 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 1024 * 1024);
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1638,29 +1511,29 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
- r = radeon_vm_manager_init(rdev);
- if (r) {
- dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
- }
-
r = cayman_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
- radeon_vm_manager_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ }
/* Don't start up if the MC ucode is missing.
* The default clocks and voltages before the MC ucode
@@ -1676,17 +1549,14 @@ int cayman_init(struct radeon_device *rdev)
void cayman_fini(struct radeon_device *rdev)
{
- r600_blit_fini(rdev);
+ evergreen_blit_fini(rdev);
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_vm_manager_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
- r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
@@ -1694,84 +1564,3 @@ void cayman_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
-/*
- * vm
- */
-int cayman_vm_init(struct radeon_device *rdev)
-{
- /* number of VMs */
- rdev->vm_manager.nvm = 8;
- /* base offset of vram pages */
- rdev->vm_manager.vram_base_offset = 0;
- return 0;
-}
-
-void cayman_vm_fini(struct radeon_device *rdev)
-{
-}
-
-int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
-{
- WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
- /* flush hdp cache */
- WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
- /* bits 0-7 are the VM contexts0-7 */
- WREG32(VM_INVALIDATE_REQUEST, 1 << id);
- return 0;
-}
-
-void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
- /* flush hdp cache */
- WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
- /* bits 0-7 are the VM contexts0-7 */
- WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
-}
-
-void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- if (vm->id == -1)
- return;
-
- /* flush hdp cache */
- WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
- /* bits 0-7 are the VM contexts0-7 */
- WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
-}
-
-#define R600_PTE_VALID (1 << 0)
-#define R600_PTE_SYSTEM (1 << 1)
-#define R600_PTE_SNOOPED (1 << 2)
-#define R600_PTE_READABLE (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
- struct radeon_vm *vm,
- uint32_t flags)
-{
- uint32_t r600_flags = 0;
-
- r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
- r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
- r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- r600_flags |= R600_PTE_SYSTEM;
- r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
- }
- return r600_flags;
-}
-
-void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
- unsigned pfn, uint64_t addr, uint32_t flags)
-{
- void __iomem *ptr = (void *)vm->pt;
-
- addr = addr & 0xFFFFFFFFFFFFF000ULL;
- addr |= flags;
- writeq(addr, ptr + (pfn * 8));
-}
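One behavioural change above is in ni_mc_load_microcode(): the bounded usec_timeout poll is replaced by an open-ended busy-wait on MEM_FALL_OUT_CMD. For reference, a compile-alone sketch of the bounded pattern the removed loop implemented (RREG32/udelay are stubs, and the register offset and bit are placeholders, not the real MC_IO_PAD_CNTL_D0 values):

#include <stdint.h>
#include <errno.h>

/* Stubs so the sketch builds on its own. */
static uint32_t fake_status;
static uint32_t RREG32(uint32_t reg) { (void)reg; return fake_status; }
static void udelay(unsigned long us) { (void)us; }

#define MC_IO_PAD_CNTL_D0  0x0000    /* placeholder offset */
#define MEM_FALL_OUT_CMD   (1 << 8)  /* placeholder bit */

/* Poll a status bit for at most usec_timeout microseconds instead of
 * spinning forever; the caller decides how to handle the timeout. */
static int wait_mc_training(unsigned usec_timeout)
{
	unsigned i;

	for (i = 0; i < usec_timeout; i++) {
		if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

int main(void)
{
	fake_status = MEM_FALL_OUT_CMD;   /* pretend training finished */
	return wait_mc_training(100000) ? 1 : 0;
}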
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 9a7f3b6..4672869 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -42,9 +42,6 @@
#define CAYMAN_MAX_TCC_MASK 0xFF
#define DMIF_ADDR_CONFIG 0xBD4
-#define SRBM_GFX_CNTL 0x0E44
-#define RINGID(x) (((x) & 0x3) << 0)
-#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
@@ -222,8 +219,6 @@
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
#define CP_SEM_WAIT_TIMER 0x85BC
-#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
-#define CP_COHER_CNTL2 0x85E8
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
@@ -399,12 +394,6 @@
#define CP_RB0_RPTR_ADDR 0xC10C
#define CP_RB0_RPTR_ADDR_HI 0xC110
#define CP_RB0_WPTR 0xC114
-
-#define CP_INT_CNTL 0xC124
-# define CNTX_BUSY_INT_ENABLE (1 << 19)
-# define CNTX_EMPTY_INT_ENABLE (1 << 20)
-# define TIME_STAMP_INT_ENABLE (1 << 26)
-
#define CP_RB1_BASE 0xC180
#define CP_RB1_CNTL 0xC184
#define CP_RB1_RPTR_ADDR 0xC188
@@ -422,10 +411,6 @@
#define CP_ME_RAM_DATA 0xC160
#define CP_DEBUG 0xC1FC
-#define VGT_EVENT_INITIATOR 0x28a90
-# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
-# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
-
/*
* PM4
*/
@@ -460,7 +445,6 @@
#define PACKET3_DISPATCH_DIRECT 0x15
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_INDIRECT_BUFFER_END 0x17
-#define PACKET3_MODE_CONTROL 0x18
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
#define PACKET3_COND_EXEC 0x22
@@ -510,27 +494,7 @@
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
-#define EVENT_TYPE(x) ((x) << 0)
-#define EVENT_INDEX(x) ((x) << 8)
- /* 0 - any non-TS event
- * 1 - ZPASS_DONE
- * 2 - SAMPLE_PIPELINESTAT
- * 3 - SAMPLE_STREAMOUTSTAT*
- * 4 - *S_PARTIAL_FLUSH
- * 5 - TS events
- */
#define PACKET3_EVENT_WRITE_EOP 0x47
-#define DATA_SEL(x) ((x) << 29)
- /* 0 - discard
- * 1 - send low 32bit data
- * 2 - send 64bit data
- * 3 - send 64bit counter value
- */
-#define INT_SEL(x) ((x) << 24)
- /* 0 - none
- * 1 - interrupt only (DATA_SEL = 0)
- * 2 - interrupt when data write is confirmed
- */
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_PREAMBLE_CNTL 0x4A
# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
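The EVENT_TYPE/EVENT_INDEX/DATA_SEL/INT_SEL encoders deleted above are what cayman_fence_ring_emit() (removed from ni.c earlier in this patch) used to assemble its EVENT_WRITE_EOP packet. A minimal user-space sketch of that dword encoding, with an example fence address:

#include <stdint.h>
#include <stdio.h>

/* Encoders and event type copied from the nid.h lines removed above. */
#define EVENT_TYPE(x)                 ((x) << 0)
#define EVENT_INDEX(x)                ((x) << 8)
#define DATA_SEL(x)                   ((x) << 29)
#define INT_SEL(x)                    ((x) << 24)
#define CACHE_FLUSH_AND_INV_EVENT_TS  (0x14 << 0)

int main(void)
{
	uint64_t addr = 0x0000000123456000ULL;   /* example fence GPU address */

	/* TS event (EVENT_INDEX 5), send low 32-bit data (DATA_SEL 1),
	 * interrupt when the write is confirmed (INT_SEL 2) -- the same
	 * choices cayman_fence_ring_emit() made. */
	uint32_t event   = EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5);
	uint32_t addr_lo = (uint32_t)(addr & 0xffffffff);
	uint32_t addr_hi = (uint32_t)((addr >> 32) & 0xff) | DATA_SEL(1) | INT_SEL(2);

	printf("EOP dwords: 0x%08x 0x%08x 0x%08x\n", event, addr_lo, addr_hi);
	return 0;
}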
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ae356cf..d94f440 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -41,7 +41,6 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
-#include <linux/module.h>
#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"
@@ -65,150 +64,10 @@ MODULE_FIRMWARE(FIRMWARE_R520);
#include "r100_track.h"
-void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
-{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
- int i;
-
- if (radeon_crtc->crtc_id == 0) {
- if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
- break;
- udelay(1);
- }
- }
- } else {
- if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
- break;
- udelay(1);
- }
- }
- }
-}
-
/* This file gathers functions specific to:
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
-int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx,
- unsigned reg)
-{
- int r;
- u32 tile_flags = 0;
- u32 tmp;
- struct radeon_cs_reloc *reloc;
- u32 value;
-
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
-
- value = radeon_get_ib_value(p, idx);
- tmp = value & 0x003fffff;
- tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
-
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= RADEON_DST_TILE_MACRO;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- if (reg == RADEON_SRC_PITCH_OFFSET) {
- DRM_ERROR("Cannot src blit from microtiled surface\n");
- r100_cs_dump_packet(p, pkt);
- return -EINVAL;
- }
- tile_flags |= RADEON_DST_TILE_MICRO;
- }
-
- tmp |= tile_flags;
- p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
- } else
- p->ib->ptr[idx] = (value & 0xffc00000) | tmp;
- return 0;
-}
-
-int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- int idx)
-{
- unsigned c, i;
- struct radeon_cs_reloc *reloc;
- struct r100_cs_track *track;
- int r = 0;
- volatile uint32_t *ib;
- u32 idx_value;
-
- ib = p->ib->ptr;
- track = (struct r100_cs_track *)p->track;
- c = radeon_get_ib_value(p, idx++) & 0x1F;
- if (c > 16) {
- DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return -EINVAL;
- }
- track->num_arrays = c;
- for (i = 0; i < (c - 1); i+=2, idx+=3) {
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- idx_value = radeon_get_ib_value(p, idx);
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
-
- track->arrays[i + 0].esize = idx_value >> 8;
- track->arrays[i + 0].robj = reloc->robj;
- track->arrays[i + 0].esize &= 0x7F;
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
- track->arrays[i + 1].robj = reloc->robj;
- track->arrays[i + 1].esize = idx_value >> 24;
- track->arrays[i + 1].esize &= 0x7F;
- }
- if (c & 1) {
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- idx_value = radeon_get_ib_value(p, idx);
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
- track->arrays[i + 0].robj = reloc->robj;
- track->arrays[i + 0].esize = idx_value >> 8;
- track->arrays[i + 0].esize &= 0x7F;
- }
- return r;
-}
-
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
{
/* enable the pflip int */
@@ -450,7 +309,7 @@ void r100_pm_misc(struct radeon_device *rdev)
/* set pcie lanes */
if ((rdev->flags & RADEON_IS_PCIE) &&
!(rdev->flags & RADEON_IS_IGP) &&
- rdev->asic->pm.set_pcie_lanes &&
+ rdev->asic->set_pcie_lanes &&
(ps->pcie_lanes !=
rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
radeon_set_pcie_lanes(rdev,
@@ -621,7 +480,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.ptr) {
+ if (rdev->gart.table.ram.ptr) {
WARN(1, "R100 PCI GART already initialized\n");
return 0;
}
@@ -630,8 +489,8 @@ int r100_pci_gart_init(struct radeon_device *rdev)
if (r)
return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
- rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
- rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+ rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart_set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev);
}
@@ -660,9 +519,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
WREG32(RADEON_AIC_CNTL, tmp);
r100_pci_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(rdev->mc.gtt_size >> 20),
- (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -680,12 +536,10 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
- u32 *gtt = rdev->gart.ptr;
-
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
}
- gtt[i] = cpu_to_le32(lower_32_bits(addr));
+ rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
return 0;
}
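The hunk above reverts r100_pci_gart_set_page() to poking rdev->gart.table.ram.ptr directly: the PCI GART table is just an array of little-endian 32-bit entries, one per GPU page, each holding the low 32 bits of the backing bus address. A minimal standalone model of that bounds-checked entry write (names are illustrative, not the driver's; the driver checks i > num_gpu_pages, a tighter >= is used here):

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_GPU_PAGES 8

	/* Model of a PCI GART table: one 32-bit entry per GPU page. */
	static uint32_t gart_table[NUM_GPU_PAGES];

	static int gart_set_page(int i, uint64_t addr)
	{
		if (i < 0 || i >= NUM_GPU_PAGES)
			return -1;	/* -EINVAL in the driver */
		/* Only the low 32 bits of the bus address fit in an entry;
		 * the driver additionally stores them little-endian. */
		gart_table[i] = (uint32_t)(addr & 0xffffffffu);
		return 0;
	}

	int main(void)
	{
		gart_set_page(3, 0x12345000ULL);
		printf("entry 3 = 0x%08x\n", gart_table[3]);
		return 0;
	}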
@@ -705,7 +559,7 @@ int r100_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (rdev->irq.sw_int) {
tmp |= RADEON_SW_INT_ENABLE;
}
if (rdev->irq.gui_idle) {
@@ -740,7 +594,7 @@ void r100_irq_disable(struct radeon_device *rdev)
WREG32(R_000044_GEN_INT_STATUS, tmp);
}
-static uint32_t r100_irq_ack(struct radeon_device *rdev)
+static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
uint32_t irq_mask = RADEON_SW_INT_TEST |
@@ -777,7 +631,7 @@ int r100_irq_process(struct radeon_device *rdev)
while (status) {
/* SW interrupt */
if (status & RADEON_SW_INT_TEST) {
- radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_process(rdev);
}
/* gui idle interrupt */
if (status & RADEON_GUI_IDLE_STAT) {
@@ -847,36 +701,25 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
-
	/* We have to make sure that caches are flushed before
	 * the CPU might read something from VRAM. */
- radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
- radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
+ radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
+ radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */
- radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
- radeon_ring_write(ring, fence->seq);
- radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
- radeon_ring_write(ring, RADEON_SW_INT_FIRE);
-}
-
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- /* Unused on older asics, since we don't have semaphores or multiple rings */
- BUG();
+ radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
+ radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
+ radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
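Every radeon_ring_write() pair in the fence emit above is a CP type-0 packet: a 32-bit header selecting a starting register, followed by the value(s) to write. A rough standalone model of the header layout, with bit positions as commonly documented for the R100-era CP (treat the exact encoding as an assumption here, not a quote of the driver's macro):

	#include <stdint.h>
	#include <stdio.h>

	/* Type-0 CP packet header: [31:30] = 0 (packet type),
	 * [29:16] = dword count minus one, [15:0] = register offset / 4. */
	static uint32_t packet0(uint32_t reg, uint32_t n)
	{
		return ((n & 0x3fff) << 16) | ((reg >> 2) & 0xffff);
	}

	int main(void)
	{
		/* One dword written to register 0x1720 (n == 0: one value). */
		printf("header = 0x%08x\n", packet0(0x1720, 0));
		return 0;
	}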
int r100_copy_blit(struct radeon_device *rdev,
@@ -885,7 +728,6 @@ int r100_copy_blit(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t cur_pages;
uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
uint32_t pitch;
@@ -903,7 +745,7 @@ int r100_copy_blit(struct radeon_device *rdev,
/* Ask for enough room for blit + flush + fence */
ndw = 64 + (10 * num_loops);
- r = radeon_ring_lock(rdev, ring, ndw);
+ r = radeon_ring_lock(rdev, ndw);
if (r) {
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
return -EINVAL;
@@ -917,8 +759,8 @@ int r100_copy_blit(struct radeon_device *rdev,
		/* pages are in the Y direction (height);
		   page width is in the X direction (width) */
- radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
+ radeon_ring_write(rdev,
RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_SRC_CLIPPING |
@@ -930,26 +772,26 @@ int r100_copy_blit(struct radeon_device *rdev,
RADEON_DP_SRC_SOURCE_MEMORY |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS);
- radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
- radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
- radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
- radeon_ring_write(ring, num_gpu_pages);
- radeon_ring_write(ring, num_gpu_pages);
- radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
- }
- radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
- radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
+ radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
+ radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
+ radeon_ring_write(rdev, num_gpu_pages);
+ radeon_ring_write(rdev, num_gpu_pages);
+ radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
+ }
+ radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev);
return r;
}
@@ -968,21 +810,21 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+void r100_ring_start(struct radeon_device *rdev)
{
int r;
- r = radeon_ring_lock(rdev, ring, 2);
+ r = radeon_ring_lock(rdev, 2);
if (r) {
return;
}
- radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(rdev,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev);
}
@@ -1083,7 +925,6 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
unsigned rb_bufsz;
unsigned rb_blksz;
unsigned max_fetch;
@@ -1109,9 +950,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev);
- r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
- RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
- 0, 0x7fffff, RADEON_CP_PACKET2);
+ r = radeon_ring_init(rdev, ring_size);
if (r) {
return r;
}
@@ -1120,7 +959,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
rb_blksz = 9;
/* cp will read 128bytes at a time (4 dwords) */
max_fetch = 1;
- ring->align_mask = 16 - 1;
+ rdev->cp.align_mask = 16 - 1;
/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
pre_write_timer = 64;
/* Force CP_RB_WPTR write if written more than one time before the
@@ -1150,13 +989,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
/* Set ring address */
- DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
- WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
+ DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
+ WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
/* Force read & write ptr to 0 */
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
- ring->wptr = 0;
- WREG32(RADEON_CP_RB_WPTR, ring->wptr);
+ rdev->cp.wptr = 0;
+ WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
/* set the wb address whether it's enabled or not */
WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1172,7 +1011,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
- ring->rptr = RREG32(RADEON_CP_RB_RPTR);
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1180,13 +1019,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
- radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+ radeon_ring_start(rdev);
+ r = radeon_ring_test(rdev);
if (r) {
DRM_ERROR("radeon: cp isn't working (%d).\n", r);
return r;
}
- ring->ready = true;
+ rdev->cp.ready = true;
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -1198,7 +1037,7 @@ void r100_cp_fini(struct radeon_device *rdev)
}
/* Disable ring */
r100_cp_disable(rdev);
- radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_fini(rdev);
DRM_INFO("radeon: cp finalized\n");
}
@@ -1206,7 +1045,7 @@ void r100_cp_disable(struct radeon_device *rdev)
{
/* Disable ring */
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(R_000770_SCRATCH_UMSK, 0);
@@ -1216,6 +1055,13 @@ void r100_cp_disable(struct radeon_device *rdev)
}
}
+void r100_cp_commit(struct radeon_device *rdev)
+{
+ WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+ (void)RREG32(RADEON_CP_RB_WPTR);
+}
+
+
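The restored r100_cp_commit() above writes the new write pointer and then immediately reads the same register back. The read-back is the usual trick for flushing a posted MMIO write so the CP observes the updated pointer promptly; a standalone model of the pattern (a volatile stands in for WREG32/RREG32):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the WPTR register; in the driver this is MMIO. */
	static volatile uint32_t cp_rb_wptr;

	static void cp_commit(uint32_t wptr)
	{
		cp_rb_wptr = wptr;	/* WREG32(RADEON_CP_RB_WPTR, ...) */
		(void)cp_rb_wptr;	/* read back to flush the posted write */
	}

	int main(void)
	{
		cp_commit(42);
		printf("wptr = %u\n", (unsigned)cp_rb_wptr);
		return 0;
	}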
/*
* CS functions
*/
@@ -1589,17 +1435,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= RADEON_TXO_MACRO_TILE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= RADEON_TXO_MICRO_TILE_X2;
-
- tmp = idx_value & ~(0x7 << 2);
- tmp |= tile_flags;
- ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
- } else
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -1670,17 +1506,15 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= RADEON_COLOR_TILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
-
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
- } else
- ib[idx] = idx_value;
+
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_COLOR_TILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
track->cb_dirty = true;
@@ -2155,9 +1989,9 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
- lockup->last_cp_rptr = ring->rptr;
+ lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
}
@@ -2182,20 +2016,20 @@ void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *
 * false positive when the CP has simply been given nothing to do.
*
**/
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
unsigned long cjiffies, elapsed;
cjiffies = jiffies;
if (!time_after(cjiffies, lockup->last_jiffies)) {
/* likely a wrap around */
- lockup->last_cp_rptr = ring->rptr;
+ lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
return false;
}
- if (ring->rptr != lockup->last_cp_rptr) {
+ if (cp->rptr != lockup->last_cp_rptr) {
		/* CP is still working, no lockup */
- lockup->last_cp_rptr = ring->rptr;
+ lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
return false;
}
@@ -2208,26 +2042,26 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
return false;
}
-bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+bool r100_gpu_is_lockup(struct radeon_device *rdev)
{
u32 rbbm_status;
int r;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
+ r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
+ r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
}
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
}
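The lockup check above is a simple progress heuristic: the GPU is only declared hung if the CP read pointer has not moved for longer than a timeout, and the real code first forces CP activity by emitting two PACKET2 NOPs (0x80000000) so an idle ring is not mistaken for a hang. A self-contained model of the core test, with milliseconds standing in for jiffies:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct lockup_state {
		uint32_t last_rptr;
		uint64_t last_ms;
	};

	/* Hung only if rptr is unchanged for longer than the timeout. */
	static bool cp_is_lockup(struct lockup_state *s, uint32_t rptr,
				 uint64_t now_ms, uint64_t timeout_ms)
	{
		if (rptr != s->last_rptr) {	/* CP made progress */
			s->last_rptr = rptr;
			s->last_ms = now_ms;
			return false;
		}
		return (now_ms - s->last_ms) > timeout_ms;
	}

	int main(void)
	{
		struct lockup_state s = { 0, 0 };
		printf("%d\n", cp_is_lockup(&s, 0, 5000, 10000));  /* 0 */
		printf("%d\n", cp_is_lockup(&s, 0, 20000, 10000)); /* 1 */
		return 0;
	}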
void r100_bm_disable(struct radeon_device *rdev)
@@ -2244,7 +2078,8 @@ void r100_bm_disable(struct radeon_device *rdev)
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
tmp = RREG32(RADEON_BUS_CNTL);
mdelay(1);
- pci_clear_master(rdev->pdev);
+ pci_read_config_word(rdev->pdev, 0x4, &tmp16);
+ pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
mdelay(1);
}
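The r100_bm_disable() hunk replaces pci_clear_master() with an open-coded read-modify-write of the PCI command word at config offset 0x4, masking with 0xFFFB, which clears bit 2, the standard Bus Master Enable bit. A standalone model of that masking (standard PCI config-space layout; the config accessors themselves are elided):

	#include <stdint.h>
	#include <stdio.h>

	#define PCI_COMMAND		0x04	/* config-space offset */
	#define PCI_COMMAND_MASTER	0x0004	/* bus-master enable, bit 2 */

	/* Open-coded pci_clear_master(): clear the bus-master bit. */
	static uint16_t clear_master(uint16_t command)
	{
		return command & (uint16_t)~PCI_COMMAND_MASTER;	/* & 0xFFFB */
	}

	int main(void)
	{
		uint16_t cmd = 0x0007;	/* IO + MEM + MASTER enabled */
		printf("0x%04x -> 0x%04x\n", cmd, clear_master(cmd));
		return 0;
	}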
@@ -2635,22 +2470,21 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t rdp, wdp;
unsigned count, i, j;
- radeon_ring_free_size(rdev, ring);
+ radeon_ring_free_size(rdev);
rdp = RREG32(RADEON_CP_RB_RPTR);
wdp = RREG32(RADEON_CP_RB_WPTR);
- count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
+ count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
- seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+ seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
for (j = 0; j <= count; j++) {
- i = (rdp + j) & ring->ptr_mask;
- seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ i = (rdp + j) & rdev->cp.ptr_mask;
+ seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
}
return 0;
}
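The debugfs dump above does its pointer arithmetic modulo the ring size via ptr_mask, which only works because the ring size is a power of two. A standalone model of the count computation as written in the hunk, count = (rdp + ring_size - wdp) & ptr_mask:

	#include <stdint.h>
	#include <stdio.h>

	#define RING_SIZE 1024			/* dwords, power of two */
	#define PTR_MASK  (RING_SIZE - 1)

	/* Pointer difference taken modulo the ring size via the mask. */
	static uint32_t ring_count(uint32_t rdp, uint32_t wdp)
	{
		return (rdp + RING_SIZE - wdp) & PTR_MASK;
	}

	int main(void)
	{
		printf("%u\n", ring_count(10, 4));	/* 6 */
		printf("%u\n", ring_count(2, 1020));	/* wraps: 6 */
		return 0;
	}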
@@ -3318,7 +3152,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
}
}
-static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
+static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
DRM_ERROR("pitch %d\n", t->pitch);
DRM_ERROR("use_pitch %d\n", t->use_pitch);
@@ -3692,7 +3526,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
}
}
-int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+int r100_ring_test(struct radeon_device *rdev)
{
uint32_t scratch;
uint32_t tmp = 0;
@@ -3705,15 +3539,15 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ring_lock(rdev, ring, 2);
+ r = radeon_ring_lock(rdev, 2);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
radeon_scratch_free(rdev, scratch);
return r;
}
- radeon_ring_write(ring, PACKET0(scratch, 0));
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, PACKET0(scratch, 0));
+ radeon_ring_write(rdev, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
@@ -3734,14 +3568,12 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-
- radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
- radeon_ring_write(ring, ib->gpu_addr);
- radeon_ring_write(ring, ib->length_dw);
+ radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
+ radeon_ring_write(rdev, ib->gpu_addr);
+ radeon_ring_write(rdev, ib->length_dw);
}
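r100_ring_ib_execute() above dispatches an indirect buffer with a single type-0 packet writing two consecutive registers: the IB base address, then its length in dwords. A standalone model of the three dwords it pushes onto the ring (packet0() as in the earlier sketch; the register offset below is illustrative, not RADEON_CP_IB_BASE's real value):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t packet0(uint32_t reg, uint32_t n)
	{
		return ((n & 0x3fff) << 16) | ((reg >> 2) & 0xffff);
	}

	int main(void)
	{
		uint32_t ring[3];
		uint32_t ib_gpu_addr = 0x00400000, ib_length_dw = 64;

		/* n == 1: two values follow (base, then length). */
		ring[0] = packet0(0x0130 /* illustrative offset */, 1);
		ring[1] = ib_gpu_addr;
		ring[2] = ib_length_dw;
		printf("0x%08x 0x%08x 0x%08x\n", ring[0], ring[1], ring[2]);
		return 0;
	}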
-int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+int r100_ib_test(struct radeon_device *rdev)
{
struct radeon_ib *ib;
uint32_t scratch;
@@ -3755,7 +3587,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
+ r = radeon_ib_get(rdev, &ib);
if (r) {
return r;
}
@@ -3799,16 +3631,34 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
void r100_ib_fini(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
radeon_ib_pool_fini(rdev);
}
+int r100_ib_init(struct radeon_device *rdev)
+{
+ int r;
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
+ r100_ib_fini(rdev);
+ return r;
+ }
+ r = r100_ib_test(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ r100_ib_fini(rdev);
+ return r;
+ }
+ return 0;
+}
+
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Shut down the CP; we shouldn't need to do that, but better safe than
* sorry
*/
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->cp.ready = false;
WREG32(R_000740_CP_CSQ_CNTL, 0);
/* Save few CRTC registers */
@@ -3946,12 +3796,6 @@ static int r100_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -3961,25 +3805,16 @@ static int r100_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
-
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
-
return 0;
}
int r100_resume(struct radeon_device *rdev)
{
- int r;
-
	/* Make sure the GART is not working */
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_disable(rdev);
@@ -3997,18 +3832,11 @@ int r100_resume(struct radeon_device *rdev)
r100_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
-
- rdev->accel_working = true;
- r = r100_startup(rdev);
- if (r) {
- rdev->accel_working = false;
- }
- return r;
+ return r100_startup(rdev);
}
int r100_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -4127,14 +3955,7 @@ int r100_init(struct radeon_device *rdev)
return r;
}
r100_set_safe_registers(rdev);
-
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r100_startup(rdev);
if (r) {
		/* Something went wrong with the accel init, stop accel */
@@ -4149,43 +3970,3 @@ int r100_init(struct radeon_device *rdev)
}
return 0;
}
-
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
-{
- if (reg < rdev->rmmio_size)
- return readl(((void __iomem *)rdev->rmmio) + reg);
- else {
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
- }
-}
-
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
- if (reg < rdev->rmmio_size)
- writel(v, ((void __iomem *)rdev->rmmio) + reg);
- else {
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
- }
-}
-
-u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
-{
- if (reg < rdev->rio_mem_size)
- return ioread32(rdev->rio_mem + reg);
- else {
- iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
- return ioread32(rdev->rio_mem + RADEON_MM_DATA);
- }
-}
-
-void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
- if (reg < rdev->rio_mem_size)
- iowrite32(v, rdev->rio_mem + reg);
- else {
- iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
- iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
- }
-}
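The removed r100_mm_rreg()/r100_mm_wreg() pair above implements the classic index/data indirection: registers inside the mapped MMIO window are accessed directly, while anything beyond it is reached by writing the register offset to MM_INDEX and then transferring through MM_DATA. A self-contained model of the read path (a plain array stands in for the hardware register file):

	#include <stdint.h>
	#include <stdio.h>

	#define MMIO_WINDOW 0x100	/* directly mapped range (model) */

	static uint32_t regs[1 << 14];	/* fake register file, dword-indexed */
	static uint32_t mm_index;

	static uint32_t mm_rreg(uint32_t reg)
	{
		if (reg < MMIO_WINDOW)
			return regs[reg / 4];	/* direct read in the window */
		mm_index = reg;			/* writel(reg, ... + MM_INDEX) */
		return regs[mm_index / 4];	/* readl(... + MM_DATA) */
	}

	int main(void)
	{
		regs[0x2000 / 4] = 0xCAFEDEAD;
		printf("0x%08x\n", mm_rreg(0x2000));
		return 0;
	}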
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 6a603b3..686f9dc 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -92,10 +92,106 @@ int r200_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg);
-int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx,
- unsigned reg);
-int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- int idx);
+
+
+static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx,
+ unsigned reg)
+{
+ int r;
+ u32 tile_flags = 0;
+ u32 tmp;
+ struct radeon_cs_reloc *reloc;
+ u32 value;
+
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ value = radeon_get_ib_value(p, idx);
+ tmp = value & 0x003fffff;
+ tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_DST_TILE_MACRO;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ if (reg == RADEON_SRC_PITCH_OFFSET) {
+ DRM_ERROR("Cannot src blit from microtiled surface\n");
+ r100_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
+ tile_flags |= RADEON_DST_TILE_MICRO;
+ }
+
+ tmp |= tile_flags;
+ p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
+ return 0;
+}
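The header version of r100_reloc_pitch_offset() above drops the KEEP_TILING_FLAGS branch and always applies the tile flags. The packing it performs, with the field split taken from the masks in the hunk (0x003fffff / 0x3fc00000): bits [21:0] hold the buffer offset in 1 KiB units, bits [29:22] the pitch field, and tile flags are OR'ed on top. A standalone model of the relocation:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t reloc_pitch_offset(uint32_t value, uint32_t gpu_offset,
					   uint32_t tile_flags)
	{
		uint32_t tmp = value & 0x003fffff;	/* offset field from IB */
		tmp += gpu_offset >> 10;		/* relocate: add off/1024 */
		tmp |= tile_flags;
		return (value & 0x3fc00000) | tmp;	/* keep pitch bits */
	}

	int main(void)
	{
		printf("0x%08x\n",
		       reloc_pitch_offset(0x00c00010, 0x00200000, 0));
		return 0;
	}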
+
+static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ int idx)
+{
+ unsigned c, i;
+ struct radeon_cs_reloc *reloc;
+ struct r100_cs_track *track;
+ int r = 0;
+ volatile uint32_t *ib;
+ u32 idx_value;
+
+ ib = p->ib->ptr;
+ track = (struct r100_cs_track *)p->track;
+ c = radeon_get_ib_value(p, idx++) & 0x1F;
+ if (c > 16) {
+ DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
+ track->num_arrays = c;
+ for (i = 0; i < (c - 1); i+=2, idx+=3) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ idx_value = radeon_get_ib_value(p, idx);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+
+ track->arrays[i + 0].esize = idx_value >> 8;
+ track->arrays[i + 0].robj = reloc->robj;
+ track->arrays[i + 0].esize &= 0x7F;
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
+ track->arrays[i + 1].robj = reloc->robj;
+ track->arrays[i + 1].esize = idx_value >> 24;
+ track->arrays[i + 1].esize &= 0x7F;
+ }
+ if (c & 1) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ idx_value = radeon_get_ib_value(p, idx);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+ track->arrays[i + 0].robj = reloc->robj;
+ track->arrays[i + 0].esize = idx_value >> 8;
+ track->arrays[i + 0].esize &= 0x7F;
+ }
+ return r;
+}
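The payload walked by r100_packet3_load_vbpntr() above has a fixed shape: the first dword carries the array count in its low 5 bits, then arrays come in pairs of three dwords each (one dword holding two 7-bit element sizes at bits 8 and 24, followed by two address dwords), with an odd trailing array taking a final dword pair. A self-contained parser model over a fabricated payload:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t payload[] = { 3, (12u << 8) | (16u << 24),
				       0x1000, 0x2000, (8u << 8), 0x3000 };
		unsigned idx = 0, c = payload[idx++] & 0x1F, i;

		for (i = 0; i + 1 < c; i += 2, idx += 3) {
			printf("array %u: esize %u addr 0x%x\n",
			       i, (payload[idx] >> 8) & 0x7F, payload[idx + 1]);
			printf("array %u: esize %u addr 0x%x\n",
			       i + 1, (payload[idx] >> 24) & 0x7F,
			       payload[idx + 2]);
		}
		if (c & 1)	/* odd count: one last array */
			printf("array %u: esize %u addr 0x%x\n",
			       i, (payload[idx] >> 8) & 0x7F, payload[idx + 1]);
		return 0;
	}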
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a59cc47..a1f3ba0 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -87,7 +87,6 @@ int r200_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t size;
uint32_t cur_size;
int i, num_loops;
@@ -96,33 +95,33 @@ int r200_copy_dma(struct radeon_device *rdev,
/* radeon pitch is /64 */
size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
- r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
+ r = radeon_ring_lock(rdev, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
- radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(ring, (1 << 16));
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, (1 << 16));
for (i = 0; i < num_loops; i++) {
cur_size = size;
if (cur_size > 0x1FFFFF) {
cur_size = 0x1FFFFF;
}
size -= cur_size;
- radeon_ring_write(ring, PACKET0(0x720, 2));
- radeon_ring_write(ring, src_offset);
- radeon_ring_write(ring, dst_offset);
- radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
+ radeon_ring_write(rdev, PACKET0(0x720, 2));
+ radeon_ring_write(rdev, src_offset);
+ radeon_ring_write(rdev, dst_offset);
+ radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
src_offset += cur_size;
dst_offset += cur_size;
}
- radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev);
return r;
}
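The r200_copy_dma() loop above splits a copy larger than the per-packet limit into chunks of at most 0x1FFFFF bytes, advancing both offsets as it goes; num_loops is computed up front so the ring lock can reserve enough dwords. A standalone model of that chunking:

	#include <stdint.h>
	#include <stdio.h>

	#define DMA_MAX_CHUNK 0x1FFFFF	/* per-packet byte limit from the diff */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		uint32_t size = 0x500000, src = 0, dst = 0x10000000;
		int i, num_loops = DIV_ROUND_UP(size, DMA_MAX_CHUNK);

		for (i = 0; i < num_loops; i++) {
			uint32_t cur = size > DMA_MAX_CHUNK ? DMA_MAX_CHUNK
							    : size;
			printf("chunk %d: src 0x%08x dst 0x%08x len 0x%x\n",
			       i, src, dst, cur);
			size -= cur;
			src += cur;
			dst += cur;
		}
		return 0;
	}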
@@ -215,17 +214,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R200_TXO_MACRO_TILE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R200_TXO_MICRO_TILE;
-
- tmp = idx_value & ~(0x7 << 2);
- tmp |= tile_flags;
- ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
- } else
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -287,17 +276,14 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r;
}
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= RADEON_COLOR_TILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_COLOR_TILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
- } else
- ib[idx] = idx_value;
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
track->cb_dirty = true;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index fa14383..55a7f19 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -74,7 +74,7 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
- void __iomem *ptr = rdev->gart.ptr;
+ void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
@@ -93,7 +93,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.robj) {
+ if (rdev->gart.table.vram.robj) {
WARN(1, "RV370 PCIE GART already initialized\n");
return 0;
}
@@ -105,8 +105,8 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
if (r)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
- rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+ rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev);
}
@@ -116,7 +116,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
uint32_t tmp;
int r;
- if (rdev->gart.robj == NULL) {
+ if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -144,9 +144,8 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
rv370_pcie_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(rdev->mc.gtt_size >> 20),
- (unsigned long long)table_addr);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -154,6 +153,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
+ int r;
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
@@ -162,7 +162,14 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
- radeon_gart_table_vram_unpin(rdev);
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
}
void rv370_pcie_gart_fini(struct radeon_device *rdev)
@@ -175,38 +182,36 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
-
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
/* Write SC register so SC & US assert idle */
- radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
+ radeon_ring_write(rdev, 0);
/* Flush 3D cache */
- radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
- radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, R300_ZC_FLUSH);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_ZC_FLUSH);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE));
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */
- radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
- radeon_ring_write(ring, fence->seq);
- radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
- radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+ radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
+ radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
+ radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
-void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+void r300_ring_start(struct radeon_device *rdev)
{
unsigned gb_tile_config;
int r;
@@ -229,44 +234,44 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
break;
}
- r = radeon_ring_lock(rdev, ring, 64);
+ r = radeon_ring_lock(rdev, 64);
if (r) {
return;
}
- radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(rdev,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
- radeon_ring_write(ring, gb_tile_config);
- radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
+ radeon_ring_write(rdev, gb_tile_config);
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
- radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
- radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
- radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
- radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
+ radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
- radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
- radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
+ radeon_ring_write(rdev,
((6 << R300_MS_X0_SHIFT) |
(6 << R300_MS_Y0_SHIFT) |
(6 << R300_MS_X1_SHIFT) |
@@ -275,8 +280,8 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
(6 << R300_MS_Y2_SHIFT) |
(6 << R300_MSBD0_Y_SHIFT) |
(6 << R300_MSBD0_X_SHIFT)));
- radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
+ radeon_ring_write(rdev,
((6 << R300_MS_X3_SHIFT) |
(6 << R300_MS_Y3_SHIFT) |
(6 << R300_MS_X4_SHIFT) |
@@ -284,16 +289,16 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
(6 << R300_MS_X5_SHIFT) |
(6 << R300_MS_Y5_SHIFT) |
(6 << R300_MSBD1_SHIFT)));
- radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
- radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
- radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
+ radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+ radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
+ radeon_ring_write(rdev,
R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
- radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
+ radeon_ring_write(rdev,
R300_GEOMETRY_ROUND_NEAREST |
R300_COLOR_ROUND_NEAREST);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev);
}
void r300_errata(struct radeon_device *rdev)
@@ -377,26 +382,26 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
u32 rbbm_status;
int r;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
+ r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
}
- ring->rptr = RREG32(RADEON_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
int r300_asic_reset(struct radeon_device *rdev)
@@ -703,21 +708,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r;
}
- if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
- ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
- ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
- } else {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_TXO_MACRO_TILE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_TXO_MICRO_TILE;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
- tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
-
- tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
- tmp |= tile_flags;
- ib[idx] = tmp;
- }
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_TXO_MACRO_TILE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_TXO_MICRO_TILE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
+
+ tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -767,26 +767,24 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH1 */
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_COLOR_TILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_COLOR_MICROTILE_ENABLE;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
- tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_COLOR_TILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
- }
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = idx_value & 0x3FFE;
switch (((idx_value >> 21) & 0xF)) {
@@ -852,26 +850,25 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_DEPTHMACROTILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_DEPTHMICROTILE_TILED;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
- tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_DEPTHMICROTILE_TILED;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
- }
track->zb.pitch = idx_value & 0x3FFC;
track->zb_dirty = true;
break;
@@ -1398,12 +1395,6 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -1413,25 +1404,16 @@ static int r300_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
-
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
-
return 0;
}
int r300_resume(struct radeon_device *rdev)
{
- int r;
-
	/* Make sure the GART is not working */
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -1451,18 +1433,11 @@ int r300_resume(struct radeon_device *rdev)
r300_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
-
- rdev->accel_working = true;
- r = r300_startup(rdev);
- if (r) {
- rdev->accel_working = false;
- }
- return r;
+ return r300_startup(rdev);
}
int r300_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -1563,14 +1538,7 @@ int r300_init(struct radeon_device *rdev)
return r;
}
r300_set_reg_safe(rdev);
-
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r300_startup(rdev);
if (r) {
		/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 9aa02be..a12f373 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -791,7 +791,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
/**
* Emit the sequence to pacify R300.
*/
-static void r300_pacify(drm_radeon_private_t *dev_priv)
+static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
{
uint32_t cache_z, cache_3d, cache_2d;
RING_LOCALS;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index f3fcaac..417fab8 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -199,8 +199,6 @@ static void r420_clock_resume(struct radeon_device *rdev)
static void r420_cp_errata_init(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-
/* RV410 and R420 can lock up if CP DMA to host memory happens
* while the 2D engine is busy.
*
@@ -208,24 +206,22 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
* of the CP init, apparently.
*/
radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
- radeon_ring_lock(rdev, ring, 8);
- radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
- radeon_ring_write(ring, rdev->config.r300.resync_scratch);
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_lock(rdev, 8);
+ radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
+ radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
+ radeon_ring_write(rdev, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev);
}
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-
/* Catch the RESYNC we dispatched all the way back,
* at the very beginning of the CP init.
*/
- radeon_ring_lock(rdev, ring, 8);
- radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, R300_RB3D_DC_FINISH);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_lock(rdev, 8);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
+ radeon_ring_unlock_commit(rdev);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
@@ -258,12 +254,6 @@ static int r420_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -274,25 +264,16 @@ static int r420_startup(struct radeon_device *rdev)
return r;
}
r420_cp_errata_init(rdev);
-
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
-
return 0;
}
int r420_resume(struct radeon_device *rdev)
{
- int r;
-
	/* Make sure the GART is not working */
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -316,18 +297,11 @@ int r420_resume(struct radeon_device *rdev)
r420_clock_resume(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
-
- rdev->accel_working = true;
- r = r420_startup(rdev);
- if (r) {
- rdev->accel_working = false;
- }
- return r;
+ return r420_startup(rdev);
}
int r420_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -440,14 +414,7 @@ int r420_init(struct radeon_device *rdev)
return r;
}
r420_set_reg_safe(rdev);
-
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r420_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index ec576aa..fc43705 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -351,8 +351,6 @@
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
-#define AVIVO_D1CRTC_STATUS 0x609c
-# define AVIVO_D1CRTC_V_BLANK (1 << 0)
#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
@@ -575,7 +573,6 @@
#define AVIVO_TMDSA_CNTL 0x7880
# define AVIVO_TMDSA_CNTL_ENABLE (1 << 0)
-# define AVIVO_TMDSA_CNTL_HDMI_EN (1 << 2)
# define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12)
@@ -636,7 +633,6 @@
#define AVIVO_LVTMA_CNTL 0x7a80
# define AVIVO_LVTMA_CNTL_ENABLE (1 << 0)
-# define AVIVO_LVTMA_CNTL_HDMI_EN (1 << 2)
# define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12)
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index ebcc15b..3081d07 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -33,7 +33,7 @@
/* This file gathers functions specific to: r520,rv530,rv560,rv570,r580 */
-int r520_mc_wait_for_idle(struct radeon_device *rdev)
+static int r520_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
uint32_t tmp;
@@ -187,12 +187,6 @@ static int r520_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -202,15 +196,9 @@ static int r520_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
-
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
@@ -218,8 +206,6 @@ static int r520_startup(struct radeon_device *rdev)
int r520_resume(struct radeon_device *rdev)
{
- int r;
-
	/* Make sure the GART is not working */
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -237,13 +223,7 @@ int r520_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
-
- rdev->accel_working = true;
- r = r520_startup(rdev);
- if (r) {
- rdev->accel_working = false;
- }
- return r;
+ return r520_startup(rdev);
}
int r520_init(struct radeon_device *rdev)
@@ -312,14 +292,7 @@ int r520_init(struct radeon_device *rdev)
if (r)
return r;
rv515_set_safe_registers(rdev);
-
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r520_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 5eb2382..1a4ed43 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -29,7 +29,6 @@
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
-#include <linux/module.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
@@ -288,6 +287,24 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
pcie_lanes);
}
+static int r600_pm_get_type_index(struct radeon_device *rdev,
+ enum radeon_pm_state_type ps_type,
+ int instance)
+{
+ int i;
+ int found_instance = -1;
+
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].type == ps_type) {
+ found_instance++;
+ if (found_instance == instance)
+ return i;
+ }
+ }
+ /* return default if no match */
+ return rdev->pm.default_power_state_index;
+}
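The r600_pm_get_type_index() helper added above is a linear search for the nth power state of a requested type, falling back to the default state index when no match exists. A self-contained model of the same search over a small state table:

	#include <stdio.h>

	enum ps_type { PS_BATTERY, PS_BALANCED, PS_PERFORMANCE };

	/* Return the index of the nth state of the wanted type, or def. */
	static int pm_get_type_index(const enum ps_type *states, int n,
				     enum ps_type want, int instance, int def)
	{
		int i, found = -1;

		for (i = 0; i < n; i++)
			if (states[i] == want && ++found == instance)
				return i;
		return def;	/* no match: default power state */
	}

	int main(void)
	{
		enum ps_type states[] = { PS_PERFORMANCE, PS_BATTERY,
					  PS_PERFORMANCE, PS_BATTERY };
		printf("%d\n",
		       pm_get_type_index(states, 4, PS_BATTERY, 1, 0));  /* 3 */
		printf("%d\n",
		       pm_get_type_index(states, 4, PS_BALANCED, 0, 0)); /* 0 */
		return 0;
	}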
+
void rs780_pm_init_profile(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states == 2) {
@@ -403,8 +420,6 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
void r600_pm_init_profile(struct radeon_device *rdev)
{
- int idx;
-
if (rdev->family == CHIP_R600) {
/* XXX */
/* default */
@@ -486,43 +501,81 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
/* low sh */
- if (rdev->flags & RADEON_IS_MOBILITY)
- idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
- else
- idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ }
/* mid sh */
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ }
/* high sh */
- idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
/* low mh */
- if (rdev->flags & RADEON_IS_MOBILITY)
- idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
- else
- idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ }
/* mid mh */
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ }
/* high mh */
- idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
}
@@ -842,7 +895,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
/* flush hdp cache so updates hit vram */
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
!(rdev->flags & RADEON_IS_AGP)) {
- void __iomem *ptr = (void *)rdev->gart.ptr;
+ void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
u32 tmp;
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
@@ -877,7 +930,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.robj) {
+ if (rdev->gart.table.vram.robj) {
WARN(1, "R600 PCIE GART already initialized\n");
return 0;
}
@@ -894,7 +947,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
u32 tmp;
int r, i;
- if (rdev->gart.robj == NULL) {
+ if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -939,9 +992,6 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
r600_pcie_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(rdev->mc.gtt_size >> 20),
- (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -949,7 +999,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
+ int i, r;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -976,7 +1026,14 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
- radeon_gart_table_vram_unpin(rdev);
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
}
void r600_pcie_gart_fini(struct radeon_device *rdev)
@@ -1076,7 +1133,7 @@ static void r600_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
}
- WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
@@ -1215,53 +1272,6 @@ int r600_mc_init(struct radeon_device *rdev)
return 0;
}
-int r600_vram_scratch_init(struct radeon_device *rdev)
-{
- int r;
-
- if (rdev->vram_scratch.robj == NULL) {
- r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
- PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->vram_scratch.robj);
- if (r) {
- return r;
- }
- }
-
- r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->vram_scratch.robj,
- RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->vram_scratch.robj);
- return r;
- }
- r = radeon_bo_kmap(rdev->vram_scratch.robj,
- (void **)&rdev->vram_scratch.ptr);
- if (r)
- radeon_bo_unpin(rdev->vram_scratch.robj);
- radeon_bo_unreserve(rdev->vram_scratch.robj);
-
- return r;
-}
-
-void r600_vram_scratch_fini(struct radeon_device *rdev)
-{
- int r;
-
- if (rdev->vram_scratch.robj == NULL) {
- return;
- }
- r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->vram_scratch.robj);
- radeon_bo_unpin(rdev->vram_scratch.robj);
- radeon_bo_unreserve(rdev->vram_scratch.robj);
- }
- radeon_bo_unref(&rdev->vram_scratch.robj);
-}
-
/* We don't check whether the GPU really needs a reset; we simply do the
 * reset, and it's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
@@ -1344,7 +1354,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
return 0;
}
-bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
u32 srbm_status;
u32 grbm_status;
@@ -1361,19 +1371,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
- r100_gpu_lockup_update(lockup, ring);
+ r100_gpu_lockup_update(lockup, &rdev->cp);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
+ r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
}
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
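
The lockup check above forces a couple of NOPs through the ring and reads
back the CP read pointer; r100_gpu_cp_is_lockup() then decides whether the
pointer is still advancing. A simplified sketch of that progress test (the
real tracker also keeps a jiffies timestamp and a timeout, omitted here):

	#include <stdbool.h>
	#include <stdint.h>

	struct lockup_tracker {
		uint32_t last_rptr;
		unsigned stalled_checks;
	};

	static bool ring_is_locked_up(struct lockup_tracker *t, uint32_t rptr,
				      unsigned max_stalled_checks)
	{
		if (rptr != t->last_rptr) {
			/* The CP consumed work since the last check. */
			t->last_rptr = rptr;
			t->stalled_checks = 0;
			return false;
		}
		/* No progress; only report a lockup after repeated stalls. */
		return ++t->stalled_checks >= max_stalled_checks;
	}

	int main(void)
	{
		struct lockup_tracker t = { 0, 0 };

		ring_is_locked_up(&t, 4, 3);	/* rptr moved: counter reset */
		ring_is_locked_up(&t, 4, 3);	/* stalled once */
		ring_is_locked_up(&t, 4, 3);	/* stalled twice */
		return ring_is_locked_up(&t, 4, 3); /* third stall: lockup */
	}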
int r600_asic_reset(struct radeon_device *rdev)
@@ -1651,7 +1661,6 @@ void r600_gpu_init(struct radeon_device *rdev)
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
rdev->config.r600.tile_config = tiling_config;
- rdev->config.r600.backend_map = backend_map;
tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
@@ -2144,28 +2153,27 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
int r600_cp_start(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
uint32_t cp_me;
- r = radeon_ring_lock(rdev, ring, 7);
+ r = radeon_ring_lock(rdev, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(ring, 0x1);
+ radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(rdev, 0x1);
if (rdev->family >= CHIP_RV770) {
- radeon_ring_write(ring, 0x0);
- radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
} else {
- radeon_ring_write(ring, 0x3);
- radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
+ radeon_ring_write(rdev, 0x3);
+ radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
}
- radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_unlock_commit(rdev);
cp_me = 0xff;
WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2174,7 +2182,6 @@ int r600_cp_start(struct radeon_device *rdev)
int r600_cp_resume(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
@@ -2186,13 +2193,13 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
/* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size / 8);
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_SEM_WAIT_TIMER, 0x0);
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -2200,11 +2207,14 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- ring->wptr = 0;
- WREG32(CP_RB_WPTR, ring->wptr);
+ rdev->cp.wptr = 0;
+ WREG32(CP_RB_WPTR, rdev->cp.wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+ RB_RPTR_SWAP(2) |
+#endif
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -2219,36 +2229,42 @@ int r600_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+ WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
- ring->rptr = RREG32(CP_RB_RPTR);
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
r600_cp_start(rdev);
- ring->ready = true;
- r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+ rdev->cp.ready = true;
+ r = radeon_ring_test(rdev);
if (r) {
- ring->ready = false;
+ rdev->cp.ready = false;
return r;
}
return 0;
}
-void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
+void r600_cp_commit(struct radeon_device *rdev)
+{
+ WREG32(CP_RB_WPTR, rdev->cp.wptr);
+ (void)RREG32(CP_RB_WPTR);
+}
+
+void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
u32 rb_bufsz;
/* Align ring size */
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
- ring->ring_size = ring_size;
- ring->align_mask = 16 - 1;
+ rdev->cp.ring_size = ring_size;
+ rdev->cp.align_mask = 16 - 1;
}
void r600_cp_fini(struct radeon_device *rdev)
{
r600_cp_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_fini(rdev);
}
@@ -2267,11 +2283,11 @@ void r600_scratch_init(struct radeon_device *rdev)
}
}
-int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+int r600_ring_test(struct radeon_device *rdev)
{
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i, ridx = radeon_ring_index(rdev, ring);
+ unsigned i;
int r;
r = radeon_scratch_get(rdev, &scratch);
@@ -2280,16 +2296,16 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ring_lock(rdev, ring, 3);
+ r = radeon_ring_lock(rdev, 3);
if (r) {
- DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
radeon_scratch_free(rdev, scratch);
return r;
}
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(rdev, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
@@ -2297,10 +2313,10 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
+ DRM_INFO("ring test succeeded in %d usecs\n", i);
} else {
- DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ridx, scratch, tmp);
+ DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
+ scratch, tmp);
r = -EINVAL;
}
radeon_scratch_free(rdev, scratch);
@@ -2310,66 +2326,33 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
-
if (rdev->wb.use_event) {
- u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
- /* flush read cache over gart */
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
- radeon_ring_write(ring, 0xFFFFFFFF);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 10); /* poll interval */
+ u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
+ (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
/* EVENT_WRITE_EOP - flush caches, send int */
- radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
- radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
- radeon_ring_write(ring, addr & 0xffffffff);
- radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
- radeon_ring_write(ring, fence->seq);
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+ radeon_ring_write(rdev, addr & 0xffffffff);
+ radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(rdev, 0);
} else {
- /* flush read cache over gart */
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
- radeon_ring_write(ring, 0xFFFFFFFF);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 10); /* poll interval */
- radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
- radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+ radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
/* wait for 3D idle clean */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
- radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(rdev, fence->seq);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
- radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
- radeon_ring_write(ring, RB_INT_STAT);
+ radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+ radeon_ring_write(rdev, RB_INT_STAT);
}
}
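
Both branches above end by storing fence->seq where the CPU can see it,
either through the EVENT_WRITE_EOP writeback or through a scratch register.
A hedged sketch of the CPU-side check that pairs with such an emit (the
volatile pointer stands in for the writeback location; the signed cast
keeps the comparison safe across sequence-number wrap):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool fence_signaled(volatile const uint32_t *fence_cpu_addr,
				   uint32_t emitted_seq)
	{
		/* Wrap-safe: true once the GPU-written sequence number
		 * has caught up with the emitted one. */
		return (int32_t)(*fence_cpu_addr - emitted_seq) >= 0;
	}

	int main(void)
	{
		/* Stand-in for the address the EOP packet targets. */
		volatile uint32_t fence_page = 41;

		printf("%d\n", fence_signaled(&fence_page, 42)); /* 0 */
		fence_page = 42;		/* "GPU" writes the seq */
		printf("%d\n", fence_signaled(&fence_page, 42)); /* 1 */
		return 0;
	}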
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
- unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
-
- if (rdev->family < CHIP_CAYMAN)
- sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
-
- radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
- radeon_ring_write(ring, addr & 0xffffffff);
- radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
-}
-
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -2380,33 +2363,19 @@ int r600_copy_blit(struct radeon_device *rdev,
mutex_lock(&rdev->r600_blit.mutex);
rdev->r600_blit.vb_ib = NULL;
- r = r600_blit_prepare_copy(rdev, num_gpu_pages);
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
if (r) {
if (rdev->r600_blit.vb_ib)
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
r600_blit_done_copy(rdev, fence);
mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
-void r600_blit_suspend(struct radeon_device *rdev)
-{
- int r;
-
- /* unpin shaders bo */
- if (rdev->r600_blit.shader_obj) {
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (!r) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- }
- }
-}
-
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -2422,7 +2391,6 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
int r600_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -2436,10 +2404,6 @@ int r600_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
r600_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
@@ -2452,7 +2416,7 @@ int r600_startup(struct radeon_device *rdev)
r = r600_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
+ rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
@@ -2461,12 +2425,6 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -2476,10 +2434,7 @@ int r600_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
- r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
-
+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
r = r600_cp_load_microcode(rdev);
@@ -2489,17 +2444,6 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
- return r;
- }
-
return 0;
}
@@ -2528,11 +2472,15 @@ int r600_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
DRM_ERROR("r600 startup failed on resume\n");
- rdev->accel_working = false;
+ return r;
+ }
+
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
@@ -2547,16 +2495,23 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
+ int r;
+
r600_audio_fini(rdev);
- radeon_ib_pool_suspend(rdev);
- r600_blit_suspend(rdev);
	/* FIXME: we should wait for the ring to be empty */
r600_cp_stop(rdev);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->cp.ready = false;
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
-
+ /* unpin shaders bo */
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (!r) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+ }
return 0;
}
@@ -2626,8 +2581,8 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2636,24 +2591,30 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r600_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ } else {
+ r = r600_ib_test(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ }
+ }
r = r600_audio_init(rdev);
if (r)
@@ -2668,13 +2629,11 @@ void r600_fini(struct radeon_device *rdev)
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
- r600_vram_scratch_fini(rdev);
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
@@ -2688,27 +2647,24 @@ void r600_fini(struct radeon_device *rdev)
*/
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
-
/* FIXME: implement */
- radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(ring, ib->length_dw);
+ radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(rdev, ib->length_dw);
}
-int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+int r600_ib_test(struct radeon_device *rdev)
{
struct radeon_ib *ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
int r;
- int ring_index = radeon_ring_index(rdev, ring);
r = radeon_scratch_get(rdev, &scratch);
if (r) {
@@ -2716,7 +2672,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, ring_index, &ib, 256);
+ r = radeon_ib_get(rdev, &ib);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
@@ -2724,7 +2680,20 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ib->ptr[2] = 0xDEADBEEF;
- ib->length_dw = 3;
+ ib->ptr[3] = PACKET2(0);
+ ib->ptr[4] = PACKET2(0);
+ ib->ptr[5] = PACKET2(0);
+ ib->ptr[6] = PACKET2(0);
+ ib->ptr[7] = PACKET2(0);
+ ib->ptr[8] = PACKET2(0);
+ ib->ptr[9] = PACKET2(0);
+ ib->ptr[10] = PACKET2(0);
+ ib->ptr[11] = PACKET2(0);
+ ib->ptr[12] = PACKET2(0);
+ ib->ptr[13] = PACKET2(0);
+ ib->ptr[14] = PACKET2(0);
+ ib->ptr[15] = PACKET2(0);
+ ib->length_dw = 16;
r = radeon_ib_schedule(rdev, ib);
if (r) {
radeon_scratch_free(rdev, scratch);
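
The hunk above pads the test IB from 3 to 16 dwords with type-2 NOPs. A
generic padding loop that produces the same layout; the PACKET2 encoding
below (type-2 header, pad field at bit 8) is an assumption reproduced from
r600d.h, so treat it as illustrative:

	#include <stdio.h>
	#include <stdint.h>

	/* Type-2 NOP packet; encoding assumed, not taken from the diff. */
	#define PACKET2(v)	(0x80000000 | ((v) << 8))

	int main(void)
	{
		uint32_t ib[16] = { 0 };
		unsigned len = 3;	/* the three payload dwords */

		while (len & 15)	/* pad to a 16-dword boundary */
			ib[len++] = PACKET2(0);
		printf("length_dw = %u\n", len);	/* prints 16 */
		return 0;
	}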
@@ -2744,7 +2713,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
+ DRM_INFO("ib test succeeded in %u usecs\n", i);
} else {
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
@@ -3026,6 +2995,10 @@ int r600_irq_init(struct radeon_device *rdev)
/* RPTR_REARM only works if msi's are enabled */
if (rdev->msi_enabled)
ih_cntl |= RPTR_REARM;
+
+#ifdef __BIG_ENDIAN
+ ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
+#endif
WREG32(IH_CNTL, ih_cntl);
/* force the active interrupt state to all disabled */
@@ -3091,7 +3064,7 @@ int r600_irq_set(struct radeon_device *rdev)
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
}
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (rdev->irq.sw_int) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
@@ -3169,7 +3142,7 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
-static void r600_irq_ack(struct radeon_device *rdev)
+static inline void r600_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
@@ -3270,7 +3243,7 @@ void r600_irq_disable(struct radeon_device *rdev)
r600_disable_interrupt_state(rdev);
}
-static u32 r600_get_ih_wptr(struct radeon_device *rdev)
+static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
@@ -3336,10 +3309,6 @@ int r600_irq_process(struct radeon_device *rdev)
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
- /* No MSIs, need a dummy read to flush PCI DMAs */
- if (!rdev->msi_enabled)
- RREG32(IH_RB_WPTR);
-
wptr = r600_get_ih_wptr(rdev);
rptr = rdev->ih.rptr;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
@@ -3352,9 +3321,6 @@ int r600_irq_process(struct radeon_device *rdev)
}
restart_ih:
- /* Order reading of wptr vs. reading of IH ring data */
- rmb();
-
/* display interrupts */
r600_irq_ack(rdev);
@@ -3475,11 +3441,11 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
- radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_process(rdev);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
- radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_process(rdev);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
@@ -3512,6 +3478,30 @@ restart_ih:
*/
#if defined(CONFIG_DEBUG_FS)
+static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ unsigned count, i, j;
+
+ radeon_ring_free_size(rdev);
+ count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
+ seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
+ seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
+ seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
+ seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
+ seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
+ seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
+ seq_printf(m, "%u dwords in ring\n", count);
+ i = rdev->cp.rptr;
+ for (j = 0; j <= count; j++) {
+ seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+ i = (i + 1) & rdev->cp.ptr_mask;
+ }
+ return 0;
+}
+
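
The debugfs dump above walks the ring from the read pointer and wraps the
index with ptr_mask, which works because the ring size is a power of two.
The same wrap in isolation (sizes here are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	#define RING_DWORDS	8		/* must be a power of two */
	#define PTR_MASK	(RING_DWORDS - 1)

	int main(void)
	{
		uint32_t ring[RING_DWORDS] = { 0 };
		unsigned i = 6, j;		/* start near the end */

		for (j = 0; j < 4; j++) {
			printf("r[%04u]=0x%08x\n", i, ring[i]);
			i = (i + 1) & PTR_MASK;	/* 6, 7, 0, 1: wraps */
		}
		return 0;
	}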
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -3525,6 +3515,7 @@ static int r600_debugfs_mc_info(struct seq_file *m, void *data)
static struct drm_info_list r600_mc_info_list[] = {
{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
+ {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index ba66f30..846fae5 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -36,7 +36,7 @@
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
+ return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@@ -161,18 +161,8 @@ static void r600_audio_update_hdmi(unsigned long param)
*/
static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
{
- u32 value = 0;
DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
- if (ASIC_IS_DCE4(rdev)) {
- if (enable) {
- value |= 0x81000000; /* Required to enable audio */
- value |= 0x0e1000f0; /* fglrx sets that too */
- }
- WREG32(EVERGREEN_AUDIO_ENABLE, value);
- } else {
- WREG32_P(R600_AUDIO_ENABLE,
- enable ? 0x81000000 : 0x0, ~0x81000000);
- }
+ WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
rdev->audio_enabled = enable;
}
@@ -258,33 +248,22 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
return;
}
- if (ASIC_IS_DCE4(rdev)) {
- /* TODO: other PLLs? */
- WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
- WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
- WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
-
- /* Some magic trigger or src sel? */
- WREG32_P(0x5ac, 0x01, ~0x77);
- } else {
- switch (dig->dig_encoder) {
- case 0:
- WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
- WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 0);
- break;
-
- case 1:
- WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
- WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 1);
- break;
- default:
- dev_err(rdev->dev,
- "Unsupported DIG on encoder 0x%02X\n",
- radeon_encoder->encoder_id);
- return;
- }
+ switch (dig->dig_encoder) {
+ case 0:
+ WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
+ WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+ break;
+
+ case 1:
+ WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
+ WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+ break;
+ default:
+ dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
+ radeon_encoder->encoder_id);
+ return;
}
}
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 3c031a4..7f10434 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -41,7 +41,7 @@
#define COLOR_5_6_5 0x8
#define COLOR_8_8_8_8 0x1a
-static void
+static inline void
set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
{
u32 cb_color_info;
@@ -99,7 +99,7 @@ set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64
ADVANCE_RING();
}
-static void
+static inline void
cp_set_surface_sync(drm_radeon_private_t *dev_priv,
u32 sync_type, u32 size, u64 mc_addr)
{
@@ -121,7 +121,7 @@ cp_set_surface_sync(drm_radeon_private_t *dev_priv,
ADVANCE_RING();
}
-static void
+static inline void
set_shaders(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -184,7 +184,7 @@ set_shaders(struct drm_device *dev)
R600_SH_ACTION_ENA, 512, gpu_addr);
}
-static void
+static inline void
set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
{
uint32_t sq_vtx_constant_word2;
@@ -220,7 +220,7 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
R600_VC_ACTION_ENA, 48, gpu_addr);
}
-static void
+static inline void
set_tex_resource(drm_radeon_private_t *dev_priv,
int format, int w, int h, int pitch, u64 gpu_addr)
{
@@ -258,7 +258,7 @@ set_tex_resource(drm_radeon_private_t *dev_priv,
}
-static void
+static inline void
set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
{
RING_LOCALS;
@@ -282,7 +282,7 @@ set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
ADVANCE_RING();
}
-static void
+static inline void
draw_auto(drm_radeon_private_t *dev_priv)
{
RING_LOCALS;
@@ -311,7 +311,7 @@ draw_auto(drm_radeon_private_t *dev_priv)
COMMIT_RING();
}
-static void
+static inline void
set_default_state(drm_radeon_private_t *dev_priv)
{
int i;
@@ -489,7 +489,7 @@ set_default_state(drm_radeon_private_t *dev_priv)
ADVANCE_RING();
}
-static uint32_t i2f(uint32_t input)
+static inline uint32_t i2f(uint32_t input)
{
u32 result, i, exponent, fraction;
@@ -515,7 +515,7 @@ static uint32_t i2f(uint32_t input)
}
-static int r600_nomm_get_vb(struct drm_device *dev)
+static inline int r600_nomm_get_vb(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
dev_priv->blit_vb = radeon_freelist_get(dev);
@@ -526,7 +526,7 @@ static int r600_nomm_get_vb(struct drm_device *dev)
return 0;
}
-static void r600_nomm_put_vb(struct drm_device *dev)
+static inline void r600_nomm_put_vb(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -534,7 +534,7 @@ static void r600_nomm_put_vb(struct drm_device *dev)
radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb);
}
-static void *r600_nomm_get_vb_ptr(struct drm_device *dev)
+static inline void *r600_nomm_get_vb_ptr(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
return (((char *)dev->agp_buffer_map->handle +
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index db38f58..9aa74c3 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -30,14 +30,23 @@
#include "r600d.h"
#include "r600_blit_shaders.h"
-#include "radeon_blit_common.h"
+
+#define DI_PT_RECTLIST 0x11
+#define DI_INDEX_SIZE_16_BIT 0x0
+#define DI_SRC_SEL_AUTO_INDEX 0x2
+
+#define FMT_8 0x1
+#define FMT_5_6_5 0x8
+#define FMT_8_8_8_8 0x1a
+#define COLOR_8 0x1
+#define COLOR_5_6_5 0x8
+#define COLOR_8_8_8_8 0x1a
/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
@@ -45,44 +54,42 @@ set_render_target(struct radeon_device *rdev, int format,
if (h < 8)
h = 8;
- cb_color_info = CB_FORMAT(format) |
- CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
- CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, gpu_addr >> 8);
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
- radeon_ring_write(ring, 2 << 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
+ radeon_ring_write(rdev, 2 << 0);
}
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (pitch << 0) | (slice << 10));
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, (pitch << 0) | (slice << 10));
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, 0);
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, cb_color_info);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, cb_color_info);
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, 0);
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, 0);
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, 0);
}
/* emits 5dw */
@@ -91,7 +98,6 @@ cp_set_surface_sync(struct radeon_device *rdev,
u32 sync_type, u32 size,
u64 mc_addr)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
if (size == 0xffffffff)
@@ -99,18 +105,17 @@ cp_set_surface_sync(struct radeon_device *rdev,
else
cp_coher_size = ((size + 255) >> 8);
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, sync_type);
- radeon_ring_write(ring, cp_coher_size);
- radeon_ring_write(ring, mc_addr >> 8);
- radeon_ring_write(ring, 10); /* poll interval */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(rdev, sync_type);
+ radeon_ring_write(rdev, cp_coher_size);
+ radeon_ring_write(rdev, mc_addr >> 8);
+ radeon_ring_write(rdev, 10); /* poll interval */
}
/* emits 21dw + 1 surface sync = 26dw */
static void
set_shaders(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
u32 sq_pgm_resources;
@@ -119,35 +124,35 @@ set_shaders(struct radeon_device *rdev)
/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, sq_pgm_resources);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, sq_pgm_resources);
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, 0);
/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 2);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, 2);
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, 0);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -157,24 +162,22 @@ set_shaders(struct radeon_device *rdev)
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2;
- sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
- SQ_VTXC_STRIDE(16);
+ sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
+ sq_vtx_constant_word2 |= (2 << 30);
#endif
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(ring, 0x460);
- radeon_ring_write(ring, gpu_addr & 0xffffffff);
- radeon_ring_write(ring, 48 - 1);
- radeon_ring_write(ring, sq_vtx_constant_word2);
- radeon_ring_write(ring, 1 << 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
+ radeon_ring_write(rdev, 0x460);
+ radeon_ring_write(rdev, gpu_addr & 0xffffffff);
+ radeon_ring_write(rdev, 48 - 1);
+ radeon_ring_write(rdev, sq_vtx_constant_word2);
+ radeon_ring_write(rdev, 1 << 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
if ((rdev->family == CHIP_RV610) ||
(rdev->family == CHIP_RV620) ||
@@ -192,40 +195,35 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
static void
set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
- u64 gpu_addr, u32 size)
+ u64 gpu_addr)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
if (h < 1)
h = 1;
- sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
- S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
- sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
- S_038000_TEX_WIDTH(w - 1);
-
- sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
- sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);
-
- sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
- S_038010_DST_SEL_X(SQ_SEL_X) |
- S_038010_DST_SEL_Y(SQ_SEL_Y) |
- S_038010_DST_SEL_Z(SQ_SEL_Z) |
- S_038010_DST_SEL_W(SQ_SEL_W);
-
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, size, gpu_addr);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_tex_resource_word0);
- radeon_ring_write(ring, sq_tex_resource_word1);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, sq_tex_resource_word4);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
+ sq_tex_resource_word0 = (1 << 0) | (1 << 3);
+ sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
+ ((w - 1) << 19));
+
+ sq_tex_resource_word1 = (format << 26);
+ sq_tex_resource_word1 |= ((h - 1) << 0);
+
+ sq_tex_resource_word4 = ((1 << 14) |
+ (0 << 16) |
+ (1 << 19) |
+ (2 << 22) |
+ (3 << 25));
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, sq_tex_resource_word0);
+ radeon_ring_write(rdev, sq_tex_resource_word1);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, sq_tex_resource_word4);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
}
/* emits 12 */
@@ -233,45 +231,43 @@ static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
}
/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(ring, DI_PT_RECTLIST);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, DI_PT_RECTLIST);
- radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
+ radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);
- radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(ring, 1);
+ radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
+ radeon_ring_write(rdev, 1);
- radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(ring, 3);
- radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
+ radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+ radeon_ring_write(rdev, 3);
+ radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
}
@@ -279,7 +275,6 @@ draw_auto(struct radeon_device *rdev)
static void
set_default_state(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
@@ -435,62 +430,47 @@ set_default_state(struct radeon_device *rdev)
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(ring, dwords);
+ radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
+ radeon_ring_write(rdev, dwords);
/* SQ config */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
- radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(ring, sq_config);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
- radeon_ring_write(ring, sq_thread_resource_mgmt);
- radeon_ring_write(ring, sq_stack_resource_mgmt_1);
- radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
+ radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, sq_config);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_thread_resource_mgmt);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
}
-#define I2F_MAX_BITS 15
-#define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1)
-#define I2F_SHIFT (24 - I2F_MAX_BITS)
-
-/*
- * Converts unsigned integer into 32-bit IEEE floating point representation.
- * Conversion is not universal and only works for the range from 0
- * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between
- * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary,
- * I2F_MAX_BITS can be increased, but that will add to the loop iterations
- * and slow us down. Conversion is done by shifting the input and counting
- * down until the first 1 reaches bit position 23. The resulting counter
- * and the shifted input are, respectively, the exponent and the fraction.
- * The sign is always zero.
- */
-static uint32_t i2f(uint32_t input)
+static inline uint32_t i2f(uint32_t input)
{
u32 result, i, exponent, fraction;
- WARN_ON_ONCE(input > I2F_MAX_INPUT);
-
- if ((input & I2F_MAX_INPUT) == 0)
- result = 0;
+ if ((input & 0x3fff) == 0)
+ result = 0; /* 0 is a special case */
else {
- exponent = 126 + I2F_MAX_BITS;
- fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;
-
- for (i = 0; i < I2F_MAX_BITS; i++) {
+		exponent = 140; /* exponent biased by 127 */
+ fraction = (input & 0x3fff) << 10; /* cheat and only
+					handle numbers below 2^15 */
+ for (i = 0; i < 14; i++) {
if (fraction & 0x800000)
break;
else {
- fraction = fraction << 1;
+ fraction = fraction << 1; /* keep
+ shifting left until top bit = 1 */
exponent = exponent - 1;
}
}
- result = exponent << 23 | (fraction & 0x7fffff);
+ result = exponent << 23 | (fraction & 0x7fffff); /* mask
+ off top bit; assumed 1 */
}
return result;
}
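
The open-coded conversion above can be checked against the machine's own
float encoding: for any input in range, the produced bit pattern should
equal the IEEE-754 single that the compiler generates. A standalone copy
with one such check (8192 encodes as 0x46000000):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	/* Same algorithm as above: inputs are masked to 14 bits. */
	static uint32_t i2f(uint32_t input)
	{
		uint32_t result, i, exponent, fraction;

		if ((input & 0x3fff) == 0)
			return 0;		/* zero is all-zero bits */
		exponent = 140;			/* 127 bias + 13 */
		fraction = (input & 0x3fff) << 10;
		for (i = 0; i < 14; i++) {
			if (fraction & 0x800000)
				break;		/* implicit 1 in place */
			fraction <<= 1;
			exponent--;
		}
		result = exponent << 23 | (fraction & 0x7fffff);
		return result;
	}

	int main(void)
	{
		float f = 8192.0f;
		uint32_t bits;

		memcpy(&bits, &f, sizeof(bits));
		/* Both lines print 0x46000000. */
		printf("i2f:   0x%08x\n", i2f(8192));
		printf("float: 0x%08x\n", bits);
		return 0;
	}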
@@ -503,27 +483,6 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
- rdev->r600_blit.primitives.set_render_target = set_render_target;
- rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
- rdev->r600_blit.primitives.set_shaders = set_shaders;
- rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
- rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
- rdev->r600_blit.primitives.set_scissors = set_scissors;
- rdev->r600_blit.primitives.draw_auto = draw_auto;
- rdev->r600_blit.primitives.set_default_state = set_default_state;
-
- rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
- rdev->r600_blit.ring_size_common += 5; /* done copy */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
-
- rdev->r600_blit.ring_size_per_loop = 76;
- /* set_render_target emits 2 extra dwords on rv6xx */
- if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
- rdev->r600_blit.ring_size_per_loop += 2;
-
- rdev->r600_blit.max_dim = 8192;
-
/* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
goto done;
@@ -621,17 +580,16 @@ void r600_blit_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
-static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
+static int r600_vb_ib_get(struct radeon_device *rdev)
{
int r;
- r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
- &rdev->r600_blit.vb_ib, size);
+ r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
if (r) {
DRM_ERROR("failed to get IB for vertex buffer\n");
return r;
}
- rdev->r600_blit.vb_total = size;
+ rdev->r600_blit.vb_total = 64*1024;
rdev->r600_blit.vb_used = 0;
return 0;
}
@@ -642,82 +600,47 @@ static void r600_vb_ib_put(struct radeon_device *rdev)
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
-static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
- int *width, int *height, int max_dim)
+int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
{
- unsigned max_pages;
- unsigned pages = num_gpu_pages;
- int w, h;
-
- if (num_gpu_pages == 0) {
- /* not supposed to be called with no pages, but just in case */
- h = 0;
- w = 0;
- pages = 0;
- WARN_ON(1);
- } else {
- int rect_order = 2;
- h = RECT_UNIT_H;
- while (num_gpu_pages / rect_order) {
- h *= 2;
- rect_order *= 4;
- if (h >= max_dim) {
- h = max_dim;
- break;
- }
- }
- max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
- if (pages > max_pages)
- pages = max_pages;
- w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
- w = (w / RECT_UNIT_W) * RECT_UNIT_W;
- pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
- BUG_ON(pages == 0);
- }
-
-
- DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);
-
- /* return width and height only if the caller wants it */
- if (height)
- *height = h;
- if (width)
- *width = w;
-
- return pages;
-}
-
-
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- int ring_size;
- int num_loops = 0;
- int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
-
- /* num loops */
- while (num_gpu_pages) {
- num_gpu_pages -=
- r600_blit_create_rect(num_gpu_pages, NULL, NULL,
- rdev->r600_blit.max_dim);
- num_loops++;
- }
+ int ring_size, line_size;
+ int max_size;
+ /* loops of emits 64 + fence emit possible */
+ int dwords_per_loop = 76, num_loops;
- /* 48 bytes for vertex per loop */
- r = r600_vb_ib_get(rdev, (num_loops*48)+256);
+ r = r600_vb_ib_get(rdev);
if (r)
return r;
+ /* set_render_target emits 2 extra dwords on rv6xx */
+ if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
+ dwords_per_loop += 2;
+
+ /* 8 bpp vs 32 bpp for xfer unit */
+ if (size_bytes & 3)
+ line_size = 8192;
+ else
+ line_size = 8192*4;
+
+ max_size = 8192 * line_size;
+
+ /* major loops cover the max size transfer */
+ num_loops = ((size_bytes + max_size) / max_size);
+ /* minor loops cover the extra non-aligned bits */
+ num_loops += ((size_bytes % line_size) ? 1 : 0);
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
- ring_size += rdev->r600_blit.ring_size_common;
- r = radeon_ring_lock(rdev, ring, ring_size);
+ /* set default + shaders */
+ ring_size += 40; /* shaders + def state */
+ ring_size += 10; /* fence emit for VB IB */
+ ring_size += 5; /* done copy */
+ ring_size += 10; /* fence emit for done copy */
+ r = radeon_ring_lock(rdev, ring_size);
if (r)
return r;
- rdev->r600_blit.primitives.set_default_state(rdev);
- rdev->r600_blit.primitives.set_shaders(rdev);
+ set_default_state(rdev); /* 14 */
+ set_shaders(rdev); /* 26 */
return 0;
}
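
The ring budget is now computed inline from size_bytes rather than from the removed ring_size_common/ring_size_per_loop bookkeeping. As a worked example of the arithmetic above, take a dword-aligned 1 MiB copy on a plain R600 (no rv6xx +2 adjustment); the figures below just replay the code and are for illustration:

    #include <stdio.h>

    int main(void)
    {
        int size_bytes = 1 << 20;          /* 1 MiB, 4-byte aligned */
        int line_size = 8192 * 4;          /* 32 bpp transfer unit */
        int max_size = 8192 * line_size;
        int dwords_per_loop = 76;
        int num_loops, ring_size;

        num_loops = (size_bytes + max_size) / max_size;  /* major loops: 1 */
        num_loops += (size_bytes % line_size) ? 1 : 0;   /* aligned, no tail */
        ring_size = num_loops * dwords_per_loop
                  + 40 + 10 + 5 + 10;      /* state/shaders + fences + done */
        printf("loops=%d ring=%d dwords\n", num_loops, ring_size); /* 1, 141 */
        return 0;
    }
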
@@ -731,66 +654,187 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
if (fence)
r = radeon_fence_emit(rdev, fence);
- radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_unlock_commit(rdev);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages)
+ int size_bytes)
{
+ int max_bytes;
u64 vb_gpu_addr;
u32 *vb;
- DRM_DEBUG("emitting copy %16llx %16llx %d %d\n",
- src_gpu_addr, dst_gpu_addr,
- num_gpu_pages, rdev->r600_blit.vb_used);
+ DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
+ size_bytes, rdev->r600_blit.vb_used);
vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
+ if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
+ max_bytes = 8192;
+
+ while (size_bytes) {
+ int cur_size = size_bytes;
+ int src_x = src_gpu_addr & 255;
+ int dst_x = dst_gpu_addr & 255;
+ int h = 1;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
+
+ if (!src_x && !dst_x) {
+ h = (cur_size / max_bytes);
+ if (h > 8192)
+ h = 8192;
+ if (h == 0)
+ h = 1;
+ else
+ cur_size = max_bytes;
+ } else {
+ if (cur_size > max_bytes)
+ cur_size = max_bytes;
+ if (cur_size > (max_bytes - dst_x))
+ cur_size = (max_bytes - dst_x);
+ if (cur_size > (max_bytes - src_x))
+ cur_size = (max_bytes - src_x);
+ }
+
+ if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
+ WARN_ON(1);
+ }
+
+ vb[0] = i2f(dst_x);
+ vb[1] = 0;
+ vb[2] = i2f(src_x);
+ vb[3] = 0;
+
+ vb[4] = i2f(dst_x);
+ vb[5] = i2f(h);
+ vb[6] = i2f(src_x);
+ vb[7] = i2f(h);
+
+ vb[8] = i2f(dst_x + cur_size);
+ vb[9] = i2f(h);
+ vb[10] = i2f(src_x + cur_size);
+ vb[11] = i2f(h);
- while (num_gpu_pages) {
- int w, h;
- unsigned size_in_bytes;
- unsigned pages_per_loop =
- r600_blit_create_rect(num_gpu_pages, &w, &h,
- rdev->r600_blit.max_dim);
+ /* src 9 */
+ set_tex_resource(rdev, FMT_8,
+ src_x + cur_size, h, src_x + cur_size,
+ src_gpu_addr);
- size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
- DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
- if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
- WARN_ON(1);
+ /* dst 23 */
+ set_render_target(rdev, COLOR_8,
+ dst_x + cur_size, h,
+ dst_gpu_addr);
+
+ /* scissors 12 */
+ set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
+
+ /* 14 */
+ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
+ set_vtx_resource(rdev, vb_gpu_addr);
+
+ /* draw 10 */
+ draw_auto(rdev);
+
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+ cur_size * h, dst_gpu_addr);
+
+ vb += 12;
+ rdev->r600_blit.vb_used += 12 * 4;
+
+ src_gpu_addr += cur_size * h;
+ dst_gpu_addr += cur_size * h;
+ size_bytes -= cur_size * h;
}
+ } else {
+ max_bytes = 8192 * 4;
+
+ while (size_bytes) {
+ int cur_size = size_bytes;
+ int src_x = (src_gpu_addr & 255);
+ int dst_x = (dst_gpu_addr & 255);
+ int h = 1;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
+
+ if (!src_x && !dst_x) {
+ h = (cur_size / max_bytes);
+ if (h > 8192)
+ h = 8192;
+ if (h == 0)
+ h = 1;
+ else
+ cur_size = max_bytes;
+ } else {
+ if (cur_size > max_bytes)
+ cur_size = max_bytes;
+ if (cur_size > (max_bytes - dst_x))
+ cur_size = (max_bytes - dst_x);
+ if (cur_size > (max_bytes - src_x))
+ cur_size = (max_bytes - src_x);
+ }
+
+ if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
+ WARN_ON(1);
+ }
- vb[0] = 0;
- vb[1] = 0;
- vb[2] = 0;
- vb[3] = 0;
-
- vb[4] = 0;
- vb[5] = i2f(h);
- vb[6] = 0;
- vb[7] = i2f(h);
-
- vb[8] = i2f(w);
- vb[9] = i2f(h);
- vb[10] = i2f(w);
- vb[11] = i2f(h);
-
- rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
- w, h, w, src_gpu_addr, size_in_bytes);
- rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
- w, h, dst_gpu_addr);
- rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
- vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
- rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
- rdev->r600_blit.primitives.draw_auto(rdev);
- rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
- PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
- size_in_bytes, dst_gpu_addr);
-
- vb += 12;
- rdev->r600_blit.vb_used += 4*12;
- src_gpu_addr += size_in_bytes;
- dst_gpu_addr += size_in_bytes;
- num_gpu_pages -= pages_per_loop;
+ vb[0] = i2f(dst_x / 4);
+ vb[1] = 0;
+ vb[2] = i2f(src_x / 4);
+ vb[3] = 0;
+
+ vb[4] = i2f(dst_x / 4);
+ vb[5] = i2f(h);
+ vb[6] = i2f(src_x / 4);
+ vb[7] = i2f(h);
+
+ vb[8] = i2f((dst_x + cur_size) / 4);
+ vb[9] = i2f(h);
+ vb[10] = i2f((src_x + cur_size) / 4);
+ vb[11] = i2f(h);
+
+ /* src 9 */
+ set_tex_resource(rdev, FMT_8_8_8_8,
+ (src_x + cur_size) / 4,
+ h, (src_x + cur_size) / 4,
+ src_gpu_addr);
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+ /* dst 23 */
+ set_render_target(rdev, COLOR_8_8_8_8,
+ (dst_x + cur_size) / 4, h,
+ dst_gpu_addr);
+
+ /* scissors 12 */
+ set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
+
+ /* Vertex buffer setup 14 */
+ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
+ set_vtx_resource(rdev, vb_gpu_addr);
+
+ /* draw 10 */
+ draw_auto(rdev);
+
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+ cur_size * h, dst_gpu_addr);
+
+ /* 78 ring dwords per loop */
+ vb += 12;
+ rdev->r600_blit.vb_used += 12 * 4;
+
+ src_gpu_addr += cur_size * h;
+ dst_gpu_addr += cur_size * h;
+ size_bytes -= cur_size * h;
+ }
}
}
+
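
Both branches above share one chunking policy: the GPU addresses are rounded down to 256-byte alignment, the sub-256 remainder becomes an x offset into the rectangle, and when both offsets happen to be zero, whole rows of max_bytes are coalesced into a single tall rectangle. A sketch of that selection, factored into a hypothetical helper for clarity (not part of the driver):

    #include <stdio.h>

    static void pick_rect(int size_bytes, int src_x, int dst_x, int max_bytes,
                          int *cur_size, int *h)
    {
        *cur_size = size_bytes;
        *h = 1;
        if (!src_x && !dst_x) {
            *h = size_bytes / max_bytes;     /* whole rows fit */
            if (*h > 8192)
                *h = 8192;                   /* hardware dimension limit */
            if (*h == 0)
                *h = 1;                      /* short tail: one partial row */
            else
                *cur_size = max_bytes;
        } else {
            if (*cur_size > max_bytes)
                *cur_size = max_bytes;
            if (*cur_size > max_bytes - dst_x)
                *cur_size = max_bytes - dst_x;   /* stay inside the row */
            if (*cur_size > max_bytes - src_x)
                *cur_size = max_bytes - src_x;
        }
    }

    int main(void)
    {
        int cur, h;

        pick_rect(100000, 0, 0, 8192, &cur, &h);
        printf("cur_size=%d h=%d\n", cur, h);    /* cur_size=8192 h=12 */
        return 0;
    }
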
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 84c5462..c3ab959 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -26,8 +26,6 @@
* Alex Deucher <alexander.deucher@amd.com>
*/
-#include <linux/module.h>
-
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
@@ -1804,8 +1802,8 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
/* Set ring buffer size */
#ifdef __BIG_ENDIAN
RADEON_WRITE(R600_CP_RB_CNTL,
- R600_BUF_SWAP_32BIT |
- R600_RB_NO_UPDATE |
+ RADEON_BUF_SWAP_32BIT |
+ RADEON_RB_NO_UPDATE |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#else
@@ -1815,22 +1813,22 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
dev_priv->ring.size_l2qw);
#endif
- RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);
+ RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x4);
/* Set the write pointer delay */
RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
#ifdef __BIG_ENDIAN
RADEON_WRITE(R600_CP_RB_CNTL,
- R600_BUF_SWAP_32BIT |
- R600_RB_NO_UPDATE |
- R600_RB_RPTR_WR_ENA |
+ RADEON_BUF_SWAP_32BIT |
+ RADEON_RB_NO_UPDATE |
+ RADEON_RB_RPTR_WR_ENA |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#else
RADEON_WRITE(R600_CP_RB_CNTL,
- R600_RB_NO_UPDATE |
- R600_RB_RPTR_WR_ENA |
+ RADEON_RB_NO_UPDATE |
+ RADEON_RB_RPTR_WR_ENA |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#endif
@@ -1853,8 +1851,13 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
- ((unsigned long) dev->sg->virtual)
+ dev_priv->gart_vm_start;
}
- RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc));
- RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr));
+ RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (rptr_addr & 0xfffffffc));
+ RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
+ upper_32_bits(rptr_addr));
#ifdef __BIG_ENDIAN
RADEON_WRITE(R600_CP_RB_CNTL,
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0ec3f20..909bda8 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -52,20 +52,15 @@ struct r600_cs_track {
struct radeon_bo *cb_color_bo[8];
u64 cb_color_bo_mc[8];
u32 cb_color_bo_offset[8];
- struct radeon_bo *cb_color_frag_bo[8]; /* unused */
- struct radeon_bo *cb_color_tile_bo[8]; /* unused */
+ struct radeon_bo *cb_color_frag_bo[8];
+ struct radeon_bo *cb_color_tile_bo[8];
u32 cb_color_info[8];
- u32 cb_color_view[8];
- u32 cb_color_size_idx[8]; /* unused */
+ u32 cb_color_size_idx[8];
u32 cb_target_mask;
- u32 cb_shader_mask; /* unused */
+ u32 cb_shader_mask;
u32 cb_color_size[8];
u32 vgt_strmout_en;
u32 vgt_strmout_buffer_en;
- struct radeon_bo *vgt_strmout_bo[4];
- u64 vgt_strmout_bo_mc[4]; /* unused */
- u32 vgt_strmout_bo_offset[4];
- u32 vgt_strmout_size[4];
u32 db_depth_control;
u32 db_depth_info;
u32 db_depth_size_idx;
@@ -74,17 +69,13 @@ struct r600_cs_track {
u32 db_offset;
struct radeon_bo *db_bo;
u64 db_bo_mc;
- bool sx_misc_kill_all_prims;
- bool cb_dirty;
- bool db_dirty;
- bool streamout_dirty;
};
#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 }
-#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 4, 0, CHIP_R600 }
+#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 }
-#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 8, 0, CHIP_R600 }
+#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 }
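
Each entry in color_formats_table is { blockwidth, blockheight, blocksize, valid_color, min_family }, so the two edits above restore byte-exact blocksizes (3 for packed 24-bit, 6 for 48-bit) in place of the padded 4 and 8. Surface size checks multiply block counts by this third field; a worked example with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        /* 1024x768 RGBA8: 1x1 blocks, blocksize 4 */
        unsigned nbx = 1024, nby = 768, blocksize = 4;

        printf("%u bytes\n", nbx * nby * blocksize);  /* 3145728, i.e. 3 MiB */
        return 0;
    }
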
@@ -116,7 +107,7 @@ static const struct gpu_formats color_formats_table[] = {
/* 24-bit */
FMT_24_BIT(V_038004_FMT_8_8_8),
-
+
/* 32-bit */
FMT_32_BIT(V_038004_COLOR_32, 1),
FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
@@ -171,22 +162,22 @@ static const struct gpu_formats color_formats_table[] = {
[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
};
-bool r600_fmt_is_valid_color(u32 format)
+static inline bool fmt_is_valid_color(u32 format)
{
if (format >= ARRAY_SIZE(color_formats_table))
return false;
-
+
if (color_formats_table[format].valid_color)
return true;
return false;
}
-bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
+static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family)
{
if (format >= ARRAY_SIZE(color_formats_table))
return false;
-
+
if (family < color_formats_table[format].min_family)
return false;
@@ -196,7 +187,7 @@ bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
return false;
}
-int r600_fmt_get_blocksize(u32 format)
+static inline int fmt_get_blocksize(u32 format)
{
if (format >= ARRAY_SIZE(color_formats_table))
return 0;
@@ -204,7 +195,7 @@ int r600_fmt_get_blocksize(u32 format)
return color_formats_table[format].blocksize;
}
-int r600_fmt_get_nblocksx(u32 format, u32 w)
+static inline int fmt_get_nblocksx(u32 format, u32 w)
{
unsigned bw;
@@ -218,7 +209,7 @@ int r600_fmt_get_nblocksx(u32 format, u32 w)
return (w + bw - 1) / bw;
}
-int r600_fmt_get_nblocksy(u32 format, u32 h)
+static inline int fmt_get_nblocksy(u32 format, u32 h)
{
unsigned bh;
@@ -232,6 +223,25 @@ int r600_fmt_get_nblocksy(u32 format, u32 h)
return (h + bh - 1) / bh;
}
+static inline int r600_bpe_from_format(u32 *bpe, u32 format)
+{
+ unsigned res;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ goto fail;
+
+ res = color_formats_table[format].blocksize;
+ if (res == 0)
+ goto fail;
+
+ *bpe = res;
+ return 0;
+
+fail:
+ *bpe = 16;
+ return -EINVAL;
+}
+
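
The re-added r600_bpe_from_format() keeps the old error contract: an unknown format returns -EINVAL but still stores a worst-case 16 bytes per element, so a caller that ignores the return value at least stays conservative in its size checks. A hypothetical caller, shown only to illustrate the intended use:

    /* hypothetical helper, not part of this patch */
    static u32 surface_bytes(struct radeon_cs_parser *p, u32 format,
                             u32 nbx, u32 nby)
    {
        u32 bpe;

        if (r600_bpe_from_format(&bpe, format))
            dev_warn(p->dev, "unknown format %d, assuming %u bytes\n",
                     format, bpe);
        return nbx * nby * bpe;   /* bpe is the worst-case 16 on failure */
    }
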
struct array_mode_checker {
int array_mode;
u32 group_size;
@@ -242,7 +252,7 @@ struct array_mode_checker {
};
/* returns alignment in pixels for pitch/height/depth and bytes for base */
-static int r600_get_array_mode_alignment(struct array_mode_checker *values,
+static inline int r600_get_array_mode_alignment(struct array_mode_checker *values,
u32 *pitch_align,
u32 *height_align,
u32 *depth_align,
@@ -265,7 +275,7 @@ static int r600_get_array_mode_alignment(struct array_mode_checker *values,
break;
case ARRAY_LINEAR_ALIGNED:
*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
- *height_align = 1;
+ *height_align = tile_height;
*depth_align = 1;
*base_align = values->group_size;
break;
@@ -278,9 +288,10 @@ static int r600_get_array_mode_alignment(struct array_mode_checker *values,
*base_align = values->group_size;
break;
case ARRAY_2D_TILED_THIN1:
- *pitch_align = max((u32)macro_tile_width * tile_width,
- (u32)((values->group_size * values->nbanks) /
- (values->blocksize * values->nsamples * tile_width)));
+ *pitch_align = max((u32)macro_tile_width,
+ (u32)(((values->group_size / tile_height) /
+ (values->blocksize * values->nsamples)) *
+ values->nbanks)) * tile_width;
*height_align = macro_tile_height * tile_height;
*depth_align = 1;
*base_align = max(macro_tile_bytes,
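
The restored 2D-tiled pitch formula divides the group size by the tile height before scaling by the bank count, where the newer code folded tile_width into the divisor instead. Plugging in common R600 values (group size 256, 4 banks, 32-bit blocks, 1 sample, 8x8 tiles; assumed figures, for illustration only):

    #include <stdio.h>

    int main(void)
    {
        unsigned group_size = 256, nbanks = 4;
        unsigned blocksize = 4, nsamples = 1;
        unsigned tile_width = 8, tile_height = 8;
        unsigned macro_tile_width = nbanks;   /* as set up in r600_cs.c */
        unsigned pitch_align;

        pitch_align = ((group_size / tile_height) /
                       (blocksize * nsamples)) * nbanks;  /* 32 */
        if (pitch_align < macro_tile_width)
            pitch_align = macro_tile_width;
        pitch_align *= tile_width;
        printf("pitch_align = %u pixels\n", pitch_align); /* 256 */
        return 0;
    }
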
@@ -304,14 +315,12 @@ static void r600_cs_track_init(struct r600_cs_track *track)
track->cb_color_size[i] = 0;
track->cb_color_size_idx[i] = 0;
track->cb_color_info[i] = 0;
- track->cb_color_view[i] = 0xFFFFFFFF;
track->cb_color_bo[i] = NULL;
track->cb_color_bo_offset[i] = 0xFFFFFFFF;
track->cb_color_bo_mc[i] = 0xFFFFFFFF;
}
track->cb_target_mask = 0xFFFFFFFF;
track->cb_shader_mask = 0xFFFFFFFF;
- track->cb_dirty = true;
track->db_bo = NULL;
track->db_bo_mc = 0xFFFFFFFF;
/* assume the biggest format and that htile is enabled */
@@ -320,19 +329,9 @@ static void r600_cs_track_init(struct r600_cs_track *track)
track->db_depth_size = 0xFFFFFFFF;
track->db_depth_size_idx = 0;
track->db_depth_control = 0xFFFFFFFF;
- track->db_dirty = true;
-
- for (i = 0; i < 4; i++) {
- track->vgt_strmout_size[i] = 0;
- track->vgt_strmout_bo[i] = NULL;
- track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
- track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
- }
- track->streamout_dirty = true;
- track->sx_misc_kill_all_prims = false;
}
-static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
u32 slice_tile_max, size, tmp;
@@ -342,14 +341,13 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
volatile u32 *ib = p->ib->ptr;
unsigned array_mode;
u32 format;
-
if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
return -EINVAL;
}
size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
format = G_0280A0_FORMAT(track->cb_color_info[i]);
- if (!r600_fmt_is_valid_color(format)) {
+ if (!fmt_is_valid_color(format)) {
dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
__func__, __LINE__, format,
i, track->cb_color_info[i]);
@@ -370,7 +368,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = track->nsamples;
- array_check.blocksize = r600_fmt_get_blocksize(format);
+ array_check.blocksize = fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -414,18 +412,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
}
/* check offset */
- tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format);
- switch (array_mode) {
- default:
- case V_0280A0_ARRAY_LINEAR_GENERAL:
- case V_0280A0_ARRAY_LINEAR_ALIGNED:
- tmp += track->cb_color_view[i] & 0xFF;
- break;
- case V_0280A0_ARRAY_1D_TILED_THIN1:
- case V_0280A0_ARRAY_2D_TILED_THIN1:
- tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
- break;
- }
+ tmp = fmt_get_nblocksy(format, height) * fmt_get_nblocksx(format, pitch) * fmt_get_blocksize(format);
if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
/* the initial DDX does bad things with the CB size occasionally */
@@ -435,13 +422,10 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
* broken userspace.
*/
} else {
- dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n",
- __func__, i, array_mode,
+ dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
+ array_mode,
track->cb_color_bo_offset[i], tmp,
- radeon_bo_size(track->cb_color_bo[i]),
- pitch, height, r600_fmt_get_nblocksx(format, pitch),
- r600_fmt_get_nblocksy(format, height),
- r600_fmt_get_blocksize(format));
+ radeon_bo_size(track->cb_color_bo[i]));
return -EINVAL;
}
}
@@ -465,171 +449,143 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
/* on legacy kernels we don't perform advanced checks */
if (p->rdev == NULL)
return 0;
-
- /* check streamout */
- if (track->streamout_dirty && track->vgt_strmout_en) {
- for (i = 0; i < 4; i++) {
- if (track->vgt_strmout_buffer_en & (1 << i)) {
- if (track->vgt_strmout_bo[i]) {
- u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
- (u64)track->vgt_strmout_size[i];
- if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
- DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
- i, offset,
- radeon_bo_size(track->vgt_strmout_bo[i]));
- return -EINVAL;
- }
- } else {
- dev_warn(p->dev, "No buffer for streamout %d\n", i);
- return -EINVAL;
- }
- }
- }
- track->streamout_dirty = false;
+ /* we don't support stream out buffers yet */
+ if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
+ dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
+ return -EINVAL;
}
-
- if (track->sx_misc_kill_all_prims)
- return 0;
-
/* check that we have a cb for each enabled target; we don't check
* shader_mask because it seems mesa isn't always setting it :(
*/
- if (track->cb_dirty) {
- tmp = track->cb_target_mask;
- for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
- /* at least one component is enabled */
- if (track->cb_color_bo[i] == NULL) {
- dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
- __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
- return -EINVAL;
- }
- /* perform rewrite of CB_COLOR[0-7]_SIZE */
- r = r600_cs_track_validate_cb(p, i);
- if (r)
- return r;
+ tmp = track->cb_target_mask;
+ for (i = 0; i < 8; i++) {
+ if ((tmp >> (i * 4)) & 0xF) {
+ /* at least one component is enabled */
+ if (track->cb_color_bo[i] == NULL) {
+ dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ return -EINVAL;
}
+ /* perform rewrite of CB_COLOR[0-7]_SIZE */
+ r = r600_cs_track_validate_cb(p, i);
+ if (r)
+ return r;
}
- track->cb_dirty = false;
}
-
- if (track->db_dirty) {
- /* Check depth buffer */
- if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
- G_028800_Z_ENABLE(track->db_depth_control)) {
- u32 nviews, bpe, ntiles, size, slice_tile_max;
- u32 height, height_align, pitch, pitch_align, depth_align;
- u64 base_offset, base_align;
- struct array_mode_checker array_check;
- int array_mode;
-
- if (track->db_bo == NULL) {
- dev_warn(p->dev, "z/stencil with no depth buffer\n");
+ /* Check depth buffer */
+ if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+ G_028800_Z_ENABLE(track->db_depth_control)) {
+ u32 nviews, bpe, ntiles, size, slice_tile_max;
+ u32 height, height_align, pitch, pitch_align, depth_align;
+ u64 base_offset, base_align;
+ struct array_mode_checker array_check;
+ int array_mode;
+
+ if (track->db_bo == NULL) {
+ dev_warn(p->dev, "z/stencil with no depth buffer\n");
+ return -EINVAL;
+ }
+ if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
+ dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
+ return -EINVAL;
+ }
+ switch (G_028010_FORMAT(track->db_depth_info)) {
+ case V_028010_DEPTH_16:
+ bpe = 2;
+ break;
+ case V_028010_DEPTH_X8_24:
+ case V_028010_DEPTH_8_24:
+ case V_028010_DEPTH_X8_24_FLOAT:
+ case V_028010_DEPTH_8_24_FLOAT:
+ case V_028010_DEPTH_32_FLOAT:
+ bpe = 4;
+ break;
+ case V_028010_DEPTH_X24_8_32_FLOAT:
+ bpe = 8;
+ break;
+ default:
+ dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+ return -EINVAL;
+ }
+ if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+ if (!track->db_depth_size_idx) {
+ dev_warn(p->dev, "z/stencil buffer size not set\n");
return -EINVAL;
}
- if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
- dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
+ tmp = radeon_bo_size(track->db_bo) - track->db_offset;
+ tmp = (tmp / bpe) >> 6;
+ if (!tmp) {
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+ track->db_depth_size, bpe, track->db_offset,
+ radeon_bo_size(track->db_bo));
return -EINVAL;
}
- switch (G_028010_FORMAT(track->db_depth_info)) {
- case V_028010_DEPTH_16:
- bpe = 2;
- break;
- case V_028010_DEPTH_X8_24:
- case V_028010_DEPTH_8_24:
- case V_028010_DEPTH_X8_24_FLOAT:
- case V_028010_DEPTH_8_24_FLOAT:
- case V_028010_DEPTH_32_FLOAT:
- bpe = 4;
+ ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
+ } else {
+ size = radeon_bo_size(track->db_bo);
+ /* pitch in pixels */
+ pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
+ slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ slice_tile_max *= 64;
+ height = slice_tile_max / pitch;
+ if (height > 8192)
+ height = 8192;
+ base_offset = track->db_bo_mc + track->db_offset;
+ array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+ array_check.array_mode = array_mode;
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = track->nsamples;
+ array_check.blocksize = bpe;
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
+ switch (array_mode) {
+ case V_028010_ARRAY_1D_TILED_THIN1:
+ /* don't break userspace */
+ height &= ~0x7;
break;
- case V_028010_DEPTH_X24_8_32_FLOAT:
- bpe = 8;
+ case V_028010_ARRAY_2D_TILED_THIN1:
break;
default:
- dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
return -EINVAL;
}
- if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
- if (!track->db_depth_size_idx) {
- dev_warn(p->dev, "z/stencil buffer size not set\n");
- return -EINVAL;
- }
- tmp = radeon_bo_size(track->db_bo) - track->db_offset;
- tmp = (tmp / bpe) >> 6;
- if (!tmp) {
- dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
- track->db_depth_size, bpe, track->db_offset,
- radeon_bo_size(track->db_bo));
- return -EINVAL;
- }
- ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
- } else {
- size = radeon_bo_size(track->db_bo);
- /* pitch in pixels */
- pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
- slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
- slice_tile_max *= 64;
- height = slice_tile_max / pitch;
- if (height > 8192)
- height = 8192;
- base_offset = track->db_bo_mc + track->db_offset;
- array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
- array_check.array_mode = array_mode;
- array_check.group_size = track->group_size;
- array_check.nbanks = track->nbanks;
- array_check.npipes = track->npipes;
- array_check.nsamples = track->nsamples;
- array_check.blocksize = bpe;
- if (r600_get_array_mode_alignment(&array_check,
- &pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
- return -EINVAL;
- }
- switch (array_mode) {
- case V_028010_ARRAY_1D_TILED_THIN1:
- /* don't break userspace */
- height &= ~0x7;
- break;
- case V_028010_ARRAY_2D_TILED_THIN1:
- break;
- default:
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
- return -EINVAL;
- }
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, array_mode);
- return -EINVAL;
- }
- if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, height, height_align, array_mode);
- return -EINVAL;
- }
- if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
- base_offset, base_align, array_mode);
- return -EINVAL;
- }
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, array_mode);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, height_align)) {
+ dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, height, height_align, array_mode);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
+ base_offset, base_align, array_mode);
+ return -EINVAL;
+ }
- ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
- nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
- tmp = ntiles * bpe * 64 * nviews;
- if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
- dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
- array_mode,
- track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
- radeon_bo_size(track->db_bo));
- return -EINVAL;
- }
+ ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
+ tmp = ntiles * bpe * 64 * nviews;
+ if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
+ dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+ array_mode,
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
}
}
- track->db_dirty = false;
}
return 0;
}
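
With the dirty flags gone, r600_cs_track_check() re-validates every bound color buffer and the whole depth state on each call, and any enabled streamout is rejected outright instead of being range-checked. The core of the CB pass is the target-mask walk: one 4-bit component-enable nibble per render target, and every nonzero nibble must have a bound buffer. A compact restatement of that test (illustrative, not new driver code):

    /* returns the first enabled target with no bound buffer, or -1 */
    static int first_unbound_target(u32 cb_target_mask,
                                    struct radeon_bo * const cb_color_bo[8])
    {
        int i;

        for (i = 0; i < 8; i++)
            if (((cb_target_mask >> (i * 4)) & 0xF) && !cb_color_bo[i])
                return i;
        return -1;
    }
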
@@ -781,7 +737,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
* Check that the next packet is a relocation packet3, do bo validation and compute
* the GPU offset using the provided start.
**/
-static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
struct radeon_cs_packet p3reloc;
int r;
@@ -955,15 +911,16 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
* if the register is safe. If the register is not flagged as safe, this function
* will test it against a list of registers needing special handling.
*/
-static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct r600_cs_track *track = (struct r600_cs_track *)p->track;
struct radeon_cs_reloc *reloc;
+ u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
u32 m, i, tmp, *ib;
int r;
i = (reg >> 7);
- if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
+ if (i > last_reg) {
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
@@ -1002,11 +959,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case R_028800_DB_DEPTH_CONTROL:
track->db_depth_control = radeon_get_ib_value(p, idx);
- track->db_dirty = true;
break;
case R_028010_DB_DEPTH_INFO:
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
- r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -1023,66 +978,24 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
}
- } else {
+ } else
track->db_depth_info = radeon_get_ib_value(p, idx);
- }
- track->db_dirty = true;
break;
case R_028004_DB_DEPTH_VIEW:
track->db_depth_view = radeon_get_ib_value(p, idx);
- track->db_dirty = true;
break;
case R_028000_DB_DEPTH_SIZE:
track->db_depth_size = radeon_get_ib_value(p, idx);
track->db_depth_size_idx = idx;
- track->db_dirty = true;
break;
case R_028AB0_VGT_STRMOUT_EN:
track->vgt_strmout_en = radeon_get_ib_value(p, idx);
- track->streamout_dirty = true;
break;
case R_028B20_VGT_STRMOUT_BUFFER_EN:
track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
- track->streamout_dirty = true;
- break;
- case VGT_STRMOUT_BUFFER_BASE_0:
- case VGT_STRMOUT_BUFFER_BASE_1:
- case VGT_STRMOUT_BUFFER_BASE_2:
- case VGT_STRMOUT_BUFFER_BASE_3:
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
- track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- track->vgt_strmout_bo[tmp] = reloc->robj;
- track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
- track->streamout_dirty = true;
- break;
- case VGT_STRMOUT_BUFFER_SIZE_0:
- case VGT_STRMOUT_BUFFER_SIZE_1:
- case VGT_STRMOUT_BUFFER_SIZE_2:
- case VGT_STRMOUT_BUFFER_SIZE_3:
- tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
- /* size in register is DWs, convert to bytes */
- track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
- track->streamout_dirty = true;
- break;
- case CP_COHER_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case R_028238_CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
- track->cb_dirty = true;
break;
case R_02823C_CB_SHADER_MASK:
track->cb_shader_mask = radeon_get_ib_value(p, idx);
@@ -1090,7 +1003,6 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_028C04_PA_SC_AA_CONFIG:
tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
track->nsamples = 1 << tmp;
- track->cb_dirty = true;
break;
case R_0280A0_CB_COLOR0_INFO:
case R_0280A4_CB_COLOR1_INFO:
@@ -1100,8 +1012,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B4_CB_COLOR5_INFO:
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
- r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
@@ -1120,19 +1031,6 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
}
- track->cb_dirty = true;
- break;
- case R_028080_CB_COLOR0_VIEW:
- case R_028084_CB_COLOR1_VIEW:
- case R_028088_CB_COLOR2_VIEW:
- case R_02808C_CB_COLOR3_VIEW:
- case R_028090_CB_COLOR4_VIEW:
- case R_028094_CB_COLOR5_VIEW:
- case R_028098_CB_COLOR6_VIEW:
- case R_02809C_CB_COLOR7_VIEW:
- tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
- track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
- track->cb_dirty = true;
break;
case R_028060_CB_COLOR0_SIZE:
case R_028064_CB_COLOR1_SIZE:
@@ -1145,7 +1043,6 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
track->cb_color_size_idx[tmp] = idx;
- track->cb_dirty = true;
break;
/* These registers were added late; there is userspace
* which does provide relocation for those but set
@@ -1228,7 +1125,6 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
- track->cb_dirty = true;
break;
case DB_DEPTH_BASE:
r = r600_cs_packet_next_reloc(p, &reloc);
@@ -1241,7 +1137,6 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_bo = reloc->robj;
track->db_bo_mc = reloc->lobj.gpu_offset;
- track->db_dirty = true;
break;
case DB_HTILE_DATA_BASE:
case SQ_PGM_START_FS:
@@ -1305,18 +1200,6 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
- case SX_MEMORY_EXPORT_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONFIG_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- break;
- case SX_MISC:
- track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
- break;
default:
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
@@ -1324,7 +1207,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return 0;
}
-unsigned r600_mip_minify(unsigned size, unsigned level)
+static inline unsigned mip_minify(unsigned size, unsigned level)
{
unsigned val;
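
Only the signature change is visible in this hunk; the body lies outside the context. A sketch consistent with the r600 mip rules, assuming the kernel's usual max() and roundup_pow_of_two() helpers; treat the exact body as an assumption since it is not shown here:

    static inline unsigned mip_minify(unsigned size, unsigned level)
    {
        unsigned val = max(1U, size >> level);  /* halve per level, floor 1 */

        if (level > 0)
            val = roundup_pow_of_two(val);      /* npot mip chains round up */
        return val;
    }
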
@@ -1346,22 +1229,22 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
unsigned nlevels = llevel - blevel + 1;
*l0_size = -1;
- blocksize = r600_fmt_get_blocksize(format);
+ blocksize = fmt_get_blocksize(format);
- w0 = r600_mip_minify(w0, 0);
- h0 = r600_mip_minify(h0, 0);
- d0 = r600_mip_minify(d0, 0);
+ w0 = mip_minify(w0, 0);
+ h0 = mip_minify(h0, 0);
+ d0 = mip_minify(d0, 0);
for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
- width = r600_mip_minify(w0, i);
- nbx = r600_fmt_get_nblocksx(format, width);
+ width = mip_minify(w0, i);
+ nbx = fmt_get_nblocksx(format, width);
nbx = round_up(nbx, block_align);
- height = r600_mip_minify(h0, i);
- nby = r600_fmt_get_nblocksy(format, height);
+ height = mip_minify(h0, i);
+ nby = fmt_get_nblocksy(format, height);
nby = round_up(nby, height_align);
- depth = r600_mip_minify(d0, i);
+ depth = mip_minify(d0, i);
size = nbx * nby * blocksize;
if (nfaces)
@@ -1394,7 +1277,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
* This function will check that the resource has valid field and that
* the texture and mipmap bo object are big enough to cover this resource.
*/
-static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
struct radeon_bo *texture,
struct radeon_bo *mipmap,
u64 base_offset,
@@ -1419,18 +1302,15 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
mip_offset <<= 8;
word0 = radeon_get_ib_value(p, idx + 0);
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (tiling_flags & RADEON_TILING_MACRO)
- word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
- else if (tiling_flags & RADEON_TILING_MICRO)
- word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
- }
+ if (tiling_flags & RADEON_TILING_MACRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
word1 = radeon_get_ib_value(p, idx + 1);
w0 = G_038000_TEX_WIDTH(word0) + 1;
h0 = G_038004_TEX_HEIGHT(word1) + 1;
d0 = G_038004_TEX_DEPTH(word1);
nfaces = 1;
- array = 0;
switch (G_038000_DIM(word0)) {
case V_038000_SQ_TEX_DIM_1D:
case V_038000_SQ_TEX_DIM_2D:
@@ -1453,7 +1333,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
return -EINVAL;
}
format = G_038004_DATA_FORMAT(word1);
- if (!r600_fmt_is_valid_texture(format, p->family)) {
+ if (!fmt_is_valid_texture(format, p->family)) {
dev_warn(p->dev, "%s:%d texture invalid format %d\n",
__func__, __LINE__, format);
return -EINVAL;
@@ -1466,7 +1346,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = 1;
- array_check.blocksize = r600_fmt_get_blocksize(format);
+ array_check.blocksize = fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
@@ -1499,10 +1379,6 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
word1 = radeon_get_ib_value(p, idx + 5);
blevel = G_038010_BASE_LEVEL(word0);
llevel = G_038014_LAST_LEVEL(word1);
- if (blevel > llevel) {
- dev_warn(p->dev, "texture blevel %d > llevel %d\n",
- blevel, llevel);
- }
if (array == 1) {
barray = G_038014_BASE_ARRAY(word1);
larray = G_038014_LAST_ARRAY(word1);
@@ -1514,10 +1390,8 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
&l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */
if ((l0_size + word2) > radeon_bo_size(texture)) {
- dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
- w0, h0, pitch_align, height_align,
- array_check.array_mode, format, word2,
- l0_size, radeon_bo_size(texture));
+ dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
+ w0, h0, format, word2, l0_size, radeon_bo_size(texture));
dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
return -EINVAL;
}
@@ -1530,22 +1404,6 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
return 0;
}
-static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
-{
- u32 m, i;
-
- i = (reg >> 7);
- if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
- return false;
- }
- m = 1 << ((reg >> 2) & 31);
- if (!(r600_reg_safe_bm[i] & m))
- return true;
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
- return false;
-}
-
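
Both the removed r600_is_safe_reg() and the surviving r600_cs_check_reg() address the same bitmap: one 32-bit word of r600_reg_safe_bm covers a 128-byte register window, one bit per dword register. A small demonstration of the indexing (register value chosen for illustration):

    #include <stdio.h>

    int main(void)
    {
        unsigned reg = 0x028800;        /* DB_DEPTH_CONTROL */
        unsigned word = reg >> 7;       /* 128 register bytes per bitmap word */
        unsigned bit = (reg >> 2) & 31; /* one bit per dword register */

        printf("reg 0x%06x -> r600_reg_safe_bm[%u], bit %u\n",
               reg, word, bit);         /* word 1296, bit 0 */
        return 0;
    }
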
static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
@@ -1568,8 +1426,6 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
int pred_op;
int tmp;
- uint64_t offset;
-
if (pkt->count != 1) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -1593,12 +1449,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
- (idx_value & 0xfffffff0) +
- ((u64)(tmp & 0xff) << 32);
-
- ib[idx + 0] = offset;
- ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
}
break;
@@ -1622,8 +1474,6 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
break;
case PACKET3_DRAW_INDEX:
- {
- uint64_t offset;
if (pkt->count != 3) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -1633,21 +1483,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
-
- offset = reloc->lobj.gpu_offset +
- idx_value +
- ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
-
- ib[idx+0] = offset;
- ib[idx+1] = upper_32_bits(offset) & 0xff;
-
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
r = r600_cs_track_check(p);
if (r) {
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
- }
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
DRM_ERROR("bad DRAW_INDEX_AUTO\n");
@@ -1678,20 +1521,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
/* bit 4 is reg (0) or mem (1) */
if (idx_value & 0x10) {
- uint64_t offset;
-
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
}
-
- offset = reloc->lobj.gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
-
- ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
- ib[idx+2] = upper_32_bits(offset) & 0xff;
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
}
break;
case PACKET3_SURFACE_SYNC:
@@ -1716,25 +1552,16 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
if (pkt->count) {
- uint64_t offset;
-
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
}
- offset = reloc->lobj.gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
-
- ib[idx+1] = offset & 0xfffffff8;
- ib[idx+2] = upper_32_bits(offset) & 0xff;
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
}
break;
case PACKET3_EVENT_WRITE_EOP:
- {
- uint64_t offset;
-
if (pkt->count != 4) {
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
@@ -1744,15 +1571,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
}
-
- offset = reloc->lobj.gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
-
- ib[idx+1] = offset & 0xfffffffc;
- ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
break;
- }
case PACKET3_SET_CONFIG_REG:
start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
end_reg = 4 * pkt->count + start_reg - 4;
@@ -1811,12 +1632,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
- }
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
texture = reloc->robj;
/* tex mip base */
r = r600_cs_packet_next_reloc(p, &reloc);
@@ -1837,8 +1656,6 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+1+(i*7)+3] += mip_offset;
break;
case SQ_TEX_VTX_VALID_BUFFER:
- {
- uint64_t offset64;
/* vtx base */
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -1851,15 +1668,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* force size to size of the buffer */
dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
size + offset, radeon_bo_size(reloc->robj));
- ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
+ ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
}
-
- offset64 = reloc->lobj.gpu_offset + offset;
- ib[idx+1+(i*8)+0] = offset64;
- ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
- (upper_32_bits(offset64) & 0xff);
+ ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
+ ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
break;
- }
case SQ_TEX_VTX_INVALID_TEXTURE:
case SQ_TEX_VTX_INVALID_BUFFER:
default:
@@ -1934,104 +1747,6 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
break;
- case PACKET3_STRMOUT_BUFFER_UPDATE:
- if (pkt->count != 4) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
- return -EINVAL;
- }
- /* Updating memory at DST_ADDRESS. */
- if (idx_value & 0x1) {
- u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
- return -EINVAL;
- }
- offset = radeon_get_ib_value(p, idx+1);
- offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
- if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
- return -EINVAL;
- }
- offset += reloc->lobj.gpu_offset;
- ib[idx+1] = offset;
- ib[idx+2] = upper_32_bits(offset) & 0xff;
- }
- /* Reading data from SRC_ADDRESS. */
- if (((idx_value >> 1) & 0x3) == 2) {
- u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
- return -EINVAL;
- }
- offset = radeon_get_ib_value(p, idx+3);
- offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
- return -EINVAL;
- }
- offset += reloc->lobj.gpu_offset;
- ib[idx+3] = offset;
- ib[idx+4] = upper_32_bits(offset) & 0xff;
- }
- break;
- case PACKET3_COPY_DW:
- if (pkt->count != 4) {
- DRM_ERROR("bad COPY_DW (invalid count)\n");
- return -EINVAL;
- }
- if (idx_value & 0x1) {
- u64 offset;
- /* SRC is memory. */
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad COPY_DW (missing src reloc)\n");
- return -EINVAL;
- }
- offset = radeon_get_ib_value(p, idx+1);
- offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
- if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
- return -EINVAL;
- }
- offset += reloc->lobj.gpu_offset;
- ib[idx+1] = offset;
- ib[idx+2] = upper_32_bits(offset) & 0xff;
- } else {
- /* SRC is a reg. */
- reg = radeon_get_ib_value(p, idx+1) << 2;
- if (!r600_is_safe_reg(p, reg, idx+1))
- return -EINVAL;
- }
- if (idx_value & 0x2) {
- u64 offset;
- /* DST is memory. */
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
- return -EINVAL;
- }
- offset = radeon_get_ib_value(p, idx+3);
- offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
- return -EINVAL;
- }
- offset += reloc->lobj.gpu_offset;
- ib[idx+3] = offset;
- ib[idx+4] = upper_32_bits(offset) & 0xff;
- } else {
- /* DST is a reg. */
- reg = radeon_get_ib_value(p, idx+3) << 2;
- if (!r600_is_safe_reg(p, reg, idx+3))
- return -EINVAL;
- }
- break;
case PACKET3_NOP:
break;
default:
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 0b59206..c45d921 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -320,7 +320,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
- if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE4(rdev))
return;
if (!offset)
@@ -462,31 +462,13 @@ static void r600_hdmi_assign_block(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u16 eg_offsets[] = {
- EVERGREEN_CRTC0_REGISTER_OFFSET,
- EVERGREEN_CRTC1_REGISTER_OFFSET,
- EVERGREEN_CRTC2_REGISTER_OFFSET,
- EVERGREEN_CRTC3_REGISTER_OFFSET,
- EVERGREEN_CRTC4_REGISTER_OFFSET,
- EVERGREEN_CRTC5_REGISTER_OFFSET,
- };
-
if (!dig) {
dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
return;
}
- if (ASIC_IS_DCE5(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
/* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
- dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
- return;
- }
- radeon_encoder->hdmi_offset = EVERGREEN_HDMI_BASE +
- eg_offsets[dig->dig_encoder];
- radeon_encoder->hdmi_config_offset = radeon_encoder->hdmi_offset
- + EVERGREEN_HDMI_CONFIG_OFFSET;
} else if (ASIC_IS_DCE3(rdev)) {
radeon_encoder->hdmi_offset = dig->dig_encoder ?
R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
@@ -509,7 +491,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset;
- if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE4(rdev))
return;
if (!radeon_encoder->hdmi_offset) {
@@ -522,24 +504,16 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
}
offset = radeon_encoder->hdmi_offset;
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0x1, ~0x1);
- } else if (ASIC_IS_DCE32(rdev)) {
+ if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
- } else if (ASIC_IS_DCE3(rdev)) {
- /* TODO */
- } else if (rdev->family >= CHIP_R600) {
+ } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
- ~AVIVO_TMDSA_CNTL_HDMI_EN);
+ WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0x101);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
- ~AVIVO_LVTMA_CNTL_HDMI_EN);
+ WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0x105);
break;
default:
@@ -551,8 +525,8 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
if (rdev->irq.installed
&& rdev->family != CHIP_RS600
&& rdev->family != CHIP_RS690
- && rdev->family != CHIP_RS740
- && !ASIC_IS_DCE4(rdev)) {
+ && rdev->family != CHIP_RS740) {
+
/* if irq is available use it */
rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
radeon_irq_set(rdev);
@@ -577,7 +551,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset;
- if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE4(rdev))
return;
offset = radeon_encoder->hdmi_offset;
@@ -596,22 +570,16 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
/* disable polling */
r600_audio_disable_polling(encoder);
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0, ~0x1);
- } else if (ASIC_IS_DCE32(rdev)) {
+ if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, 0,
- ~AVIVO_TMDSA_CNTL_HDMI_EN);
+ WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, 0,
- ~AVIVO_LVTMA_CNTL_HDMI_EN);
+ WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0);
break;
default:
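
The enable/disable paths above lean on WREG32_P masked writes. Assuming radeon's usual definition of the macro (read the register, keep the bits selected by mask, merge in the new value), its effect can be stated as a pure function; a sketch for illustration:

    #include <stdint.h>

    /* effect of WREG32_P(reg, val, mask), assuming the usual radeon macro */
    static uint32_t wreg32_p_effect(uint32_t old, uint32_t val, uint32_t mask)
    {
        return (old & mask) | (val & ~mask);
    }

    /* e.g. WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4) sets bit 2 and keeps the
     * rest, while WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4) clears it. */
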
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 8ae328f..0245ae6 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -78,26 +78,7 @@
#define CB_COLOR0_SIZE 0x28060
#define CB_COLOR0_VIEW 0x28080
-#define R_028080_CB_COLOR0_VIEW 0x028080
-#define S_028080_SLICE_START(x) (((x) & 0x7FF) << 0)
-#define G_028080_SLICE_START(x) (((x) >> 0) & 0x7FF)
-#define C_028080_SLICE_START 0xFFFFF800
-#define S_028080_SLICE_MAX(x) (((x) & 0x7FF) << 13)
-#define G_028080_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
-#define C_028080_SLICE_MAX 0xFF001FFF
-#define R_028084_CB_COLOR1_VIEW 0x028084
-#define R_028088_CB_COLOR2_VIEW 0x028088
-#define R_02808C_CB_COLOR3_VIEW 0x02808C
-#define R_028090_CB_COLOR4_VIEW 0x028090
-#define R_028094_CB_COLOR5_VIEW 0x028094
-#define R_028098_CB_COLOR6_VIEW 0x028098
-#define R_02809C_CB_COLOR7_VIEW 0x02809C
#define CB_COLOR0_INFO 0x280a0
-# define CB_FORMAT(x) ((x) << 2)
-# define CB_ARRAY_MODE(x) ((x) << 8)
-# define CB_SOURCE_FORMAT(x) ((x) << 27)
-# define CB_SF_EXPORT_FULL 0
-# define CB_SF_EXPORT_NORM 1
#define CB_COLOR0_TILE 0x280c0
#define CB_COLOR0_FRAG 0x280e0
#define CB_COLOR0_MASK 0x28100
@@ -436,17 +417,6 @@
#define SQ_PGM_START_VS 0x28858
#define SQ_PGM_RESOURCES_VS 0x28868
#define SQ_PGM_CF_OFFSET_VS 0x288d0
-
-#define SQ_VTX_CONSTANT_WORD0_0 0x30000
-#define SQ_VTX_CONSTANT_WORD1_0 0x30004
-#define SQ_VTX_CONSTANT_WORD2_0 0x30008
-# define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0)
-# define SQ_VTXC_STRIDE(x) ((x) << 8)
-# define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30)
-# define SQ_ENDIAN_NONE 0
-# define SQ_ENDIAN_8IN16 1
-# define SQ_ENDIAN_8IN32 2
-#define SQ_VTX_CONSTANT_WORD3_0 0x3000c
#define SQ_VTX_CONSTANT_WORD6_0 0x38018
#define S__SQ_VTX_CONSTANT_TYPE(x) (((x) & 3) << 30)
#define G__SQ_VTX_CONSTANT_TYPE(x) (((x) >> 30) & 3)
@@ -507,11 +477,6 @@
#define VGT_STRMOUT_BUFFER_OFFSET_1 0x28AEC
#define VGT_STRMOUT_BUFFER_OFFSET_2 0x28AFC
#define VGT_STRMOUT_BUFFER_OFFSET_3 0x28B0C
-#define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0
-#define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0
-#define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0
-#define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00
-
#define VGT_STRMOUT_EN 0x28AB0
#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
#define VTX_REUSE_DEPTH_MASK 0x000000FF
@@ -850,11 +815,7 @@
#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
#define PACKET3_INDIRECT_BUFFER_MP 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
-# define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
-# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
-# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
#define PACKET3_MPEG_INDEX 0x3A
-#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
@@ -1391,12 +1352,6 @@
#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25)
#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7)
#define C_038010_DST_SEL_W 0xF1FFFFFF
-# define SQ_SEL_X 0
-# define SQ_SEL_Y 1
-# define SQ_SEL_Z 2
-# define SQ_SEL_W 3
-# define SQ_SEL_0 4
-# define SQ_SEL_1 5
#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28)
#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
#define C_038010_BASE_LEVEL 0x0FFFFFFF
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index d2870a0..59d72d0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -60,7 +60,7 @@
* are considered fatal)
*/
-#include <linux/atomic.h>
+#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
@@ -103,25 +103,10 @@ extern int radeon_msi;
#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
-#define RADEON_DEBUGFS_MAX_COMPONENTS 32
+#define RADEON_DEBUGFS_MAX_NUM_FILES 32
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
-/* max number of rings */
-#define RADEON_NUM_RINGS 3
-
-/* internal ring indices */
-/* r1xx+ has gfx CP ring */
-#define RADEON_RING_TYPE_GFX_INDEX 0
-
-/* cayman has 2 compute CP rings */
-#define CAYMAN_RING_TYPE_CP1_INDEX 1
-#define CAYMAN_RING_TYPE_CP2_INDEX 2
-
-/* hardcode these limits for now */
-#define RADEON_VA_RESERVED_SIZE (8 << 20)
-#define RADEON_IB_VM_MAX_SIZE (64 << 10)
-
/*
* Errata workarounds.
*/
@@ -157,47 +142,6 @@ bool radeon_get_bios(struct radeon_device *rdev);
/*
- * Mutex which allows recursive locking from the same process.
- */
-struct radeon_mutex {
- struct mutex mutex;
- struct task_struct *owner;
- int level;
-};
-
-static inline void radeon_mutex_init(struct radeon_mutex *mutex)
-{
- mutex_init(&mutex->mutex);
- mutex->owner = NULL;
- mutex->level = 0;
-}
-
-static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
-{
- if (mutex_trylock(&mutex->mutex)) {
- /* The mutex was unlocked before, so it's ours now */
- mutex->owner = current;
- } else if (mutex->owner != current) {
- /* Another process holds the mutex; block until we acquire it */
- mutex_lock(&mutex->mutex);
- mutex->owner = current;
- }
- /* Otherwise the mutex was already locked by this process */
-
- mutex->level++;
-}
-
-static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
-{
- if (--mutex->level > 0)
- return;
-
- mutex->owner = NULL;
- mutex_unlock(&mutex->mutex);
-}
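/* Editor's note: a minimal usage sketch for the recursive mutex removed
 * above, assuming the struct and helpers exactly as defined; the function
 * name is illustrative and not part of the driver.
 */
static void example_nested_lock(struct radeon_mutex *m)
{
        radeon_mutex_lock(m);   /* first acquire: owner = current, level = 1 */
        radeon_mutex_lock(m);   /* same task re-enters: no deadlock, level = 2 */
        radeon_mutex_unlock(m); /* level drops to 1, mutex stays held */
        radeon_mutex_unlock(m); /* level reaches 0, owner cleared, mutex released */
}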
-
-
-/*
* Dummy page
*/
struct radeon_dummy_page {
@@ -242,24 +186,20 @@ extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
extern int evergreen_get_temp(struct radeon_device *rdev);
extern int sumo_get_temp(struct radeon_device *rdev);
-extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
- unsigned *bankh, unsigned *mtaspect,
- unsigned *tile_split);
/*
* Fences.
*/
struct radeon_fence_driver {
uint32_t scratch_reg;
- uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
atomic_t seq;
uint32_t last_seq;
unsigned long last_jiffies;
unsigned long last_timeout;
wait_queue_head_t queue;
+ rwlock_t lock;
struct list_head created;
- struct list_head emitted;
+ struct list_head emited;
struct list_head signaled;
bool initialized;
};
@@ -270,26 +210,21 @@ struct radeon_fence {
struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
- bool emitted;
+ bool emited;
bool signaled;
- /* RB, DMA, etc. */
- int ring;
- struct radeon_semaphore *semaphore;
};
-int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
+int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
-void radeon_fence_process(struct radeon_device *rdev, int ring);
+void radeon_fence_process(struct radeon_device *rdev);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_next(struct radeon_device *rdev);
+int radeon_fence_wait_last(struct radeon_device *rdev);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
-int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
/*
* Tiling registers
@@ -311,21 +246,6 @@ struct radeon_mman {
bool initialized;
};
-/* bo virtual address in a specific vm */
-struct radeon_bo_va {
- /* bo list is protected by bo being reserved */
- struct list_head bo_list;
- /* vm list is protected by vm mutex */
- struct list_head vm_list;
- /* constant after initialization */
- struct radeon_vm *vm;
- struct radeon_bo *bo;
- uint64_t soffset;
- uint64_t eoffset;
- uint32_t flags;
- bool valid;
-};
-
struct radeon_bo {
/* Protected by gem.mutex */
struct list_head list;
@@ -339,10 +259,6 @@ struct radeon_bo {
u32 tiling_flags;
u32 pitch;
int surface_reg;
- /* list of all virtual addresses to which this bo
- * is associated
- */
- struct list_head va;
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object gem_base;
@@ -358,48 +274,6 @@ struct radeon_bo_list {
u32 tiling_flags;
};
-/* sub-allocation manager; it has to be protected by another lock.
- * By design this is a helper for other parts of the driver,
- * like the indirect buffer or semaphore, which both have their own
- * locking.
- *
- * The principle is simple: we keep a list of sub-allocations in offset
- * order (first entry has offset == 0, last entry has the highest
- * offset).
- *
- * When allocating a new object we first check if there is room at
- * the end, i.e. total_size - (last_object_offset + last_object_size) >=
- * alloc_size. If so we allocate the new object there.
- *
- * When there is not enough room at the end, we start waiting for
- * each sub object until we reach object_offset + object_size >=
- * alloc_size; that object then becomes the sub object we return.
- *
- * Alignment can't be bigger than the page size.
- *
- * Holes are not considered for allocation, to keep things simple.
- * The assumption is that there won't be holes (all objects use the
- * same alignment).
- */
-struct radeon_sa_manager {
- struct radeon_bo *bo;
- struct list_head sa_bo;
- unsigned size;
- uint64_t gpu_addr;
- void *cpu_ptr;
- uint32_t domain;
-};
-
-struct radeon_sa_bo;
-
-/* sub-allocation buffer */
-struct radeon_sa_bo {
- struct list_head list;
- struct radeon_sa_manager *manager;
- unsigned offset;
- unsigned size;
-};
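/* Editor's note: a sketch of the "room at the end" test described in the
 * comment above; last_offset/last_size stand for the final entry of the
 * offset-ordered sa_bo list and are illustrative parameters, not part of
 * this patch.
 */
static bool example_sa_fits_at_end(struct radeon_sa_manager *sa,
                                   unsigned last_offset, unsigned last_size,
                                   unsigned alloc_size)
{
        /* room at the end: total_size - (last_offset + last_size) >= alloc_size */
        return sa->size - (last_offset + last_size) >= alloc_size;
}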
-
/*
* GEM objects.
*/
@@ -414,6 +288,9 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
int alignment, int initial_domain,
bool discardable, bool kernel,
struct drm_gem_object **obj);
+int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+ uint64_t *gpu_addr);
+void radeon_gem_object_unpin(struct drm_gem_object *obj);
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -426,64 +303,37 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
uint32_t handle);
/*
- * Semaphores.
+ * GART structures, functions & helpers
*/
-struct radeon_ring;
-
-#define RADEON_SEMAPHORE_BO_SIZE 256
+struct radeon_mc;
-struct radeon_semaphore_driver {
- rwlock_t lock;
- struct list_head bo;
+struct radeon_gart_table_ram {
+ volatile uint32_t *ptr;
};
-struct radeon_semaphore_bo;
-
-/* everything here is constant */
-struct radeon_semaphore {
- struct list_head list;
- uint64_t gpu_addr;
- uint32_t *cpu_ptr;
- struct radeon_semaphore_bo *bo;
+struct radeon_gart_table_vram {
+ struct radeon_bo *robj;
+ volatile uint32_t *ptr;
};
-struct radeon_semaphore_bo {
- struct list_head list;
- struct radeon_ib *ib;
- struct list_head free;
- struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
- unsigned nused;
+union radeon_gart_table {
+ struct radeon_gart_table_ram ram;
+ struct radeon_gart_table_vram vram;
};
-void radeon_semaphore_driver_fini(struct radeon_device *rdev);
-int radeon_semaphore_create(struct radeon_device *rdev,
- struct radeon_semaphore **semaphore);
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
- struct radeon_semaphore *semaphore);
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
- struct radeon_semaphore *semaphore);
-void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore);
-
-/*
- * GART structures, functions & helpers
- */
-struct radeon_mc;
-
#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
-#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
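/* Editor's note: the ALIGN macro removed above rounds a byte count up to
 * the 4 KiB GPU page, e.g. RADEON_GPU_PAGE_ALIGN(5000) =
 * (5000 + 4095) & ~4095 = 8192.
 */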
struct radeon_gart {
dma_addr_t table_addr;
- struct radeon_bo *robj;
- void *ptr;
unsigned num_gpu_pages;
unsigned num_cpu_pages;
unsigned table_size;
+ union radeon_gart_table table;
struct page **pages;
dma_addr_t *pages_addr;
+ bool *ttm_alloced;
bool ready;
};
@@ -491,8 +341,6 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
-int radeon_gart_table_vram_pin(struct radeon_device *rdev);
-void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
@@ -500,7 +348,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr);
-void radeon_gart_restore(struct radeon_device *rdev);
/*
@@ -591,47 +438,45 @@ union radeon_irq_stat_regs {
struct evergreen_irq_stat_regs evergreen;
};
-#define RADEON_MAX_HPD_PINS 6
-#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_HDMI_BLOCKS 2
-
struct radeon_irq {
bool installed;
- bool sw_int[RADEON_NUM_RINGS];
- bool crtc_vblank_int[RADEON_MAX_CRTCS];
- bool pflip[RADEON_MAX_CRTCS];
+ bool sw_int;
+ /* FIXME: use a define for max crtcs rather than hardcoding it */
+ bool crtc_vblank_int[6];
+ bool pflip[6];
wait_queue_head_t vblank_queue;
- bool hpd[RADEON_MAX_HPD_PINS];
+ /* FIXME: use defines for max hpd/dacs */
+ bool hpd[6];
bool gui_idle;
bool gui_idle_acked;
wait_queue_head_t idle_queue;
- bool hdmi[RADEON_MAX_HDMI_BLOCKS];
+ /* FIXME: use defines for max HDMI blocks */
+ bool hdmi[2];
spinlock_t sw_lock;
- int sw_refcount[RADEON_NUM_RINGS];
+ int sw_refcount;
union radeon_irq_stat_regs stat_regs;
- spinlock_t pflip_lock[RADEON_MAX_CRTCS];
- int pflip_refcount[RADEON_MAX_CRTCS];
+ spinlock_t pflip_lock[6];
+ int pflip_refcount[6];
};
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
/*
- * CP & rings.
+ * CP & ring.
*/
-
struct radeon_ib {
- struct radeon_sa_bo sa_bo;
+ struct list_head list;
unsigned idx;
- uint32_t length_dw;
uint64_t gpu_addr;
- uint32_t *ptr;
struct radeon_fence *fence;
- unsigned vm_id;
+ uint32_t *ptr;
+ uint32_t length_dw;
+ bool free;
};
/*
@@ -639,22 +484,20 @@ struct radeon_ib {
* mutex protects scheduled_ibs, ready, alloc_bm
*/
struct radeon_ib_pool {
- struct radeon_mutex mutex;
- struct radeon_sa_manager sa_manager;
- struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
- bool ready;
- unsigned head_id;
+ struct mutex mutex;
+ struct radeon_bo *robj;
+ struct list_head bogus_ib;
+ struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
+ bool ready;
+ unsigned head_id;
};
-struct radeon_ring {
+struct radeon_cp {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
- unsigned rptr_offs;
- unsigned rptr_reg;
unsigned wptr;
unsigned wptr_old;
- unsigned wptr_reg;
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
@@ -663,61 +506,6 @@ struct radeon_ring {
uint32_t ptr_mask;
struct mutex mutex;
bool ready;
- u32 ptr_reg_shift;
- u32 ptr_reg_mask;
- u32 nop;
-};
-
-/*
- * VM
- */
-struct radeon_vm {
- struct list_head list;
- struct list_head va;
- int id;
- unsigned last_pfn;
- u64 pt_gpu_addr;
- u64 *pt;
- struct radeon_sa_bo sa_bo;
- struct mutex mutex;
- /* last fence for cs using this vm */
- struct radeon_fence *fence;
-};
-
-struct radeon_vm_funcs {
- int (*init)(struct radeon_device *rdev);
- void (*fini)(struct radeon_device *rdev);
- /* cs mutex must be locked for schedule_ib */
- int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
- void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
- void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
- uint32_t (*page_flags)(struct radeon_device *rdev,
- struct radeon_vm *vm,
- uint32_t flags);
- void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
- unsigned pfn, uint64_t addr, uint32_t flags);
-};
-
-struct radeon_vm_manager {
- struct list_head lru_vm;
- uint32_t use_bitmap;
- struct radeon_sa_manager sa_manager;
- uint32_t max_pfn;
- /* fields constant after init */
- const struct radeon_vm_funcs *funcs;
- /* number of VMIDs */
- unsigned nvm;
- /* vram base address for page table entry */
- u64 vram_base_offset;
- /* is vm enabled? */
- bool enabled;
-};
-
-/*
- * file private structure
- */
-struct radeon_fpriv {
- struct radeon_vm vm;
};
/*
@@ -727,7 +515,6 @@ struct r600_ih {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
- unsigned rptr_offs;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
@@ -737,30 +524,9 @@ struct r600_ih {
bool enabled;
};
-struct r600_blit_cp_primitives {
- void (*set_render_target)(struct radeon_device *rdev, int format,
- int w, int h, u64 gpu_addr);
- void (*cp_set_surface_sync)(struct radeon_device *rdev,
- u32 sync_type, u32 size,
- u64 mc_addr);
- void (*set_shaders)(struct radeon_device *rdev);
- void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
- void (*set_tex_resource)(struct radeon_device *rdev,
- int format, int w, int h, int pitch,
- u64 gpu_addr, u32 size);
- void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
- int x2, int y2);
- void (*draw_auto)(struct radeon_device *rdev);
- void (*set_default_state)(struct radeon_device *rdev);
-};
-
struct r600_blit {
struct mutex mutex;
struct radeon_bo *shader_obj;
- struct r600_blit_cp_primitives primitives;
- int max_dim;
- int ring_size_common;
- int ring_size_per_loop;
u64 shader_gpu_addr;
u32 vs_offset, ps_offset;
u32 state_offset;
@@ -769,30 +535,23 @@ struct r600_blit {
struct radeon_ib *vb_ib;
};
-void r600_blit_suspend(struct radeon_device *rdev);
-
-int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib **ib, unsigned size);
+int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
-int radeon_ib_pool_start(struct radeon_device *rdev);
-int radeon_ib_pool_suspend(struct radeon_device *rdev);
+int radeon_ib_test(struct radeon_device *rdev);
+extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
-int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
-int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
-int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
-int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
- unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
- u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
-void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_free_size(struct radeon_device *rdev);
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev);
+void radeon_ring_unlock_commit(struct radeon_device *rdev);
+void radeon_ring_unlock_undo(struct radeon_device *rdev);
+int radeon_ring_test(struct radeon_device *rdev);
+int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
+void radeon_ring_fini(struct radeon_device *rdev);
/*
@@ -809,12 +568,12 @@ struct radeon_cs_reloc {
struct radeon_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
- int kpage_idx[2];
- uint32_t *kpage[2];
+ int kpage_idx[2];
+ uint32_t *kpage[2];
uint32_t *kdata;
- void __user *user_ptr;
- int last_copied_page;
- int last_page_index;
+ void __user *user_ptr;
+ int last_copied_page;
+ int last_page_index;
};
struct radeon_cs_parser {
@@ -835,19 +594,40 @@ struct radeon_cs_parser {
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
- int chunk_flags_idx;
struct radeon_ib *ib;
void *track;
unsigned family;
- int parser_error;
- u32 cs_flags;
- u32 ring;
- s32 priority;
+ int parser_error;
};
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
-extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
+
+
+static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ u32 pg_idx, pg_offset;
+ u32 idx_value = 0;
+ int new_page;
+
+ pg_idx = (idx * 4) / PAGE_SIZE;
+ pg_offset = (idx * 4) % PAGE_SIZE;
+
+ if (ibc->kpage_idx[0] == pg_idx)
+ return ibc->kpage[0][pg_offset/4];
+ if (ibc->kpage_idx[1] == pg_idx)
+ return ibc->kpage[1][pg_offset/4];
+
+ new_page = radeon_cs_update_pages(p, pg_idx);
+ if (new_page < 0) {
+ p->parser_error = new_page;
+ return 0;
+ }
+
+ idx_value = ibc->kpage[new_page][pg_offset/4];
+ return idx_value;
+}
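/* Editor's note: a worked example of the paging math above, assuming
 * PAGE_SIZE == 4096: idx = 1500 addresses byte 6000 of the chunk, so
 * pg_idx = 6000 / 4096 = 1 and pg_offset = 6000 % 4096 = 1904; the
 * returned dword is kpage[n][1904 / 4] = kpage[n][476] for whichever
 * kpage slot n holds page 1.
 */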
struct radeon_cs_packet {
unsigned idx;
@@ -1015,7 +795,8 @@ struct radeon_pm_clock_info {
struct radeon_power_state {
enum radeon_pm_state_type type;
- struct radeon_pm_clock_info *clock_info;
+ /* XXX: use a define for num clock modes */
+ struct radeon_pm_clock_info clock_info[8];
/* number of valid clock modes in this power state */
int num_clock_modes;
struct radeon_pm_clock_info *default_clock_mode;
@@ -1085,34 +866,22 @@ struct radeon_pm {
struct device *int_hwmon_dev;
};
-int radeon_pm_get_type_index(struct radeon_device *rdev,
- enum radeon_pm_state_type ps_type,
- int instance);
/*
* Benchmarking
*/
-void radeon_benchmark(struct radeon_device *rdev, int test_number);
+void radeon_benchmark(struct radeon_device *rdev);
/*
* Testing
*/
void radeon_test_moves(struct radeon_device *rdev);
-void radeon_test_ring_sync(struct radeon_device *rdev,
- struct radeon_ring *cpA,
- struct radeon_ring *cpB);
-void radeon_test_syncing(struct radeon_device *rdev);
/*
* Debugfs
*/
-struct radeon_debugfs {
- struct drm_info_list *files;
- unsigned num_files;
-};
-
int radeon_debugfs_add_files(struct radeon_device *rdev,
struct drm_info_list *files,
unsigned nfiles);
@@ -1128,8 +897,53 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+ bool (*gpu_is_lockup)(struct radeon_device *rdev);
int (*asic_reset)(struct radeon_device *rdev);
+ void (*gart_tlb_flush)(struct radeon_device *rdev);
+ int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
+ int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
+ void (*cp_fini)(struct radeon_device *rdev);
+ void (*cp_disable)(struct radeon_device *rdev);
+ void (*cp_commit)(struct radeon_device *rdev);
+ void (*ring_start)(struct radeon_device *rdev);
+ int (*ring_test)(struct radeon_device *rdev);
+ void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+ int (*irq_set)(struct radeon_device *rdev);
+ int (*irq_process)(struct radeon_device *rdev);
+ u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
+ void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
+ int (*cs_parse)(struct radeon_cs_parser *p);
+ int (*copy_blit)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence *fence);
+ int (*copy_dma)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence *fence);
+ int (*copy)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence *fence);
+ uint32_t (*get_engine_clock)(struct radeon_device *rdev);
+ void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+ uint32_t (*get_memory_clock)(struct radeon_device *rdev);
+ void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
+ int (*get_pcie_lanes)(struct radeon_device *rdev);
+ void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
+ void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+ int (*set_surface_reg)(struct radeon_device *rdev, int reg,
+ uint32_t tiling_flags, uint32_t pitch,
+ uint32_t offset, uint32_t obj_size);
+ void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
+ void (*bandwidth_update)(struct radeon_device *rdev);
+ void (*hpd_init)(struct radeon_device *rdev);
+ void (*hpd_fini)(struct radeon_device *rdev);
+ bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+ void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
/* ioctl hw specific callback. Some hw might want to perform a special
 * operation on specific ioctls. For instance on wait idle some hw
 * might want to perform an HDP flush through MMIO as it seems that
@@ -1137,99 +951,17 @@ struct radeon_asic {
* through ring.
*/
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
- /* check if 3D engine is idle */
bool (*gui_idle)(struct radeon_device *rdev);
- /* wait for mc_idle */
- int (*mc_wait_for_idle)(struct radeon_device *rdev);
- /* gart */
- struct {
- void (*tlb_flush)(struct radeon_device *rdev);
- int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
- } gart;
- /* ring specific callbacks */
- struct {
- void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
- int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
- void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
- void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
- struct radeon_semaphore *semaphore, bool emit_wait);
- int (*cs_parse)(struct radeon_cs_parser *p);
- void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
- int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
- int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
- } ring[RADEON_NUM_RINGS];
- /* irqs */
- struct {
- int (*set)(struct radeon_device *rdev);
- int (*process)(struct radeon_device *rdev);
- } irq;
- /* displays */
- struct {
- /* display watermarks */
- void (*bandwidth_update)(struct radeon_device *rdev);
- /* get frame count */
- u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
- /* wait for vblank */
- void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
- } display;
- /* copy functions for bo handling */
- struct {
- int (*blit)(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence *fence);
- u32 blit_ring_index;
- int (*dma)(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence *fence);
- u32 dma_ring_index;
- /* method used for bo copy */
- int (*copy)(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence *fence);
- /* ring used for bo copies */
- u32 copy_ring_index;
- } copy;
- /* surfaces */
- struct {
- int (*set_reg)(struct radeon_device *rdev, int reg,
- uint32_t tiling_flags, uint32_t pitch,
- uint32_t offset, uint32_t obj_size);
- void (*clear_reg)(struct radeon_device *rdev, int reg);
- } surface;
- /* hotplug detect */
- struct {
- void (*init)(struct radeon_device *rdev);
- void (*fini)(struct radeon_device *rdev);
- bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
- void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
- } hpd;
/* power management */
- struct {
- void (*misc)(struct radeon_device *rdev);
- void (*prepare)(struct radeon_device *rdev);
- void (*finish)(struct radeon_device *rdev);
- void (*init_profile)(struct radeon_device *rdev);
- void (*get_dynpm_state)(struct radeon_device *rdev);
- uint32_t (*get_engine_clock)(struct radeon_device *rdev);
- void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
- uint32_t (*get_memory_clock)(struct radeon_device *rdev);
- void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
- int (*get_pcie_lanes)(struct radeon_device *rdev);
- void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
- void (*set_clock_gating)(struct radeon_device *rdev, int enable);
- } pm;
+ void (*pm_misc)(struct radeon_device *rdev);
+ void (*pm_prepare)(struct radeon_device *rdev);
+ void (*pm_finish)(struct radeon_device *rdev);
+ void (*pm_init_profile)(struct radeon_device *rdev);
+ void (*pm_get_dynpm_state)(struct radeon_device *rdev);
/* pageflipping */
- struct {
- void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
- u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
- void (*post_page_flip)(struct radeon_device *rdev, int crtc);
- } pflip;
+ void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+ u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ void (*post_page_flip)(struct radeon_device *rdev, int crtc);
};
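/* Editor's note: a sketch of how callers go through this function-pointer
 * table; the wrapper name is illustrative, and the real call sites use
 * the radeon_*() macros defined later in this header.
 */
static int example_dispatch_ring_test(struct radeon_device *rdev)
{
        /* one call site serves every family: r100 through evergreen
         * simply install different .ring_test hooks */
        return rdev->asic->ring_test(rdev);
}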
/*
@@ -1273,7 +1005,6 @@ struct r600_asic {
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
- unsigned backend_map;
struct r100_gpu_lockup lockup;
};
@@ -1299,7 +1030,6 @@ struct rv770_asic {
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
- unsigned backend_map;
struct r100_gpu_lockup lockup;
};
@@ -1326,7 +1056,6 @@ struct evergreen_asic {
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
- unsigned backend_map;
struct r100_gpu_lockup lockup;
};
@@ -1408,22 +1137,18 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-/* VRAM scratch page for HDP bug, default vram page */
-struct r600_vram_scratch {
+/* VRAM scratch page for HDP bug */
+struct r700_vram_scratch {
struct radeon_bo *robj;
volatile uint32_t *ptr;
- u64 gpu_addr;
};
-
/*
* Core structure, functions and helpers.
*/
@@ -1451,7 +1176,7 @@ struct radeon_device {
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
- void __iomem *rmmio;
+ void *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
radeon_rreg_t pll_rreg;
@@ -1468,17 +1193,18 @@ struct radeon_device {
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
struct radeon_mman mman;
- rwlock_t fence_lock;
- struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
- struct radeon_semaphore_driver semaphore_drv;
- struct radeon_ring ring[RADEON_NUM_RINGS];
+ struct radeon_fence_driver fence_drv;
+ struct radeon_cp cp;
+ /* cayman compute rings */
+ struct radeon_cp cp1;
+ struct radeon_cp cp2;
struct radeon_ib_pool ib_pool;
struct radeon_irq irq;
struct radeon_asic *asic;
struct radeon_gem gem;
struct radeon_pm pm;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
- struct radeon_mutex cs_mutex;
+ struct mutex cs_mutex;
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
bool gpu_lockup;
@@ -1492,7 +1218,7 @@ struct radeon_device {
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
const struct firmware *mc_fw; /* NI MC firmware */
struct r600_blit r600_blit;
- struct r600_vram_scratch vram_scratch;
+ struct r700_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
struct work_struct hotplug_work;
@@ -1515,11 +1241,6 @@ struct radeon_device {
struct drm_file *cmask_filp;
/* i2c buses */
struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
- /* debugfs */
- struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
- unsigned debugfs_count;
- /* virtual memory */
- struct radeon_vm_manager vm_manager;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1529,10 +1250,45 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
-void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ if (reg < rdev->rmmio_size)
+ return readl(((void __iomem *)rdev->rmmio) + reg);
+ else {
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ }
+}
+
+static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ if (reg < rdev->rmmio_size)
+ writel(v, ((void __iomem *)rdev->rmmio) + reg);
+ else {
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ }
+}
+
+static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
+{
+ if (reg < rdev->rio_mem_size)
+ return ioread32(rdev->rio_mem + reg);
+ else {
+ iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+ return ioread32(rdev->rio_mem + RADEON_MM_DATA);
+ }
+}
+
+static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+ if (reg < rdev->rio_mem_size)
+ iowrite32(v, rdev->rio_mem + reg);
+ else {
+ iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+ iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
+ }
+}
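/* Editor's note: a usage sketch for the accessors above; WREG32_P (used
 * earlier in this patch on AVIVO_LVTMA_CNTL) layers a read-modify-write
 * on top of them. The register argument here is illustrative.
 */
static void example_clear_bit2(struct radeon_device *rdev, u32 reg)
{
        u32 tmp = r100_mm_rreg(rdev, reg);      /* what RREG32(reg) expands to */
        tmp &= ~0x4;                            /* clear bit 2, as WREG32_P(reg, 0, ~0x4) would */
        r100_mm_wreg(rdev, reg, tmp);           /* what WREG32(reg, v) expands to */
}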
/*
* Cast helper
@@ -1542,10 +1298,10 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
/*
* Registers read & write functions.
*/
-#define RREG8(reg) readb((rdev->rmmio) + (reg))
-#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
-#define RREG16(reg) readw((rdev->rmmio) + (reg))
-#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
+#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
+#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
+#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
+#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
@@ -1655,18 +1411,19 @@ void radeon_atombios_fini(struct radeon_device *rdev);
/*
* RING helpers.
*/
-#if DRM_DEBUG_CODE == 0
-static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
{
- ring->ring[ring->wptr++] = v;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw--;
- ring->ring_free_dw--;
-}
-#else
-/* With debugging this is just too big to inline */
-void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
+#if DRM_DEBUG_CODE
+ if (rdev->cp.count_dw <= 0) {
+ DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+ }
#endif
+ rdev->cp.ring[rdev->cp.wptr++] = v;
+ rdev->cp.wptr &= rdev->cp.ptr_mask;
+ rdev->cp.count_dw--;
+ rdev->cp.ring_free_dw--;
+}
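/* Editor's note: the wrap above relies on the ring size being a power of
 * two: a 64 KiB ring holds 64*1024/4 = 16384 dwords, so ptr_mask =
 * 0x3fff and a wptr of 0x4000 masks back to 0. count_dw tracks the
 * dwords reserved by radeon_ring_lock(), ring_free_dw the overall free
 * space.
 */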
+
/*
* ASICs macro.
@@ -1675,58 +1432,53 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
-#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
+#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
+#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
-#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
-#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
-#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
-#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
-#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
-#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
-#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
-#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
-#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
-#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
-#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
-#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
-#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
-#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
-#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
-#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
-#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
-#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
-#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
-#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
-#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
-#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
-#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
-#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
-#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
-#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
-#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
-#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
-#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
-#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
-#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
+#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
+#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
+#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
+#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
+#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
+#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
+#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
+#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
+#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
+#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
+#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
+#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
+#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
+#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
+#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
+#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
+#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
+#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
+#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
+#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
+#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
+#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
+#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
-#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
-#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
-#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
-#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
-#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
-#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc))
-#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base))
-#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc))
-#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc))
-#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev))
+#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
+#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
+#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
+#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
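/* Editor's note: these macros only hide the vtable indirection, e.g.
 * radeon_ring_test(rdev) expands to (rdev)->asic->ring_test((rdev)), so
 * reassigning hooks in the asic struct (as radeon_agp_disable() does
 * below with gart_tlb_flush/gart_set_page) retargets every call site at
 * once.
 */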
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
+extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+extern void radeon_gart_restore(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
@@ -1750,49 +1502,6 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
/*
- * vm
- */
-int radeon_vm_manager_init(struct radeon_device *rdev);
-void radeon_vm_manager_fini(struct radeon_device *rdev);
-int radeon_vm_manager_start(struct radeon_device *rdev);
-int radeon_vm_manager_suspend(struct radeon_device *rdev);
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
-void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
-void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
-int radeon_vm_bo_update_pte(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- struct ttm_mem_reg *mem);
-void radeon_vm_bo_invalidate(struct radeon_device *rdev,
- struct radeon_bo *bo);
-int radeon_vm_bo_add(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- uint64_t offset,
- uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo);
-
-
-/*
- * R600 vram scratch functions
- */
-int r600_vram_scratch_init(struct radeon_device *rdev);
-void r600_vram_scratch_fini(struct radeon_device *rdev);
-
-/*
- * r600 cs checking helper
- */
-unsigned r600_mip_minify(unsigned size, unsigned level);
-bool r600_fmt_is_valid_color(u32 format);
-bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
-int r600_fmt_get_blocksize(u32 format);
-int r600_fmt_get_nblocksx(u32 format, u32 w);
-int r600_fmt_get_nblocksy(u32 format, u32 h);
-
-/*
* r600 functions used by radeon_encoder.c
*/
extern void r600_hdmi_enable(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3516a60..3f6636b 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -35,8 +35,7 @@ static int radeon_atif_call(acpi_handle handle)
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
- acpi_format_exception(status));
+ printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
kfree(buffer.pointer);
return 1;
}
@@ -51,13 +50,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
acpi_handle handle;
int ret;
- /* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
-
/* No need to proceed if we're sure that ATIF is not supported */
- if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
return 0;
+ /* Get the device handle */
+ handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+
/* Call the ATIF method */
ret = radeon_atif_call(handle);
if (ret)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 479c89e..b244962 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -114,13 +114,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
rdev->family == CHIP_R423) {
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
- rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+ rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
- rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
- rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+ rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart_set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
@@ -136,70 +136,44 @@ static struct radeon_asic r100_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r100_fence_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = NULL,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
- .mc_wait_for_idle = &r100_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &r100_pci_gart_tlb_flush,
- .set_page = &r100_pci_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r100_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r100_cs_parse,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- }
- },
- .irq = {
- .set = &r100_irq_set,
- .process = &r100_irq_process,
- },
- .display = {
- .bandwidth_update = &r100_bandwidth_update,
- .get_vblank_counter = &r100_get_vblank_counter,
- .wait_for_vblank = &r100_wait_for_vblank,
- },
- .copy = {
- .blit = &r100_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r100_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r100_set_surface_reg,
- .clear_reg = r100_clear_surface_reg,
- },
- .hpd = {
- .init = &r100_hpd_init,
- .fini = &r100_hpd_fini,
- .sense = &r100_hpd_sense,
- .set_polarity = &r100_hpd_set_polarity,
- },
- .pm = {
- .misc = &r100_pm_misc,
- .prepare = &r100_pm_prepare,
- .finish = &r100_pm_finish,
- .init_profile = &r100_pm_init_profile,
- .get_dynpm_state = &r100_pm_get_dynpm_state,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- },
- .pflip = {
- .pre_page_flip = &r100_pre_page_flip,
- .page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
- },
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r200_asic = {
@@ -210,70 +184,43 @@ static struct radeon_asic r200_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r100_fence_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
- .mc_wait_for_idle = &r100_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &r100_pci_gart_tlb_flush,
- .set_page = &r100_pci_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r100_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r100_cs_parse,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- }
- },
- .irq = {
- .set = &r100_irq_set,
- .process = &r100_irq_process,
- },
- .display = {
- .bandwidth_update = &r100_bandwidth_update,
- .get_vblank_counter = &r100_get_vblank_counter,
- .wait_for_vblank = &r100_wait_for_vblank,
- },
- .copy = {
- .blit = &r100_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &r200_copy_dma,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r100_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r100_set_surface_reg,
- .clear_reg = r100_clear_surface_reg,
- },
- .hpd = {
- .init = &r100_hpd_init,
- .fini = &r100_hpd_fini,
- .sense = &r100_hpd_sense,
- .set_polarity = &r100_hpd_set_polarity,
- },
- .pm = {
- .misc = &r100_pm_misc,
- .prepare = &r100_pm_prepare,
- .finish = &r100_pm_finish,
- .init_profile = &r100_pm_init_profile,
- .get_dynpm_state = &r100_pm_get_dynpm_state,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- },
- .pflip = {
- .pre_page_flip = &r100_pre_page_flip,
- .page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
- },
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r300_asic = {
@@ -284,70 +231,44 @@ static struct radeon_asic r300_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
- .mc_wait_for_idle = &r300_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &r100_pci_gart_tlb_flush,
- .set_page = &r100_pci_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- }
- },
- .irq = {
- .set = &r100_irq_set,
- .process = &r100_irq_process,
- },
- .display = {
- .bandwidth_update = &r100_bandwidth_update,
- .get_vblank_counter = &r100_get_vblank_counter,
- .wait_for_vblank = &r100_wait_for_vblank,
- },
- .copy = {
- .blit = &r100_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &r200_copy_dma,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r100_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r100_set_surface_reg,
- .clear_reg = r100_clear_surface_reg,
- },
- .hpd = {
- .init = &r100_hpd_init,
- .fini = &r100_hpd_fini,
- .sense = &r100_hpd_sense,
- .set_polarity = &r100_hpd_set_polarity,
- },
- .pm = {
- .misc = &r100_pm_misc,
- .prepare = &r100_pm_prepare,
- .finish = &r100_pm_finish,
- .init_profile = &r100_pm_init_profile,
- .get_dynpm_state = &r100_pm_get_dynpm_state,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- },
- .pflip = {
- .pre_page_flip = &r100_pre_page_flip,
- .page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
- },
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r300_asic_pcie = {
@@ -358,70 +279,43 @@ static struct radeon_asic r300_asic_pcie = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
- .mc_wait_for_idle = &r300_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &rv370_pcie_gart_tlb_flush,
- .set_page = &rv370_pcie_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- }
- },
- .irq = {
- .set = &r100_irq_set,
- .process = &r100_irq_process,
- },
- .display = {
- .bandwidth_update = &r100_bandwidth_update,
- .get_vblank_counter = &r100_get_vblank_counter,
- .wait_for_vblank = &r100_wait_for_vblank,
- },
- .copy = {
- .blit = &r100_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &r200_copy_dma,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r100_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r100_set_surface_reg,
- .clear_reg = r100_clear_surface_reg,
- },
- .hpd = {
- .init = &r100_hpd_init,
- .fini = &r100_hpd_fini,
- .sense = &r100_hpd_sense,
- .set_polarity = &r100_hpd_set_polarity,
- },
- .pm = {
- .misc = &r100_pm_misc,
- .prepare = &r100_pm_prepare,
- .finish = &r100_pm_finish,
- .init_profile = &r100_pm_init_profile,
- .get_dynpm_state = &r100_pm_get_dynpm_state,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- },
- .pflip = {
- .pre_page_flip = &r100_pre_page_flip,
- .page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
- },
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r420_asic = {
@@ -432,70 +326,44 @@ static struct radeon_asic r420_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
- .mc_wait_for_idle = &r300_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &rv370_pcie_gart_tlb_flush,
- .set_page = &rv370_pcie_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- }
- },
- .irq = {
- .set = &r100_irq_set,
- .process = &r100_irq_process,
- },
- .display = {
- .bandwidth_update = &r100_bandwidth_update,
- .get_vblank_counter = &r100_get_vblank_counter,
- .wait_for_vblank = &r100_wait_for_vblank,
- },
- .copy = {
- .blit = &r100_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &r200_copy_dma,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r100_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r100_set_surface_reg,
- .clear_reg = r100_clear_surface_reg,
- },
- .hpd = {
- .init = &r100_hpd_init,
- .fini = &r100_hpd_fini,
- .sense = &r100_hpd_sense,
- .set_polarity = &r100_hpd_set_polarity,
- },
- .pm = {
- .misc = &r100_pm_misc,
- .prepare = &r100_pm_prepare,
- .finish = &r100_pm_finish,
- .init_profile = &r420_pm_init_profile,
- .get_dynpm_state = &r100_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- },
- .pflip = {
- .pre_page_flip = &r100_pre_page_flip,
- .page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
- },
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic rs400_asic = {
@@ -506,70 +374,44 @@ static struct radeon_asic rs400_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
+ .gart_tlb_flush = &rs400_gart_tlb_flush,
+ .gart_set_page = &rs400_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
- .mc_wait_for_idle = &rs400_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &rs400_gart_tlb_flush,
- .set_page = &rs400_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- }
- },
- .irq = {
- .set = &r100_irq_set,
- .process = &r100_irq_process,
- },
- .display = {
- .bandwidth_update = &r100_bandwidth_update,
- .get_vblank_counter = &r100_get_vblank_counter,
- .wait_for_vblank = &r100_wait_for_vblank,
- },
- .copy = {
- .blit = &r100_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &r200_copy_dma,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r100_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r100_set_surface_reg,
- .clear_reg = r100_clear_surface_reg,
- },
- .hpd = {
- .init = &r100_hpd_init,
- .fini = &r100_hpd_fini,
- .sense = &r100_hpd_sense,
- .set_polarity = &r100_hpd_set_polarity,
- },
- .pm = {
- .misc = &r100_pm_misc,
- .prepare = &r100_pm_prepare,
- .finish = &r100_pm_finish,
- .init_profile = &r100_pm_init_profile,
- .get_dynpm_state = &r100_pm_get_dynpm_state,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- },
- .pflip = {
- .pre_page_flip = &r100_pre_page_flip,
- .page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
- },
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic rs600_asic = {
@@ -580,70 +422,44 @@ static struct radeon_asic rs600_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
+ .gart_tlb_flush = &rs600_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rs600_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
- .mc_wait_for_idle = &rs600_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &rs600_gart_tlb_flush,
- .set_page = &rs600_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- }
- },
- .irq = {
- .set = &rs600_irq_set,
- .process = &rs600_irq_process,
- },
- .display = {
- .bandwidth_update = &rs600_bandwidth_update,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .wait_for_vblank = &avivo_wait_for_vblank,
- },
- .copy = {
- .blit = &r100_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &r200_copy_dma,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r100_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r100_set_surface_reg,
- .clear_reg = r100_clear_surface_reg,
- },
- .hpd = {
- .init = &rs600_hpd_init,
- .fini = &rs600_hpd_fini,
- .sense = &rs600_hpd_sense,
- .set_polarity = &rs600_hpd_set_polarity,
- },
- .pm = {
- .misc = &rs600_pm_misc,
- .prepare = &rs600_pm_prepare,
- .finish = &rs600_pm_finish,
- .init_profile = &r420_pm_init_profile,
- .get_dynpm_state = &r100_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- },
- .pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
- },
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rs690_asic = {
@@ -654,70 +470,44 @@ static struct radeon_asic rs690_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
+ .gart_tlb_flush = &rs400_gart_tlb_flush,
+ .gart_set_page = &rs400_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r200_copy_dma,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rs690_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
- .mc_wait_for_idle = &rs690_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &rs400_gart_tlb_flush,
- .set_page = &rs400_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- }
- },
- .irq = {
- .set = &rs600_irq_set,
- .process = &rs600_irq_process,
- },
- .display = {
- .get_vblank_counter = &rs600_get_vblank_counter,
- .bandwidth_update = &rs690_bandwidth_update,
- .wait_for_vblank = &avivo_wait_for_vblank,
- },
- .copy = {
- .blit = &r100_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &r200_copy_dma,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r200_copy_dma,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r100_set_surface_reg,
- .clear_reg = r100_clear_surface_reg,
- },
- .hpd = {
- .init = &rs600_hpd_init,
- .fini = &rs600_hpd_fini,
- .sense = &rs600_hpd_sense,
- .set_polarity = &rs600_hpd_set_polarity,
- },
- .pm = {
- .misc = &rs600_pm_misc,
- .prepare = &rs600_pm_prepare,
- .finish = &rs600_pm_finish,
- .init_profile = &r420_pm_init_profile,
- .get_dynpm_state = &r100_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- },
- .pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
- },
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rv515_asic = {
@@ -728,70 +518,44 @@ static struct radeon_asic rv515_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
- .mc_wait_for_idle = &rv515_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &rv370_pcie_gart_tlb_flush,
- .set_page = &rv370_pcie_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- }
- },
- .irq = {
- .set = &rs600_irq_set,
- .process = &rs600_irq_process,
- },
- .display = {
- .get_vblank_counter = &rs600_get_vblank_counter,
- .bandwidth_update = &rv515_bandwidth_update,
- .wait_for_vblank = &avivo_wait_for_vblank,
- },
- .copy = {
- .blit = &r100_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &r200_copy_dma,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r100_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r100_set_surface_reg,
- .clear_reg = r100_clear_surface_reg,
- },
- .hpd = {
- .init = &rs600_hpd_init,
- .fini = &rs600_hpd_fini,
- .sense = &rs600_hpd_sense,
- .set_polarity = &rs600_hpd_set_polarity,
- },
- .pm = {
- .misc = &rs600_pm_misc,
- .prepare = &rs600_pm_prepare,
- .finish = &rs600_pm_finish,
- .init_profile = &r420_pm_init_profile,
- .get_dynpm_state = &r100_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- },
- .pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
- },
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic r520_asic = {
@@ -802,70 +566,44 @@ static struct radeon_asic r520_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
- .mc_wait_for_idle = &r520_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &rv370_pcie_gart_tlb_flush,
- .set_page = &rv370_pcie_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- }
- },
- .irq = {
- .set = &rs600_irq_set,
- .process = &rs600_irq_process,
- },
- .display = {
- .bandwidth_update = &rv515_bandwidth_update,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .wait_for_vblank = &avivo_wait_for_vblank,
- },
- .copy = {
- .blit = &r100_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &r200_copy_dma,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r100_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r100_set_surface_reg,
- .clear_reg = r100_clear_surface_reg,
- },
- .hpd = {
- .init = &rs600_hpd_init,
- .fini = &rs600_hpd_fini,
- .sense = &rs600_hpd_sense,
- .set_polarity = &rs600_hpd_set_polarity,
- },
- .pm = {
- .misc = &rs600_pm_misc,
- .prepare = &rs600_pm_prepare,
- .finish = &rs600_pm_finish,
- .init_profile = &r420_pm_init_profile,
- .get_dynpm_state = &r100_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- },
- .pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
- },
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic r600_asic = {
@@ -873,72 +611,46 @@ static struct radeon_asic r600_asic = {
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
+ .cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
.gpu_is_lockup = &r600_gpu_is_lockup,
.asic_reset = &r600_asic_reset,
+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &r600_irq_set,
+ .irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .copy_blit = &r600_copy_blit,
+ .copy_dma = &r600_copy_blit,
+ .copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
- .mc_wait_for_idle = &r600_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &r600_pcie_gart_tlb_flush,
- .set_page = &rs600_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- }
- },
- .irq = {
- .set = &r600_irq_set,
- .process = &r600_irq_process,
- },
- .display = {
- .bandwidth_update = &rv515_bandwidth_update,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .wait_for_vblank = &avivo_wait_for_vblank,
- },
- .copy = {
- .blit = &r600_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r600_set_surface_reg,
- .clear_reg = r600_clear_surface_reg,
- },
- .hpd = {
- .init = &r600_hpd_init,
- .fini = &r600_hpd_fini,
- .sense = &r600_hpd_sense,
- .set_polarity = &r600_hpd_set_polarity,
- },
- .pm = {
- .misc = &r600_pm_misc,
- .prepare = &rs600_pm_prepare,
- .finish = &rs600_pm_finish,
- .init_profile = &r600_pm_init_profile,
- .get_dynpm_state = &r600_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &r600_get_pcie_lanes,
- .set_pcie_lanes = &r600_set_pcie_lanes,
- .set_clock_gating = NULL,
- },
- .pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
- },
+ .pm_misc = &r600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rs780_asic = {
@@ -946,72 +658,46 @@ static struct radeon_asic rs780_asic = {
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
+ .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &r600_irq_set,
+ .irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .copy_blit = &r600_copy_blit,
+ .copy_dma = &r600_copy_blit,
+ .copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = NULL,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &rs690_bandwidth_update,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
- .mc_wait_for_idle = &r600_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &r600_pcie_gart_tlb_flush,
- .set_page = &rs600_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- }
- },
- .irq = {
- .set = &r600_irq_set,
- .process = &r600_irq_process,
- },
- .display = {
- .bandwidth_update = &rs690_bandwidth_update,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .wait_for_vblank = &avivo_wait_for_vblank,
- },
- .copy = {
- .blit = &r600_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r600_set_surface_reg,
- .clear_reg = r600_clear_surface_reg,
- },
- .hpd = {
- .init = &r600_hpd_init,
- .fini = &r600_hpd_fini,
- .sense = &r600_hpd_sense,
- .set_polarity = &r600_hpd_set_polarity,
- },
- .pm = {
- .misc = &r600_pm_misc,
- .prepare = &rs600_pm_prepare,
- .finish = &rs600_pm_finish,
- .init_profile = &rs780_pm_init_profile,
- .get_dynpm_state = &r600_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = NULL,
- .set_memory_clock = NULL,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = NULL,
- },
- .pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
- },
+ .pm_misc = &r600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &rs780_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rv770_asic = {
@@ -1019,72 +705,46 @@ static struct radeon_asic rv770_asic = {
.fini = &rv770_fini,
.suspend = &rv770_suspend,
.resume = &rv770_resume,
+ .cp_commit = &r600_cp_commit,
.asic_reset = &r600_asic_reset,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &r600_irq_set,
+ .irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .copy_blit = &r600_copy_blit,
+ .copy_dma = &r600_copy_blit,
+ .copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
- .mc_wait_for_idle = &r600_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &r600_pcie_gart_tlb_flush,
- .set_page = &rs600_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- }
- },
- .irq = {
- .set = &r600_irq_set,
- .process = &r600_irq_process,
- },
- .display = {
- .bandwidth_update = &rv515_bandwidth_update,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .wait_for_vblank = &avivo_wait_for_vblank,
- },
- .copy = {
- .blit = &r600_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r600_set_surface_reg,
- .clear_reg = r600_clear_surface_reg,
- },
- .hpd = {
- .init = &r600_hpd_init,
- .fini = &r600_hpd_fini,
- .sense = &r600_hpd_sense,
- .set_polarity = &r600_hpd_set_polarity,
- },
- .pm = {
- .misc = &rv770_pm_misc,
- .prepare = &rs600_pm_prepare,
- .finish = &rs600_pm_finish,
- .init_profile = &r600_pm_init_profile,
- .get_dynpm_state = &r600_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &r600_get_pcie_lanes,
- .set_pcie_lanes = &r600_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- },
- .pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rv770_page_flip,
- .post_page_flip = &rs600_post_page_flip,
- },
+ .pm_misc = &rv770_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rv770_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic evergreen_asic = {
@@ -1092,72 +752,46 @@ static struct radeon_asic evergreen_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
+ .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &evergreen_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
- .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &evergreen_pcie_gart_tlb_flush,
- .set_page = &rs600_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- }
- },
- .irq = {
- .set = &evergreen_irq_set,
- .process = &evergreen_irq_process,
- },
- .display = {
- .bandwidth_update = &evergreen_bandwidth_update,
- .get_vblank_counter = &evergreen_get_vblank_counter,
- .wait_for_vblank = &dce4_wait_for_vblank,
- },
- .copy = {
- .blit = &r600_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r600_set_surface_reg,
- .clear_reg = r600_clear_surface_reg,
- },
- .hpd = {
- .init = &evergreen_hpd_init,
- .fini = &evergreen_hpd_fini,
- .sense = &evergreen_hpd_sense,
- .set_polarity = &evergreen_hpd_set_polarity,
- },
- .pm = {
- .misc = &evergreen_pm_misc,
- .prepare = &evergreen_pm_prepare,
- .finish = &evergreen_pm_finish,
- .init_profile = &r600_pm_init_profile,
- .get_dynpm_state = &r600_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &r600_get_pcie_lanes,
- .set_pcie_lanes = &r600_set_pcie_lanes,
- .set_clock_gating = NULL,
- },
- .pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
- .page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
- },
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
};
static struct radeon_asic sumo_asic = {
@@ -1165,72 +799,46 @@ static struct radeon_asic sumo_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
+ .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &evergreen_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = NULL,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
- .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &evergreen_pcie_gart_tlb_flush,
- .set_page = &rs600_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- },
- },
- .irq = {
- .set = &evergreen_irq_set,
- .process = &evergreen_irq_process,
- },
- .display = {
- .bandwidth_update = &evergreen_bandwidth_update,
- .get_vblank_counter = &evergreen_get_vblank_counter,
- .wait_for_vblank = &dce4_wait_for_vblank,
- },
- .copy = {
- .blit = &r600_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r600_set_surface_reg,
- .clear_reg = r600_clear_surface_reg,
- },
- .hpd = {
- .init = &evergreen_hpd_init,
- .fini = &evergreen_hpd_fini,
- .sense = &evergreen_hpd_sense,
- .set_polarity = &evergreen_hpd_set_polarity,
- },
- .pm = {
- .misc = &evergreen_pm_misc,
- .prepare = &evergreen_pm_prepare,
- .finish = &evergreen_pm_finish,
- .init_profile = &sumo_pm_init_profile,
- .get_dynpm_state = &r600_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = NULL,
- .set_memory_clock = NULL,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = NULL,
- },
- .pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
- .page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
- },
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &rs780_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
};
static struct radeon_asic btc_asic = {
@@ -1238,82 +846,46 @@ static struct radeon_asic btc_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
+ .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &evergreen_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
- .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &evergreen_pcie_gart_tlb_flush,
- .set_page = &rs600_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- }
- },
- .irq = {
- .set = &evergreen_irq_set,
- .process = &evergreen_irq_process,
- },
- .display = {
- .bandwidth_update = &evergreen_bandwidth_update,
- .get_vblank_counter = &evergreen_get_vblank_counter,
- .wait_for_vblank = &dce4_wait_for_vblank,
- },
- .copy = {
- .blit = &r600_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r600_set_surface_reg,
- .clear_reg = r600_clear_surface_reg,
- },
- .hpd = {
- .init = &evergreen_hpd_init,
- .fini = &evergreen_hpd_fini,
- .sense = &evergreen_hpd_sense,
- .set_polarity = &evergreen_hpd_set_polarity,
- },
- .pm = {
- .misc = &evergreen_pm_misc,
- .prepare = &evergreen_pm_prepare,
- .finish = &evergreen_pm_finish,
- .init_profile = &r600_pm_init_profile,
- .get_dynpm_state = &r600_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = NULL,
- },
- .pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
- .page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
- },
-};
-
-static const struct radeon_vm_funcs cayman_vm_funcs = {
- .init = &cayman_vm_init,
- .fini = &cayman_vm_fini,
- .bind = &cayman_vm_bind,
- .unbind = &cayman_vm_unbind,
- .tlb_flush = &cayman_vm_tlb_flush,
- .page_flags = &cayman_vm_page_flags,
- .set_page = &cayman_vm_set_page,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
};
static struct radeon_asic cayman_asic = {
@@ -1321,91 +893,46 @@ static struct radeon_asic cayman_asic = {
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
+ .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &evergreen_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
- .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
- .gart = {
- .tlb_flush = &cayman_pcie_gart_tlb_flush,
- .set_page = &rs600_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- }
- },
- .irq = {
- .set = &evergreen_irq_set,
- .process = &evergreen_irq_process,
- },
- .display = {
- .bandwidth_update = &evergreen_bandwidth_update,
- .get_vblank_counter = &evergreen_get_vblank_counter,
- .wait_for_vblank = &dce4_wait_for_vblank,
- },
- .copy = {
- .blit = &r600_copy_blit,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r600_set_surface_reg,
- .clear_reg = r600_clear_surface_reg,
- },
- .hpd = {
- .init = &evergreen_hpd_init,
- .fini = &evergreen_hpd_fini,
- .sense = &evergreen_hpd_sense,
- .set_polarity = &evergreen_hpd_set_polarity,
- },
- .pm = {
- .misc = &evergreen_pm_misc,
- .prepare = &evergreen_pm_prepare,
- .finish = &evergreen_pm_finish,
- .init_profile = &r600_pm_init_profile,
- .get_dynpm_state = &r600_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = NULL,
- },
- .pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
- .page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
- },
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
};
int radeon_asic_init(struct radeon_device *rdev)
@@ -1447,10 +974,10 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic = &r420_asic;
/* handle macs */
if (rdev->bios == NULL) {
- rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock;
- rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
- rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
- rdev->asic->pm.set_memory_clock = NULL;
+ rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
+ rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
+ rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
+ rdev->asic->set_memory_clock = NULL;
}
break;
case CHIP_RS400:
@@ -1523,7 +1050,6 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic = &cayman_asic;
/* set num crtcs */
rdev->num_crtc = 6;
- rdev->vm_manager.funcs = &cayman_vm_funcs;
break;
default:
/* FIXME: not supported yet */
@@ -1531,8 +1057,8 @@ int radeon_asic_init(struct radeon_device *rdev)
}
if (rdev->flags & RADEON_IS_IGP) {
- rdev->asic->pm.get_memory_clock = NULL;
- rdev->asic->pm.set_memory_clock = NULL;
+ rdev->asic->get_memory_clock = NULL;
+ rdev->asic->set_memory_clock = NULL;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index dec17b3..4d81e96 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -58,20 +58,17 @@ void r100_fini(struct radeon_device *rdev);
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
-bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool r100_gpu_is_lockup(struct radeon_device *rdev);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+void r100_cp_commit(struct radeon_device *rdev);
+void r100_ring_start(struct radeon_device *rdev);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *cp,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -86,7 +83,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r100_ring_test(struct radeon_device *rdev);
void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -104,12 +101,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
- struct radeon_ring *cp);
+ struct radeon_cp *cp);
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
struct r100_gpu_lockup *lockup,
- struct radeon_ring *cp);
+ struct radeon_cp *cp);
void r100_ib_fini(struct radeon_device *rdev);
-int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r100_ib_init(struct radeon_device *rdev);
void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
@@ -139,8 +136,6 @@ extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
-extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
-extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
/*
* r200,rv250,rs300,rv280
@@ -159,9 +154,9 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
extern int r300_asic_reset(struct radeon_device *rdev);
-extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
@@ -178,7 +173,6 @@ extern int rv370_pcie_gart_init(struct radeon_device *rdev);
extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
-extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
/*
* r420,r423,rv410
@@ -209,7 +203,6 @@ int rs400_gart_enable(struct radeon_device *rdev);
void rs400_gart_adjust_size(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_fini(struct radeon_device *rdev);
-extern int rs400_mc_wait_for_idle(struct radeon_device *rdev);
/*
* rs600.
@@ -240,8 +233,7 @@ extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
void rs600_set_safe_registers(struct radeon_device *rdev);
-extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
-extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+
/*
* rs690,rs740
@@ -256,7 +248,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev);
void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode1,
struct drm_display_mode *mode2);
-extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
/*
* rv515
@@ -270,7 +261,7 @@ int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+void rv515_ring_start(struct radeon_device *rdev);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
@@ -281,14 +272,13 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
void rv515_clock_startup(struct radeon_device *rdev);
void rv515_debugfs(struct radeon_device *rdev);
-int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+
/*
* r520,rv530,rv560,rv570,r580
*/
int r520_init(struct radeon_device *rdev);
int r520_resume(struct radeon_device *rdev);
-int r520_mc_wait_for_idle(struct radeon_device *rdev);
/*
* r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
@@ -300,25 +290,22 @@ int r600_resume(struct radeon_device *rdev);
void r600_vga_set_state(struct radeon_device *rdev, bool state);
int r600_wb_init(struct radeon_device *rdev);
void r600_wb_fini(struct radeon_device *rdev);
+void r600_cp_commit(struct radeon_device *rdev);
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *cp,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
-bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool r600_gpu_is_lockup(struct radeon_device *rdev);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
-int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_ib_test(struct radeon_device *rdev);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence *fence);
@@ -338,7 +325,7 @@ extern int r600_get_pcie_lanes(struct radeon_device *rdev);
bool r600_card_posted(struct radeon_device *rdev);
void r600_cp_stop(struct radeon_device *rdev);
int r600_cp_start(struct radeon_device *rdev);
-void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
+void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
int r600_cp_resume(struct radeon_device *rdev);
void r600_cp_fini(struct radeon_device *rdev);
int r600_count_pipe_bits(uint32_t val);
@@ -374,12 +361,11 @@ void r600_hdmi_init(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages);
+int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages);
-int r600_mc_wait_for_idle(struct radeon_device *rdev);
+ int size_bytes);
/*
* rv770,rv730,rv710,rv740
@@ -407,10 +393,13 @@ int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages, struct radeon_fence *fence);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -423,38 +412,28 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
-extern void sumo_pm_init_profile(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
-extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_blit_init(struct radeon_device *rdev);
-int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_blit_fini(struct radeon_device *rdev);
+/* evergreen blit */
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes);
/*
* cayman
*/
-void cayman_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
-bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool cayman_gpu_is_lockup(struct radeon_device *rdev);
int cayman_asic_reset(struct radeon_device *rdev);
-void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int cayman_vm_init(struct radeon_device *rdev);
-void cayman_vm_fini(struct radeon_device *rdev);
-int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
-void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
-void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
- struct radeon_vm *vm,
- uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
- unsigned pfn, uint64_t addr, uint32_t flags);
-int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 97ab2ec..f9d49e3 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -62,88 +62,7 @@ union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
};
-static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
- ATOM_GPIO_I2C_ASSIGMENT *gpio,
- u8 index)
-{
- /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
- if ((rdev->family == CHIP_R420) ||
- (rdev->family == CHIP_R423) ||
- (rdev->family == CHIP_RV410)) {
- if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
- gpio->ucClkMaskShift = 0x19;
- gpio->ucDataMaskShift = 0x18;
- }
- }
-
- /* some evergreen boards have bad data for this entry */
- if (ASIC_IS_DCE4(rdev)) {
- if ((index == 7) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
- (gpio->sucI2cId.ucAccess == 0)) {
- gpio->sucI2cId.ucAccess = 0x97;
- gpio->ucDataMaskShift = 8;
- gpio->ucDataEnShift = 8;
- gpio->ucDataY_Shift = 8;
- gpio->ucDataA_Shift = 8;
- }
- }
-
- /* some DCE3 boards have bad data for this entry */
- if (ASIC_IS_DCE3(rdev)) {
- if ((index == 4) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
- (gpio->sucI2cId.ucAccess == 0x94))
- gpio->sucI2cId.ucAccess = 0x14;
- }
-}
-
-static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
-{
- struct radeon_i2c_bus_rec i2c;
-
- memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
-
- i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
- i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
- i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
- i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
- i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
- i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
- i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
- i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
- i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
- i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
- i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
- i2c.en_data_mask = (1 << gpio->ucDataEnShift);
- i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
- i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
- i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
- i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
- if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
- i2c.hw_capable = true;
- else
- i2c.hw_capable = false;
-
- if (gpio->sucI2cId.ucAccess == 0xa0)
- i2c.mm_i2c = true;
- else
- i2c.mm_i2c = false;
-
- i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
- if (i2c.mask_clk_reg)
- i2c.valid = true;
- else
- i2c.valid = false;
-
- return i2c;
-}
-
-static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
uint8_t id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
@@ -166,10 +85,71 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
- radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
+ /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+ if ((rdev->family == CHIP_R420) ||
+ (rdev->family == CHIP_R423) ||
+ (rdev->family == CHIP_RV410)) {
+ if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+ gpio->ucClkMaskShift = 0x19;
+ gpio->ucDataMaskShift = 0x18;
+ }
+ }
+
+ /* some evergreen boards have bad data for this entry */
+ if (ASIC_IS_DCE4(rdev)) {
+ if ((i == 7) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+ (gpio->sucI2cId.ucAccess == 0)) {
+ gpio->sucI2cId.ucAccess = 0x97;
+ gpio->ucDataMaskShift = 8;
+ gpio->ucDataEnShift = 8;
+ gpio->ucDataY_Shift = 8;
+ gpio->ucDataA_Shift = 8;
+ }
+ }
+
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((i == 4) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
if (gpio->sucI2cId.ucAccess == id) {
- i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
+
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ if (i2c.mask_clk_reg)
+ i2c.valid = true;
break;
}
}
@@ -189,6 +169,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
int i, num_indices;
char stmp[32];
+ memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
@@ -197,12 +179,72 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
+ i2c.valid = false;
+
+ /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+ if ((rdev->family == CHIP_R420) ||
+ (rdev->family == CHIP_R423) ||
+ (rdev->family == CHIP_RV410)) {
+ if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+ gpio->ucClkMaskShift = 0x19;
+ gpio->ucDataMaskShift = 0x18;
+ }
+ }
+
+ /* some evergreen boards have bad data for this entry */
+ if (ASIC_IS_DCE4(rdev)) {
+ if ((i == 7) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+ (gpio->sucI2cId.ucAccess == 0)) {
+ gpio->sucI2cId.ucAccess = 0x97;
+ gpio->ucDataMaskShift = 8;
+ gpio->ucDataEnShift = 8;
+ gpio->ucDataY_Shift = 8;
+ gpio->ucDataA_Shift = 8;
+ }
+ }
+
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((i == 4) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
- radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
- i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
- if (i2c.valid) {
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ if (i2c.mask_clk_reg) {
+ i2c.valid = true;
sprintf(stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
@@ -210,7 +252,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
}
}
-static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
+static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
u8 id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
@@ -2001,14 +2043,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
- rdev->pm.power_state[state_index].clock_info =
- kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
- if (!rdev->pm.power_state[state_index].clock_info)
- return state_index;
- rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
case 1:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2044,6 +2082,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
state_index++;
break;
case 2:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2080,6 +2119,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
state_index++;
break;
case 3:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2264,7 +2304,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
- if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+ if (ASIC_IS_DCE5(rdev)) {
/* NI chips post without MC ucode, so default clocks are strobe mode only */
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2386,31 +2426,17 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
- rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
- ((power_info->pplib.ucStateEntrySize - 1) ?
- (power_info->pplib.ucStateEntrySize - 1) : 1),
- GFP_KERNEL);
- if (!rdev->pm.power_state[i].clock_info)
- return state_index;
- if (power_info->pplib.ucStateEntrySize - 1) {
- for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
- clock_info = (union pplib_clock_info *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
- (power_state->v1.ucClockStateIndices[j] *
- power_info->pplib.ucClockInfoSize));
- valid = radeon_atombios_parse_pplib_clock_info(rdev,
- state_index, mode_index,
- clock_info);
- if (valid)
- mode_index++;
- }
- } else {
- rdev->pm.power_state[state_index].clock_info[0].mclk =
- rdev->clock.default_mclk;
- rdev->pm.power_state[state_index].clock_info[0].sclk =
- rdev->clock.default_sclk;
- mode_index++;
+ for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+ clock_info = (union pplib_clock_info *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+ (power_state->v1.ucClockStateIndices[j] *
+ power_info->pplib.ucClockInfoSize));
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
}
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
if (mode_index) {
@@ -2481,32 +2507,18 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
- (power_state->v2.ucNumDPMLevels ?
- power_state->v2.ucNumDPMLevels : 1),
- GFP_KERNEL);
- if (!rdev->pm.power_state[i].clock_info)
- return state_index;
- if (power_state->v2.ucNumDPMLevels) {
- for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
- /* XXX this might be an inagua bug... */
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
- clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index];
- valid = radeon_atombios_parse_pplib_clock_info(rdev,
- state_index, mode_index,
- clock_info);
- if (valid)
- mode_index++;
- }
- } else {
- rdev->pm.power_state[state_index].clock_info[0].mclk =
- rdev->clock.default_mclk;
- rdev->pm.power_state[state_index].clock_info[0].sclk =
- rdev->clock.default_sclk;
- mode_index++;
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = power_state->v2.clockInfoIndex[j];
+ /* XXX this might be an inagua bug... */
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index];
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
}
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
if (mode_index) {
@@ -2565,23 +2577,19 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (state_index == 0) {
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
- rdev->pm.power_state[0].clock_info =
- kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
- if (rdev->pm.power_state[0].clock_info) {
- /* add the default mode */
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
- rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- rdev->pm.power_state[state_index].pcie_lanes = 16;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].flags = 0;
- state_index++;
- }
+ /* add the default mode */
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].flags = 0;
+ state_index++;
}
}
@@ -2960,20 +2968,6 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
}
}
- if ((radeon_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
- (radeon_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
- if (connected) {
- DRM_DEBUG_KMS("DFP6 connected\n");
- bios_0_scratch |= ATOM_S0_DFP6;
- bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
- bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
- } else {
- DRM_DEBUG_KMS("DFP6 disconnected\n");
- bios_0_scratch &= ~ATOM_S0_DFP6;
- bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
- bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
- }
- }
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
@@ -2994,9 +2988,6 @@ radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t bios_3_scratch;
- if (ASIC_IS_DCE4(rdev))
- return;
-
if (rdev->family >= CHIP_R600)
bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
else
@@ -3049,9 +3040,6 @@ radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t bios_2_scratch;
- if (ASIC_IS_DCE4(rdev))
- return;
-
if (rdev->family >= CHIP_R600)
bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
else
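The deleted radeon_get_bus_rec_for_i2c_gpio() helper and the re-inlined loops above derive an i2c bus record the same way: AtomBIOS stores register locations as dword indices and pin positions as bit shifts, which the driver turns into byte offsets and single-bit masks. A minimal standalone sketch of that derivation, using simplified, assumed field names rather than the driver's structs:

#include <stdint.h>
#include <stdio.h>

/* simplified, assumed stand-in for struct radeon_i2c_bus_rec */
struct i2c_rec_sketch {
	uint32_t mask_clk_reg;  /* byte offset of the clock-mask register */
	uint32_t mask_clk_mask; /* single-bit mask within that register */
};

static struct i2c_rec_sketch make_rec(uint16_t clk_reg_index, uint8_t clk_shift)
{
	struct i2c_rec_sketch rec;

	rec.mask_clk_reg = clk_reg_index * 4; /* dword index -> byte offset */
	rec.mask_clk_mask = 1u << clk_shift;  /* bit position -> mask */
	return rec;
}

int main(void)
{
	/* hypothetical table entry: register dword 0x18, clock bit 0x19 */
	struct i2c_rec_sketch rec = make_rec(0x0018, 0x19);

	printf("reg 0x%x, mask 0x%x\n", rec.mask_clk_reg, rec.mask_clk_mask);
	return 0;
}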
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 98724fc..9d95792 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -58,8 +58,7 @@ static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
}
obj = (union acpi_object *)buffer.pointer;
- memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
- len = obj->buffer.length;
+ memcpy(bios+offset, obj->buffer.pointer, len);
kfree(buffer.pointer);
return len;
}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index fef7b72..10191d9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -26,83 +26,21 @@
#include "radeon_reg.h"
#include "radeon.h"
-#define RADEON_BENCHMARK_COPY_BLIT 1
-#define RADEON_BENCHMARK_COPY_DMA 0
-
-#define RADEON_BENCHMARK_ITERATIONS 1024
-#define RADEON_BENCHMARK_COMMON_MODES_N 17
-
-static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
- uint64_t saddr, uint64_t daddr,
- int flag, int n)
-{
- unsigned long start_jiffies;
- unsigned long end_jiffies;
- struct radeon_fence *fence = NULL;
- int i, r;
-
- start_jiffies = jiffies;
- for (i = 0; i < n; i++) {
- switch (flag) {
- case RADEON_BENCHMARK_COPY_DMA:
- r = radeon_fence_create(rdev, &fence, radeon_copy_dma_ring_index(rdev));
- if (r)
- return r;
- r = radeon_copy_dma(rdev, saddr, daddr,
- size / RADEON_GPU_PAGE_SIZE,
- fence);
- break;
- case RADEON_BENCHMARK_COPY_BLIT:
- r = radeon_fence_create(rdev, &fence, radeon_copy_blit_ring_index(rdev));
- if (r)
- return r;
- r = radeon_copy_blit(rdev, saddr, daddr,
- size / RADEON_GPU_PAGE_SIZE,
- fence);
- break;
- default:
- DRM_ERROR("Unknown copy method\n");
- r = -EINVAL;
- }
- if (r)
- goto exit_do_move;
- r = radeon_fence_wait(fence, false);
- if (r)
- goto exit_do_move;
- radeon_fence_unref(&fence);
- }
- end_jiffies = jiffies;
- r = jiffies_to_msecs(end_jiffies - start_jiffies);
-
-exit_do_move:
- if (fence)
- radeon_fence_unref(&fence);
- return r;
-}
-
-
-static void radeon_benchmark_log_results(int n, unsigned size,
- unsigned int time,
- unsigned sdomain, unsigned ddomain,
- char *kind)
-{
- unsigned int throughput = (n * (size >> 10)) / time;
- DRM_INFO("radeon: %s %u bo moves of %u kB from"
- " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
- kind, n, size >> 10, sdomain, ddomain, time,
- throughput * 8, throughput);
-}
-
-static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
- unsigned sdomain, unsigned ddomain)
+void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
+ unsigned sdomain, unsigned ddomain)
{
struct radeon_bo *dobj = NULL;
struct radeon_bo *sobj = NULL;
+ struct radeon_fence *fence = NULL;
uint64_t saddr, daddr;
- int r, n;
- int time;
+ unsigned long start_jiffies;
+ unsigned long end_jiffies;
+ unsigned long time;
+ unsigned i, n, size;
+ int r;
- n = RADEON_BENCHMARK_ITERATIONS;
+ size = bsize;
+ n = 1024;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
@@ -129,26 +67,65 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
}
/* r100 doesn't have dma engine so skip the test */
- /* also, VRAM-to-VRAM test doesn't make much sense for DMA */
- /* skip it as well if domains are the same */
- if ((rdev->asic->copy.dma) && (sdomain != ddomain)) {
- time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
- RADEON_BENCHMARK_COPY_DMA, n);
- if (time < 0)
- goto out_cleanup;
- if (time > 0)
- radeon_benchmark_log_results(n, size, time,
- sdomain, ddomain, "dma");
- }
+ if (rdev->asic->copy_dma) {
- time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
- RADEON_BENCHMARK_COPY_BLIT, n);
- if (time < 0)
- goto out_cleanup;
- if (time > 0)
- radeon_benchmark_log_results(n, size, time,
- sdomain, ddomain, "blit");
+ start_jiffies = jiffies;
+ for (i = 0; i < n; i++) {
+ r = radeon_fence_create(rdev, &fence);
+ if (r) {
+ goto out_cleanup;
+ }
+
+ r = radeon_copy_dma(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE, fence);
+
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ goto out_cleanup;
+ }
+ radeon_fence_unref(&fence);
+ }
+ end_jiffies = jiffies;
+ time = end_jiffies - start_jiffies;
+ time = jiffies_to_msecs(time);
+ if (time > 0) {
+ i = ((n * size) >> 10) / time;
+ printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
+ " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
+ n, size >> 10,
+ sdomain, ddomain, time,
+ i, i * 1000, (i * 1000) / 1024);
+ }
+ }
+ start_jiffies = jiffies;
+ for (i = 0; i < n; i++) {
+ r = radeon_fence_create(rdev, &fence);
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ goto out_cleanup;
+ }
+ radeon_fence_unref(&fence);
+ }
+ end_jiffies = jiffies;
+ time = end_jiffies - start_jiffies;
+ time = jiffies_to_msecs(time);
+ if (time > 0) {
+ i = ((n * size) >> 10) / time;
+ printk(KERN_INFO "radeon: blit %u bo moves of %ukb from %d to %d"
+ " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
+ sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
+ }
out_cleanup:
if (sobj) {
r = radeon_bo_reserve(sobj, false);
@@ -166,92 +143,18 @@ out_cleanup:
}
radeon_bo_unref(&dobj);
}
-
+ if (fence) {
+ radeon_fence_unref(&fence);
+ }
if (r) {
- DRM_ERROR("Error while benchmarking BO move.\n");
+ printk(KERN_WARNING "Error while benchmarking BO move.\n");
}
}
-void radeon_benchmark(struct radeon_device *rdev, int test_number)
+void radeon_benchmark(struct radeon_device *rdev)
{
- int i;
- int common_modes[RADEON_BENCHMARK_COMMON_MODES_N] = {
- 640 * 480 * 4,
- 720 * 480 * 4,
- 800 * 600 * 4,
- 848 * 480 * 4,
- 1024 * 768 * 4,
- 1152 * 768 * 4,
- 1280 * 720 * 4,
- 1280 * 800 * 4,
- 1280 * 854 * 4,
- 1280 * 960 * 4,
- 1280 * 1024 * 4,
- 1440 * 900 * 4,
- 1400 * 1050 * 4,
- 1680 * 1050 * 4,
- 1600 * 1200 * 4,
- 1920 * 1080 * 4,
- 1920 * 1200 * 4
- };
-
- switch (test_number) {
- case 1:
- /* simple test, VRAM to GTT and GTT to VRAM */
- radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
- RADEON_GEM_DOMAIN_VRAM);
- radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
- RADEON_GEM_DOMAIN_GTT);
- break;
- case 2:
- /* simple test, VRAM to VRAM */
- radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
- RADEON_GEM_DOMAIN_VRAM);
- break;
- case 3:
- /* GTT to VRAM, buffer size sweep, powers of 2 */
- for (i = 1; i <= 16384; i <<= 1)
- radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
- RADEON_GEM_DOMAIN_GTT,
- RADEON_GEM_DOMAIN_VRAM);
- break;
- case 4:
- /* VRAM to GTT, buffer size sweep, powers of 2 */
- for (i = 1; i <= 16384; i <<= 1)
- radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
- RADEON_GEM_DOMAIN_VRAM,
- RADEON_GEM_DOMAIN_GTT);
- break;
- case 5:
- /* VRAM to VRAM, buffer size sweep, powers of 2 */
- for (i = 1; i <= 16384; i <<= 1)
- radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
- RADEON_GEM_DOMAIN_VRAM,
- RADEON_GEM_DOMAIN_VRAM);
- break;
- case 6:
- /* GTT to VRAM, buffer size sweep, common modes */
- for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
- radeon_benchmark_move(rdev, common_modes[i],
- RADEON_GEM_DOMAIN_GTT,
- RADEON_GEM_DOMAIN_VRAM);
- break;
- case 7:
- /* VRAM to GTT, buffer size sweep, common modes */
- for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
- radeon_benchmark_move(rdev, common_modes[i],
- RADEON_GEM_DOMAIN_VRAM,
- RADEON_GEM_DOMAIN_GTT);
- break;
- case 8:
- /* VRAM to VRAM, buffer size sweep, common modes */
- for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
- radeon_benchmark_move(rdev, common_modes[i],
- RADEON_GEM_DOMAIN_VRAM,
- RADEON_GEM_DOMAIN_VRAM);
- break;
-
- default:
- DRM_ERROR("Unknown benchmark\n");
- }
+ radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_DOMAIN_VRAM);
+ radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_GTT);
}
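The restored benchmark derives its throughput figures directly from jiffies. A quick standalone check of that arithmetic, with hypothetical numbers in place of measured ones:

#include <stdio.h>

int main(void)
{
	unsigned n = 1024;                /* iterations, as in the loop above */
	unsigned long size = 1024 * 1024; /* bytes moved per iteration */
	unsigned long time_ms = 512;      /* hypothetical elapsed milliseconds */

	/* (n * size) >> 10 is the total volume in KB; dividing by the
	 * elapsed milliseconds gives KB/ms, which the printk above also
	 * scales to KB/s and MB/s */
	unsigned long kb_per_ms = ((n * size) >> 10) / time_ms;

	printf("%lu KB/ms = %lu KB/s = %lu MB/s\n",
	       kb_per_ms, kb_per_ms * 1000, (kb_per_ms * 1000) / 1024);
	return 0;
}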
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 501f488..229a20f 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -120,7 +120,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
ret = radeon_atrm_get_bios_chunk(rdev->bios,
(i * ATRM_BIOS_PAGE),
ATRM_BIOS_PAGE);
- if (ret < ATRM_BIOS_PAGE)
+ if (ret <= 0)
break;
}
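The changed loop above keeps fetching fixed-size pages and now stops only when a chunk read fails or returns nothing, rather than on the first short read. A self-contained sketch of that loop shape, with a hypothetical fetch_chunk() standing in for radeon_atrm_get_bios_chunk():

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096 /* stands in for ATRM_BIOS_PAGE */

/* hypothetical 3-page BIOS image with a partial tail */
static unsigned char fake_bios[3 * PAGE_SZ + 100];

/* stand-in for radeon_atrm_get_bios_chunk(): returns bytes copied,
 * 0 once the offset runs past the end of the image */
static int fetch_chunk(unsigned char *dst, size_t off, size_t len)
{
	if (off >= sizeof(fake_bios))
		return 0;
	if (off + len > sizeof(fake_bios))
		len = sizeof(fake_bios) - off;
	memcpy(dst, fake_bios + off, len);
	return (int)len;
}

int main(void)
{
	static unsigned char bios[sizeof(fake_bios)];
	size_t i;

	/* loop shape after the change: stop only on error or end of data */
	for (i = 0; ; i++) {
		int ret = fetch_chunk(bios + i * PAGE_SZ, i * PAGE_SZ, PAGE_SZ);
		if (ret <= 0)
			break;
	}
	printf("fetched %zu chunks\n", i);
	return 0;
}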
diff --git a/drivers/gpu/drm/radeon/radeon_blit_common.h b/drivers/gpu/drm/radeon/radeon_blit_common.h
deleted file mode 100644
index 4ecbe72..0000000
--- a/drivers/gpu/drm/radeon/radeon_blit_common.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2009 Advanced Micro Devices, Inc.
- * Copyright 2009 Red Hat Inc.
- * Copyright 2012 Alcatel-Lucent, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __RADEON_BLIT_COMMON_H__
-
-#define DI_PT_RECTLIST 0x11
-#define DI_INDEX_SIZE_16_BIT 0x0
-#define DI_SRC_SEL_AUTO_INDEX 0x2
-
-#define FMT_8 0x1
-#define FMT_5_6_5 0x8
-#define FMT_8_8_8_8 0x1a
-#define COLOR_8 0x1
-#define COLOR_5_6_5 0x8
-#define COLOR_8_8_8_8 0x1a
-
-#define RECT_UNIT_H 32
-#define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
-
-#define __RADEON_BLIT_COMMON_H__
-#endif
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 6ae0c75..b956cf1 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -96,7 +96,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
* Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
* tree. Hopefully, ATI OF driver is kind enough to fill these
*/
-static bool radeon_read_clocks_OF(struct drm_device *dev)
+static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct device_node *dp = rdev->pdev->dev.of_node;
@@ -166,7 +166,7 @@ static bool radeon_read_clocks_OF(struct drm_device *dev)
return true;
}
#else
-static bool radeon_read_clocks_OF(struct drm_device *dev)
+static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
{
return false;
}
@@ -334,7 +334,7 @@ void radeon_get_clock_info(struct drm_device *dev)
if (!rdev->clock.default_sclk)
rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
- if ((!rdev->clock.default_mclk) && rdev->asic->pm.get_memory_clock)
+ if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
rdev->pm.current_sclk = rdev->clock.default_sclk;
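The get_memory_clock change above reflects the flat 3.0.y asic function-pointer table (rdev->asic->get_memory_clock) rather than the later grouped pm sub-struct. A reduced sketch of that guarded dispatch, with an assumed stand-in struct:

#include <stdio.h>

/* assumed, heavily reduced stand-in for the flat 3.0.y asic table */
struct asic_sketch {
	unsigned int (*get_memory_clock)(void);
};

static unsigned int dummy_mclk(void)
{
	return 40000; /* hypothetical memory clock, 10 kHz units */
}

int main(void)
{
	struct asic_sketch asic = { .get_memory_clock = dummy_mclk };
	unsigned int default_mclk = 0;

	/* mirrors the guarded call above: derive the default memory
	 * clock only if the asic provides the hook */
	if (!default_mclk && asic.get_memory_clock)
		default_mclk = asic.get_memory_clock();

	printf("default mclk: %u\n", default_mclk);
	return 0;
}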
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e158e68..f616e40 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2513,28 +2513,25 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
u16 offset, misc, misc2 = 0;
u8 rev, blocks, tmp;
int state_index = 0;
- struct radeon_i2c_bus_rec i2c_bus;
rdev->pm.default_power_state_index = -1;
/* allocate 2 power states */
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
- if (rdev->pm.power_state) {
- /* allocate 1 clock mode per state */
- rdev->pm.power_state[0].clock_info =
- kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
- rdev->pm.power_state[1].clock_info =
- kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
- if (!rdev->pm.power_state[0].clock_info ||
- !rdev->pm.power_state[1].clock_info)
- goto pm_failed;
- } else
- goto pm_failed;
+ if (!rdev->pm.power_state) {
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.num_power_states = 0;
+
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
+ return;
+ }
/* check for a thermal chip */
offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
if (offset) {
u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0;
+ struct radeon_i2c_bus_rec i2c_bus;
rev = RBIOS8(offset);
@@ -2576,25 +2573,6 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
- } else {
- /* boards with a thermal chip, but no overdrive table */
-
- /* Asus 9600xt has an f75375 on the monid bus */
- if ((dev->pdev->device == 0x4152) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0xc002)) {
- i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
- rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
- if (rdev->pm.i2c_bus) {
- struct i2c_board_info info = { };
- const char *name = "f75375";
- info.addr = 0x28;
- strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
- DRM_INFO("Possible %s thermal controller at 0x%02x\n",
- name, info.addr);
- }
- }
}
if (rdev->flags & RADEON_IS_MOBILITY) {
@@ -2694,14 +2672,6 @@ default_mode:
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
- return;
-
-pm_failed:
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.num_power_states = 0;
-
- rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
- rdev->pm.current_clock_mode_index = 0;
}
void radeon_external_tmds_setup(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4c4c924..f1a1e8a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -44,6 +44,8 @@ extern void
radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector);
+bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
+
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -449,24 +451,65 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
return 0;
}
+/*
+ * Some integrated ATI Radeon chipset implementations (e.g. the
+ * Asus M2A-VM HDMI) may indicate the availability of a DDC even
+ * when there's no monitor connected. For these connectors the
+ * following DDC probe extension is applied: also check for the
+ * availability of an EDID with at least a correct EDID header.
+ * Only then is DDC assumed to be available. This prevents
+ * drm_get_edid() and drm_edid_block_valid() from periodically
+ * dumping data and kernel errors into the logs and onto the terminal.
+static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,
+ uint32_t supported_device,
+ int connector_type)
+{
+ /* The Asus M2A-VM HDMI board sends data to the i2c bus even
+ * if the HDMI add-on card is not plugged in or HDMI is disabled in
+ * the BIOS. Valid DDC can only be assumed if a valid EDID header
+ * can also be retrieved via the i2c bus during the DDC probe */
+ if ((dev->pdev->device == 0x791e) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0x826d)) {
+ if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return true;
+ }
+ /* ECS A740GM-M with ATI RADEON 2100 sends data to the i2c bus
+ * for a DVI connector that is not implemented */
+ if ((dev->pdev->device == 0x796e) &&
+ (dev->pdev->subsystem_vendor == 0x1019) &&
+ (dev->pdev->subsystem_device == 0x2615)) {
+ if ((connector_type == DRM_MODE_CONNECTOR_DVID) &&
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return true;
+ }
+ /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100
+ * (RS690M) sends data to the i2c bus for an HDMI connector that
+ * is not implemented */
+ if ((dev->pdev->device == 0x791f) &&
+ (dev->pdev->subsystem_vendor == 0x1179) &&
+ (dev->pdev->subsystem_device == 0xff68)) {
+ if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return true;
+ }
+
+ /* Default: no EDID header probe required for DDC probing */
+ return false;
+}
+
static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
- struct drm_display_mode *t, *mode;
-
- /* If the EDID preferred mode doesn't match the native mode, use it */
- list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
- if (mode->type & DRM_MODE_TYPE_PREFERRED) {
- if (mode->hdisplay != native_mode->hdisplay ||
- mode->vdisplay != native_mode->vdisplay)
- memcpy(native_mode, mode, sizeof(*mode));
- }
- }
/* Try to get native mode details from EDID if necessary */
if (!native_mode->clock) {
+ struct drm_display_mode *t, *mode;
+
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
if (mode->hdisplay == native_mode->hdisplay &&
mode->vdisplay == native_mode->vdisplay) {
@@ -477,7 +520,6 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
}
}
}
-
if (!native_mode->clock) {
DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
radeon_encoder->rmx_type = RMX_OFF;
@@ -689,7 +731,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_disconnected;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
@@ -846,27 +889,6 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
return ret;
}
-static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- enum drm_connector_status status;
-
- /* We only trust HPD on R600 and newer ASICS. */
- if (rdev->family >= CHIP_R600
- && radeon_connector->hpd.hpd != RADEON_HPD_NONE) {
- if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
- if (connector->status == status)
- return true;
- }
-
- return false;
-}
-
/*
* DVI is complicated
* Do a DDC probe, if DDC probe passes, get the full EDID so
@@ -891,11 +913,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false;
- if (!force && radeon_check_hpd_status_unchanged(connector))
- return connector->status;
-
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
@@ -1104,7 +1124,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
- if (0) {
+ if (ASIC_IS_DCE3(rdev)) {
/* HDMI 1.3+ supports max clock of 340 MHz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@@ -1164,23 +1184,13 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
struct drm_display_mode *mode;
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- if (!radeon_dig_connector->edp_on)
- atombios_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_ON);
- ret = radeon_ddc_get_modes(radeon_connector);
- if (!radeon_dig_connector->edp_on)
- atombios_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_OFF);
- } else {
- /* need to setup ddc on the bridge */
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
- ENCODER_OBJECT_ID_NONE) {
- if (encoder)
- radeon_atom_ext_encoder_setup_ddc(encoder);
- }
- ret = radeon_ddc_get_modes(radeon_connector);
- }
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ ret = radeon_ddc_get_modes(radeon_connector);
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
if (ret > 0) {
if (encoder) {
@@ -1191,6 +1201,7 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
return ret;
}
+ encoder = radeon_best_single_encoder(connector);
if (!encoder)
return 0;
@@ -1207,8 +1218,7 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
}
} else {
/* need to setup ddc on the bridge */
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
- ENCODER_OBJECT_ID_NONE) {
+ if (radeon_connector_encoder_is_dp_bridge(connector)) {
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
@@ -1218,12 +1228,13 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
return ret;
}
-u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
+bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector)
{
struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
+ bool found = false;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
@@ -1239,13 +1250,14 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
- return radeon_encoder->encoder_id;
+ found = true;
+ break;
default:
break;
}
}
- return ENCODER_OBJECT_ID_NONE;
+ return found;
}
bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
@@ -1297,9 +1309,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
- if (!force && radeon_check_hpd_status_unchanged(connector))
- return connector->status;
-
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
@@ -1325,24 +1334,12 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
- } else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
- ENCODER_OBJECT_ID_NONE) {
- /* DP bridges are always DP */
- radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
- /* get the DPCD from the bridge */
- radeon_dp_getdpcd(radeon_connector);
-
- if (encoder) {
- /* setup ddc on the bridge */
- radeon_atom_ext_encoder_setup_ddc(encoder);
- if (radeon_ddc_probe(radeon_connector)) /* try DDC */
- ret = connector_status_connected;
- else if (radeon_connector->dac_load_detect) { /* try load detection */
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- ret = encoder_funcs->detect(encoder, connector);
- }
- }
} else {
+ /* need to setup ddc on the bridge */
+ if (radeon_connector_encoder_is_dp_bridge(connector)) {
+ if (encoder)
+ radeon_atom_ext_encoder_setup_ddc(encoder);
+ }
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
@@ -1353,10 +1350,21 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
- if (radeon_ddc_probe(radeon_connector))
+ if (radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe))
ret = connector_status_connected;
}
}
+
+ if ((ret == connector_status_disconnected) &&
+ radeon_connector->dac_load_detect) {
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ if (encoder) {
+ encoder_funcs = encoder->helper_private;
+ ret = encoder_funcs->detect(encoder, connector);
+ }
+ }
}
radeon_connector_update_scratch_regs(connector, ret);
@@ -1498,7 +1506,9 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
-
+ radeon_connector->requires_extended_probe =
+ radeon_connector_needs_extended_probe(rdev, supported_device,
+ connector_type);
radeon_connector->router = *router;
if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
@@ -1845,7 +1855,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->devices = supported_device;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
-
+ radeon_connector->requires_extended_probe =
+ radeon_connector_needs_extended_probe(rdev, supported_device,
+ connector_type);
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
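The extended-probe helper added above keys each quirk off PCI device and subsystem IDs, plus the connector type and supported-device flags. A standalone sketch of just the ID matching, with an assumed stand-in for the pci_dev fields (the real helper additionally checks connector_type and supported_device):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* assumed, simplified stand-in for the pci_dev fields used above */
struct pci_ids {
	uint16_t device;
	uint16_t subsystem_vendor;
	uint16_t subsystem_device;
};

static bool needs_extended_probe(const struct pci_ids *id)
{
	/* Asus M2A-VM HDMI, one of the boards quirked above */
	return id->device == 0x791e &&
	       id->subsystem_vendor == 0x1043 &&
	       id->subsystem_device == 0x826d;
}

int main(void)
{
	struct pci_ids asus = { 0x791e, 0x1043, 0x826d };

	printf("extended probe: %s\n",
	       needs_extended_probe(&asus) ? "yes" : "no");
	return 0;
}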
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 0ebb7d4..7586779 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -29,8 +29,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include <linux/module.h>
-
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
@@ -2115,11 +2113,9 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
break;
}
- pci_set_master(dev->pdev);
-
if (drm_pci_device_is_agp(dev))
dev_priv->flags |= RADEON_IS_AGP;
- else if (pci_is_pcie(dev->pdev))
+ else if (drm_pci_device_is_pcie(dev))
dev_priv->flags |= RADEON_IS_PCIE;
else
dev_priv->flags |= RADEON_IS_PCI;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index d9d9f5a..fae00c0 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -58,7 +58,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
duplicate = false;
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
- for (j = 0; j < i; j++) {
+ for (j = 0; j < p->nrelocs; j++) {
if (r->handle == p->relocs[j].handle) {
p->relocs_ptr[i] = &p->relocs[j];
duplicate = true;
@@ -84,73 +84,9 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].flags = r->flags;
radeon_bo_list_add_object(&p->relocs[i].lobj,
&p->validated);
-
- } else
- p->relocs[i].handle = 0;
- }
- return radeon_bo_list_validate(&p->validated);
-}
-
-static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
-{
- p->priority = priority;
-
- switch (ring) {
- default:
- DRM_ERROR("unknown ring id: %d\n", ring);
- return -EINVAL;
- case RADEON_CS_RING_GFX:
- p->ring = RADEON_RING_TYPE_GFX_INDEX;
- break;
- case RADEON_CS_RING_COMPUTE:
- /* for now */
- p->ring = RADEON_RING_TYPE_GFX_INDEX;
- break;
- }
- return 0;
-}
-
-static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
-{
- bool sync_to_ring[RADEON_NUM_RINGS] = { };
- int i, r;
-
- for (i = 0; i < p->nrelocs; i++) {
- if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
- continue;
-
- if (!(p->relocs[i].flags & RADEON_RELOC_DONT_SYNC)) {
- struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
- if (!radeon_fence_signaled(fence)) {
- sync_to_ring[fence->ring] = true;
- }
- }
- }
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- /* no need to sync to our own or unused rings */
- if (i == p->ring || !sync_to_ring[i] || !p->rdev->ring[i].ready)
- continue;
-
- if (!p->ib->fence->semaphore) {
- r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
- if (r)
- return r;
}
-
- r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
- if (r)
- return r;
- radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
- radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
-
- r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
- if (r)
- return r;
- radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
- radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
}
- return 0;
+ return radeon_bo_list_validate(&p->validated);
}
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -158,8 +94,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
unsigned size, i;
- u32 ring = RADEON_CS_RING_GFX;
- s32 priority = 0;
if (!cs->num_chunks) {
return 0;
@@ -169,7 +103,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->idx = 0;
p->chunk_ib_idx = -1;
p->chunk_relocs_idx = -1;
- p->chunk_flags_idx = -1;
p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (p->chunks_array == NULL) {
return -ENOMEM;
@@ -179,7 +112,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
sizeof(uint64_t)*cs->num_chunks)) {
return -EFAULT;
}
- p->cs_flags = 0;
p->nchunks = cs->num_chunks;
p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
if (p->chunks == NULL) {
@@ -208,19 +140,12 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
- p->chunk_flags_idx = i;
- /* zero length flags aren't useful */
- if (p->chunks[i].length_dw == 0)
- return -EINVAL;
- }
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
- if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
- (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
+ if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
size = p->chunks[i].length_dw * sizeof(uint32_t);
p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
@@ -230,47 +155,25 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[i].user_ptr, size)) {
return -EFAULT;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
- p->cs_flags = p->chunks[i].kdata[0];
- if (p->chunks[i].length_dw > 1)
- ring = p->chunks[i].kdata[1];
- if (p->chunks[i].length_dw > 2)
- priority = (s32)p->chunks[i].kdata[2];
+ } else {
+ p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
+ kfree(p->chunks[i].kpage[0]);
+ kfree(p->chunks[i].kpage[1]);
+ return -ENOMEM;
}
+ p->chunks[i].kpage_idx[0] = -1;
+ p->chunks[i].kpage_idx[1] = -1;
+ p->chunks[i].last_copied_page = -1;
+ p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
}
}
-
- if ((p->cs_flags & RADEON_CS_USE_VM) &&
- !p->rdev->vm_manager.enabled) {
- DRM_ERROR("VM not active on asic!\n");
+ if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+ DRM_ERROR("cs IB too big: %d\n",
+ p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
-
- if (radeon_cs_get_ring(p, ring, priority))
- return -EINVAL;
-
-
- /* deal with non-vm */
- if ((p->chunk_ib_idx != -1) &&
- ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
- (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
- if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
- DRM_ERROR("cs IB too big: %d\n",
- p->chunks[p->chunk_ib_idx].length_dw);
- return -EINVAL;
- }
- p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
- p->chunks[p->chunk_ib_idx].kpage[1] == NULL)
- return -ENOMEM;
- p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
- p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
- p->chunks[p->chunk_ib_idx].last_copied_page = -1;
- p->chunks[p->chunk_ib_idx].last_page_index =
- ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
- }
-
return 0;
}
@@ -312,146 +215,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
radeon_ib_free(parser->rdev, &parser->ib);
}
-static int radeon_cs_ib_chunk(struct radeon_device *rdev,
- struct radeon_cs_parser *parser)
-{
- struct radeon_cs_chunk *ib_chunk;
- int r;
-
- if (parser->chunk_ib_idx == -1)
- return 0;
-
- if (parser->cs_flags & RADEON_CS_USE_VM)
- return 0;
-
- ib_chunk = &parser->chunks[parser->chunk_ib_idx];
- /* Copy the packet into the IB, the parser will read from the
- * input memory (cached) and write to the IB (which can be
- * uncached).
- */
- r = radeon_ib_get(rdev, parser->ring, &parser->ib,
- ib_chunk->length_dw * 4);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
- parser->ib->length_dw = ib_chunk->length_dw;
- r = radeon_cs_parse(rdev, parser->ring, parser);
- if (r || parser->parser_error) {
- DRM_ERROR("Invalid command stream !\n");
- return r;
- }
- r = radeon_cs_finish_pages(parser);
- if (r) {
- DRM_ERROR("Invalid command stream !\n");
- return r;
- }
- r = radeon_cs_sync_rings(parser);
- if (r) {
- DRM_ERROR("Failed to synchronize rings !\n");
- }
- parser->ib->vm_id = 0;
- r = radeon_ib_schedule(rdev, parser->ib);
- if (r) {
- DRM_ERROR("Failed to schedule IB !\n");
- }
- return 0;
-}
-
-static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
- struct radeon_vm *vm)
-{
- struct radeon_bo_list *lobj;
- struct radeon_bo *bo;
- int r;
-
- list_for_each_entry(lobj, &parser->validated, tv.head) {
- bo = lobj->bo;
- r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
- if (r) {
- return r;
- }
- }
- return 0;
-}
-
-static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
- struct radeon_cs_parser *parser)
-{
- struct radeon_cs_chunk *ib_chunk;
- struct radeon_fpriv *fpriv = parser->filp->driver_priv;
- struct radeon_vm *vm = &fpriv->vm;
- int r;
-
- if (parser->chunk_ib_idx == -1)
- return 0;
-
- if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
- return 0;
-
- ib_chunk = &parser->chunks[parser->chunk_ib_idx];
- if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
- DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
- return -EINVAL;
- }
- r = radeon_ib_get(rdev, parser->ring, &parser->ib,
- ib_chunk->length_dw * 4);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
- parser->ib->length_dw = ib_chunk->length_dw;
- /* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
- ib_chunk->length_dw * 4)) {
- return -EFAULT;
- }
- r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
- if (r) {
- return r;
- }
-
- mutex_lock(&vm->mutex);
- r = radeon_vm_bind(rdev, vm);
- if (r) {
- goto out;
- }
- r = radeon_bo_vm_update_pte(parser, vm);
- if (r) {
- goto out;
- }
- r = radeon_cs_sync_rings(parser);
- if (r) {
- DRM_ERROR("Failed to synchronize rings !\n");
- }
- parser->ib->vm_id = vm->id;
- /* the ib pool is bound at 0 in the virtual address space, so
- * gpu_addr is the offset inside the pool bo
- */
- parser->ib->gpu_addr = parser->ib->sa_bo.offset;
- r = radeon_ib_schedule(rdev, parser->ib);
-out:
- if (!r) {
- if (vm->fence) {
- radeon_fence_unref(&vm->fence);
- }
- vm->fence = radeon_fence_ref(parser->ib->fence);
- }
- mutex_unlock(&fpriv->vm.mutex);
- return r;
-}
-
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_cs_parser parser;
+ struct radeon_cs_chunk *ib_chunk;
int r;
- radeon_mutex_lock(&rdev->cs_mutex);
- if (!rdev->accel_working) {
- radeon_mutex_unlock(&rdev->cs_mutex);
- return -EBUSY;
- }
+ mutex_lock(&rdev->cs_mutex);
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
@@ -462,7 +233,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
+ mutex_unlock(&rdev->cs_mutex);
+ return r;
+ }
+ r = radeon_ib_get(rdev, &parser.ib);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ radeon_cs_parser_fini(&parser, r);
+ mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_cs_parser_relocs(&parser);
@@ -470,20 +248,34 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
+ mutex_unlock(&rdev->cs_mutex);
return r;
}
- r = radeon_cs_ib_chunk(rdev, &parser);
+ /* Copy the packet into the IB; the parser will read from the
+ * input memory (cached) and write to the IB (which can be
+ * uncached). */
+ ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+ parser.ib->length_dw = ib_chunk->length_dw;
+ r = radeon_cs_parse(&parser);
+ if (r || parser.parser_error) {
+ DRM_ERROR("Invalid command stream !\n");
+ radeon_cs_parser_fini(&parser, r);
+ mutex_unlock(&rdev->cs_mutex);
+ return r;
+ }
+ r = radeon_cs_finish_pages(&parser);
if (r) {
- goto out;
+ DRM_ERROR("Invalid command stream !\n");
+ radeon_cs_parser_fini(&parser, r);
+ mutex_unlock(&rdev->cs_mutex);
+ return r;
}
- r = radeon_cs_ib_vm_chunk(rdev, &parser);
+ r = radeon_ib_schedule(rdev, parser.ib);
if (r) {
- goto out;
+ DRM_ERROR("Failed to schedule IB !\n");
}
-out:
radeon_cs_parser_fini(&parser, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
+ mutex_unlock(&rdev->cs_mutex);
return r;
}
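
The restored ioctl takes rdev->cs_mutex up front and then repeats the radeon_cs_parser_fini()/mutex_unlock() pair on every failure path. A sketch of the same locking discipline written with a single exit label; it relies on the fini routine tolerating a partially initialized parser, which the driver code above also assumes. pthread stands in for the kernel mutex and all other names are illustrative:

#include <pthread.h>

struct parser { int initialized; };

static pthread_mutex_t cs_mutex = PTHREAD_MUTEX_INITIALIZER;

static int parser_init(struct parser *p) { p->initialized = 1; return 0; }
static int parser_run(struct parser *p) { (void)p; return 0; }
/* must cope with a parser whose init failed, as radeon_cs_parser_fini does */
static void parser_fini(struct parser *p, int error) { (void)p; (void)error; }

int cs_ioctl(void)
{
	struct parser p = { 0 };
	int r;

	pthread_mutex_lock(&cs_mutex);
	r = parser_init(&p);
	if (r)
		goto out;
	r = parser_run(&p);
out:
	parser_fini(&p, r);
	pthread_mutex_unlock(&cs_mutex);
	return r;
}

int main(void) { return cs_ioctl(); }
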
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 711e95a..72f749d 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -197,12 +197,7 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
unpin:
if (radeon_crtc->cursor_bo) {
- robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
- ret = radeon_bo_reserve(robj, false);
- if (likely(ret == 0)) {
- radeon_bo_unpin(robj);
- radeon_bo_unreserve(robj);
- }
+ radeon_gem_object_unpin(radeon_crtc->cursor_bo);
drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
}
@@ -229,14 +224,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
}
DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
- if (x < 0) {
- xorigin = min(-x, CURSOR_WIDTH - 1);
- x = 0;
- }
- if (y < 0) {
- yorigin = min(-y, CURSOR_HEIGHT - 1);
- y = 0;
- }
+ if (x < 0)
+ xorigin = -x + 1;
+ if (y < 0)
+ yorigin = -y + 1;
+ if (xorigin >= CURSOR_WIDTH)
+ xorigin = CURSOR_WIDTH - 1;
+ if (yorigin >= CURSOR_HEIGHT)
+ yorigin = CURSOR_HEIGHT - 1;
if (ASIC_IS_AVIVO(rdev)) {
int i = 0;
@@ -275,12 +270,16 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
radeon_lock_cursor(crtc, true);
if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+ WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
+ ((xorigin ? 0 : x) << 16) |
+ (yorigin ? 0 : y));
WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
} else if (ASIC_IS_AVIVO(rdev)) {
- WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+ WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
+ ((xorigin ? 0 : x) << 16) |
+ (yorigin ? 0 : y));
WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
@@ -294,8 +293,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
| yorigin));
WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
(RADEON_CUR_LOCK
- | (x << 16)
- | y));
+ | ((xorigin ? 0 : x) << 16)
+ | (yorigin ? 0 : y)));
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
(yorigin * 256)));
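
The cursor hunks above restore the older hot-spot handling: when the cursor crosses the top or left edge, the position register is pinned to 0 and the overshoot is folded into the hot-spot origin instead, clamped to the cursor dimensions. A standalone sketch of that arithmetic; the 64x64 cursor size is illustrative:

#include <stdio.h>

#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

static void cursor_regs(int x, int y)
{
	int xorigin = 0, yorigin = 0;

	if (x < 0)
		xorigin = -x + 1;
	if (y < 0)
		yorigin = -y + 1;
	if (xorigin >= CURSOR_WIDTH)
		xorigin = CURSOR_WIDTH - 1;
	if (yorigin >= CURSOR_HEIGHT)
		yorigin = CURSOR_HEIGHT - 1;

	printf("POSITION = (%d,%d)  HOT_SPOT = (%d,%d)\n",
	       xorigin ? 0 : x, yorigin ? 0 : y, xorigin, yorigin);
}

int main(void)
{
	cursor_regs(100, 50);	/* on-screen: hot spot stays (0,0) */
	cursor_regs(-10, -3);	/* off the edge: position pinned to 0 */
	return 0;
}
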
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 405c827..a275cf6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -32,7 +32,6 @@
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
-#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
@@ -304,8 +303,6 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
- mc->real_vram_size = radeon_vram_limit;
dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
@@ -354,9 +351,6 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
- if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
- return false;
-
/* first check CRTCs */
if (ASIC_IS_DCE4(rdev)) {
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
@@ -718,26 +712,19 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
- radeon_mutex_init(&rdev->cs_mutex);
- radeon_mutex_init(&rdev->ib_pool.mutex);
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- mutex_init(&rdev->ring[i].mutex);
+ mutex_init(&rdev->cs_mutex);
+ mutex_init(&rdev->ib_pool.mutex);
+ mutex_init(&rdev->cp.mutex);
mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex);
- rwlock_init(&rdev->fence_lock);
- rwlock_init(&rdev->semaphore_drv.lock);
+ rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
- INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
- /* initialize vm here */
- rdev->vm_manager.use_bitmap = 1;
- rdev->vm_manager.max_pfn = 1 << 20;
- INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
/* Set asic functions */
r = radeon_asic_init(rdev);
@@ -759,29 +746,22 @@ int radeon_device_init(struct radeon_device *rdev,
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
- * IGP - can handle 40-bits
+ * IGP - can handle 40-bits (in theory)
* AGP - generally dma32 is safest
- * PCI - dma32 for legacy pci gart, 40 bits on newer asics
+ * PCI - only dma32
*/
rdev->need_dma32 = false;
if (rdev->flags & RADEON_IS_AGP)
rdev->need_dma32 = true;
- if ((rdev->flags & RADEON_IS_PCI) &&
- (rdev->family < CHIP_RS400))
+ if (rdev->flags & RADEON_IS_PCI)
rdev->need_dma32 = true;
dma_bits = rdev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
rdev->need_dma32 = true;
- dma_bits = 32;
printk(KERN_WARNING "radeon: No suitable DMA available.\n");
}
- r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
- if (r) {
- pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
- printk(KERN_WARNING "radeon: No coherent DMA available.\n");
- }
/* Registers mapping */
/* TODO: block userspace mapping of io register */
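
The DMA hunk narrows the mask selection to 32 bits for AGP and all PCI boards and 40 bits otherwise, with a fallback to 32 if setting the mask fails. For reference, a sketch of the mask arithmetic; DMA_BIT_MASK is written out here the way the kernel macro expands:

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	int need_dma32 = 1;			/* e.g. an AGP or PCI board */
	int dma_bits = need_dma32 ? 32 : 40;

	printf("mask = 0x%016llx\n",
	       (unsigned long long)DMA_BIT_MASK(dma_bits));
	return 0;
}
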
@@ -829,20 +809,15 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
return r;
}
- if ((radeon_testing & 1)) {
+ if (radeon_testing) {
radeon_test_moves(rdev);
}
- if ((radeon_testing & 2)) {
- radeon_test_syncing(rdev);
- }
if (radeon_benchmarking) {
- radeon_benchmark(rdev, radeon_benchmarking);
+ radeon_benchmark(rdev);
}
return 0;
}
-static void radeon_debugfs_remove_files(struct radeon_device *rdev);
-
void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
@@ -857,7 +832,6 @@ void radeon_device_fini(struct radeon_device *rdev)
rdev->rio_mem = NULL;
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
- radeon_debugfs_remove_files(rdev);
}
@@ -869,7 +843,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
struct radeon_device *rdev;
struct drm_crtc *crtc;
struct drm_connector *connector;
- int i, r;
+ int r;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
@@ -910,8 +884,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
/* wait for gpu to finish processing current batch */
- for (i = 0; i < RADEON_NUM_RINGS; i++)
- radeon_fence_wait_last(rdev, i);
+ radeon_fence_wait_last(rdev);
radeon_save_bios_scratch_regs(rdev);
@@ -960,11 +933,9 @@ int radeon_resume_kms(struct drm_device *dev)
radeon_fbdev_set_suspend(rdev, 0);
console_unlock();
- /* init dig PHYs, disp eng pll */
- if (rdev->is_atom_bios) {
+ /* init dig PHYs */
+ if (rdev->is_atom_bios)
radeon_atom_encoder_init(rdev);
- radeon_atom_dcpll_init(rdev);
- }
/* reset hpd state */
radeon_hpd_init(rdev);
/* blat the mode back in */
@@ -983,9 +954,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
int r;
int resched;
- /* Prevent CS ioctl from interfering */
- radeon_mutex_lock(&rdev->cs_mutex);
-
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -998,15 +966,10 @@ int radeon_gpu_reset(struct radeon_device *rdev)
radeon_restore_bios_scratch_regs(rdev);
drm_helper_resume_force_mode(rdev->ddev);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+ return 0;
}
-
- radeon_mutex_unlock(&rdev->cs_mutex);
-
- if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(rdev->dev, "GPU reset failed\n");
- }
-
+ /* bad news, how to tell it to userspace ? */
+ dev_info(rdev->dev, "GPU reset failed\n");
return r;
}
@@ -1014,29 +977,33 @@ int radeon_gpu_reset(struct radeon_device *rdev)
/*
* Debugfs
*/
+struct radeon_debugfs {
+ struct drm_info_list *files;
+ unsigned num_files;
+};
+static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
+static unsigned _radeon_debugfs_count = 0;
+
int radeon_debugfs_add_files(struct radeon_device *rdev,
struct drm_info_list *files,
unsigned nfiles)
{
unsigned i;
- for (i = 0; i < rdev->debugfs_count; i++) {
- if (rdev->debugfs[i].files == files) {
+ for (i = 0; i < _radeon_debugfs_count; i++) {
+ if (_radeon_debugfs[i].files == files) {
/* Already registered */
return 0;
}
}
-
- i = rdev->debugfs_count + 1;
- if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
- DRM_ERROR("Reached maximum number of debugfs components.\n");
- DRM_ERROR("Report so we increase "
- "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
+ if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
+ DRM_ERROR("Reached maximum number of debugfs files.\n");
+ DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
return -EINVAL;
}
- rdev->debugfs[rdev->debugfs_count].files = files;
- rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
- rdev->debugfs_count = i;
+ _radeon_debugfs[_radeon_debugfs_count].files = files;
+ _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
+ _radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
rdev->ddev->control->debugfs_root,
@@ -1048,22 +1015,6 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
return 0;
}
-static void radeon_debugfs_remove_files(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
- unsigned i;
-
- for (i = 0; i < rdev->debugfs_count; i++) {
- drm_debugfs_remove_files(rdev->debugfs[i].files,
- rdev->debugfs[i].num_files,
- rdev->ddev->control);
- drm_debugfs_remove_files(rdev->debugfs[i].files,
- rdev->debugfs[i].num_files,
- rdev->ddev->primary);
- }
-#endif
-}
-
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
@@ -1072,5 +1023,11 @@ int radeon_debugfs_init(struct drm_minor *minor)
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
+ unsigned i;
+
+ for (i = 0; i < _radeon_debugfs_count; i++) {
+ drm_debugfs_remove_files(_radeon_debugfs[i].files,
+ _radeon_debugfs[i].num_files, minor);
+ }
}
#endif
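
The debugfs rework above trades per-device bookkeeping for a fixed module-global table that deduplicates registrations by pointer and is walked again at cleanup time. A compact userspace model of the registration logic, including its quirk of checking capacity in files while consuming one slot per component; constants and the harness are illustrative:

#include <stdio.h>

#define MAX_NUM_FILES 32

struct component { const void *files; unsigned num_files; };

static struct component table[MAX_NUM_FILES];
static unsigned count;

static int add_files(const void *files, unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < count; i++)
		if (table[i].files == files)
			return 0;		/* already registered */
	/* capacity is checked in files but consumed per component,
	 * mirroring the restored code above */
	if (count + nfiles > MAX_NUM_FILES)
		return -1;			/* -EINVAL in the driver */
	table[count].files = files;
	table[count].num_files = nfiles;
	count++;
	return 0;
}

int main(void)
{
	static const int dummy[2];

	printf("%d %d\n", add_files(dummy, 2), add_files(dummy, 2));
	return 0;
}
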
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a3e07c2..0896fae 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -33,6 +33,8 @@
#include "drm_crtc_helper.h"
#include "drm_edid.h"
+static int radeon_ddc_dump(struct drm_connector *connector);
+
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -280,7 +282,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
work = radeon_crtc->unpin_work;
if (work == NULL ||
- (work->fence && !radeon_fence_signaled(work->fence))) {
+ !radeon_fence_signaled(work->fence)) {
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
@@ -303,17 +305,8 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
if (update_pending &&
(DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
&vpos, &hpos)) &&
- ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
- (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
- /* crtc didn't flip in this target vblank interval,
- * but flip is pending in crtc. Based on the current
- * scanout position we know that the current frame is
- * (nearly) complete and the flip will (likely)
- * complete before the start of the next frame.
- */
- update_pending = 0;
- }
- if (update_pending) {
+ (vpos >= 0) &&
+ (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) {
/* crtc didn't flip in this target vblank interval,
* but flip is pending in crtc. It will complete it
* in next vblank interval, so complete the flip at
@@ -355,6 +348,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
struct radeon_framebuffer *new_radeon_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
+ struct radeon_fence *fence;
struct radeon_unpin_work *work;
unsigned long flags;
u32 tiling_flags, pitch_pixels;
@@ -365,9 +359,16 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
if (work == NULL)
return -ENOMEM;
+ r = radeon_fence_create(rdev, &fence);
+ if (unlikely(r != 0)) {
+ kfree(work);
+ DRM_ERROR("flip queue: failed to create fence.\n");
+ return -ENOMEM;
+ }
work->event = event;
work->rdev = rdev;
work->crtc_id = radeon_crtc->crtc_id;
+ work->fence = radeon_fence_ref(fence);
old_radeon_fb = to_radeon_framebuffer(crtc->fb);
new_radeon_fb = to_radeon_framebuffer(fb);
/* schedule unpin of the old buffer */
@@ -376,10 +377,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
drm_gem_object_reference(obj);
rbo = gem_to_radeon_bo(obj);
work->old_rbo = rbo;
- obj = new_radeon_fb->obj;
- rbo = gem_to_radeon_bo(obj);
- if (rbo->tbo.sync_obj)
- work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
INIT_WORK(&work->work, radeon_unpin_work_func);
/* We borrow the event spin lock for protecting unpin_work */
@@ -394,6 +391,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
/* pin the new buffer */
+ obj = new_radeon_fb->obj;
+ rbo = gem_to_radeon_bo(obj);
+
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
work->old_rbo, rbo);
@@ -402,9 +402,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
DRM_ERROR("failed to reserve new rbo buffer before flip\n");
goto pflip_cleanup;
}
- /* Only 27 bit offset for legacy CRTC */
- r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
- ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
r = -EINVAL;
@@ -417,7 +415,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
base -= radeon_crtc->legacy_display_base_addr;
- pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
+ pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev)) {
@@ -463,18 +461,37 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
goto pflip_cleanup1;
}
+ /* 32 ought to cover us */
+ r = radeon_ring_lock(rdev, 32);
+ if (r) {
+ DRM_ERROR("failed to lock the ring before flip\n");
+ goto pflip_cleanup2;
+ }
+
+ /* emit the fence */
+ radeon_fence_emit(rdev, fence);
/* set the proper interrupt */
radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
+ /* fire the ring */
+ radeon_ring_unlock_commit(rdev);
return 0;
+pflip_cleanup2:
+ drm_vblank_put(dev, radeon_crtc->crtc_id);
+
pflip_cleanup1:
- if (unlikely(radeon_bo_reserve(rbo, false) != 0)) {
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
goto pflip_cleanup;
}
- if (unlikely(radeon_bo_unpin(rbo) != 0)) {
+ r = radeon_bo_unpin(rbo);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ r = -EINVAL;
DRM_ERROR("failed to unpin new rbo in error path\n");
+ goto pflip_cleanup;
}
radeon_bo_unreserve(rbo);
@@ -482,9 +499,9 @@ pflip_cleanup:
spin_lock_irqsave(&dev->event_lock, flags);
radeon_crtc->unpin_work = NULL;
unlock_free:
- spin_unlock_irqrestore(&dev->event_lock, flags);
drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
- radeon_fence_unref(&work->fence);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ radeon_fence_unref(&fence);
kfree(work);
return r;
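
The restored page-flip path acquires its resources in order (fence, pinned buffer, vblank reference, ring lock) and unwinds them through the stacked pflip_cleanup2/pflip_cleanup1/pflip_cleanup labels on failure. A toy sketch of that unwinding pattern; the step names are illustrative, not the driver's call sequence verbatim:

#include <stdio.h>

static int step(const char *what, int fail)
{
	printf("%s: %s\n", what, fail ? "fail" : "ok");
	return fail ? -1 : 0;
}

static int flip(int fail_at)
{
	int r;

	if ((r = step("create fence", fail_at == 1)))
		goto out;
	if ((r = step("pin new buffer", fail_at == 2)))
		goto unref_fence;
	if ((r = step("get vblank ref", fail_at == 3)))
		goto unpin;
	if ((r = step("lock ring", fail_at == 4)))
		goto put_vblank;
	return 0;

put_vblank:
	puts("undo: drop vblank reference");
unpin:
	puts("undo: unpin buffer");
unref_fence:
	puts("undo: unref fence");
out:
	return r;
}

int main(void)
{
	flip(4);	/* failing at the ring lock walks all three undo steps */
	return 0;
}
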
@@ -678,6 +695,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
static bool radeon_setup_enc_conn(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *drm_connector;
bool ret = false;
if (rdev->bios) {
@@ -697,6 +715,8 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
if (ret) {
radeon_setup_encoder_clones(dev);
radeon_print_display_setup(dev);
+ list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
+ radeon_ddc_dump(drm_connector);
}
return ret;
@@ -713,22 +733,16 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
radeon_router_select_ddc_port(radeon_connector);
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
- (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
- ENCODER_OBJECT_ID_NONE)) {
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
-
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &dig->dp_i2c_bus->adapter);
- else if (radeon_connector->ddc_bus && !radeon_connector->edid)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- } else {
- if (radeon_connector->ddc_bus && !radeon_connector->edid)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
+ }
+ if (!radeon_connector->ddc_bus)
+ return -1;
+ if (!radeon_connector->edid) {
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
}
if (!radeon_connector->edid) {
@@ -750,6 +764,34 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
return 0;
}
+static int radeon_ddc_dump(struct drm_connector *connector)
+{
+ struct edid *edid;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret = 0;
+
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+ if (!radeon_connector->ddc_bus)
+ return -1;
+ edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
+ /* Log EDID retrieval status here. In particular for connectors with
+ * the requires_extended_probe flag set, which prevents
+ * radeon_dvi_detect() from fetching the EDID on this connector
+ * as long as no valid EDID header is found */
+ if (edid) {
+ DRM_INFO("Radeon display connector %s: Found valid EDID",
+ drm_get_connector_name(connector));
+ kfree(edid);
+ } else {
+ DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID",
+ drm_get_connector_name(connector));
+ }
+ return ret;
+}
+
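
radeon_ddc_dump() reports "valid EDID" based on whether drm_get_edid() returned a block at all, and drm_get_edid() rejects blocks whose first bytes do not match the fixed 8-byte EDID header (among other validity checks). A self-contained sketch of that header check:

#include <stdio.h>
#include <string.h>

static const unsigned char edid_header[8] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

static int edid_header_ok(const unsigned char *block)
{
	return memcmp(block, edid_header, sizeof(edid_header)) == 0;
}

int main(void)
{
	unsigned char good[128] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	unsigned char bad[128] = { 0 };

	printf("good: %d  bad: %d\n", edid_header_ok(good), edid_header_ok(bad));
	return 0;
}
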
/* avivo */
static void avivo_get_fb_div(struct radeon_pll *pll,
u32 target_clock,
@@ -1089,36 +1131,29 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
.create_handle = radeon_user_framebuffer_create_handle,
};
-int
+void
radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
- struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj)
{
- int ret;
rfb->obj = obj;
- ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
- if (ret) {
- rfb->obj = NULL;
- return ret;
- }
+ drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
- return 0;
}
static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd2 *mode_cmd)
+ struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
struct radeon_framebuffer *radeon_fb;
- int ret;
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (obj == NULL) {
dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
- "can't create framebuffer\n", mode_cmd->handles[0]);
+ "can't create framebuffer\n", mode_cmd->handle);
return ERR_PTR(-ENOENT);
}
@@ -1128,12 +1163,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
- if (ret) {
- kfree(radeon_fb);
- drm_gem_object_unreference_unlocked(obj);
- return NULL;
- }
+ radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
return &radeon_fb->base;
}
@@ -1149,6 +1179,11 @@ static const struct drm_mode_config_funcs radeon_mode_funcs = {
.output_poll_changed = radeon_output_poll_changed
};
+struct drm_prop_enum_list {
+ int type;
+ char *name;
+};
+
static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
{ { 0, "driver" },
{ 1, "bios" },
@@ -1173,53 +1208,86 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
- int sz;
+ int i, sz;
if (rdev->is_atom_bios) {
rdev->mode_info.coherent_mode_property =
- drm_property_create_range(rdev->ddev, 0 , "coherent", 0, 1);
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_RANGE,
+ "coherent", 2);
if (!rdev->mode_info.coherent_mode_property)
return -ENOMEM;
+
+ rdev->mode_info.coherent_mode_property->values[0] = 0;
+ rdev->mode_info.coherent_mode_property->values[1] = 1;
}
if (!ASIC_IS_AVIVO(rdev)) {
sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
rdev->mode_info.tmds_pll_property =
- drm_property_create_enum(rdev->ddev, 0,
- "tmds_pll",
- radeon_tmds_pll_enum_list, sz);
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_ENUM,
+ "tmds_pll", sz);
+ for (i = 0; i < sz; i++) {
+ drm_property_add_enum(rdev->mode_info.tmds_pll_property,
+ i,
+ radeon_tmds_pll_enum_list[i].type,
+ radeon_tmds_pll_enum_list[i].name);
+ }
}
rdev->mode_info.load_detect_property =
- drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_RANGE,
+ "load detection", 2);
if (!rdev->mode_info.load_detect_property)
return -ENOMEM;
+ rdev->mode_info.load_detect_property->values[0] = 0;
+ rdev->mode_info.load_detect_property->values[1] = 1;
drm_mode_create_scaling_mode_property(rdev->ddev);
sz = ARRAY_SIZE(radeon_tv_std_enum_list);
rdev->mode_info.tv_std_property =
- drm_property_create_enum(rdev->ddev, 0,
- "tv standard",
- radeon_tv_std_enum_list, sz);
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_ENUM,
+ "tv standard", sz);
+ for (i = 0; i < sz; i++) {
+ drm_property_add_enum(rdev->mode_info.tv_std_property,
+ i,
+ radeon_tv_std_enum_list[i].type,
+ radeon_tv_std_enum_list[i].name);
+ }
sz = ARRAY_SIZE(radeon_underscan_enum_list);
rdev->mode_info.underscan_property =
- drm_property_create_enum(rdev->ddev, 0,
- "underscan",
- radeon_underscan_enum_list, sz);
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_ENUM,
+ "underscan", sz);
+ for (i = 0; i < sz; i++) {
+ drm_property_add_enum(rdev->mode_info.underscan_property,
+ i,
+ radeon_underscan_enum_list[i].type,
+ radeon_underscan_enum_list[i].name);
+ }
rdev->mode_info.underscan_hborder_property =
- drm_property_create_range(rdev->ddev, 0,
- "underscan hborder", 0, 128);
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_RANGE,
+ "underscan hborder", 2);
if (!rdev->mode_info.underscan_hborder_property)
return -ENOMEM;
+ rdev->mode_info.underscan_hborder_property->values[0] = 0;
+ rdev->mode_info.underscan_hborder_property->values[1] = 128;
rdev->mode_info.underscan_vborder_property =
- drm_property_create_range(rdev->ddev, 0,
- "underscan vborder", 0, 128);
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_RANGE,
+ "underscan vborder", 2);
if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM;
+ rdev->mode_info.underscan_vborder_property->values[0] = 0;
+ rdev->mode_info.underscan_vborder_property->values[1] = 128;
return 0;
}
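
This hunk steps back from the drm_property_create_range()/drm_property_create_enum() helpers to the older two-stage API: allocate the property with a value count, then fill values[0]/values[1] (or add enum entries) by hand. A toy model of the range case, with simplified stand-in types rather than the real DRM structs:

#include <stdio.h>
#include <stdlib.h>

struct prop { const char *name; unsigned long long values[2]; };

static struct prop *prop_create(const char *name, int num_values)
{
	struct prop *p;

	if (num_values != 2)		/* sketch handles only 2-slot ranges */
		return NULL;
	p = calloc(1, sizeof(*p));
	if (p)
		p->name = name;
	return p;
}

static struct prop *prop_create_range(const char *name,
				      unsigned long long min,
				      unsigned long long max)
{
	struct prop *p = prop_create(name, 2);	/* what the newer helper wraps */

	if (p) {
		p->values[0] = min;
		p->values[1] = max;
	}
	return p;
}

int main(void)
{
	struct prop *p = prop_create_range("underscan hborder", 0, 128);

	if (p)
		printf("%s: [%llu, %llu]\n", p->name, p->values[0], p->values[1]);
	free(p);
	return 0;
}
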
@@ -1265,9 +1333,6 @@ int radeon_modeset_init(struct radeon_device *rdev)
rdev->ddev->mode_config.max_height = 4096;
}
- rdev->ddev->mode_config.preferred_depth = 24;
- rdev->ddev->mode_config.prefer_shadow = 1;
-
rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
ret = radeon_modeset_create_props(rdev);
@@ -1295,11 +1360,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
return ret;
}
- /* init dig PHYs, disp eng pll */
- if (rdev->is_atom_bios) {
+ /* init dig PHYs */
+ if (rdev->is_atom_bios)
radeon_atom_encoder_init(rdev);
- radeon_atom_dcpll_init(rdev);
- }
/* initialize hpd */
radeon_hpd_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 498d21d..60e1605 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -36,7 +36,6 @@
#include "drm_pciids.h"
#include <linux/console.h>
-#include <linux/module.h>
/*
@@ -52,13 +51,9 @@
* 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
* 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
* 2.10.0 - fusion 2D tiling
- * 2.11.0 - backend map, initial compute support for the CS checker
- * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
- * 2.13.0 - virtual memory support, streamout
- * 2.14.0 - add evergreen tiling informations
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 14
+#define KMS_DRIVER_MINOR 10
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -86,10 +81,6 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
-int radeon_gem_object_open(struct drm_gem_object *obj,
- struct drm_file *file_priv);
-void radeon_gem_object_close(struct drm_gem_object *obj,
- struct drm_file *file_priv);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
int *vpos, int *hpos);
extern struct drm_ioctl_desc radeon_ioctls_kms[];
@@ -146,7 +137,7 @@ module_param_named(vramlimit, radeon_vram_limit, int, 0600);
MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
module_param_named(agpmode, radeon_agpmode, int, 0444);
-MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc)");
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32,64, etc)\n");
module_param_named(gartsize, radeon_gart_size, int, 0600);
MODULE_PARM_DESC(benchmark, "Run benchmark");
@@ -212,21 +203,6 @@ static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
-static const struct file_operations radeon_driver_old_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = radeon_compat_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
static struct drm_driver driver_old = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -253,7 +229,21 @@ static struct drm_driver driver_old = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = radeon_ioctls,
.dma_ioctl = radeon_cp_buffers,
- .fops = &radeon_driver_old_fops,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = radeon_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+ },
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -311,20 +301,6 @@ radeon_pci_resume(struct pci_dev *pdev)
return radeon_resume_kms(dev);
}
-static const struct file_operations radeon_driver_kms_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = radeon_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = radeon_kms_compat_ioctl,
-#endif
-};
-
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -356,13 +332,24 @@ static struct drm_driver kms_driver = {
.ioctls = radeon_ioctls_kms,
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
- .gem_open_object = radeon_gem_object_open,
- .gem_close_object = radeon_gem_object_close,
.dma_ioctl = radeon_dma_ioctl_kms,
.dumb_create = radeon_mode_dumb_create,
.dumb_map_offset = radeon_mode_dumb_mmap,
.dumb_destroy = radeon_mode_dumb_destroy,
- .fops = &radeon_driver_kms_fops,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = radeon_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = radeon_kms_compat_ioctl,
+#endif
+ },
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
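
Both driver hunks above undo the split of the file operations into standalone static const tables, going back to a file_operations embedded by value inside struct drm_driver. A minimal sketch of the two shapes, with toy types standing in for the kernel structs:

#include <stdio.h>

struct fops { int (*open)(void); };

static int my_open(void) { return 1; }

/* newer shape: the driver points at a shared const table */
static const struct fops driver_fops = { .open = my_open };
struct driver_a { const struct fops *fops; };

/* older shape restored by this merge: the table is embedded by value */
struct driver_b { struct fops fops; };

int main(void)
{
	struct driver_a a = { .fops = &driver_fops };
	struct driver_b b = { .fops = { .open = my_open } };

	printf("%d %d\n", a.fops->open(), b.fops.open());
	return 0;
}
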
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 26e9270..8a171b2 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -29,6 +29,12 @@
#include "radeon.h"
#include "atom.h"
+extern int atom_debug;
+
+/* evil but including atombios.h is much worse */
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+ struct drm_display_mode *mode);
+
static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -150,6 +156,27 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
return ret;
}
+static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return true;
+ default:
+ return false;
+ }
+}
+
void
radeon_link_encoder_connector(struct drm_device *dev)
{
@@ -202,7 +229,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
-struct drm_connector *
+static struct drm_connector *
radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -218,7 +245,7 @@ radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
return NULL;
}
-struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
+struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -239,9 +266,9 @@ struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
return NULL;
}
-u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
+bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder)
{
- struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder);
+ struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder);
if (other_encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
@@ -249,12 +276,13 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
- return radeon_encoder->encoder_id;
+ return true;
default:
- return ENCODER_OBJECT_ID_NONE;
+ return false;
}
}
- return ENCODER_OBJECT_ID_NONE;
+
+ return false;
}
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
@@ -304,13 +332,339 @@ void radeon_panel_mode_fixup(struct drm_encoder *encoder,
}
-bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
- u32 pixel_clock)
+static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* set the active encoder to connector routing */
+ radeon_encoder_set_active_device(encoder);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ /* hw bug */
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+ && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
+
+ /* get the native mode for TV */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+ struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+ if (tv_dac) {
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M)
+ radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
+ else
+ radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
+ }
+ }
+
+ if (ASIC_IS_DCE3(rdev) &&
+ ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ radeon_encoder_is_dp_bridge(encoder))) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ radeon_dp_set_link_config(connector, mode);
+ }
+
+ return true;
+}
+
+static void
+atombios_dac_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DAC_ENCODER_CONTROL_PS_ALLOCATION args;
+ int index = 0;
+ struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
+ break;
+ }
+
+ args.ucAction = action;
+
+ if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
+ args.ucDacStandard = ATOM_DAC1_PS2;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.ucDacStandard = ATOM_DAC1_CV;
+ else {
+ switch (dac_info->tv_std) {
+ case TV_STD_PAL:
+ case TV_STD_PAL_M:
+ case TV_STD_SCART_PAL:
+ case TV_STD_SECAM:
+ case TV_STD_PAL_CN:
+ args.ucDacStandard = ATOM_DAC1_PAL;
+ break;
+ case TV_STD_NTSC:
+ case TV_STD_NTSC_J:
+ case TV_STD_PAL_60:
+ default:
+ args.ucDacStandard = ATOM_DAC1_NTSC;
+ break;
+ }
+ }
+ args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static void
+atombios_tv_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ TV_ENCODER_CONTROL_PS_ALLOCATION args;
+ int index = 0;
+ struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+ memset(&args, 0, sizeof(args));
+
+ index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
+
+ args.sTVEncoder.ucAction = action;
+
+ if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
+ else {
+ switch (dac_info->tv_std) {
+ case TV_STD_NTSC:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+ break;
+ case TV_STD_PAL:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
+ break;
+ case TV_STD_PAL_M:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
+ break;
+ case TV_STD_PAL_60:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
+ break;
+ case TV_STD_NTSC_J:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
+ break;
+ case TV_STD_SCART_PAL:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
+ break;
+ case TV_STD_SECAM:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
+ break;
+ case TV_STD_PAL_CN:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
+ break;
+ default:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+ break;
+ }
+ }
+
+ args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+union dvo_encoder_control {
+ ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
+ DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
+ DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
+};
+
+void
+atombios_dvo_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ union dvo_encoder_control args;
+ int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+
+ memset(&args, 0, sizeof(args));
+
+ if (ASIC_IS_DCE3(rdev)) {
+ /* DCE3+ */
+ args.dvo_v3.ucAction = action;
+ args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.dvo_v3.ucDVOConfig = 0; /* XXX */
+ } else if (ASIC_IS_DCE2(rdev)) {
+ /* DCE2 (pre-DCE3 R6xx, RS600/690/740) */
+ args.dvo.sDVOEncoder.ucAction = action;
+ args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ /* DFP1, CRT1, TV1 depending on the type of port */
+ args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
+
+ if (radeon_encoder->pixel_clock > 165000)
+ args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
+ } else {
+ /* R4xx, R5xx */
+ args.ext_tmds.sXTmdsEncoder.ucEnable = action;
+
+ if (radeon_encoder->pixel_clock > 165000)
+ args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+
+ /*if (pScrn->rgbBits == 8)*/
+ args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
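
atombios_dvo_setup(), like the other setup helpers here, converts the pixel clock from kHz into the 10 kHz units the ATOM tables expect, and flips the dual-link bit once the clock exceeds the 165 MHz single-link TMDS limit. A sketch of that arithmetic; the example clock is illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pixel_clock = 268500;		/* kHz; e.g. 2560x1600@60 */
	uint16_t atom_clock = pixel_clock / 10;	/* ATOM wants 10 kHz units */
	int dual_link = pixel_clock > 165000;	/* single-link TMDS limit */

	printf("usPixelClock=%u dual_link=%d\n", atom_clock, dual_link);
	return 0;
}
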
+union lvds_encoder_control {
+ LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
+ LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
+};
+
+void
+atombios_digital_setup(struct drm_encoder *encoder, int action)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ union lvds_encoder_control args;
+ int index = 0;
+ int hdmi_detected = 0;
+ uint8_t frev, crev;
+
+ if (!dig)
+ return;
+
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ hdmi_detected = 1;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
+ break;
+ }
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ case 2:
+ switch (crev) {
+ case 1:
+ args.v1.ucMisc = 0;
+ args.v1.ucAction = action;
+ if (hdmi_detected)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+ args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+ } else {
+ if (dig->linkb)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ /*if (pScrn->rgbBits == 8) */
+ args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+ }
+ break;
+ case 2:
+ case 3:
+ args.v2.ucMisc = 0;
+ args.v2.ucAction = action;
+ if (crev == 3) {
+ if (dig->coherent_mode)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
+ }
+ if (hdmi_detected)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+ args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v2.ucTruncate = 0;
+ args.v2.ucSpatial = 0;
+ args.v2.ucTemporal = 0;
+ args.v2.ucFRC = 0;
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
+ args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+ args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
+ }
+ if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
+ args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+ args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
+ if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
+ args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
+ }
+ } else {
+ if (dig->linkb)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+int
+atombios_get_encoder_mode(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
+ /* dp bridges are always DP */
+ if (radeon_encoder_is_dp_bridge(encoder))
+ return ATOM_ENCODER_MODE_DP;
+
+ /* DVO is always DVO */
+ if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
+ return ATOM_ENCODER_MODE_DVO;
+
connector = radeon_get_connector_for_encoder(encoder);
/* if we don't have an active device yet, just use one of
* the connectors tied to the encoder.
@@ -321,45 +675,1747 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_HDMIB:
- if (radeon_connector->use_digital) {
- /* HDMI 1.3 supports up to 340 MHz over single link */
- if (0 && drm_detect_hdmi_monitor(radeon_connector->edid)) {
- if (pixel_clock > 340000)
- return true;
- else
- return false;
- } else {
- if (pixel_clock > 165000)
- return true;
- else
- return false;
- }
- } else
- return false;
+ case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
+ if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else if (radeon_connector->use_digital)
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_CRT;
+ break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
+ default:
+ if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else
+ return ATOM_ENCODER_MODE_DVI;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ return ATOM_ENCODER_MODE_LVDS;
+ break;
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
- return false;
- else {
- /* HDMI 1.3 supports up to 340 MHz over single link */
- if (0 && drm_detect_hdmi_monitor(radeon_connector->edid)) {
- if (pixel_clock > 340000)
- return true;
+ return ATOM_ENCODER_MODE_DP;
+ else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else
+ return ATOM_ENCODER_MODE_DVI;
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ return ATOM_ENCODER_MODE_DP;
+ case DRM_MODE_CONNECTOR_DVIA:
+ case DRM_MODE_CONNECTOR_VGA:
+ return ATOM_ENCODER_MODE_CRT;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ /* fix me */
+ return ATOM_ENCODER_MODE_TV;
+ /*return ATOM_ENCODER_MODE_CV;*/
+ break;
+ }
+}
+
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 3.0/3.1
+ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
+ * Supports up to 3 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1 can drive UNIPHY link A or link B
+ * DIG2 can drive UNIPHY link B or LVTMA
+ *
+ * DCE 3.2
+ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
+ * Supports up to 5 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * DCE 4.0/5.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 6 DIG encoder blocks.
+ * - DIG to PHY mapping is hardcoded
+ * DIG1 drives UNIPHY0 link A, A+B
+ * DIG2 drives UNIPHY0 link B
+ * DIG3 drives UNIPHY1 link A, A+B
+ * DIG4 drives UNIPHY1 link B
+ * DIG5 drives UNIPHY2 link A, A+B
+ * DIG6 drives UNIPHY2 link B
+ *
+ * DCE 4.1
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link B -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
+ */
+
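
A small lookup capturing the hardcoded DCE 4.0/5.0 DIG-to-PHY routing described in the comment above. Purely illustrative: the driver encodes this through the ucDigSel/transmitter-select fields rather than a table:

#include <stdio.h>

static const char * const dig_to_phy[6] = {
	"UNIPHY0 link A",	/* DIG1 (drives A+B when dual link) */
	"UNIPHY0 link B",	/* DIG2 */
	"UNIPHY1 link A",	/* DIG3 (A+B when dual link) */
	"UNIPHY1 link B",	/* DIG4 */
	"UNIPHY2 link A",	/* DIG5 (A+B when dual link) */
	"UNIPHY2 link B",	/* DIG6 */
};

int main(void)
{
	int dig;

	for (dig = 0; dig < 6; dig++)
		printf("DIG%d -> %s\n", dig + 1, dig_to_phy[dig]);
	return 0;
}
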
+union dig_encoder_control {
+ DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
+ DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+ DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
+};
+
+void
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ union dig_encoder_control args;
+ int index = 0;
+ uint8_t frev, crev;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int hpd_id = RADEON_HPD_NONE;
+ int bpc = 8;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ hpd_id = radeon_connector->hpd.hpd;
+ bpc = connector->display_info.bpc;
+ }
+
+ /* no dig encoder assigned */
+ if (dig->dig_encoder == -1)
+ return;
+
+ memset(&args, 0, sizeof(args));
+
+ if (ASIC_IS_DCE4(rdev))
+ index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
+ else {
+ if (dig->dig_encoder)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ }
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ args.v1.ucAction = action;
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+ args.v3.ucPanelMode = panel_mode;
+ else
+ args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
+ (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
+ args.v1.ucLaneNum = dp_lane_count;
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucLaneNum = 8;
+ else
+ args.v1.ucLaneNum = 4;
+
+ if (ASIC_IS_DCE5(rdev)) {
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
+ (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
+ if (dp_clock == 270000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+ else if (dp_clock == 540000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+ }
+ args.v4.acConfig.ucDigSel = dig->dig_encoder;
+ switch (bpc) {
+ case 0:
+ args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
+ break;
+ case 6:
+ args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
+ break;
+ case 8:
+ default:
+ args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case 10:
+ args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case 12:
+ args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case 16:
+ args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ }
+ if (hpd_id == RADEON_HPD_NONE)
+ args.v4.ucHPD_ID = 0;
+ else
+ args.v4.ucHPD_ID = hpd_id + 1;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+ args.v3.acConfig.ucDigSel = dig->dig_encoder;
+ switch (bpc) {
+ case 0:
+ args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
+ break;
+ case 6:
+ args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
+ break;
+ case 8:
+ default:
+ args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case 10:
+ args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case 12:
+ args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case 16:
+ args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ }
+ } else {
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
+ break;
+ }
+ if (dig->linkb)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ else
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
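
The bpc switch that atombios_dig_encoder_setup() repeats for the v3 and v4 tables maps connector bit depth onto ATOM panel codes, defaulting to 8 bpc. A sketch of the mapping; the returned numbers are illustrative stand-ins for the PANEL_*BIT_PER_COLOR constants, not their real values:

#include <stdio.h>

static int atom_bpc_code(int bpc)
{
	switch (bpc) {
	case 0:
		return -1;	/* PANEL_BPC_UNDEFINE */
	case 6:
		return 6;	/* PANEL_6BIT_PER_COLOR */
	case 8:
	default:
		return 8;	/* PANEL_8BIT_PER_COLOR: fallback, as above */
	case 10:
		return 10;	/* PANEL_10BIT_PER_COLOR */
	case 12:
		return 12;	/* PANEL_12BIT_PER_COLOR */
	case 16:
		return 16;	/* PANEL_16BIT_PER_COLOR */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       atom_bpc_code(0), atom_bpc_code(7), atom_bpc_code(10));
	return 0;
}
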
+union dig_transmitter_control {
+ DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
+};
+
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector;
+ union dig_transmitter_control args;
+ int index = 0;
+ uint8_t frev, crev;
+ bool is_dp = false;
+ int pll_id = 0;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int connector_object_id = 0;
+ int igp_lane_info = 0;
+ int dig_encoder = dig->dig_encoder;
+
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ connector = radeon_get_connector_for_encoder_init(encoder);
+ /* just needed to avoid bailing in the encoder check. the encoder
+ * isn't used for init
+ */
+ dig_encoder = 0;
+ } else
+ connector = radeon_get_connector_for_encoder(encoder);
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ connector_object_id =
+ (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ igp_lane_info = dig_connector->igp_lane_info;
+ }
+
+ /* no dig encoder assigned */
+ if (dig_encoder == -1)
+ return;
+
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
+ is_dp = true;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+ break;
+ }
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ args.v1.ucAction = action;
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ args.v1.usInitInfo = cpu_to_le16(connector_object_id);
+ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+ args.v1.asMode.ucLaneSel = lane_num;
+ args.v1.asMode.ucLaneSet = lane_set;
+ } else {
+ if (is_dp)
+ args.v1.usPixelClock =
+ cpu_to_le16(dp_clock / 10);
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ else
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ }
+ if (ASIC_IS_DCE4(rdev)) {
+ if (is_dp)
+ args.v3.ucLaneNum = dp_lane_count;
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v3.ucLaneNum = 8;
+ else
+ args.v3.ucLaneNum = 4;
+
+ if (dig->linkb)
+ args.v3.acConfig.ucLinkSel = 1;
+ if (dig_encoder & 1)
+ args.v3.acConfig.ucEncoderSel = 1;
+
+ /* Select the PLL for the PHY
+ * DP PHY should be clocked from external src if there is
+ * one.
+ */
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ pll_id = radeon_crtc->pll_id;
+ }
+
+ if (ASIC_IS_DCE5(rdev)) {
+ /* On DCE5 DCPLL usually generates the DP ref clock */
+ if (is_dp) {
+ if (rdev->clock.dp_extclk)
+ args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
else
- return false;
+ args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
+ } else
+ args.v4.acConfig.ucRefClkSource = pll_id;
+ } else {
+ /* On DCE4, if there is an external clock, it generates the DP ref clock */
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v3.acConfig.ucRefClkSource = 2; /* external src */
+ else
+ args.v3.acConfig.ucRefClkSource = pll_id;
+ }
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v3.acConfig.ucTransmitterSel = 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.v3.acConfig.ucTransmitterSel = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v3.acConfig.ucTransmitterSel = 2;
+ break;
+ }
+
+ if (is_dp)
+ args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v3.acConfig.fCoherentMode = 1;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v3.acConfig.fDualLinkConnector = 1;
+ }
+ } else if (ASIC_IS_DCE32(rdev)) {
+ args.v2.acConfig.ucEncoderSel = dig_encoder;
+ if (dig->linkb)
+ args.v2.acConfig.ucLinkSel = 1;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v2.acConfig.ucTransmitterSel = 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.v2.acConfig.ucTransmitterSel = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v2.acConfig.ucTransmitterSel = 2;
+ break;
+ }
+
+ if (is_dp) {
+ args.v2.acConfig.fCoherentMode = 1;
+ args.v2.acConfig.fDPConnector = 1;
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v2.acConfig.fCoherentMode = 1;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v2.acConfig.fDualLinkConnector = 1;
+ }
+ } else {
+ args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+
+ if (dig_encoder)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+
+ if ((rdev->flags & RADEON_IS_IGP) &&
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
+ if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
+ if (igp_lane_info & 0x1)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ else if (igp_lane_info & 0x2)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+ else if (igp_lane_info & 0x4)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+ else if (igp_lane_info & 0x8)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
} else {
- if (pixel_clock > 165000)
- return true;
+ if (igp_lane_info & 0x3)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+ else if (igp_lane_info & 0xc)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
+ }
+ }
+
+ if (dig->linkb)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+ if (is_dp)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+ }
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
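
Editor's note: both the encoder and transmitter tables take usPixelClock in 10 kHz units, and TMDS above the 165 MHz single-link limit runs dual-link at half the clock with eight lanes, while DP uses the link rate instead of the pixel clock. A minimal sketch of that bookkeeping, assuming a hypothetical link_cfg struct (the thresholds and divisors match the code above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct link_cfg {
	uint16_t pixel_clock_10khz;	/* what gets written to usPixelClock */
	uint8_t  lane_num;
};

/* pixel_clock in kHz, as stored in radeon_encoder->pixel_clock */
static struct link_cfg pick_link_cfg(int pixel_clock, bool is_dp,
				     int dp_clock, int dp_lane_count)
{
	struct link_cfg c;

	if (is_dp) {
		/* DP runs at the link rate, not the pixel clock */
		c.pixel_clock_10khz = dp_clock / 10;
		c.lane_num = dp_lane_count;
	} else if (pixel_clock > 165000) {
		/* dual-link TMDS: half the clock on each link, 8 lanes total */
		c.pixel_clock_10khz = (pixel_clock / 2) / 10;
		c.lane_num = 8;
	} else {
		c.pixel_clock_10khz = pixel_clock / 10;
		c.lane_num = 4;
	}
	return c;
}

int main(void)
{
	struct link_cfg c = pick_link_cfg(297000, false, 0, 0);

	printf("clock=%u lanes=%u\n", (unsigned)c.pixel_clock_10khz,
	       (unsigned)c.lane_num);
	return 0;
}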
+
+bool
+atombios_set_edp_panel_power(struct drm_connector *connector, int action)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ union dig_transmitter_control args;
+ int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+ uint8_t frev, crev;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+ goto done;
+
+ if (!ASIC_IS_DCE4(rdev))
+ goto done;
+
+ if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
+ (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
+ goto done;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ goto done;
+
+ memset(&args, 0, sizeof(args));
+
+ args.v1.ucAction = action;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ /* wait for the panel to power up */
+ if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
+ int i;
+
+ for (i = 0; i < 300; i++) {
+ if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+ return true;
+ mdelay(1);
+ }
+ return false;
+ }
+done:
+ return true;
+}
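
Editor's note: after powering an eDP panel on, atombios_set_edp_panel_power() polls HPD once per millisecond for up to 300 ms before giving up. The same poll-until-timeout shape in userspace, with usleep() standing in for mdelay() and a caller-supplied predicate in place of radeon_hpd_sense():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* poll pred() once per millisecond for up to timeout_ms milliseconds */
static bool poll_until(bool (*pred)(void *), void *ctx, int timeout_ms)
{
	int i;

	for (i = 0; i < timeout_ms; i++) {
		if (pred(ctx))
			return true;
		usleep(1000);	/* mdelay(1) in the kernel version */
	}
	return false;		/* panel never reported hot-plug */
}

static bool always_false(void *ctx) { (void)ctx; return false; }

int main(void)
{
	printf("sensed=%d\n", poll_until(always_false, NULL, 3));
	return 0;
}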
+
+union external_encoder_control {
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
+};
+
+static void
+atombios_external_encoder_setup(struct drm_encoder *encoder,
+ struct drm_encoder *ext_encoder,
+ int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
+ union external_encoder_control args;
+ struct drm_connector *connector;
+ int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
+ u8 frev, crev;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int connector_object_id = 0;
+ u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+ int bpc = 8;
+
+ if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+ connector = radeon_get_connector_for_encoder_init(encoder);
+ else
+ connector = radeon_get_connector_for_encoder(encoder);
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ connector_object_id =
+ (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ bpc = connector->display_info.bpc;
+ }
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ /* no params on frev 1 */
+ break;
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ args.v1.sDigEncoder.ucAction = action;
+ args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dp_clock == 270000)
+ args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
+ args.v1.sDigEncoder.ucLaneNum = 8;
+ else
+ args.v1.sDigEncoder.ucLaneNum = 4;
+ break;
+ case 3:
+ args.v3.sExtEncoder.ucAction = action;
+ if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+ args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
+ else
+ args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dp_clock == 270000)
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+ else if (dp_clock == 540000)
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+ args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
+ args.v3.sExtEncoder.ucLaneNum = 8;
+ else
+ args.v3.sExtEncoder.ucLaneNum = 4;
+ switch (ext_enum) {
+ case GRAPH_OBJECT_ENUM_ID1:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+ break;
+ case GRAPH_OBJECT_ENUM_ID2:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+ break;
+ case GRAPH_OBJECT_ENUM_ID3:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+ break;
+ }
+ switch (bpc) {
+ case 0:
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
+ break;
+ case 6:
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
+ break;
+ case 8:
+ default:
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case 10:
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case 12:
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case 16:
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
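
Editor's note: the frev 2 / crev 3 branch above also distinguishes 2.70 GHz and 5.40 GHz DP link rates through config bits keyed on dp_clock. The selection collapses to a small lookup; the flag values below are invented for the example, the real EXTERNAL_ENCODER_CONFIG_V3_* constants come from the ATOM headers:

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for the ATOM config bits */
#define CFG_DPLINKRATE_1_62GHZ 0x00
#define CFG_DPLINKRATE_2_70GHZ 0x01
#define CFG_DPLINKRATE_5_40GHZ 0x02

/* dp_clock in kHz, as reported by the connector */
static uint8_t dp_linkrate_flag(int dp_clock)
{
	if (dp_clock == 540000)
		return CFG_DPLINKRATE_5_40GHZ;
	if (dp_clock == 270000)
		return CFG_DPLINKRATE_2_70GHZ;
	return CFG_DPLINKRATE_1_62GHZ;	/* default: low link rate */
}

int main(void)
{
	printf("flag=%u\n", (unsigned)dp_linkrate_flag(270000));
	return 0;
}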
+
+static void
+atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ ENABLE_YUV_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
+ uint32_t temp, reg;
+
+ memset(&args, 0, sizeof(args));
+
+ if (rdev->family >= CHIP_R600)
+ reg = R600_BIOS_3_SCRATCH;
+ else
+ reg = RADEON_BIOS_3_SCRATCH;
+
+ /* XXX: fix up scratch reg handling */
+ temp = RREG32(reg);
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ WREG32(reg, (ATOM_S3_TV1_ACTIVE |
+ (radeon_crtc->crtc_id << 18)));
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
+ else
+ WREG32(reg, 0);
+
+ if (enable)
+ args.ucEnable = ATOM_ENABLE;
+ args.ucCRTC = radeon_crtc->crtc_id;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ WREG32(reg, temp);
+}
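
Editor's note: atombios_yuv_setup() temporarily rewrites BIOS scratch register 3 so the table sees the correct active device, then restores the saved contents. The save/modify/restore idiom, sketched with a plain variable in place of RREG32()/WREG32():

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for the BIOS_3_SCRATCH register */

static uint32_t rreg32(void)       { return fake_reg; }
static void     wreg32(uint32_t v) { fake_reg = v; }

static void with_scratch_override(uint32_t override, void (*body)(void))
{
	uint32_t saved = rreg32();	/* save */

	wreg32(override);		/* let the table see the override */
	body();
	wreg32(saved);			/* restore the original contents */
}

static void body(void) { printf("table ran with scratch=%#x\n", (unsigned)fake_reg); }

int main(void)
{
	fake_reg = 0xdeadbeef;
	with_scratch_override(0x40000, body);
	printf("restored=%#x\n", (unsigned)fake_reg);
	return 0;
}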
+
+static void
+radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+ int index = 0;
+ bool is_dig = false;
+ bool is_dce5_dac = false;
+ bool is_dce5_dvo = false;
+
+ memset(&args, 0, sizeof(args));
+
+ DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
+ radeon_encoder->encoder_id, mode, radeon_encoder->devices,
+ radeon_encoder->active_device);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ is_dig = true;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ if (ASIC_IS_DCE5(rdev))
+ is_dce5_dvo = true;
+ else if (ASIC_IS_DCE3(rdev))
+ is_dig = true;
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (ASIC_IS_DCE5(rdev))
+ is_dce5_dac = true;
+ else {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
+ break;
+ }
+
+ if (is_dig) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ /* some early dce3.2 boards have a bug in their transmitter control table */
+ if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ else
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+ if (connector &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector =
+ radeon_connector->con_priv;
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ radeon_dig_connector->edp_on = true;
+ }
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+ radeon_dp_link_train(encoder, connector);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+ if (connector &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector =
+ radeon_connector->con_priv;
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
+ radeon_dig_connector->edp_on = false;
+ }
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+ break;
+ }
+ } else if (is_dce5_dac) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dac_setup(encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+ } else if (is_dce5_dvo) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dvo_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+ } else {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ args.ucAction = ATOM_ENABLE;
+ /* workaround for DVOOutputControl on some RS690 systems */
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+ u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+ WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ WREG32(RADEON_BIOS_3_SCRATCH, reg);
+ } else
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.ucAction = ATOM_LCD_BLON;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ args.ucAction = ATOM_DISABLE;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.ucAction = ATOM_LCD_BLOFF;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ break;
+ }
+ }
+
+ if (ext_encoder) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ default:
+ if (ASIC_IS_DCE41(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (ASIC_IS_DCE41(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
+ break;
+ }
+ }
+
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
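
Editor's note: radeon_atom_encoder_dpms() folds the four DRM DPMS states down to enable/disable: only DPMS_ON powers anything up, and standby, suspend and off all share the disable path. Sketched below (the DPMS values match include/drm/drm_mode.h; the action constants are stand-ins for ATOM_ENABLE/ATOM_DISABLE):

#include <stdio.h>

/* values match include/drm/drm_mode.h */
enum { DPMS_ON = 0, DPMS_STANDBY = 1, DPMS_SUSPEND = 2, DPMS_OFF = 3 };
enum { ACTION_DISABLE = 0, ACTION_ENABLE = 1 };	/* stand-ins for ATOM_* */

static int dpms_to_action(int mode)
{
	switch (mode) {
	case DPMS_ON:
		return ACTION_ENABLE;
	case DPMS_STANDBY:
	case DPMS_SUSPEND:
	case DPMS_OFF:
	default:
		/* anything that is not fully on is treated as off */
		return ACTION_DISABLE;
	}
}

int main(void)
{
	printf("standby -> %d\n", dpms_to_action(DPMS_STANDBY));
	return 0;
}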
+
+union crtc_source_param {
+ SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
+ SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
+};
+
+static void
+atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ union crtc_source_param args;
+ int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+ uint8_t frev, crev;
+ struct radeon_encoder_atom_dig *dig;
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ default:
+ if (ASIC_IS_AVIVO(rdev))
+ args.v1.ucCRTC = radeon_crtc->crtc_id;
+ else {
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+ args.v1.ucCRTC = radeon_crtc->crtc_id;
+ } else {
+ args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
+ }
+ }
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
+ args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
+ else
+ args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+ else
+ args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+ else
+ args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+ break;
+ }
+ break;
+ case 2:
+ args.v2.ucCRTC = radeon_crtc->crtc_id;
+ args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ dig = radeon_encoder->enc_priv;
+ switch (dig->dig_encoder) {
+ case 0:
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case 1:
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case 2:
+ args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case 3:
+ args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case 4:
+ args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case 5:
+ args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else
- return false;
+ args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
+ break;
}
+ break;
}
+ break;
default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ /* update scratch regs with new routing */
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static void
+atombios_apply_encoder_quirks(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+ /* Funky macbooks */
+ if ((dev->pdev->device == 0x71C5) &&
+ (dev->pdev->subsystem_vendor == 0x106b) &&
+ (dev->pdev->subsystem_device == 0x0080)) {
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+ uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
+
+ lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+ lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+
+ WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
+ }
+ }
+
+ /* set scaler clears this on some chips */
+ if (ASIC_IS_AVIVO(rdev) &&
+ (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+ if (ASIC_IS_DCE4(rdev)) {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+ EVERGREEN_INTERLEAVE_EN);
+ else
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+ } else {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+ AVIVO_D1MODE_INTERLEAVE_EN);
+ else
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+ }
+ }
+}
+
+static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *test_encoder;
+ struct radeon_encoder_atom_dig *dig;
+ uint32_t dig_enc_in_use = 0;
+
+ /* DCE4/5 */
+ if (ASIC_IS_DCE4(rdev)) {
+ dig = radeon_encoder->enc_priv;
+ if (ASIC_IS_DCE41(rdev)) {
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ } else {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ return 3;
+ else
+ return 2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ return 5;
+ else
+ return 4;
+ break;
+ }
+ }
+ }
+
+	/* on DCE3.2 an encoder can drive any block, so just use the crtc id */
+ if (ASIC_IS_DCE32(rdev)) {
+ return radeon_crtc->crtc_id;
+ }
+
+ /* on DCE3 - LVTMA can only be driven by DIGB */
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_test_encoder;
+
+ if (encoder == test_encoder)
+ continue;
+
+ if (!radeon_encoder_is_digital(test_encoder))
+ continue;
+
+ radeon_test_encoder = to_radeon_encoder(test_encoder);
+ dig = radeon_test_encoder->enc_priv;
+
+ if (dig->dig_encoder >= 0)
+ dig_enc_in_use |= (1 << dig->dig_encoder);
+ }
+
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
+ if (dig_enc_in_use & 0x2)
+ DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
+ return 1;
+ }
+ if (!(dig_enc_in_use & 1))
+ return 0;
+ return 1;
+}
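
Editor's note: on DCE4 the chosen DIG block is a pure function of the UNIPHY instance and link: UNIPHY maps to DIG0/1, UNIPHY1 to DIG2/3, UNIPHY2 to DIG4/5, with link B taking the odd slot, and DCE4.1 exposing only DIG0/1. That reduces to 2 * block + linkb; a sketch with an illustrative enum:

#include <stdbool.h>
#include <stdio.h>

enum uniphy_block { UNIPHY0, UNIPHY1, UNIPHY2 };

static int pick_dig_dce4(enum uniphy_block block, bool linkb, bool is_dce41)
{
	if (is_dce41)
		return linkb ? 1 : 0;	/* DCE4.1: only DIG0/DIG1 exist */

	/* each UNIPHY pair owns two DIGs; link B takes the odd one */
	return 2 * (int)block + (linkb ? 1 : 0);
}

int main(void)
{
	printf("UNIPHY1/linkB -> DIG%d\n", pick_dig_dce4(UNIPHY1, true, false));
	return 0;
}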
+
+/* This only needs to be called once at startup */
+void
+radeon_atom_encoder_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+ break;
+ default:
+ break;
+ }
+
+ if (ext_encoder && ASIC_IS_DCE41(rdev))
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+ }
+}
+
+static void
+radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
+ atombios_yuv_setup(encoder, true);
+ else
+ atombios_yuv_setup(encoder, false);
+ }
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ if (ASIC_IS_DCE4(rdev)) {
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ /* setup and enable the encoder */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+
+ /* enable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ } else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+
+ /* setup and enable the encoder and transmitter */
+ atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_ENABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_ENABLE);
+ else
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ }
+ break;
+ }
+
+ if (ext_encoder) {
+ if (ASIC_IS_DCE41(rdev))
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+ else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ }
+
+ atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ r600_hdmi_enable(encoder);
+ r600_hdmi_setmode(encoder, adjusted_mode);
+ }
+}
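
Editor's note: note the per-generation ordering in the DIG case above: DCE4 only cycles the transmitter around the encoder setup, while older parts tear both down and need an extra transmitter SETUP before the final enable. The two sequences as stubbed-out calls (the printf bodies are placeholders for the real table calls):

#include <stdbool.h>
#include <stdio.h>

static void transmitter(const char *action) { printf("transmitter %s\n", action); }
static void dig_encoder(const char *action) { printf("encoder     %s\n", action); }

static void mode_set_dig(bool is_dce4)
{
	if (is_dce4) {
		transmitter("disable");
		dig_encoder("setup");	/* encoder programmed while PHY is down */
		transmitter("enable");
	} else {
		transmitter("disable");
		dig_encoder("disable");
		dig_encoder("enable");
		transmitter("setup");	/* extra SETUP step on pre-DCE4 */
		transmitter("enable");
	}
}

int main(void)
{
	mode_set_dig(true);
	return 0;
}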
+
+static bool
+atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
+ ATOM_DEVICE_CV_SUPPORT |
+ ATOM_DEVICE_CRT_SUPPORT)) {
+ DAC_LOAD_DETECTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
+ uint8_t frev, crev;
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return false;
+
+ args.sDacload.ucMisc = 0;
+
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
+ args.sDacload.ucDacType = ATOM_DAC_A;
+ else
+ args.sDacload.ucDacType = ATOM_DAC_B;
+
+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
+ else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
+ else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
+ if (crev >= 3)
+ args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+ } else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
+ if (crev >= 3)
+ args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ return true;
+ } else
return false;
+}
+
+static enum drm_connector_status
+radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ uint32_t bios_0_scratch;
+
+ if (!atombios_dac_load_detect(encoder, connector)) {
+ DRM_DEBUG_KMS("detect returned false \n");
+ return connector_status_unknown;
+ }
+
+ if (rdev->family >= CHIP_R600)
+ bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+ else
+ bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+
+ DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+ return connector_status_connected; /* CTV */
+ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+ return connector_status_connected; /* STV */
+ }
+ return connector_status_disconnected;
+}
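
Editor's note: DAC load detect fires the table, then reads device-presence bits back out of BIOS scratch register 0, consulting only the masks for devices the connector actually carries. A sketch of that mask test with invented bit positions (the real ATOM_S0_* masks live in the ATOM headers):

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for ATOM_S0_* presence bits */
#define S0_CRT1 (1u << 0)
#define S0_CRT2 (1u << 1)
#define S0_CV   (1u << 2)
#define S0_TV1  (1u << 3)

enum status { DISCONNECTED, CONNECTED };

static enum status detect(uint32_t scratch0, uint32_t connector_devices,
			  uint32_t device_mask)
{
	/* only consult bits for devices this connector actually carries */
	if ((connector_devices & device_mask) && (scratch0 & device_mask))
		return CONNECTED;
	return DISCONNECTED;
}

int main(void)
{
	uint32_t scratch0 = S0_CRT1;

	printf("CRT1: %d\n", detect(scratch0, S0_CRT1 | S0_TV1, S0_CRT1));
	printf("TV1:  %d\n", detect(scratch0, S0_CRT1 | S0_TV1, S0_TV1));
	return 0;
}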
+
+static enum drm_connector_status
+radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+ u32 bios_0_scratch;
+
+ if (!ASIC_IS_DCE4(rdev))
+ return connector_status_unknown;
+
+ if (!ext_encoder)
+ return connector_status_unknown;
+
+ if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
+ return connector_status_unknown;
+
+ /* load detect on the dp bridge */
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
+
+ bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+
+ DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+ return connector_status_connected;
}
+ if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+ return connector_status_connected; /* CTV */
+ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+ return connector_status_connected; /* STV */
+ }
+ return connector_status_disconnected;
}
+void
+radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
+{
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+
+ if (ext_encoder)
+ /* ddc_setup on the dp bridge */
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
+
+}
+
+static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+ if ((radeon_encoder->active_device &
+ (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ radeon_encoder_is_dp_bridge(encoder)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ if (dig)
+ dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+ }
+
+ radeon_atom_output_lock(encoder, true);
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ /* select the clock/data port if it uses a router */
+ if (radeon_connector->router.cd_valid)
+ radeon_router_select_cd_port(radeon_connector);
+
+ /* turn eDP panel on for mode set */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ }
+
+ /* this is needed for the pll/ss setup to work correctly in some cases */
+ atombios_set_encoder_crtc_source(encoder);
+}
+
+static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
+{
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+ radeon_atom_output_lock(encoder, false);
+}
+
+static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig;
+
+ /* check for pre-DCE3 cards with shared encoders;
+ * can't really use the links individually, so don't disable
+ * the encoder if it's in use by another connector
+ */
+ if (!ASIC_IS_DCE3(rdev)) {
+ struct drm_encoder *other_encoder;
+ struct radeon_encoder *other_radeon_encoder;
+
+ list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+ other_radeon_encoder = to_radeon_encoder(other_encoder);
+ if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+ drm_helper_encoder_in_use(other_encoder))
+ goto disable_done;
+ }
+ }
+
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ if (ASIC_IS_DCE4(rdev))
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ atombios_dvo_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+
+disable_done:
+ if (radeon_encoder_is_digital(encoder)) {
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ r600_hdmi_disable(encoder);
+ dig = radeon_encoder->enc_priv;
+ dig->dig_encoder = -1;
+ }
+ radeon_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void radeon_atom_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void radeon_atom_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
+ .dpms = radeon_atom_ext_dpms,
+ .mode_fixup = radeon_atom_ext_mode_fixup,
+ .prepare = radeon_atom_ext_prepare,
+ .mode_set = radeon_atom_ext_mode_set,
+ .commit = radeon_atom_ext_commit,
+ .disable = radeon_atom_ext_disable,
+ /* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
+ .dpms = radeon_atom_encoder_dpms,
+ .mode_fixup = radeon_atom_mode_fixup,
+ .prepare = radeon_atom_encoder_prepare,
+ .mode_set = radeon_atom_encoder_mode_set,
+ .commit = radeon_atom_encoder_commit,
+ .disable = radeon_atom_encoder_disable,
+ .detect = radeon_atom_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
+ .dpms = radeon_atom_encoder_dpms,
+ .mode_fixup = radeon_atom_mode_fixup,
+ .prepare = radeon_atom_encoder_prepare,
+ .mode_set = radeon_atom_encoder_mode_set,
+ .commit = radeon_atom_encoder_commit,
+ .detect = radeon_atom_dac_detect,
+};
+
+void radeon_enc_destroy(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
+ .destroy = radeon_enc_destroy,
+};
+
+struct radeon_encoder_atom_dac *
+radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
+
+ if (!dac)
+ return NULL;
+
+ dac->tv_std = radeon_atombios_get_tv_info(rdev);
+ return dac;
+}
+
+struct radeon_encoder_atom_dig *
+radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
+{
+ int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+ struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+ if (!dig)
+ return NULL;
+
+ /* coherent mode by default */
+ dig->coherent_mode = true;
+ dig->dig_encoder = -1;
+
+ if (encoder_enum == 2)
+ dig->linkb = true;
+ else
+ dig->linkb = false;
+
+ return dig;
+}
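
Editor's note: the DIG private data derives link A/B from the enum id packed into encoder_enum: enum id 2 means the second instance and hence link B. A sketch of the unpacking; the shift and mask below are assumptions in the style of the ENUM_ID_MASK/ENUM_ID_SHIFT pair used above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* assumed field layout, in the style of OBJECT_ID_MASK/ENUM_ID_MASK */
#define ENUM_ID_SHIFT 8
#define ENUM_ID_MASK  (0xff << ENUM_ID_SHIFT)

static bool is_linkb(uint32_t encoder_enum)
{
	int enum_id = (encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;

	return enum_id == 2;	/* enum id 2 == second instance == link B */
}

int main(void)
{
	printf("linkb=%d\n", is_linkb(2u << ENUM_ID_SHIFT));
	return 0;
}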
+
+void
+radeon_add_atom_encoder(struct drm_device *dev,
+ uint32_t encoder_enum,
+ uint32_t supported_device,
+ u16 caps)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+
+ /* see if we already added it */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->encoder_enum == encoder_enum) {
+ radeon_encoder->devices |= supported_device;
+ return;
+ }
+
+ }
+
+ /* add a new one */
+ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+ if (!radeon_encoder)
+ return;
+
+ encoder = &radeon_encoder->base;
+ switch (rdev->num_crtc) {
+ case 1:
+ encoder->possible_crtcs = 0x1;
+ break;
+ case 2:
+ default:
+ encoder->possible_crtcs = 0x3;
+ break;
+ case 4:
+ encoder->possible_crtcs = 0xf;
+ break;
+ case 6:
+ encoder->possible_crtcs = 0x3f;
+ break;
+ }
+
+ radeon_encoder->enc_priv = NULL;
+
+ radeon_encoder->encoder_enum = encoder_enum;
+ radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ radeon_encoder->devices = supported_device;
+ radeon_encoder->rmx_type = RMX_OFF;
+ radeon_encoder->underscan_type = UNDERSCAN_OFF;
+ radeon_encoder->is_ext_encoder = false;
+ radeon_encoder->caps = caps;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ radeon_encoder->rmx_type = RMX_FULL;
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+ } else {
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ }
+ drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+ drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+ radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+ drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ radeon_encoder->rmx_type = RMX_FULL;
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ } else {
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ }
+ drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_SI170B:
+ case ENCODER_OBJECT_ID_CH7303:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+ case ENCODER_OBJECT_ID_TITFP513:
+ case ENCODER_OBJECT_ID_VT1623:
+ case ENCODER_OBJECT_ID_HDMI_SI1930:
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
+ /* these are handled by the primary encoders */
+ radeon_encoder->is_ext_encoder = true;
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ else
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
+ break;
+ }
+}
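
Editor's note: the possible_crtcs switch at the top of radeon_add_atom_encoder() is just a bitmask with one bit per CRTC the encoder may drive, i.e. (1 << num_crtc) - 1 for each supported count, with two CRTCs as the fallback. Sketch:

#include <stdint.h>
#include <stdio.h>

/* one bit per CRTC the encoder may be routed to */
static uint32_t possible_crtcs(int num_crtc)
{
	switch (num_crtc) {
	case 1:  return 0x1;
	case 4:  return 0xf;
	case 6:  return 0x3f;
	case 2:
	default: return 0x3;	/* two CRTCs is the historical default */
	}
}

int main(void)
{
	printf("6 crtcs -> %#x (== (1<<6)-1)\n", (unsigned)possible_crtcs(6));
	return 0;
}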
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 5906914..0b7b486 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -103,7 +103,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
}
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
- struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object **gobj_p)
{
struct radeon_device *rdev = rfbdev->rdev;
@@ -114,17 +114,13 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
- u32 bpp, depth;
-
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
/* need to align pitch with crtc limits */
- mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
- fb_tiled) * ((bpp + 1) / 8);
+ mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
- size = mode_cmd->pitches[0] * height;
+ size = mode_cmd->pitch * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
@@ -141,7 +137,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (bpp) {
+ switch (mode_cmd->bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
@@ -155,7 +151,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
if (tiling_flags) {
ret = radeon_bo_set_tiling_flags(rbo,
tiling_flags | RADEON_TILING_SURFACE,
- mode_cmd->pitches[0]);
+ mode_cmd->pitch);
if (ret)
dev_err(rdev->dev, "FB failed to set tiling flags\n");
}
@@ -164,10 +160,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
ret = radeon_bo_reserve(rbo, false);
if (unlikely(ret != 0))
goto out_unref;
- /* Only 27 bit offset for legacy CRTC */
- ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
- ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
- NULL);
+ ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
if (ret) {
radeon_bo_unreserve(rbo);
goto out_unref;
@@ -194,7 +187,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
struct device *device = &rdev->pdev->dev;
@@ -208,15 +201,10 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
sizes->surface_bpp = 32;
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.depth = sizes->surface_depth;
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon object %d\n", ret);
- return ret;
- }
-
rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
@@ -228,11 +216,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
info->par = rfbdev;
- ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
- if (ret) {
- DRM_ERROR("failed to initalise framebuffer %d\n", ret);
- goto out_unref;
- }
+ radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
fb = &rfbdev->rfb.base;
@@ -244,7 +228,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
strcpy(info->fix.id, "radeondrmfb");
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
@@ -266,7 +250,11 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
- /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+ info->pixmap.size = 64*1024;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
if (info->screen_base == NULL) {
ret = -ENOSPC;
@@ -283,7 +271,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
- DRM_INFO(" pitch is %d\n", fb->pitches[0]);
+ DRM_INFO(" pitch is %d\n", fb->pitch);
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4bd36a3..021d2b6 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -29,7 +29,7 @@
* Dave Airlie
*/
#include <linux/seq_file.h>
-#include <linux/atomic.h>
+#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
@@ -40,24 +40,32 @@
#include "radeon.h"
#include "radeon_trace.h"
-static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
{
if (rdev->wb.enabled) {
- *rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
- } else {
- WREG32(rdev->fence_drv[ring].scratch_reg, seq);
- }
+ u32 scratch_index;
+ if (rdev->wb.use_event)
+ scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ else
+ scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
+ } else
+ WREG32(rdev->fence_drv.scratch_reg, seq);
}
-static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
+static u32 radeon_fence_read(struct radeon_device *rdev)
{
- u32 seq = 0;
+ u32 seq;
if (rdev->wb.enabled) {
- seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
- } else {
- seq = RREG32(rdev->fence_drv[ring].scratch_reg);
- }
+ u32 scratch_index;
+ if (rdev->wb.use_event)
+ scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ else
+ scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
+ } else
+ seq = RREG32(rdev->fence_drv.scratch_reg);
return seq;
}
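
Editor's note: the single-ring fence code this merge restores locates the fence's writeback slot by the scratch register's offset within the scratch aperture, switching base offsets when interrupt events are in use. The index arithmetic, sketched with placeholder offsets (the real R600_WB_EVENT_OFFSET/RADEON_WB_SCRATCH_OFFSET values come from the radeon headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* placeholder values; the real offsets live in the radeon headers */
#define WB_EVENT_OFFSET   3072
#define WB_SCRATCH_OFFSET 0

/* byte offset into the writeback page, then /4 for a u32 slot index */
static unsigned scratch_slot(uint32_t scratch_reg, uint32_t reg_base,
			     bool use_event)
{
	uint32_t base = use_event ? WB_EVENT_OFFSET : WB_SCRATCH_OFFSET;

	return (base + scratch_reg - reg_base) / 4;
}

int main(void)
{
	printf("slot=%u\n", scratch_slot(0x15e4, 0x15e0, true));
	return 0;
}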
@@ -65,28 +73,28 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
unsigned long irq_flags;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (fence->emitted) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ if (fence->emited) {
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
- if (!rdev->ring[fence->ring].ready)
+ fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
+ if (!rdev->cp.ready)
 		/* FIXME: cp is not running, assume everything is done
 		 * right away
 		 */
- radeon_fence_write(rdev, fence->seq, fence->ring);
+ radeon_fence_write(rdev, fence->seq);
else
- radeon_fence_ring_emit(rdev, fence->ring, fence);
+ radeon_fence_ring_emit(rdev, fence);
trace_radeon_fence_emit(rdev->ddev, fence->seq);
- fence->emitted = true;
- list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ fence->emited = true;
+ list_move_tail(&fence->list, &rdev->fence_drv.emited);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
-static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
+static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
struct radeon_fence *fence;
struct list_head *i, *n;
@@ -94,34 +102,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
bool wake = false;
unsigned long cjiffies;
- seq = radeon_fence_read(rdev, ring);
- if (seq != rdev->fence_drv[ring].last_seq) {
- rdev->fence_drv[ring].last_seq = seq;
- rdev->fence_drv[ring].last_jiffies = jiffies;
- rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ seq = radeon_fence_read(rdev);
+ if (seq != rdev->fence_drv.last_seq) {
+ rdev->fence_drv.last_seq = seq;
+ rdev->fence_drv.last_jiffies = jiffies;
+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
} else {
cjiffies = jiffies;
- if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
- cjiffies -= rdev->fence_drv[ring].last_jiffies;
- if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
+ if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
+ cjiffies -= rdev->fence_drv.last_jiffies;
+ if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
/* update the timeout */
- rdev->fence_drv[ring].last_timeout -= cjiffies;
+ rdev->fence_drv.last_timeout -= cjiffies;
} else {
/* the 500ms timeout is elapsed we should test
* for GPU lockup
*/
- rdev->fence_drv[ring].last_timeout = 1;
+ rdev->fence_drv.last_timeout = 1;
}
} else {
/* wrap around update last jiffies, we will just wait
* a little longer
*/
- rdev->fence_drv[ring].last_jiffies = cjiffies;
+ rdev->fence_drv.last_jiffies = cjiffies;
}
return false;
}
n = NULL;
- list_for_each(i, &rdev->fence_drv[ring].emitted) {
+ list_for_each(i, &rdev->fence_drv.emited) {
fence = list_entry(i, struct radeon_fence, list);
if (fence->seq == seq) {
n = i;
@@ -133,11 +141,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
i = n;
do {
n = i->prev;
- list_move_tail(i, &rdev->fence_drv[ring].signaled);
+ list_move_tail(i, &rdev->fence_drv.signaled);
fence = list_entry(i, struct radeon_fence, list);
fence->signaled = true;
i = n;
- } while (i != &rdev->fence_drv[ring].emitted);
+ } while (i != &rdev->fence_drv.emited);
wake = true;
}
return wake;
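
The poll above tracks lockups with a countdown rather than an absolute deadline: whenever the sequence number stalls, the elapsed jiffies are subtracted from last_timeout until it bottoms out at 1, at which point the waiter checks for a GPU lockup. A condensed sketch of that bookkeeping (drv stands in for &rdev->fence_drv; RADEON_FENCE_JIFFIES_TIMEOUT is the 500ms budget):

    cjiffies = jiffies;
    if (time_after(cjiffies, drv->last_jiffies)) {
            cjiffies -= drv->last_jiffies;          /* ticks since last progress */
            if (time_after(drv->last_timeout, cjiffies))
                    drv->last_timeout -= cjiffies;  /* budget still remains */
            else
                    drv->last_timeout = 1;          /* budget spent: suspect lockup */
    } else {
            drv->last_jiffies = cjiffies;           /* jiffies wrapped; rebase */
    }
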
@@ -149,18 +157,14 @@ static void radeon_fence_destroy(struct kref *kref)
struct radeon_fence *fence;
fence = container_of(kref, struct radeon_fence, kref);
- write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
+ write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
list_del(&fence->list);
- fence->emitted = false;
- write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
- if (fence->semaphore)
- radeon_semaphore_free(fence->rdev, fence->semaphore);
+ fence->emited = false;
+ write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
kfree(fence);
}
-int radeon_fence_create(struct radeon_device *rdev,
- struct radeon_fence **fence,
- int ring)
+int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
unsigned long irq_flags;
@@ -170,19 +174,18 @@ int radeon_fence_create(struct radeon_device *rdev,
}
kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
- (*fence)->emitted = false;
+ (*fence)->emited = false;
(*fence)->signaled = false;
(*fence)->seq = 0;
- (*fence)->ring = ring;
- (*fence)->semaphore = NULL;
INIT_LIST_HEAD(&(*fence)->list);
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
+
bool radeon_fence_signaled(struct radeon_fence *fence)
{
unsigned long irq_flags;
@@ -194,21 +197,21 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
if (fence->rdev->gpu_lockup)
return true;
- write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
+ write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
signaled = fence->signaled;
/* if we are shutting down, report all fences as signaled */
if (fence->rdev->shutdown) {
signaled = true;
}
- if (!fence->emitted) {
- WARN(1, "Querying an unemitted fence : %p !\n", fence);
+ if (!fence->emited) {
+ WARN(1, "Querying an unemited fence : %p !\n", fence);
signaled = true;
}
if (!signaled) {
- radeon_fence_poll_locked(fence->rdev, fence->ring);
+ radeon_fence_poll_locked(fence->rdev);
signaled = fence->signaled;
}
- write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
+ write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
return signaled;
}
@@ -227,24 +230,24 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
if (radeon_fence_signaled(fence)) {
return 0;
}
- timeout = rdev->fence_drv[fence->ring].last_timeout;
+ timeout = rdev->fence_drv.last_timeout;
retry:
/* save current sequence used to check for GPU lockup */
- seq = rdev->fence_drv[fence->ring].last_seq;
+ seq = rdev->fence_drv.last_seq;
trace_radeon_fence_wait_begin(rdev->ddev, seq);
if (intr) {
- radeon_irq_kms_sw_irq_get(rdev, fence->ring);
- r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
+ radeon_irq_kms_sw_irq_get(rdev);
+ r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev, fence->ring);
+ radeon_irq_kms_sw_irq_put(rdev);
if (unlikely(r < 0)) {
return r;
}
} else {
- radeon_irq_kms_sw_irq_get(rdev, fence->ring);
- r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
+ radeon_irq_kms_sw_irq_get(rdev);
+ r = wait_event_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev, fence->ring);
+ radeon_irq_kms_sw_irq_put(rdev);
}
trace_radeon_fence_wait_end(rdev->ddev, seq);
if (unlikely(!radeon_fence_signaled(fence))) {
@@ -255,13 +258,12 @@ retry:
timeout = r;
goto retry;
}
- /* don't protect read access to rdev->fence_drv[t].last_seq
+ /* don't protect read access to rdev->fence_drv.last_seq
* if we are experiencing a lockup the value doesn't change
*/
- if (seq == rdev->fence_drv[fence->ring].last_seq &&
- radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
+ if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
/* good news we believe it's a lockup */
- printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
+ WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
/* FIXME: what should we do? marking everyone
* as signaled for now
@@ -270,20 +272,20 @@ retry:
r = radeon_gpu_reset(rdev);
if (r)
return r;
- radeon_fence_write(rdev, fence->seq, fence->ring);
+ radeon_fence_write(rdev, fence->seq);
rdev->gpu_lockup = false;
}
timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- rdev->fence_drv[fence->ring].last_jiffies = jiffies;
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ rdev->fence_drv.last_jiffies = jiffies;
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
goto retry;
}
return 0;
}
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next(struct radeon_device *rdev)
{
unsigned long irq_flags;
struct radeon_fence *fence;
@@ -292,21 +294,21 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
if (rdev->gpu_lockup) {
return 0;
}
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (list_empty(&rdev->fence_drv[ring].emitted)) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ if (list_empty(&rdev->fence_drv.emited)) {
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
- fence = list_entry(rdev->fence_drv[ring].emitted.next,
+ fence = list_entry(rdev->fence_drv.emited.next,
struct radeon_fence, list);
radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
}
-int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_last(struct radeon_device *rdev)
{
unsigned long irq_flags;
struct radeon_fence *fence;
@@ -315,15 +317,15 @@ int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
if (rdev->gpu_lockup) {
return 0;
}
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (list_empty(&rdev->fence_drv[ring].emitted)) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ if (list_empty(&rdev->fence_drv.emited)) {
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
- fence = list_entry(rdev->fence_drv[ring].emitted.prev,
+ fence = list_entry(rdev->fence_drv.emited.prev,
struct radeon_fence, list);
radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
@@ -345,97 +347,39 @@ void radeon_fence_unref(struct radeon_fence **fence)
}
}
-void radeon_fence_process(struct radeon_device *rdev, int ring)
+void radeon_fence_process(struct radeon_device *rdev)
{
unsigned long irq_flags;
bool wake;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- wake = radeon_fence_poll_locked(rdev, ring);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ wake = radeon_fence_poll_locked(rdev);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (wake) {
- wake_up_all(&rdev->fence_drv[ring].queue);
- }
-}
-
-int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
-{
- unsigned long irq_flags;
- int not_processed = 0;
-
- read_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (!rdev->fence_drv[ring].initialized) {
- read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return 0;
- }
-
- if (!list_empty(&rdev->fence_drv[ring].emitted)) {
- struct list_head *ptr;
- list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
- /* count up to 3, that's enought info */
- if (++not_processed >= 3)
- break;
- }
- }
- read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return not_processed;
-}
-
-int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
-{
- unsigned long irq_flags;
- uint64_t index;
- int r;
-
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
- if (rdev->wb.use_event) {
- rdev->fence_drv[ring].scratch_reg = 0;
- index = R600_WB_EVENT_OFFSET + ring * 4;
- } else {
- r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
- if (r) {
- dev_err(rdev->dev, "fence failed to get scratch register\n");
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return r;
- }
- index = RADEON_WB_SCRATCH_OFFSET +
- rdev->fence_drv[ring].scratch_reg -
- rdev->scratch.reg_base;
+ wake_up_all(&rdev->fence_drv.queue);
}
- rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
- rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
- rdev->fence_drv[ring].initialized = true;
- DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
- ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return 0;
-}
-
-static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
-{
- rdev->fence_drv[ring].scratch_reg = -1;
- rdev->fence_drv[ring].cpu_addr = NULL;
- rdev->fence_drv[ring].gpu_addr = 0;
- atomic_set(&rdev->fence_drv[ring].seq, 0);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
- init_waitqueue_head(&rdev->fence_drv[ring].queue);
- rdev->fence_drv[ring].initialized = false;
}
int radeon_fence_driver_init(struct radeon_device *rdev)
{
unsigned long irq_flags;
- int ring;
+ int r;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
- radeon_fence_driver_init_ring(rdev, ring);
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
+ if (r) {
+ dev_err(rdev->dev, "fence failed to get scratch register\n");
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ return r;
}
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ radeon_fence_write(rdev, 0);
+ atomic_set(&rdev->fence_drv.seq, 0);
+ INIT_LIST_HEAD(&rdev->fence_drv.created);
+ INIT_LIST_HEAD(&rdev->fence_drv.emited);
+ INIT_LIST_HEAD(&rdev->fence_drv.signaled);
+ init_waitqueue_head(&rdev->fence_drv.queue);
+ rdev->fence_drv.initialized = true;
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (radeon_debugfs_fence_init(rdev)) {
dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
@@ -445,18 +389,14 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
unsigned long irq_flags;
- int ring;
-
- for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
- if (!rdev->fence_drv[ring].initialized)
- continue;
- radeon_fence_wait_last(rdev, ring);
- wake_up_all(&rdev->fence_drv[ring].queue);
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- rdev->fence_drv[ring].initialized = false;
- }
+
+ if (!rdev->fence_drv.initialized)
+ return;
+ wake_up_all(&rdev->fence_drv.queue);
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ rdev->fence_drv.initialized = false;
}
@@ -470,21 +410,14 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_fence *fence;
- int i;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (!rdev->fence_drv[i].initialized)
- continue;
-
- seq_printf(m, "--- ring %d ---\n", i);
- seq_printf(m, "Last signaled fence 0x%08X\n",
- radeon_fence_read(rdev, i));
- if (!list_empty(&rdev->fence_drv[i].emitted)) {
- fence = list_entry(rdev->fence_drv[i].emitted.prev,
- struct radeon_fence, list);
- seq_printf(m, "Last emitted fence %p with 0x%08X\n",
- fence, fence->seq);
- }
+
+ seq_printf(m, "Last signaled fence 0x%08X\n",
+ radeon_fence_read(rdev));
+ if (!list_empty(&rdev->fence_drv.emited)) {
+ fence = list_entry(rdev->fence_drv.emited.prev,
+ struct radeon_fence, list);
+ seq_printf(m, "Last emited fence %p with 0x%08X\n",
+ fence, fence->seq);
}
return 0;
}
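
For orientation, the post-revert single-ring fence API reads as a three-step sequence for callers: create, emit behind the command stream, wait. A hedged usage sketch with error handling trimmed (signatures as reverted in the hunks above):

    struct radeon_fence *fence = NULL;
    int r;

    r = radeon_fence_create(rdev, &fence);       /* allocate, link on created list */
    if (r)
            return r;
    /* ... emit commands to the CP ring here ... */
    r = radeon_fence_emit(rdev, fence);          /* assign seq, write fence packet */
    if (!r)
            r = radeon_fence_wait(fence, false); /* block until seq is signaled */
    radeon_fence_unref(&fence);
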
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index c58a036..a533f52 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -49,27 +49,27 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
rdev->gart.table_size >> PAGE_SHIFT);
}
#endif
- rdev->gart.ptr = ptr;
- memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
+ rdev->gart.table.ram.ptr = ptr;
+ memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size);
return 0;
}
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
- if (rdev->gart.ptr == NULL) {
+ if (rdev->gart.table.ram.ptr == NULL) {
return;
}
#ifdef CONFIG_X86
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
- set_memory_wb((unsigned long)rdev->gart.ptr,
+ set_memory_wb((unsigned long)rdev->gart.table.ram.ptr,
rdev->gart.table_size >> PAGE_SHIFT);
}
#endif
pci_free_consistent(rdev->pdev, rdev->gart.table_size,
- (void *)rdev->gart.ptr,
+ (void *)rdev->gart.table.ram.ptr,
rdev->gart.table_addr);
- rdev->gart.ptr = NULL;
+ rdev->gart.table.ram.ptr = NULL;
rdev->gart.table_addr = 0;
}
@@ -77,10 +77,10 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.robj == NULL) {
+ if (rdev->gart.table.vram.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->gart.robj);
+ &rdev->gart.table.vram.robj);
if (r) {
return r;
}
@@ -93,46 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
uint64_t gpu_addr;
int r;
- r = radeon_bo_reserve(rdev->gart.robj, false);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (unlikely(r != 0))
return r;
- r = radeon_bo_pin(rdev->gart.robj,
+ r = radeon_bo_pin(rdev->gart.table.vram.robj,
RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
- radeon_bo_unreserve(rdev->gart.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
return r;
}
- r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
+ r = radeon_bo_kmap(rdev->gart.table.vram.robj,
+ (void **)&rdev->gart.table.vram.ptr);
if (r)
- radeon_bo_unpin(rdev->gart.robj);
- radeon_bo_unreserve(rdev->gart.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
rdev->gart.table_addr = gpu_addr;
return r;
}
-void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
+void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.robj == NULL) {
+ if (rdev->gart.table.vram.robj == NULL) {
return;
}
- r = radeon_bo_reserve(rdev->gart.robj, false);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->gart.robj);
- radeon_bo_unpin(rdev->gart.robj);
- radeon_bo_unreserve(rdev->gart.robj);
- rdev->gart.ptr = NULL;
- }
-}
-
-void radeon_gart_table_vram_free(struct radeon_device *rdev)
-{
- if (rdev->gart.robj == NULL) {
- return;
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
- radeon_gart_table_vram_unpin(rdev);
- radeon_bo_unref(&rdev->gart.robj);
+ radeon_bo_unref(&rdev->gart.table.vram.robj);
}
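
The pin path reverted above keeps TTM's reserve/pin/kmap discipline: the table BO must be reserved before pinning, and the pin is rolled back if kmap fails. A minimal sketch of that ordering (robj, ptr and gpu_addr are illustrative locals):

    r = radeon_bo_reserve(robj, false);              /* take the BO reservation */
    if (unlikely(r != 0))
            return r;
    r = radeon_bo_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    if (!r) {
            r = radeon_bo_kmap(robj, (void **)&ptr); /* CPU view of the table */
            if (r)
                    radeon_bo_unpin(robj);           /* roll back on kmap failure */
    }
    radeon_bo_unreserve(robj);                       /* always drop the reservation */
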
@@ -150,20 +142,21 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
u64 page_base;
if (!rdev->gart.ready) {
- WARN(1, "trying to unbind memory from uninitialized GART !\n");
+ WARN(1, "trying to unbind memory to unitialized GART !\n");
return;
}
t = offset / RADEON_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
+ if (!rdev->gart.ttm_alloced[p])
+ pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t, page_base);
- }
+ radeon_gart_set_page(rdev, t, page_base);
page_base += RADEON_GPU_PAGE_SIZE;
}
}
@@ -181,22 +174,36 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int i, j;
if (!rdev->gart.ready) {
- WARN(1, "trying to bind memory to uninitialized GART !\n");
+ WARN(1, "trying to bind memory to unitialized GART !\n");
return -EINVAL;
}
t = offset / RADEON_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- rdev->gart.pages_addr[p] = dma_addr[i];
- rdev->gart.pages[p] = pagelist[i];
- if (rdev->gart.ptr) {
- page_base = rdev->gart.pages_addr[p];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base);
- page_base += RADEON_GPU_PAGE_SIZE;
+ /* we reverted the patch using dma_addr in TTM for now, but this
+ * code stops building on alpha, so just disable it for now */
+ if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
+ rdev->gart.ttm_alloced[p] = true;
+ rdev->gart.pages_addr[p] = dma_addr[i];
+ } else {
+ /* we need to support large memory configurations */
+ /* assume that unbind has already been called on the range */
+ rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+ /* FIXME: failed to map page (return -ENOMEM?) */
+ radeon_gart_unbind(rdev, offset, pages);
+ return -ENOMEM;
}
}
+ rdev->gart.pages[p] = pagelist[i];
+ page_base = rdev->gart.pages_addr[p];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += RADEON_GPU_PAGE_SIZE;
+ }
}
mb();
radeon_gart_tlb_flush(rdev);
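
One subtlety in the bind loop above: a CPU page can span several GPU page-table entries, so each pci_map_page() result seeds PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive GTT writes (1 on 4K-page hosts, 16 on 64K-page hosts). A hedged sketch of the inner step (page, t and j are illustrative locals):

    dma_addr_t base = pci_map_page(rdev->pdev, page, 0, PAGE_SIZE,
                                   PCI_DMA_BIDIRECTIONAL);
    if (pci_dma_mapping_error(rdev->pdev, base))
            return -ENOMEM;                        /* caller unbinds the range */
    for (j = 0; j < PAGE_SIZE / RADEON_GPU_PAGE_SIZE; j++, t++) {
            radeon_gart_set_page(rdev, t, base);   /* write one GTT entry */
            base += RADEON_GPU_PAGE_SIZE;          /* next 4K GPU page */
    }
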
@@ -208,9 +215,6 @@ void radeon_gart_restore(struct radeon_device *rdev)
int i, j, t;
u64 page_base;
- if (!rdev->gart.ptr) {
- return;
- }
for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
page_base = rdev->gart.pages_addr[i];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
@@ -255,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
+ rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+ rdev->gart.num_cpu_pages, GFP_KERNEL);
+ if (rdev->gart.ttm_alloced == NULL) {
+ radeon_gart_fini(rdev);
+ return -ENOMEM;
+ }
/* set GART entry to point to the dummy page by default */
for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -271,404 +281,10 @@ void radeon_gart_fini(struct radeon_device *rdev)
rdev->gart.ready = false;
kfree(rdev->gart.pages);
kfree(rdev->gart.pages_addr);
+ kfree(rdev->gart.ttm_alloced);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
+ rdev->gart.ttm_alloced = NULL;
radeon_dummy_page_fini(rdev);
}
-
-/*
- * vm helpers
- *
- * TODO bind a default page at vm initialization for default address
- */
-int radeon_vm_manager_init(struct radeon_device *rdev)
-{
- int r;
-
- rdev->vm_manager.enabled = false;
-
- /* mark first vm as always in use, it's the system one */
- r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
- rdev->vm_manager.max_pfn * 8,
- RADEON_GEM_DOMAIN_VRAM);
- if (r) {
- dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
- (rdev->vm_manager.max_pfn * 8) >> 10);
- return r;
- }
-
- r = rdev->vm_manager.funcs->init(rdev);
- if (r == 0)
- rdev->vm_manager.enabled = true;
-
- return r;
-}
-
-/* cs mutex must be lock */
-static void radeon_vm_unbind_locked(struct radeon_device *rdev,
- struct radeon_vm *vm)
-{
- struct radeon_bo_va *bo_va;
-
- if (vm->id == -1) {
- return;
- }
-
- /* wait for vm use to end */
- if (vm->fence) {
- radeon_fence_wait(vm->fence, false);
- radeon_fence_unref(&vm->fence);
- }
-
- /* hw unbind */
- rdev->vm_manager.funcs->unbind(rdev, vm);
- rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
- list_del_init(&vm->list);
- vm->id = -1;
- radeon_sa_bo_free(rdev, &vm->sa_bo);
- vm->pt = NULL;
-
- list_for_each_entry(bo_va, &vm->va, vm_list) {
- bo_va->valid = false;
- }
-}
-
-void radeon_vm_manager_fini(struct radeon_device *rdev)
-{
- if (rdev->vm_manager.sa_manager.bo == NULL)
- return;
- radeon_vm_manager_suspend(rdev);
- rdev->vm_manager.funcs->fini(rdev);
- radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
- rdev->vm_manager.enabled = false;
-}
-
-int radeon_vm_manager_start(struct radeon_device *rdev)
-{
- if (rdev->vm_manager.sa_manager.bo == NULL) {
- return -EINVAL;
- }
- return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
-}
-
-int radeon_vm_manager_suspend(struct radeon_device *rdev)
-{
- struct radeon_vm *vm, *tmp;
-
- radeon_mutex_lock(&rdev->cs_mutex);
- /* unbind all active vm */
- list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
- radeon_vm_unbind_locked(rdev, vm);
- }
- rdev->vm_manager.funcs->fini(rdev);
- radeon_mutex_unlock(&rdev->cs_mutex);
- return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
-}
-
-/* cs mutex must be lock */
-void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- mutex_lock(&vm->mutex);
- radeon_vm_unbind_locked(rdev, vm);
- mutex_unlock(&vm->mutex);
-}
-
-/* cs mutex must be lock & vm mutex must be lock */
-int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- struct radeon_vm *vm_evict;
- unsigned i;
- int id = -1, r;
-
- if (vm == NULL) {
- return -EINVAL;
- }
-
- if (vm->id != -1) {
- /* update lru */
- list_del_init(&vm->list);
- list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
- return 0;
- }
-
-retry:
- r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
- RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
- RADEON_GPU_PAGE_SIZE);
- if (r) {
- if (list_empty(&rdev->vm_manager.lru_vm)) {
- return r;
- }
- vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
- radeon_vm_unbind(rdev, vm_evict);
- goto retry;
- }
- vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
- vm->pt += (vm->sa_bo.offset >> 3);
- vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
- vm->pt_gpu_addr += vm->sa_bo.offset;
- memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
-
-retry_id:
- /* search for free vm */
- for (i = 0; i < rdev->vm_manager.nvm; i++) {
- if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
- id = i;
- break;
- }
- }
- /* evict vm if necessary */
- if (id == -1) {
- vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
- radeon_vm_unbind(rdev, vm_evict);
- goto retry_id;
- }
-
- /* do hw bind */
- r = rdev->vm_manager.funcs->bind(rdev, vm, id);
- if (r) {
- radeon_sa_bo_free(rdev, &vm->sa_bo);
- return r;
- }
- rdev->vm_manager.use_bitmap |= 1 << id;
- vm->id = id;
- list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
- return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
- &rdev->ib_pool.sa_manager.bo->tbo.mem);
-}
-
-/* object have to be reserved */
-int radeon_vm_bo_add(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- uint64_t offset,
- uint32_t flags)
-{
- struct radeon_bo_va *bo_va, *tmp;
- struct list_head *head;
- uint64_t size = radeon_bo_size(bo), last_offset = 0;
- unsigned last_pfn;
-
- bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (bo_va == NULL) {
- return -ENOMEM;
- }
- bo_va->vm = vm;
- bo_va->bo = bo;
- bo_va->soffset = offset;
- bo_va->eoffset = offset + size;
- bo_va->flags = flags;
- bo_va->valid = false;
- INIT_LIST_HEAD(&bo_va->bo_list);
- INIT_LIST_HEAD(&bo_va->vm_list);
- /* make sure object fit at this offset */
- if (bo_va->soffset >= bo_va->eoffset) {
- kfree(bo_va);
- return -EINVAL;
- }
-
- last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
- if (last_pfn > rdev->vm_manager.max_pfn) {
- kfree(bo_va);
- dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
- last_pfn, rdev->vm_manager.max_pfn);
- return -EINVAL;
- }
-
- mutex_lock(&vm->mutex);
- if (last_pfn > vm->last_pfn) {
- /* grow va space 32M by 32M */
- unsigned align = ((32 << 20) >> 12) - 1;
- radeon_mutex_lock(&rdev->cs_mutex);
- radeon_vm_unbind_locked(rdev, vm);
- radeon_mutex_unlock(&rdev->cs_mutex);
- vm->last_pfn = (last_pfn + align) & ~align;
- }
- head = &vm->va;
- last_offset = 0;
- list_for_each_entry(tmp, &vm->va, vm_list) {
- if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
- /* bo can be added before this one */
- break;
- }
- if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
- /* bo and tmp overlap, invalid offset */
- dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
- bo, (unsigned)bo_va->soffset, tmp->bo,
- (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
- kfree(bo_va);
- mutex_unlock(&vm->mutex);
- return -EINVAL;
- }
- last_offset = tmp->eoffset;
- head = &tmp->vm_list;
- }
- list_add(&bo_va->vm_list, head);
- list_add_tail(&bo_va->bo_list, &bo->va);
- mutex_unlock(&vm->mutex);
- return 0;
-}
-
-static u64 radeon_vm_get_addr(struct radeon_device *rdev,
- struct ttm_mem_reg *mem,
- unsigned pfn)
-{
- u64 addr = 0;
-
- switch (mem->mem_type) {
- case TTM_PL_VRAM:
- addr = (mem->start << PAGE_SHIFT);
- addr += pfn * RADEON_GPU_PAGE_SIZE;
- addr += rdev->vm_manager.vram_base_offset;
- break;
- case TTM_PL_TT:
- /* offset inside page table */
- addr = mem->start << PAGE_SHIFT;
- addr += pfn * RADEON_GPU_PAGE_SIZE;
- addr = addr >> PAGE_SHIFT;
- /* page table offset */
- addr = rdev->gart.pages_addr[addr];
- /* in case cpu page size != gpu page size*/
- addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
- break;
- default:
- break;
- }
- return addr;
-}
-
-/* object have to be reserved & cs mutex took & vm mutex took */
-int radeon_vm_bo_update_pte(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- struct ttm_mem_reg *mem)
-{
- struct radeon_bo_va *bo_va;
- unsigned ngpu_pages, i;
- uint64_t addr = 0, pfn;
- uint32_t flags;
-
- /* nothing to do if vm isn't bound */
- if (vm->id == -1)
- return 0;;
-
- bo_va = radeon_bo_va(bo, vm);
- if (bo_va == NULL) {
- dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
- return -EINVAL;
- }
-
- if (bo_va->valid)
- return 0;
-
- ngpu_pages = radeon_bo_ngpu_pages(bo);
- bo_va->flags &= ~RADEON_VM_PAGE_VALID;
- bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
- if (mem) {
- if (mem->mem_type != TTM_PL_SYSTEM) {
- bo_va->flags |= RADEON_VM_PAGE_VALID;
- bo_va->valid = true;
- }
- if (mem->mem_type == TTM_PL_TT) {
- bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
- }
- }
- pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
- flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
- for (i = 0, addr = 0; i < ngpu_pages; i++) {
- if (mem && bo_va->valid) {
- addr = radeon_vm_get_addr(rdev, mem, i);
- }
- rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
- }
- rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
- return 0;
-}
-
-/* object have to be reserved */
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo)
-{
- struct radeon_bo_va *bo_va;
-
- bo_va = radeon_bo_va(bo, vm);
- if (bo_va == NULL)
- return 0;
-
- mutex_lock(&vm->mutex);
- radeon_mutex_lock(&rdev->cs_mutex);
- radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
- radeon_mutex_unlock(&rdev->cs_mutex);
- list_del(&bo_va->vm_list);
- mutex_unlock(&vm->mutex);
- list_del(&bo_va->bo_list);
-
- kfree(bo_va);
- return 0;
-}
-
-void radeon_vm_bo_invalidate(struct radeon_device *rdev,
- struct radeon_bo *bo)
-{
- struct radeon_bo_va *bo_va;
-
- BUG_ON(!atomic_read(&bo->tbo.reserved));
- list_for_each_entry(bo_va, &bo->va, bo_list) {
- bo_va->valid = false;
- }
-}
-
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- int r;
-
- vm->id = -1;
- vm->fence = NULL;
- mutex_init(&vm->mutex);
- INIT_LIST_HEAD(&vm->list);
- INIT_LIST_HEAD(&vm->va);
- vm->last_pfn = 0;
- /* map the ib pool buffer at 0 in virtual address space, set
- * read only
- */
- r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
- RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
- return r;
-}
-
-void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- struct radeon_bo_va *bo_va, *tmp;
- int r;
-
- mutex_lock(&vm->mutex);
-
- radeon_mutex_lock(&rdev->cs_mutex);
- radeon_vm_unbind_locked(rdev, vm);
- radeon_mutex_unlock(&rdev->cs_mutex);
-
- /* remove all bo */
- r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
- if (!r) {
- bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
- list_del_init(&bo_va->bo_list);
- list_del_init(&bo_va->vm_list);
- radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
- kfree(bo_va);
- }
- if (!list_empty(&vm->va)) {
- dev_err(rdev->dev, "still active bo inside vm\n");
- }
- list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
- list_del_init(&bo_va->vm_list);
- r = radeon_bo_reserve(bo_va->bo, false);
- if (!r) {
- list_del_init(&bo_va->bo_list);
- radeon_bo_unreserve(bo_va->bo);
- kfree(bo_va);
- }
- }
- mutex_unlock(&vm->mutex);
-}
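
One arithmetic detail in the VA manager being removed above deserves a note: the address space grows in 32MB steps, computed in page-frame numbers. A worked sketch of that round-up (pure arithmetic; the request value is an example):

    /* 32MB in 4K pages: (32 << 20) >> 12 == 8192 pfns. */
    unsigned align = ((32 << 20) >> 12) - 1;      /* 8191, i.e. a 0x1fff mask */
    unsigned last_pfn = 9000;                     /* example request (~35MB)  */
    unsigned grown = (last_pfn + align) & ~align;
    /* grown == 16384 pfns, so the VA space is extended to 64MB. */
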
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c7008b5..aa1ca2d 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -75,6 +75,32 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
return 0;
}
+int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+ uint64_t *gpu_addr)
+{
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
+ int r;
+
+ r = radeon_bo_reserve(robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(robj, pin_domain, gpu_addr);
+ radeon_bo_unreserve(robj);
+ return r;
+}
+
+void radeon_gem_object_unpin(struct drm_gem_object *obj)
+{
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
+ int r;
+
+ r = radeon_bo_reserve(robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
+}
+
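
These reinstated helpers wrap TTM's reserve/pin pair for callers that start from a GEM object. A hedged usage sketch (the scanout scenario and locals are illustrative):

    uint64_t gpu_addr;
    int r;

    /* Pin a framebuffer BO into VRAM and learn its GPU address. */
    r = radeon_gem_object_pin(gobj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    if (r)
            return r;
    /* ... program the CRTC base address with gpu_addr ... */
    radeon_gem_object_unpin(gobj);               /* balance the pin when done */
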
int radeon_gem_set_domain(struct drm_gem_object *gobj,
uint32_t rdomain, uint32_t wdomain)
{
@@ -116,44 +142,6 @@ void radeon_gem_fini(struct radeon_device *rdev)
radeon_bo_force_delete(rdev);
}
-/*
- * Call from drm_gem_handle_create which appear in both new and open ioctl
- * case.
- */
-int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
-{
- return 0;
-}
-
-void radeon_gem_object_close(struct drm_gem_object *obj,
- struct drm_file *file_priv)
-{
- struct radeon_bo *rbo = gem_to_radeon_bo(obj);
- struct radeon_device *rdev = rbo->rdev;
- struct radeon_fpriv *fpriv = file_priv->driver_priv;
- struct radeon_vm *vm = &fpriv->vm;
- struct radeon_bo_va *bo_va, *tmp;
-
- if (rdev->family < CHIP_CAYMAN) {
- return;
- }
-
- if (radeon_bo_reserve(rbo, false)) {
- return;
- }
- list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
- if (bo_va->vm == vm) {
- /* remove from this vm address space */
- mutex_lock(&vm->mutex);
- list_del(&bo_va->vm_list);
- mutex_unlock(&vm->mutex);
- list_del(&bo_va->bo_list);
- kfree(bo_va);
- }
- }
- radeon_bo_unreserve(rbo);
-}
-
/*
* GEM ioctls.
@@ -164,7 +152,6 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
- unsigned i;
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
@@ -173,9 +160,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
- args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
- for(i = 0; i < RADEON_NUM_RINGS; ++i)
- args->gart_size -= rdev->ring[i].ring_size;
+ args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
+ RADEON_IB_POOL_SIZE*64*1024;
return 0;
}
@@ -366,109 +352,6 @@ out:
return r;
}
-int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
-{
- struct drm_radeon_gem_va *args = data;
- struct drm_gem_object *gobj;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_fpriv *fpriv = filp->driver_priv;
- struct radeon_bo *rbo;
- struct radeon_bo_va *bo_va;
- u32 invalid_flags;
- int r = 0;
-
- if (!rdev->vm_manager.enabled) {
- args->operation = RADEON_VA_RESULT_ERROR;
- return -ENOTTY;
- }
-
- /* !! DONT REMOVE !!
- * We don't support vm_id yet, to be sure we don't have have broken
- * userspace, reject anyone trying to use non 0 value thus moving
- * forward we can use those fields without breaking existant userspace
- */
- if (args->vm_id) {
- args->operation = RADEON_VA_RESULT_ERROR;
- return -EINVAL;
- }
-
- if (args->offset < RADEON_VA_RESERVED_SIZE) {
- dev_err(&dev->pdev->dev,
- "offset 0x%lX is in reserved area 0x%X\n",
- (unsigned long)args->offset,
- RADEON_VA_RESERVED_SIZE);
- args->operation = RADEON_VA_RESULT_ERROR;
- return -EINVAL;
- }
-
- /* don't remove, we need to enforce userspace to set the snooped flag
- * otherwise we will endup with broken userspace and we won't be able
- * to enable this feature without adding new interface
- */
- invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
- if ((args->flags & invalid_flags)) {
- dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
- args->flags, invalid_flags);
- args->operation = RADEON_VA_RESULT_ERROR;
- return -EINVAL;
- }
- if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
- dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
- args->operation = RADEON_VA_RESULT_ERROR;
- return -EINVAL;
- }
-
- switch (args->operation) {
- case RADEON_VA_MAP:
- case RADEON_VA_UNMAP:
- break;
- default:
- dev_err(&dev->pdev->dev, "unsupported operation %d\n",
- args->operation);
- args->operation = RADEON_VA_RESULT_ERROR;
- return -EINVAL;
- }
-
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
- if (gobj == NULL) {
- args->operation = RADEON_VA_RESULT_ERROR;
- return -ENOENT;
- }
- rbo = gem_to_radeon_bo(gobj);
- r = radeon_bo_reserve(rbo, false);
- if (r) {
- args->operation = RADEON_VA_RESULT_ERROR;
- drm_gem_object_unreference_unlocked(gobj);
- return r;
- }
- switch (args->operation) {
- case RADEON_VA_MAP:
- bo_va = radeon_bo_va(rbo, &fpriv->vm);
- if (bo_va) {
- args->operation = RADEON_VA_RESULT_VA_EXIST;
- args->offset = bo_va->soffset;
- goto out;
- }
- r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
- args->offset, args->flags);
- break;
- case RADEON_VA_UNMAP:
- r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
- break;
- default:
- break;
- }
- args->operation = RADEON_VA_RESULT_OK;
- if (r) {
- args->operation = RADEON_VA_RESULT_ERROR;
- }
-out:
- radeon_bo_unreserve(rbo);
- drm_gem_object_unreference_unlocked(gobj);
- return r;
-}
-
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 91ef8f6..c90425c 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -23,41 +23,39 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-
#include "drmP.h"
-#include "drm_edid.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
-extern int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg *msgs, int num);
-extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
-
/**
* radeon_ddc_probe
*
*/
-bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe)
{
u8 out = 0x0;
u8 buf[8];
int ret;
struct i2c_msg msgs[] = {
{
- .addr = DDC_ADDR,
+ .addr = 0x50,
.flags = 0,
.len = 1,
.buf = &out,
},
{
- .addr = DDC_ADDR,
+ .addr = 0x50,
.flags = I2C_M_RD,
- .len = 8,
+ .len = 1,
.buf = buf,
}
};
+ /* Read 8 bytes from i2c for extended probe of EDID header */
+ if (requires_extended_probe)
+ msgs[1].len = 8;
+
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
@@ -66,24 +64,25 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
if (ret != 2)
/* Couldn't find an accessible DDC on this connector */
return false;
- /* Probe also for valid EDID header
- * EDID header starts with:
- * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
- * Only the first 6 bytes must be valid as
- * drm_edid_block_valid() can fix the last 2 bytes */
- if (drm_edid_header_is_valid(buf) < 6) {
- /* Couldn't find an accessible EDID on this
- * connector */
- return false;
+ if (requires_extended_probe) {
+ /* Probe also for valid EDID header
+ * EDID header starts with:
+ * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+ * Only the first 6 bytes must be valid as
+ * drm_edid_block_valid() can fix the last 2 bytes */
+ if (drm_edid_header_is_valid(buf) < 6) {
+ /* Couldn't find an accessible EDID on this
+ * connector */
+ return false;
+ }
}
return true;
}
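
The probe reverted above is a plain two-message I2C transaction against DDC address 0x50: write one zero byte to set the EDID offset, then read back 1 byte (or 8 bytes when the extended header check applies). A minimal standalone sketch using only i2c core calls (adapter and requires_extended_probe are assumed to be in scope):

    u8 out = 0, buf[8];
    struct i2c_msg msgs[] = {
            { .addr = 0x50, .flags = 0,        .len = 1, .buf = &out },
            { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = buf  },
    };

    if (requires_extended_probe)
            msgs[1].len = 8;            /* enough bytes to validate the header */
    /* i2c_transfer() returns the number of messages completed. */
    if (i2c_transfer(adapter, msgs, 2) != 2)
            return false;               /* no DDC responder on this connector */
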
/* bit banging i2c */
-static int pre_xfer(struct i2c_adapter *i2c_adap)
+static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
{
- struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
@@ -138,30 +137,19 @@ static int pre_xfer(struct i2c_adapter *i2c_adap)
WREG32(rec->en_data_reg, temp);
/* mask the gpio pins for software use */
- temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
- WREG32(rec->mask_clk_reg, temp);
temp = RREG32(rec->mask_clk_reg);
-
- temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask;
- WREG32(rec->mask_data_reg, temp);
- temp = RREG32(rec->mask_data_reg);
-
- return 0;
-}
-
-static void post_xfer(struct i2c_adapter *i2c_adap)
-{
- struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
- struct radeon_device *rdev = i2c->dev->dev_private;
- struct radeon_i2c_bus_rec *rec = &i2c->rec;
- uint32_t temp;
-
- /* unmask the gpio pins for software use */
- temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask;
+ if (lock_state)
+ temp |= rec->mask_clk_mask;
+ else
+ temp &= ~rec->mask_clk_mask;
WREG32(rec->mask_clk_reg, temp);
temp = RREG32(rec->mask_clk_reg);
- temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
+ temp = RREG32(rec->mask_data_reg);
+ if (lock_state)
+ temp |= rec->mask_data_mask;
+ else
+ temp &= ~rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
}
@@ -221,6 +209,22 @@ static void set_data(void *i2c_priv, int data)
WREG32(rec->en_data_reg, val);
}
+static int pre_xfer(struct i2c_adapter *i2c_adap)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+
+ radeon_i2c_do_lock(i2c, 1);
+
+ return 0;
+}
+
+static void post_xfer(struct i2c_adapter *i2c_adap)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+
+ radeon_i2c_do_lock(i2c, 0);
+}
+
/* hw i2c */
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
@@ -886,11 +890,6 @@ static const struct i2c_algorithm radeon_i2c_algo = {
.functionality = radeon_hw_i2c_func,
};
-static const struct i2c_algorithm radeon_atom_i2c_algo = {
- .master_xfer = radeon_atom_hw_i2c_xfer,
- .functionality = radeon_atom_hw_i2c_func,
-};
-
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name)
@@ -910,7 +909,6 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
if (rec->mm_i2c ||
@@ -927,18 +925,6 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
DRM_ERROR("Failed to register hw i2c %s\n", name);
goto out_free;
}
- } else if (rec->hw_capable &&
- radeon_hw_i2c &&
- ASIC_IS_DCE3(rdev)) {
- /* hw i2c using atom */
- snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
- "Radeon i2c hw bus %s", name);
- i2c->adapter.algo = &radeon_atom_i2c_algo;
- ret = i2c_add_adapter(&i2c->adapter);
- if (ret) {
- DRM_ERROR("Failed to register hw i2c %s\n", name);
- goto out_free;
- }
} else {
/* set the radeon bit adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
@@ -950,8 +936,10 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
i2c->algo.bit.setscl = set_clock;
i2c->algo.bit.getsda = get_data;
i2c->algo.bit.getscl = get_clock;
- i2c->algo.bit.udelay = 10;
- i2c->algo.bit.timeout = usecs_to_jiffies(2200); /* from VESA */
+ i2c->algo.bit.udelay = 20;
+ /* VESA says 2.2 ms is enough; 1 jiffy doesn't always seem to
+ * meet this, 2 jiffies is a lot more reliable */
+ i2c->algo.bit.timeout = 2;
i2c->algo.bit.data = i2c;
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
@@ -981,7 +969,6 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
i2c->dev = dev;
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"Radeon aux bus %s", name);
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 00da384..465746b 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -129,7 +129,7 @@ void radeon_disable_vblank(struct drm_device *dev, int crtc)
}
}
-static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
+static inline u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
{
u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
u32 irq_mask = RADEON_SW_INT_TEST;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 645dcbf..1cfe753 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -65,13 +65,12 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
unsigned i;
/* Disable *all* interrupts */
- for (i = 0; i < RADEON_NUM_RINGS; i++)
- rdev->irq.sw_int[i] = false;
+ rdev->irq.sw_int = false;
rdev->irq.gui_idle = false;
- for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
- rdev->irq.hpd[i] = false;
- for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+ for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
+ for (i = 0; i < 6; i++) {
+ rdev->irq.hpd[i] = false;
rdev->irq.pflip[i] = false;
}
radeon_irq_set(rdev);
@@ -82,11 +81,9 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
- unsigned i;
dev->max_vblank_count = 0x001fffff;
- for (i = 0; i < RADEON_NUM_RINGS; i++)
- rdev->irq.sw_int[i] = true;
+ rdev->irq.sw_int = true;
radeon_irq_set(rdev);
return 0;
}
@@ -100,13 +97,12 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
return;
}
/* Disable *all* interrupts */
- for (i = 0; i < RADEON_NUM_RINGS; i++)
- rdev->irq.sw_int[i] = false;
+ rdev->irq.sw_int = false;
rdev->irq.gui_idle = false;
- for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
- rdev->irq.hpd[i] = false;
- for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+ for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
+ for (i = 0; i < 6; i++) {
+ rdev->irq.hpd[i] = false;
rdev->irq.pflip[i] = false;
}
radeon_irq_set(rdev);
@@ -220,26 +216,26 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
flush_work_sync(&rdev->hotplug_work);
}
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
{
unsigned long irqflags;
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
- if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
- rdev->irq.sw_int[ring] = true;
+ if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
+ rdev->irq.sw_int = true;
radeon_irq_set(rdev);
}
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
{
unsigned long irqflags;
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
- BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
- if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
- rdev->irq.sw_int[ring] = false;
+ BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
+ if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
+ rdev->irq.sw_int = false;
radeon_irq_set(rdev);
}
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
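
The get/put pair above is a refcount under a spinlock: the first get enables the software interrupt, the last put disables it, and radeon_irq_set() pushes the change to hardware each time the count crosses the edge. A hedged sketch of a waiter pairing them, mirroring the fence-wait hunk earlier in this patch:

    radeon_irq_kms_sw_irq_get(rdev);            /* 0 -> 1: enable the sw IRQ */
    r = wait_event_timeout(rdev->fence_drv.queue,
                           radeon_fence_signaled(fence), timeout);
    radeon_irq_kms_sw_irq_put(rdev);            /* 1 -> 0: disable it again */
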
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 1986eba..bd58af6 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -57,12 +57,10 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
dev->dev_private = (void *)rdev;
- pci_set_master(dev->pdev);
-
/* update BUS flag */
if (drm_pci_device_is_agp(dev)) {
flags |= RADEON_IS_AGP;
- } else if (pci_is_pcie(dev->pdev)) {
+ } else if (drm_pci_device_is_pcie(dev)) {
flags |= RADEON_IS_PCIE;
} else {
flags |= RADEON_IS_PCI;
@@ -239,31 +237,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_FUSION_GART_WORKING:
value = 1;
break;
- case RADEON_INFO_BACKEND_MAP:
- if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.backend_map;
- else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.backend_map;
- else if (rdev->family >= CHIP_RV770)
- value = rdev->config.rv770.backend_map;
- else if (rdev->family >= CHIP_R600)
- value = rdev->config.r600.backend_map;
- else {
- return -EINVAL;
- }
- break;
- case RADEON_INFO_VA_START:
- /* this is where we report if vm is supported or not */
- if (rdev->family < CHIP_CAYMAN)
- return -EINVAL;
- value = RADEON_VA_RESERVED_SIZE;
- break;
- case RADEON_INFO_IB_VM_MAX_SIZE:
- /* this is where we report if vm is supported or not */
- if (rdev->family < CHIP_CAYMAN)
- return -EINVAL;
- value = RADEON_IB_VM_MAX_SIZE;
- break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
@@ -284,6 +257,7 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
return 0;
}
+
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
vga_switcheroo_process_delayed_switch();
@@ -291,45 +265,12 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
- struct radeon_device *rdev = dev->dev_private;
-
- file_priv->driver_priv = NULL;
-
- /* new gpu have virtual address space support */
- if (rdev->family >= CHIP_CAYMAN) {
- struct radeon_fpriv *fpriv;
- int r;
-
- fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
- if (unlikely(!fpriv)) {
- return -ENOMEM;
- }
-
- r = radeon_vm_init(rdev, &fpriv->vm);
- if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
- kfree(fpriv);
- return r;
- }
-
- file_priv->driver_priv = fpriv;
- }
return 0;
}
void radeon_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct radeon_device *rdev = dev->dev_private;
-
- /* new gpu have virtual address space support */
- if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
- struct radeon_fpriv *fpriv = file_priv->driver_priv;
-
- radeon_vm_fini(rdev, &fpriv->vm);
- kfree(fpriv);
- file_priv->driver_priv = NULL;
- }
}
void radeon_driver_preclose_kms(struct drm_device *dev,
@@ -497,6 +438,5 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 210317c..41a5d48 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -419,9 +419,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
- /* Only 27 bit offset for legacy CRTC */
- r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 1 << 27,
- &base);
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
return -EINVAL;
@@ -439,7 +437,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
crtc_offset_cntl = 0;
- pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
((target_fb->bits_per_pixel * 8) - 1)) /
(target_fb->bits_per_pixel * 8));
@@ -993,6 +991,12 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
+
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index b37ec0f..c7b6cb4 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -864,7 +864,7 @@ void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder,
*v_sync_strt_wid = tmp;
}
-static int get_post_div(int value)
+static inline int get_post_div(int value)
{
int post_div;
switch (value) {
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index 988548e..ed95155 100644
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -139,7 +139,7 @@ static int init_heap(struct mem_block **heap, int start, int size)
if (!blocks)
return -ENOMEM;
- *heap = kzalloc(sizeof(**heap), GFP_KERNEL);
+ *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
if (!*heap) {
kfree(blocks);
return -ENOMEM;
@@ -150,6 +150,7 @@ static int init_heap(struct mem_block **heap, int start, int size)
blocks->file_priv = NULL;
blocks->next = blocks->prev = *heap;
+ memset(*heap, 0, sizeof(**heap));
(*heap)->file_priv = (struct drm_file *) - 1;
(*heap)->next = (*heap)->prev = blocks;
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 8a85598..ed0178f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -362,7 +362,6 @@ struct radeon_encoder_atom_dig {
struct backlight_device *bl_dev;
int dpms_mode;
uint8_t backlight_level;
- int panel_mode;
};
struct radeon_encoder_atom_dac {
@@ -439,6 +438,9 @@ struct radeon_connector {
struct radeon_i2c_chan *ddc_bus;
/* some systems have an hdmi and vga port with a shared ddc line */
bool shared_ddc;
+ /* for some Radeon chip families we apply an additional EDID header
+ check as part of the DDC probe */
+ bool requires_extended_probe;
bool use_digital;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
@@ -457,8 +459,6 @@ struct radeon_framebuffer {
struct drm_gem_object *obj;
};
-#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
- ((em) == ATOM_ENCODER_MODE_DP_MST))
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
@@ -467,13 +467,9 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev);
extern struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder);
-extern struct drm_connector *
-radeon_get_connector_for_encoder_init(struct drm_encoder *encoder);
-extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
- u32 pixel_clock);
-extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
-extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
+extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder);
+extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
@@ -487,16 +483,13 @@ extern void radeon_dp_link_train(struct drm_encoder *encoder,
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
-extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
- struct drm_connector *connector);
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
extern void radeon_atom_encoder_init(struct radeon_device *rdev);
-extern void radeon_atom_dcpll_init(struct radeon_device *rdev);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
-extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
+extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder);
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte);
@@ -526,7 +519,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 val);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector,
+ bool requires_extended_probe);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
@@ -649,9 +643,9 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-int radeon_framebuffer_init(struct drm_device *dev,
+void radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
- struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj);
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 91541e6..35da1b4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -46,20 +46,6 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/
-void radeon_bo_clear_va(struct radeon_bo *bo)
-{
- struct radeon_bo_va *bo_va, *tmp;
-
- list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
- /* remove from all vm address space */
- mutex_lock(&bo_va->vm->mutex);
- list_del(&bo_va->vm_list);
- mutex_unlock(&bo_va->vm->mutex);
- list_del(&bo_va->bo_list);
- kfree(bo_va);
- }
-}
-
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct radeon_bo *bo;
@@ -69,7 +55,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
- radeon_bo_clear_va(bo);
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@@ -110,7 +95,6 @@ int radeon_bo_create(struct radeon_device *rdev,
enum ttm_bo_type type;
unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
unsigned long max_size = 0;
- size_t acc_size;
int r;
size = ALIGN(size, PAGE_SIZE);
@@ -133,9 +117,6 @@ int radeon_bo_create(struct radeon_device *rdev,
return -ENOMEM;
}
- acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
- sizeof(struct radeon_bo));
-
retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
@@ -149,13 +130,12 @@ retry:
bo->gem_base.driver_private = NULL;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
- INIT_LIST_HEAD(&bo->va);
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, 0, !kernel, NULL,
- acc_size, &radeon_ttm_bo_destroy);
+ &bo->placement, page_align, 0, !kernel, NULL, size,
+ &radeon_ttm_bo_destroy);
mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
@@ -461,54 +441,8 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
uint32_t tiling_flags, uint32_t pitch)
{
- struct radeon_device *rdev = bo->rdev;
int r;
- if (rdev->family >= CHIP_CEDAR) {
- unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;
-
- bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
- bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
- mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
- tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
- stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
- switch (bankw) {
- case 0:
- case 1:
- case 2:
- case 4:
- case 8:
- break;
- default:
- return -EINVAL;
- }
- switch (bankh) {
- case 0:
- case 1:
- case 2:
- case 4:
- case 8:
- break;
- default:
- return -EINVAL;
- }
- switch (mtaspect) {
- case 0:
- case 1:
- case 2:
- case 4:
- case 8:
- break;
- default:
- return -EINVAL;
- }
- if (tilesplit > 6) {
- return -EINVAL;
- }
- if (stilesplit > 6) {
- return -EINVAL;
- }
- }
r = radeon_bo_reserve(bo, false);
if (unlikely(r != 0))
return r;
@@ -565,7 +499,6 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
return;
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 1);
- radeon_vm_bo_invalidate(rbo->rdev, rbo);
}
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -598,57 +531,3 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
}
return 0;
}
-
-int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
- if (unlikely(r != 0))
- return r;
- spin_lock(&bo->tbo.bdev->fence_lock);
- if (mem_type)
- *mem_type = bo->tbo.mem.mem_type;
- if (bo->tbo.sync_obj)
- r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
- spin_unlock(&bo->tbo.bdev->fence_lock);
- ttm_bo_unreserve(&bo->tbo);
- return r;
-}
-
-
-/**
- * radeon_bo_reserve - reserve bo
- * @bo: bo structure
- * @no_wait: don't sleep while trying to reserve (return -EBUSY)
- *
- * Returns:
- * -EBUSY: buffer is busy and @no_wait is true
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- */
-int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
- return r;
- }
- return 0;
-}
-
-/* object have to be reserved */
-struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
-{
- struct radeon_bo_va *bo_va;
-
- list_for_each_entry(bo_va, &rbo->va, bo_list) {
- if (bo_va->vm == vm) {
- return bo_va;
- }
- }
- return NULL;
-}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index f9104be..7199c6a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -52,7 +52,28 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
return 0;
}
-int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait);
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo: bo structure
+ * @no_wait: don't sleep while trying to reserve (return -EBUSY)
+ *
+ * Returns:
+ * -EBUSY: buffer is busy and @no_wait is true
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+ return r;
+ }
+ return 0;
+}
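
For reference, the usual call pattern around this inline is reserve, operate, unreserve; a minimal hypothetical caller (radeon_bo_pin and radeon_bo_unreserve are used exactly this way elsewhere in this patch):

    /* sketch only: pin a BO into GTT and report its GPU address */
    static int example_pin(struct radeon_bo *bo, u64 *gpu_addr)
    {
            int r;

            r = radeon_bo_reserve(bo, false);       /* may sleep */
            if (unlikely(r != 0))
                    return r;   /* -ERESTARTSYS is deliberately not logged */
            r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, gpu_addr);
            radeon_bo_unreserve(bo);
            return r;
    }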
static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
@@ -83,16 +104,6 @@ static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
return !!atomic_read(&bo->tbo.reserved);
}
-static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
-{
- return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
-}
-
-static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
-{
- return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
-}
-
/**
* radeon_bo_mmap_offset - return mmap offset of bo
* @bo: radeon object for which we query the offset
@@ -107,8 +118,23 @@ static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
return bo->tbo.addr_space_offset;
}
-extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
- bool no_wait);
+static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+ bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0))
+ return r;
+ spin_lock(&bo->tbo.bdev->fence_lock);
+ if (mem_type)
+ *mem_type = bo->tbo.mem.mem_type;
+ if (bo->tbo.sync_obj)
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ spin_unlock(&bo->tbo.bdev->fence_lock);
+ ttm_bo_unreserve(&bo->tbo);
+ return r;
+}
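
Likewise, a sketch of blocking until a BO is idle before touching it from the CPU; the mem_type out-parameter is optional:

    u32 mem_type;
    int r;

    r = radeon_bo_wait(bo, &mem_type, false);   /* false: sleep until idle */
    if (r == 0 && mem_type == TTM_PL_VRAM)
            ;   /* object is idle and currently resident in VRAM */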
extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
@@ -140,26 +166,4 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
-extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
- struct radeon_vm *vm);
-
-/*
- * sub allocation
- */
-extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager,
- unsigned size, u32 domain);
-extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager);
-extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager);
-extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager);
-extern int radeon_sa_bo_new(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo *sa_bo,
- unsigned size, unsigned align);
-extern void radeon_sa_bo_free(struct radeon_device *rdev,
- struct radeon_sa_bo *sa_bo);
-
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 44d0e5b..8270a85 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -53,24 +53,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev);
#define ACPI_AC_CLASS "ac_adapter"
-int radeon_pm_get_type_index(struct radeon_device *rdev,
- enum radeon_pm_state_type ps_type,
- int instance)
-{
- int i;
- int found_instance = -1;
-
- for (i = 0; i < rdev->pm.num_power_states; i++) {
- if (rdev->pm.power_state[i].type == ps_type) {
- found_instance++;
- if (found_instance == instance)
- return i;
- }
- }
- /* return default if no match */
- return rdev->pm.default_power_state_index;
-}
-
#ifdef CONFIG_ACPI
static int radeon_acpi_event(struct notifier_block *nb,
unsigned long val,
@@ -221,7 +203,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
}
/* set memory clock */
- if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_memory_clock(rdev, mclk);
radeon_pm_debug_check_in_vbl(rdev, true);
@@ -252,10 +234,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (rdev->ring[i].ring_obj)
- mutex_lock(&rdev->ring[i].mutex);
- }
+ mutex_lock(&rdev->cp.mutex);
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
@@ -271,13 +250,12 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
} else {
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- if (ring->ready) {
+ if (rdev->cp.ready) {
struct radeon_fence *fence;
- radeon_ring_alloc(rdev, ring, 64);
- radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
+ radeon_ring_alloc(rdev, 64);
+ radeon_fence_create(rdev, &fence);
radeon_fence_emit(rdev, fence);
- radeon_ring_commit(rdev, ring);
+ radeon_ring_commit(rdev);
radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
}
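
The single-ring drain idiom introduced above, restated in isolation (error handling omitted for brevity):

    struct radeon_fence *fence;

    radeon_ring_alloc(rdev, 64);            /* room for the fence packet */
    radeon_fence_create(rdev, &fence);
    radeon_fence_emit(rdev, fence);         /* ordered after all prior work */
    radeon_ring_commit(rdev);
    radeon_fence_wait(fence, false);        /* returns once the CP reaches it */
    radeon_fence_unref(&fence);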
@@ -311,10 +289,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (rdev->ring[i].ring_obj)
- mutex_unlock(&rdev->ring[i].mutex);
- }
+ mutex_unlock(&rdev->cp.mutex);
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
@@ -806,14 +781,19 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+ unsigned long irq_flags;
int not_processed = 0;
- int i;
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- not_processed += radeon_fence_count_emitted(rdev, i);
- if (not_processed >= 3)
- break;
+ read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ if (!list_empty(&rdev->fence_drv.emited)) {
+ struct list_head *ptr;
+ list_for_each(ptr, &rdev->fence_drv.emited) {
+			/* count up to 3, that's enough info */
+ if (++not_processed >= 3)
+ break;
+ }
}
+ read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (not_processed >= 3) { /* should upclock */
if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
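
The loop added above is a bounded count: only the question "are at least 3 fences still pending?" matters, so the walk stops early. A standalone restatement (count_pending is a hypothetical name):

    /* count emitted-but-unsignaled fences, capped at 3 */
    static int count_pending(struct list_head *emited)
    {
            struct list_head *ptr;
            int n = 0;

            list_for_each(ptr, emited)
                    if (++n >= 3)
                            break;
            return n;       /* >= 3 means the GPU is busy: upclock */
    }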
@@ -871,11 +851,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
else
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
- if (rdev->asic->pm.get_memory_clock)
+ if (rdev->asic->get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
if (rdev->pm.current_vddc)
seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
- if (rdev->asic->pm.get_pcie_lanes)
+ if (rdev->asic->get_pcie_lanes)
seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 5098634..bc44a3d 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -539,11 +539,9 @@
#define RADEON_CRTC2_PITCH 0x032c
#define RADEON_CRTC_STATUS 0x005c
-# define RADEON_CRTC_VBLANK_CUR (1 << 0)
# define RADEON_CRTC_VBLANK_SAVE (1 << 1)
# define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1)
#define RADEON_CRTC2_STATUS 0x03fc
-# define RADEON_CRTC2_VBLANK_CUR (1 << 0)
# define RADEON_CRTC2_VBLANK_SAVE (1 << 1)
# define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1)
#define RADEON_CRTC_V_SYNC_STRT_WID 0x020c
@@ -3297,7 +3295,7 @@
# define RADEON_RB_BUFSZ_MASK (0x3f << 0)
# define RADEON_RB_BLKSZ_SHIFT 8
# define RADEON_RB_BLKSZ_MASK (0x3f << 8)
-# define RADEON_BUF_SWAP_32BIT (2 << 16)
+# define RADEON_BUF_SWAP_32BIT (1 << 17)
# define RADEON_MAX_FETCH_SHIFT 18
# define RADEON_MAX_FETCH_MASK (0x3 << 18)
# define RADEON_RB_NO_UPDATE (1 << 27)
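
Note that the two spellings of RADEON_BUF_SWAP_32BIT are numerically identical: (2 << 16) and (1 << 17) are both 0x20000. The surrounding shifts suggest a two-bit swap-mode field at bits 17:16, where mode 2 selects 32-bit swapping, so this hunk reverts the spelling, not the value. A compile-time check (hypothetical placement, inside any init function) makes that explicit:

    BUILD_BUG_ON((2 << 16) != (1 << 17));   /* both are 0x20000 */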
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 3056620..08c0233 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -34,133 +34,93 @@
#include "atom.h"
int radeon_debugfs_ib_init(struct radeon_device *rdev);
-int radeon_debugfs_ring_init(struct radeon_device *rdev);
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
- u32 pg_idx, pg_offset;
- u32 idx_value = 0;
- int new_page;
-
- pg_idx = (idx * 4) / PAGE_SIZE;
- pg_offset = (idx * 4) % PAGE_SIZE;
-
- if (ibc->kpage_idx[0] == pg_idx)
- return ibc->kpage[0][pg_offset/4];
- if (ibc->kpage_idx[1] == pg_idx)
- return ibc->kpage[1][pg_offset/4];
-
- new_page = radeon_cs_update_pages(p, pg_idx);
- if (new_page < 0) {
- p->parser_error = new_page;
- return 0;
- }
+ struct radeon_ib *ib, *n;
- idx_value = ibc->kpage[new_page][pg_offset/4];
- return idx_value;
+ list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
+ list_del(&ib->list);
+ vfree(ib->ptr);
+ kfree(ib);
+ }
}
-void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
{
-#if DRM_DEBUG_CODE
- if (ring->count_dw <= 0) {
-		DRM_ERROR("radeon: writing more dwords to ring than expected!\n");
+ struct radeon_ib *bib;
+
+ bib = kmalloc(sizeof(*bib), GFP_KERNEL);
+ if (bib == NULL)
+ return;
+ bib->ptr = vmalloc(ib->length_dw * 4);
+ if (bib->ptr == NULL) {
+ kfree(bib);
+ return;
}
-#endif
- ring->ring[ring->wptr++] = v;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw--;
- ring->ring_free_dw--;
+ memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
+ bib->length_dw = ib->length_dw;
+ mutex_lock(&rdev->ib_pool.mutex);
+ list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
+ mutex_unlock(&rdev->ib_pool.mutex);
}
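
A hypothetical call site for the snapshot helper above: when command-stream validation flags an IB as bad, copy it aside so it can be dumped later through the radeon_ib_bogus debugfs file registered below.

    if (parser_error)                       /* hypothetical condition */
            radeon_ib_bogus_add(rdev, ib);  /* best effort: silently skips on OOM */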
/*
* IB.
*/
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- bool done = false;
-
-	/* only free ibs which have been emitted */
- if (ib->fence && ib->fence->emitted) {
- if (radeon_fence_signaled(ib->fence)) {
- radeon_fence_unref(&ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo);
- done = true;
- }
- }
- return done;
-}
-
-int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib **ib, unsigned size)
+int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
struct radeon_fence *fence;
- unsigned cretry = 0;
- int r = 0, i, idx;
+ struct radeon_ib *nib;
+ int r = 0, i, c;
*ib = NULL;
- /* align size on 256 bytes */
- size = ALIGN(size, 256);
-
- r = radeon_fence_create(rdev, &fence, ring);
+ r = radeon_fence_create(rdev, &fence);
if (r) {
dev_err(rdev->dev, "failed to create fence for new IB\n");
return r;
}
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- idx = rdev->ib_pool.head_id;
-retry:
- if (cretry > 5) {
- dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return -ENOMEM;
- }
- cretry++;
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
- if (rdev->ib_pool.ibs[idx].fence == NULL) {
- r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
- &rdev->ib_pool.ibs[idx].sa_bo,
- size, 256);
- if (!r) {
- *ib = &rdev->ib_pool.ibs[idx];
- (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
- (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
- (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
- (*ib)->gpu_addr += (*ib)->sa_bo.offset;
- (*ib)->fence = fence;
- (*ib)->vm_id = 0;
- /* ib are most likely to be allocated in a ring fashion
- * thus rdev->ib_pool.head_id should be the id of the
- * oldest ib
- */
- rdev->ib_pool.head_id = (1 + idx);
- rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- return 0;
- }
+ mutex_lock(&rdev->ib_pool.mutex);
+ for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+ i &= (RADEON_IB_POOL_SIZE - 1);
+ if (rdev->ib_pool.ibs[i].free) {
+ nib = &rdev->ib_pool.ibs[i];
+ break;
}
- idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
}
- /* this should be rare event, ie all ib scheduled none signaled yet.
- */
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
- r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
- if (!r) {
- goto retry;
- }
- /* an error happened */
- break;
+ if (nib == NULL) {
+		/* This should never happen: it means we allocated all
+		 * IBs and haven't scheduled one yet. Return EBUSY to
+		 * userspace hoping that a later ioctl call has better
+		 * luck
+ */
+		dev_err(rdev->dev, "no free indirect buffer!\n");
+ mutex_unlock(&rdev->ib_pool.mutex);
+ radeon_fence_unref(&fence);
+ return -EBUSY;
+ }
+ rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ nib->free = false;
+ if (nib->fence) {
+ mutex_unlock(&rdev->ib_pool.mutex);
+ r = radeon_fence_wait(nib->fence, false);
+ if (r) {
+ dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+ nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+ mutex_lock(&rdev->ib_pool.mutex);
+ nib->free = true;
+ mutex_unlock(&rdev->ib_pool.mutex);
+ radeon_fence_unref(&fence);
+ return r;
}
- idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ mutex_lock(&rdev->ib_pool.mutex);
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return r;
+ radeon_fence_unref(&nib->fence);
+ nib->fence = fence;
+ nib->length_dw = 0;
+ mutex_unlock(&rdev->ib_pool.mutex);
+ *ib = nib;
+ return 0;
}
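
The allocation above is a circular first-free scan over a power-of-two pool. Restated minimally, with head_id, free[] and POOL_SIZE standing in for the rdev->ib_pool fields:

    int slot = -1, i, c;

    for (i = head_id, c = 0; c < POOL_SIZE; c++, i++) {
            i &= POOL_SIZE - 1;     /* wrap: POOL_SIZE is a power of two */
            if (free[i]) {
                    slot = i;       /* oldest candidate is tried first */
                    break;
            }
    }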
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -171,258 +131,247 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
if (tmp == NULL) {
return;
}
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (tmp->fence && !tmp->fence->emitted) {
- radeon_sa_bo_free(rdev, &tmp->sa_bo);
+ if (!tmp->fence->emited)
radeon_fence_unref(&tmp->fence);
- }
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
+ mutex_lock(&rdev->ib_pool.mutex);
+ tmp->free = true;
+ mutex_unlock(&rdev->ib_pool.mutex);
}
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
int r = 0;
- if (!ib->length_dw || !ring->ready) {
+ if (!ib->length_dw || !rdev->cp.ready) {
/* TODO: Nothings in the ib we should report. */
DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL;
}
/* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, ring, 64);
+ r = radeon_ring_lock(rdev, 64);
if (r) {
DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
return r;
}
- radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
+ radeon_ring_ib_execute(rdev, ib);
radeon_fence_emit(rdev, ib->fence);
- radeon_ring_unlock_commit(rdev, ring);
+ mutex_lock(&rdev->ib_pool.mutex);
+	/* once scheduled, an IB is considered free and is protected by its fence */
+ ib->free = true;
+ mutex_unlock(&rdev->ib_pool.mutex);
+ radeon_ring_unlock_commit(rdev);
return 0;
}
int radeon_ib_pool_init(struct radeon_device *rdev)
{
- struct radeon_sa_manager tmp;
- int i, r;
+ void *ptr;
+ uint64_t gpu_addr;
+ int i;
+ int r = 0;
- r = radeon_sa_bo_manager_init(rdev, &tmp,
- RADEON_IB_POOL_SIZE*64*1024,
- RADEON_GEM_DOMAIN_GTT);
+ if (rdev->ib_pool.robj)
+ return 0;
+ INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
+ /* Allocate 1M object buffer */
+ r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
+ PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
+ &rdev->ib_pool.robj);
if (r) {
+		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
return r;
}
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.ready) {
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_sa_bo_manager_fini(rdev, &tmp);
- return 0;
+ r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->ib_pool.robj);
+ DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
+ radeon_bo_unreserve(rdev->ib_pool.robj);
+ if (r) {
+ DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
+ return r;
}
-
- rdev->ib_pool.sa_manager = tmp;
- INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- rdev->ib_pool.ibs[i].fence = NULL;
+ unsigned offset;
+
+ offset = i * 64 * 1024;
+ rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
+ rdev->ib_pool.ibs[i].ptr = ptr + offset;
rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0;
- INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
+ rdev->ib_pool.ibs[i].free = true;
}
rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
-
if (radeon_debugfs_ib_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for IB !\n");
}
- if (radeon_debugfs_ring_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for rings !\n");
- }
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- return 0;
+ return r;
}
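
For scale: RADEON_IB_POOL_SIZE was historically 16, so the pool object is 16 x 64 KiB = 1 MiB (matching the "1M object buffer" comment above), and each IB is a fixed slot into the one pinned, kmapped buffer:

    unsigned offset = i * 64 * 1024;        /* slot stride: 64 KiB */

    ibs[i].gpu_addr = gpu_addr + offset;    /* GPU view of slot i */
    ibs[i].ptr      = ptr + offset;         /* CPU view of slot i */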
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
- unsigned i;
+ int r;
+ struct radeon_bo *robj;
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.ready) {
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
- radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
+ if (!rdev->ib_pool.ready) {
+ return;
+ }
+ mutex_lock(&rdev->ib_pool.mutex);
+ radeon_ib_bogus_cleanup(rdev);
+ robj = rdev->ib_pool.robj;
+ rdev->ib_pool.robj = NULL;
+ mutex_unlock(&rdev->ib_pool.mutex);
+
+ if (robj) {
+ r = radeon_bo_reserve(robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(robj);
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
}
- radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
- rdev->ib_pool.ready = false;
+ radeon_bo_unref(&robj);
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
}
-int radeon_ib_pool_start(struct radeon_device *rdev)
-{
- return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
-}
-
-int radeon_ib_pool_suspend(struct radeon_device *rdev)
-{
- return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
-}
/*
* Ring.
*/
-int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- /* r1xx-r5xx only has CP ring */
- if (rdev->family < CHIP_R600)
- return RADEON_RING_TYPE_GFX_INDEX;
-
- if (rdev->family >= CHIP_CAYMAN) {
- if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
- return CAYMAN_RING_TYPE_CP1_INDEX;
- else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
- return CAYMAN_RING_TYPE_CP2_INDEX;
- }
- return RADEON_RING_TYPE_GFX_INDEX;
-}
-
-void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_free_size(struct radeon_device *rdev)
{
- u32 rptr;
-
if (rdev->wb.enabled)
- rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
- else
- rptr = RREG32(ring->rptr_reg);
- ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+ rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
+ else {
+ if (rdev->family >= CHIP_R600)
+ rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+ else
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ }
/* This works because ring_size is a power of 2 */
- ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
- ring->ring_free_dw -= ring->wptr;
- ring->ring_free_dw &= ring->ptr_mask;
- if (!ring->ring_free_dw) {
- ring->ring_free_dw = ring->ring_size / 4;
+ rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
+ rdev->cp.ring_free_dw -= rdev->cp.wptr;
+ rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
+ if (!rdev->cp.ring_free_dw) {
+ rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
}
}
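
The free-space formula works because ring_size/4 is a power of two. Worked numbers, assuming a 16 KiB ring (4096 dwords, ptr_mask = 4095):

    unsigned free_dw = (rptr + 4096 - wptr) & 4095;

    /* rptr = 100,  wptr = 4000: free = (100 + 4096 - 4000) & 4095 = 196
     * rptr == wptr (empty ring): free = 4096 & 4095 = 0, then fixed up
     * to 4096 by the final branch. A completely full ring would yield
     * the same 0, which is why radeon_ring_alloc below always leaves
     * one dword of slack (ndw > ring_free_dw - 1). */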
-
-int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
int r;
/* Align requested size with padding so unlock_commit can
* pad safely */
- ndw = (ndw + ring->align_mask) & ~ring->align_mask;
- while (ndw > (ring->ring_free_dw - 1)) {
- radeon_ring_free_size(rdev, ring);
- if (ndw < ring->ring_free_dw) {
+ ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
+ while (ndw > (rdev->cp.ring_free_dw - 1)) {
+ radeon_ring_free_size(rdev);
+ if (ndw < rdev->cp.ring_free_dw) {
break;
}
- r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
+ r = radeon_fence_wait_next(rdev);
if (r)
return r;
}
- ring->count_dw = ndw;
- ring->wptr_old = ring->wptr;
+ rdev->cp.count_dw = ndw;
+ rdev->cp.wptr_old = rdev->cp.wptr;
return 0;
}
-int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
int r;
- mutex_lock(&ring->mutex);
- r = radeon_ring_alloc(rdev, ring, ndw);
+ mutex_lock(&rdev->cp.mutex);
+ r = radeon_ring_alloc(rdev, ndw);
if (r) {
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->cp.mutex);
return r;
}
return 0;
}
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_commit(struct radeon_device *rdev)
{
unsigned count_dw_pad;
unsigned i;
/* We pad to match fetch size */
- count_dw_pad = (ring->align_mask + 1) -
- (ring->wptr & ring->align_mask);
+ count_dw_pad = (rdev->cp.align_mask + 1) -
+ (rdev->cp.wptr & rdev->cp.align_mask);
for (i = 0; i < count_dw_pad; i++) {
- radeon_ring_write(ring, ring->nop);
+ radeon_ring_write(rdev, 2 << 30);
}
DRM_MEMORYBARRIER();
- WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
- (void)RREG32(ring->wptr_reg);
+ radeon_cp_commit(rdev);
}
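
The pad loop rounds wptr up to the next fetch boundary with NOPs; 2 << 30 appears to set the CP packet-type field in bits 31:30, type 2 being the filler packet. Worked numbers, assuming align_mask = 15 (16-dword fetches):

    /* wptr = 37: pad = 16 - (37 & 15) = 11 NOPs, wptr becomes 48
     * wptr = 48: pad = 16 - (48 & 15) = 16 NOPs, wptr becomes 64
     * (an already aligned ring still gets one full fetch of padding) */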
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
- radeon_ring_commit(rdev, ring);
- mutex_unlock(&ring->mutex);
+ radeon_ring_commit(rdev);
+ mutex_unlock(&rdev->cp.mutex);
}
-void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
- ring->wptr = ring->wptr_old;
- mutex_unlock(&ring->mutex);
+ rdev->cp.wptr = rdev->cp.wptr_old;
+ mutex_unlock(&rdev->cp.mutex);
}
-int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
- unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
- u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
+int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
int r;
- ring->ring_size = ring_size;
- ring->rptr_offs = rptr_offs;
- ring->rptr_reg = rptr_reg;
- ring->wptr_reg = wptr_reg;
- ring->ptr_reg_shift = ptr_reg_shift;
- ring->ptr_reg_mask = ptr_reg_mask;
- ring->nop = nop;
+ rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
- if (ring->ring_obj == NULL) {
- r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
+ if (rdev->cp.ring_obj == NULL) {
+ r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
- &ring->ring_obj);
+ &rdev->cp.ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
- r = radeon_bo_reserve(ring->ring_obj, false);
+ r = radeon_bo_reserve(rdev->cp.ring_obj, false);
if (unlikely(r != 0))
return r;
- r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
- &ring->gpu_addr);
+ r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->cp.gpu_addr);
if (r) {
- radeon_bo_unreserve(ring->ring_obj);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
- r = radeon_bo_kmap(ring->ring_obj,
- (void **)&ring->ring);
- radeon_bo_unreserve(ring->ring_obj);
+ r = radeon_bo_kmap(rdev->cp.ring_obj,
+ (void **)&rdev->cp.ring);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
- ring->ptr_mask = (ring->ring_size / 4) - 1;
- ring->ring_free_dw = ring->ring_size / 4;
+ rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
+ rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
return 0;
}
-void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_fini(struct radeon_device *rdev)
{
int r;
struct radeon_bo *ring_obj;
- mutex_lock(&ring->mutex);
- ring_obj = ring->ring_obj;
- ring->ring = NULL;
- ring->ring_obj = NULL;
- mutex_unlock(&ring->mutex);
+ mutex_lock(&rdev->cp.mutex);
+ ring_obj = rdev->cp.ring_obj;
+ rdev->cp.ring = NULL;
+ rdev->cp.ring_obj = NULL;
+ mutex_unlock(&rdev->cp.mutex);
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
@@ -435,96 +384,78 @@ void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
}
}
+
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
-
-static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
+static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct radeon_device *rdev = dev->dev_private;
- int ridx = *(int*)node->info_ent->data;
- struct radeon_ring *ring = &rdev->ring[ridx];
- unsigned count, i, j;
-
- radeon_ring_free_size(rdev, ring);
- count = (ring->ring_size / 4) - ring->ring_free_dw;
- seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
- seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
- seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
- seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
- seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
- seq_printf(m, "%u dwords in ring\n", count);
- i = ring->rptr;
- for (j = 0; j <= count; j++) {
- seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
- i = (i + 1) & ring->ptr_mask;
+ struct radeon_ib *ib = node->info_ent->data;
+ unsigned i;
+
+ if (ib == NULL) {
+ return 0;
+ }
+ seq_printf(m, "IB %04u\n", ib->idx);
+ seq_printf(m, "IB fence %p\n", ib->fence);
+ seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
+ for (i = 0; i < ib->length_dw; i++) {
+ seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
}
return 0;
}
-static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
-static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
-static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
-
-static struct drm_info_list radeon_debugfs_ring_info_list[] = {
- {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
- {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
- {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
-};
-
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
+ struct radeon_device *rdev = node->info_ent->data;
+ struct radeon_ib *ib;
unsigned i;
- if (ib == NULL) {
+ mutex_lock(&rdev->ib_pool.mutex);
+ if (list_empty(&rdev->ib_pool.bogus_ib)) {
+ mutex_unlock(&rdev->ib_pool.mutex);
+ seq_printf(m, "no bogus IB recorded\n");
return 0;
}
- seq_printf(m, "IB %04u\n", ib->idx);
- seq_printf(m, "IB fence %p\n", ib->fence);
+ ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
+ list_del_init(&ib->list);
+ mutex_unlock(&rdev->ib_pool.mutex);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {
seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
}
+ vfree(ib->ptr);
+ kfree(ib);
return 0;
}
static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
-static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
-#endif
-int radeon_debugfs_ring_init(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
- if (rdev->family >= CHIP_CAYMAN)
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
- ARRAY_SIZE(radeon_debugfs_ring_info_list));
- else
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 1);
-#else
- return 0;
+static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
+ {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
+};
#endif
-}
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
+ int r;
+ radeon_debugfs_ib_bogus_info_list[0].data = rdev;
+ r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
+ if (r)
+ return r;
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
- radeon_debugfs_ib_idx[i] = i;
radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
radeon_debugfs_ib_list[i].driver_features = 0;
- radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
+ radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
}
return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
RADEON_IB_POOL_SIZE);
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
deleted file mode 100644
index 4cce47e..0000000
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors:
- * Jerome Glisse <glisse@freedesktop.org>
- */
-#include "drmP.h"
-#include "drm.h"
-#include "radeon.h"
-
-int radeon_sa_bo_manager_init(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager,
- unsigned size, u32 domain)
-{
- int r;
-
- sa_manager->bo = NULL;
- sa_manager->size = size;
- sa_manager->domain = domain;
- INIT_LIST_HEAD(&sa_manager->sa_bo);
-
- r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
- if (r) {
- dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
- return r;
- }
-
- return r;
-}
-
-void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager)
-{
- struct radeon_sa_bo *sa_bo, *tmp;
-
- if (!list_empty(&sa_manager->sa_bo)) {
- dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
- }
- list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
- list_del_init(&sa_bo->list);
- }
- radeon_bo_unref(&sa_manager->bo);
- sa_manager->size = 0;
-}
-
-int radeon_sa_bo_manager_start(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager)
-{
- int r;
-
- if (sa_manager->bo == NULL) {
- dev_err(rdev->dev, "no bo for sa manager\n");
- return -EINVAL;
- }
-
- /* map the buffer */
- r = radeon_bo_reserve(sa_manager->bo, false);
- if (r) {
- dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
- return r;
- }
- r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
- if (r) {
- radeon_bo_unreserve(sa_manager->bo);
- dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
- return r;
- }
- r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
- radeon_bo_unreserve(sa_manager->bo);
- return r;
-}
-
-int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager)
-{
- int r;
-
- if (sa_manager->bo == NULL) {
- dev_err(rdev->dev, "no bo for sa manager\n");
- return -EINVAL;
- }
-
- r = radeon_bo_reserve(sa_manager->bo, false);
- if (!r) {
- radeon_bo_kunmap(sa_manager->bo);
- radeon_bo_unpin(sa_manager->bo);
- radeon_bo_unreserve(sa_manager->bo);
- }
- return r;
-}
-
-/*
- * The principle is simple: we keep a list of sub-allocations in
- * offset order (the first entry has offset == 0, the last entry has
- * the highest offset).
- *
- * When allocating a new object we first check whether there is room
- * at the end: total_size - (last_object_offset + last_object_size)
- * >= alloc_size. If so, we allocate the new object there.
- *
- * When there is not enough room at the end, we start waiting on
- * each sub-object until we reach object_offset + object_size >=
- * alloc_size; that object then becomes the sub-object we return.
- *
- * Alignment can't be bigger than the page size.
- */
-int radeon_sa_bo_new(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo *sa_bo,
- unsigned size, unsigned align)
-{
- struct radeon_sa_bo *tmp;
- struct list_head *head;
- unsigned offset = 0, wasted = 0;
-
- BUG_ON(align > RADEON_GPU_PAGE_SIZE);
- BUG_ON(size > sa_manager->size);
-
- /* no one ? */
- head = sa_manager->sa_bo.prev;
- if (list_empty(&sa_manager->sa_bo)) {
- goto out;
- }
-
- /* look for a hole big enough */
- offset = 0;
- list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
- /* room before this object ? */
- if ((tmp->offset - offset) >= size) {
- head = tmp->list.prev;
- goto out;
- }
- offset = tmp->offset + tmp->size;
- wasted = offset % align;
- if (wasted) {
- wasted = align - wasted;
- }
- offset += wasted;
- }
- /* room at the end ? */
- head = sa_manager->sa_bo.prev;
- tmp = list_entry(head, struct radeon_sa_bo, list);
- offset = tmp->offset + tmp->size;
- wasted = offset % align;
- if (wasted) {
- wasted = align - wasted;
- }
- offset += wasted;
- if ((sa_manager->size - offset) < size) {
- /* failed to find somethings big enough */
- return -ENOMEM;
- }
-
-out:
- sa_bo->manager = sa_manager;
- sa_bo->offset = offset;
- sa_bo->size = size;
- list_add(&sa_bo->list, head);
- return 0;
-}
-
-void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
-{
- list_del_init(&sa_bo->list);
-}
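
The file deleted above implemented the first-fit scheme its header comment describes. A compact model of that scan, with hypothetical types (nodes kept sorted by offset, alignment no larger than a page):

    struct sa_node { unsigned offset, size; };

    /* return the offset where 'size' bytes fit, or -1 if nothing fits */
    static int sa_first_fit(const struct sa_node *nodes, int n,
                            unsigned total, unsigned size, unsigned align)
    {
            unsigned offset = 0;
            int i;

            for (i = 0; i < n; i++) {
                    if (nodes[i].offset - offset >= size)
                            return offset;          /* hole before node i */
                    offset = nodes[i].offset + nodes[i].size;
                    if (offset % align)             /* skip alignment waste */
                            offset += align - offset % align;
            }
            return (total - offset >= size) ? (int)offset : -1;     /* tail */
    }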
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
deleted file mode 100644
index 61dd4e3..0000000
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright 2011 Christian König.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors:
- * Christian König <deathsimple@vodafone.de>
- */
-#include "drmP.h"
-#include "drm.h"
-#include "radeon.h"
-
-static int radeon_semaphore_add_bo(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo;
- unsigned long irq_flags;
- uint64_t gpu_addr;
- uint32_t *cpu_ptr;
- int r, i;
-
-
- bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
- if (bo == NULL) {
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&bo->free);
- INIT_LIST_HEAD(&bo->list);
- bo->nused = 0;
-
- r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
- if (r) {
- dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
- kfree(bo);
- return r;
- }
- gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
- gpu_addr += bo->ib->sa_bo.offset;
- cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
- cpu_ptr += (bo->ib->sa_bo.offset >> 2);
- for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
- bo->semaphores[i].gpu_addr = gpu_addr;
- bo->semaphores[i].cpu_ptr = cpu_ptr;
- bo->semaphores[i].bo = bo;
- list_add_tail(&bo->semaphores[i].list, &bo->free);
- gpu_addr += 8;
- cpu_ptr += 2;
- }
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
- return 0;
-}
-
-static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
- struct radeon_semaphore_bo *bo)
-{
- radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
- radeon_fence_unref(&bo->ib->fence);
- list_del(&bo->list);
- kfree(bo);
-}
-
-void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo, *n;
-
- if (list_empty(&rdev->semaphore_drv.bo)) {
- return;
- }
- /* only shrink if first bo has free semaphore */
- bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
- if (list_empty(&bo->free)) {
- return;
- }
- list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
- if (bo->nused)
- continue;
- radeon_semaphore_del_bo_locked(rdev, bo);
- }
-}
-
-int radeon_semaphore_create(struct radeon_device *rdev,
- struct radeon_semaphore **semaphore)
-{
- struct radeon_semaphore_bo *bo;
- unsigned long irq_flags;
- bool do_retry = true;
- int r;
-
-retry:
- *semaphore = NULL;
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
- if (list_empty(&bo->free))
- continue;
- *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
- (*semaphore)->cpu_ptr[0] = 0;
- (*semaphore)->cpu_ptr[1] = 0;
- list_del(&(*semaphore)->list);
- bo->nused++;
- break;
- }
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
-
- if (*semaphore == NULL) {
- if (do_retry) {
- do_retry = false;
- r = radeon_semaphore_add_bo(rdev);
- if (r)
- return r;
- goto retry;
- }
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
- struct radeon_semaphore *semaphore)
-{
- radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
-}
-
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
- struct radeon_semaphore *semaphore)
-{
- radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
-}
-
-void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore)
-{
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- semaphore->bo->nused--;
- list_add_tail(&semaphore->list, &semaphore->bo->free);
- radeon_semaphore_shrink_locked(rdev);
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
-}
-
-void radeon_semaphore_driver_fini(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo, *n;
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- /* we force to free everything */
- list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
- if (!list_empty(&bo->free)) {
- dev_err(rdev->dev, "still in use semaphore\n");
- }
- radeon_semaphore_del_bo_locked(rdev, bo);
- }
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
-}
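
Also gone with this file is its allocate-or-grow-once pattern: take a semaphore from any backing BO's free list; on a miss, add exactly one BO and retry. A generic sketch, where pool, item, alloc_fast and grow_pool are hypothetical stand-ins for the driver structures:

    struct pool;
    struct item;
    static struct item *alloc_fast(struct pool *p); /* NULL when lists empty */
    static int grow_pool(struct pool *p);           /* add one backing buffer */

    static int alloc_retry_once(struct pool *p, struct item **out)
    {
            bool retried = false;
            int r;

            for (;;) {
                    *out = alloc_fast(p);
                    if (*out)
                            return 0;
                    if (retried)
                            return -ENOMEM;
                    r = grow_pool(p);
                    if (r)
                            return r;
                    retried = true;
            }
    }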
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index e8422ae..92e7ea7 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -272,12 +272,12 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
return 0;
}
-static int radeon_check_and_fixup_packet3(drm_radeon_private_t *
- dev_priv,
- struct drm_file *file_priv,
- drm_radeon_kcmd_buffer_t *
- cmdbuf,
- unsigned int *cmdsz)
+static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
+ dev_priv,
+ struct drm_file *file_priv,
+ drm_radeon_kcmd_buffer_t *
+ cmdbuf,
+ unsigned int *cmdsz)
{
u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
u32 offset, narrays;
@@ -446,8 +446,8 @@ static int radeon_check_and_fixup_packet3(drm_radeon_private_t *
* CP hardware state programming functions
*/
-static void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
- struct drm_clip_rect * box)
+static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
+ struct drm_clip_rect * box)
{
RING_LOCALS;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index dc5dcf4..dee4a0c 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -40,16 +40,10 @@ void radeon_test_moves(struct radeon_device *rdev)
size = 1024 * 1024;
/* Number of tests =
- * (Total GTT - IB pool - writeback page - ring buffers) / test size
+ * (Total GTT - IB pool - writeback page - ring buffer) / test size
*/
- n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- n -= rdev->ring[i].ring_size;
- if (rdev->wb.wb_obj)
- n -= RADEON_GPU_PAGE_SIZE;
- if (rdev->ih.ring_obj)
- n -= rdev->ih.ring_size;
- n /= size;
+ n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
+ rdev->cp.ring_size)) / size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
if (!gtt_obj) {
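
Worked numbers for the new single-ring test count, assuming a 512 MiB GTT, the 1 MiB IB pool, one 4 KiB writeback page and a 1 MiB CP ring:

    /* n = (512M - 1M - 4K - 1M) / 1M = 509 test objects (integer division);
     * the (u32) cast keeps the arithmetic cheap on 32-bit hosts, under the
     * assumption that the GTT is smaller than 4 GiB. */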
@@ -106,7 +100,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
- r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+ r = radeon_fence_create(rdev, &fence);
if (r) {
DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
goto out_cleanup;
@@ -138,15 +132,9 @@ void radeon_test_moves(struct radeon_device *rdev)
gtt_start++, vram_start++) {
if (*vram_start != gtt_start) {
DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
- "expected 0x%p (GTT/VRAM offset "
- "0x%16llx/0x%16llx)\n",
- i, *vram_start, gtt_start,
- (unsigned long long)
- (gtt_addr - rdev->mc.gtt_start +
- (void*)gtt_start - gtt_map),
- (unsigned long long)
- (vram_addr - rdev->mc.vram_start +
- (void*)gtt_start - gtt_map));
+ "expected 0x%p (GTT map 0x%p-0x%p)\n",
+ i, *vram_start, gtt_start, gtt_map,
+ gtt_end);
radeon_bo_kunmap(vram_obj);
goto out_cleanup;
}
@@ -155,7 +143,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(vram_obj);
- r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+ r = radeon_fence_create(rdev, &fence);
if (r) {
DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
goto out_cleanup;
@@ -187,15 +175,9 @@ void radeon_test_moves(struct radeon_device *rdev)
gtt_start++, vram_start++) {
if (*gtt_start != vram_start) {
DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
- "expected 0x%p (VRAM/GTT offset "
- "0x%16llx/0x%16llx)\n",
- i, *gtt_start, vram_start,
- (unsigned long long)
- (vram_addr - rdev->mc.vram_start +
- (void*)vram_start - vram_map),
- (unsigned long long)
- (gtt_addr - rdev->mc.gtt_start +
- (void*)vram_start - vram_map));
+ "expected 0x%p (VRAM map 0x%p-0x%p)\n",
+ i, *gtt_start, vram_start, vram_map,
+ vram_end);
radeon_bo_kunmap(gtt_obj[i]);
goto out_cleanup;
}
@@ -234,264 +216,3 @@ out_cleanup:
printk(KERN_WARNING "Error while testing BO move.\n");
}
}
-
-void radeon_test_ring_sync(struct radeon_device *rdev,
- struct radeon_ring *ringA,
- struct radeon_ring *ringB)
-{
- struct radeon_fence *fence1 = NULL, *fence2 = NULL;
- struct radeon_semaphore *semaphore = NULL;
- int ridxA = radeon_ring_index(rdev, ringA);
- int ridxB = radeon_ring_index(rdev, ringB);
- int r;
-
- r = radeon_fence_create(rdev, &fence1, ridxA);
- if (r) {
- DRM_ERROR("Failed to create sync fence 1\n");
- goto out_cleanup;
- }
- r = radeon_fence_create(rdev, &fence2, ridxA);
- if (r) {
- DRM_ERROR("Failed to create sync fence 2\n");
- goto out_cleanup;
- }
-
- r = radeon_semaphore_create(rdev, &semaphore);
- if (r) {
- DRM_ERROR("Failed to create semaphore\n");
- goto out_cleanup;
- }
-
- r = radeon_ring_lock(rdev, ringA, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring A %d\n", ridxA);
- goto out_cleanup;
- }
- radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
- radeon_fence_emit(rdev, fence1);
- radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
- radeon_fence_emit(rdev, fence2);
- radeon_ring_unlock_commit(rdev, ringA);
-
- mdelay(1000);
-
- if (radeon_fence_signaled(fence1)) {
- DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
- goto out_cleanup;
- }
-
- r = radeon_ring_lock(rdev, ringB, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringB);
- goto out_cleanup;
- }
- radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
- radeon_ring_unlock_commit(rdev, ringB);
-
- r = radeon_fence_wait(fence1, false);
- if (r) {
- DRM_ERROR("Failed to wait for sync fence 1\n");
- goto out_cleanup;
- }
-
- mdelay(1000);
-
- if (radeon_fence_signaled(fence2)) {
- DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
- goto out_cleanup;
- }
-
- r = radeon_ring_lock(rdev, ringB, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringB);
- goto out_cleanup;
- }
- radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
- radeon_ring_unlock_commit(rdev, ringB);
-
- r = radeon_fence_wait(fence2, false);
- if (r) {
-		DRM_ERROR("Failed to wait for sync fence 2\n");
- goto out_cleanup;
- }
-
-out_cleanup:
- if (semaphore)
- radeon_semaphore_free(rdev, semaphore);
-
- if (fence1)
- radeon_fence_unref(&fence1);
-
- if (fence2)
- radeon_fence_unref(&fence2);
-
- if (r)
- printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
-}
-
-void radeon_test_ring_sync2(struct radeon_device *rdev,
- struct radeon_ring *ringA,
- struct radeon_ring *ringB,
- struct radeon_ring *ringC)
-{
- struct radeon_fence *fenceA = NULL, *fenceB = NULL;
- struct radeon_semaphore *semaphore = NULL;
- int ridxA = radeon_ring_index(rdev, ringA);
- int ridxB = radeon_ring_index(rdev, ringB);
- int ridxC = radeon_ring_index(rdev, ringC);
- bool sigA, sigB;
- int i, r;
-
- r = radeon_fence_create(rdev, &fenceA, ridxA);
- if (r) {
- DRM_ERROR("Failed to create sync fence 1\n");
- goto out_cleanup;
- }
- r = radeon_fence_create(rdev, &fenceB, ridxB);
- if (r) {
- DRM_ERROR("Failed to create sync fence 2\n");
- goto out_cleanup;
- }
-
- r = radeon_semaphore_create(rdev, &semaphore);
- if (r) {
- DRM_ERROR("Failed to create semaphore\n");
- goto out_cleanup;
- }
-
- r = radeon_ring_lock(rdev, ringA, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring A %d\n", ridxA);
- goto out_cleanup;
- }
- radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
- radeon_fence_emit(rdev, fenceA);
- radeon_ring_unlock_commit(rdev, ringA);
-
- r = radeon_ring_lock(rdev, ringB, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring B %d\n", ridxB);
- goto out_cleanup;
- }
- radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
- radeon_fence_emit(rdev, fenceB);
- radeon_ring_unlock_commit(rdev, ringB);
-
- mdelay(1000);
-
- if (radeon_fence_signaled(fenceA)) {
- DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
- goto out_cleanup;
- }
- if (radeon_fence_signaled(fenceB)) {
-		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
- goto out_cleanup;
- }
-
- r = radeon_ring_lock(rdev, ringC, 64);
- if (r) {
-		DRM_ERROR("Failed to lock ring C %p\n", ringC);
- goto out_cleanup;
- }
- radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
- radeon_ring_unlock_commit(rdev, ringC);
-
- for (i = 0; i < 30; ++i) {
- mdelay(100);
- sigA = radeon_fence_signaled(fenceA);
- sigB = radeon_fence_signaled(fenceB);
- if (sigA || sigB)
- break;
- }
-
- if (!sigA && !sigB) {
- DRM_ERROR("Neither fence A nor B has been signaled\n");
- goto out_cleanup;
- } else if (sigA && sigB) {
-		DRM_ERROR("Both fence A and B have been signaled\n");
- goto out_cleanup;
- }
-
- DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
-
- r = radeon_ring_lock(rdev, ringC, 64);
- if (r) {
-		DRM_ERROR("Failed to lock ring C %p\n", ringC);
- goto out_cleanup;
- }
- radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
- radeon_ring_unlock_commit(rdev, ringC);
-
- mdelay(1000);
-
- r = radeon_fence_wait(fenceA, false);
- if (r) {
- DRM_ERROR("Failed to wait for sync fence A\n");
- goto out_cleanup;
- }
- r = radeon_fence_wait(fenceB, false);
- if (r) {
- DRM_ERROR("Failed to wait for sync fence B\n");
- goto out_cleanup;
- }
-
-out_cleanup:
- if (semaphore)
- radeon_semaphore_free(rdev, semaphore);
-
- if (fenceA)
- radeon_fence_unref(&fenceA);
-
- if (fenceB)
- radeon_fence_unref(&fenceB);
-
- if (r)
- printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
-}
-
-void radeon_test_syncing(struct radeon_device *rdev)
-{
- int i, j, k;
-
- for (i = 1; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_ring *ringA = &rdev->ring[i];
- if (!ringA->ready)
- continue;
-
- for (j = 0; j < i; ++j) {
- struct radeon_ring *ringB = &rdev->ring[j];
- if (!ringB->ready)
- continue;
-
- DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
- radeon_test_ring_sync(rdev, ringA, ringB);
-
- DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
- radeon_test_ring_sync(rdev, ringB, ringA);
-
- for (k = 0; k < j; ++k) {
- struct radeon_ring *ringC = &rdev->ring[k];
- if (!ringC->ready)
- continue;
-
- DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
- radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
-
- DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
- radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
-
- DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
- radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
-
- DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
- radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
-
- DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
- radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
-
- DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
- radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
- }
- }
- }
-}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f493c64..3e9b41b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -114,6 +114,24 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
}
}
+struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
+
+static struct ttm_backend*
+radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+{
+ struct radeon_device *rdev;
+
+ rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
+ } else
+#endif
+ {
+ return radeon_ttm_backend_create(rdev);
+ }
+}
+
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -188,7 +206,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+ if (rbo->rdev->cp.ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -223,10 +241,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
uint64_t old_start, new_start;
struct radeon_fence *fence;
- int r, i;
+ int r;
rdev = radeon_get_rdev(bo->bdev);
- r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
+ r = radeon_fence_create(rdev, &fence);
if (unlikely(r)) {
return r;
}
@@ -255,43 +273,13 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
return -EINVAL;
}
- if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ if (!rdev->cp.ready) {
+ DRM_ERROR("Trying to move memory with CP turned off.\n");
return -EINVAL;
}
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
- /* sync other rings */
- if (rdev->family >= CHIP_R600) {
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- /* no need to sync to our own or unused rings */
- if (i == radeon_copy_ring_index(rdev) || !rdev->ring[i].ready)
- continue;
-
- if (!fence->semaphore) {
- r = radeon_semaphore_create(rdev, &fence->semaphore);
- /* FIXME: handle semaphore error */
- if (r)
- continue;
- }
-
- r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
- /* FIXME: handle ring lock error */
- if (r)
- continue;
- radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
- radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
-
- r = radeon_ring_lock(rdev, &rdev->ring[radeon_copy_ring_index(rdev)], 3);
- /* FIXME: handle ring lock error */
- if (r)
- continue;
- radeon_semaphore_emit_wait(rdev, radeon_copy_ring_index(rdev), fence->semaphore);
- radeon_ring_unlock_commit(rdev, &rdev->ring[radeon_copy_ring_index(rdev)]);
- }
- }
-
r = radeon_copy(rdev, old_start, new_start,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
fence);
@@ -410,8 +398,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
radeon_move_null(bo, new_mem);
return 0;
}
- if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
- rdev->asic->copy.copy == NULL) {
+ if (!rdev->cp.ready || rdev->asic->copy == NULL) {
/* use memcpy */
goto memcpy;
}
@@ -468,29 +455,6 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
return -EINVAL;
mem->bus.base = rdev->mc.aper_base;
mem->bus.is_iomem = true;
-#ifdef __alpha__
- /*
- * Alpha: use bus.addr to hold the ioremap() return,
- * so we can modify bus.base below.
- */
- if (mem->placement & TTM_PL_FLAG_WC)
- mem->bus.addr =
- ioremap_wc(mem->bus.base + mem->bus.offset,
- mem->bus.size);
- else
- mem->bus.addr =
- ioremap_nocache(mem->bus.base + mem->bus.offset,
- mem->bus.size);
-
- /*
- * Alpha: Use just the bus offset plus
- * the hose/domain memory base for bus.base.
- * It then can be used to build PTEs for VRAM
- * access, as done in ttm_bo_vm_fault().
- */
- mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
- rdev->ddev->hose->dense_mem_base;
-#endif
break;
default:
return -EINVAL;
@@ -528,166 +492,8 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
-/*
- * TTM backend functions.
- */
-struct radeon_ttm_tt {
- struct ttm_dma_tt ttm;
- struct radeon_device *rdev;
- u64 offset;
-};
-
-static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
-{
- struct radeon_ttm_tt *gtt = (void*)ttm;
- int r;
-
- gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
- if (!ttm->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
- ttm->num_pages, bo_mem, ttm);
- }
- r = radeon_gart_bind(gtt->rdev, gtt->offset,
- ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
- if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
- ttm->num_pages, (unsigned)gtt->offset);
- return r;
- }
- return 0;
-}
-
-static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
-{
- struct radeon_ttm_tt *gtt = (void *)ttm;
-
- radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
- return 0;
-}
-
-static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
-{
- struct radeon_ttm_tt *gtt = (void *)ttm;
-
- ttm_dma_tt_fini(&gtt->ttm);
- kfree(gtt);
-}
-
-static struct ttm_backend_func radeon_backend_func = {
- .bind = &radeon_ttm_backend_bind,
- .unbind = &radeon_ttm_backend_unbind,
- .destroy = &radeon_ttm_backend_destroy,
-};
-
-struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
-{
- struct radeon_device *rdev;
- struct radeon_ttm_tt *gtt;
-
- rdev = radeon_get_rdev(bdev);
-#if __OS_HAS_AGP
- if (rdev->flags & RADEON_IS_AGP) {
- return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
- size, page_flags, dummy_read_page);
- }
-#endif
-
- gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
- if (gtt == NULL) {
- return NULL;
- }
- gtt->ttm.ttm.func = &radeon_backend_func;
- gtt->rdev = rdev;
- if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
- kfree(gtt);
- return NULL;
- }
- return &gtt->ttm.ttm;
-}
-
-static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
-{
- struct radeon_device *rdev;
- struct radeon_ttm_tt *gtt = (void *)ttm;
- unsigned i;
- int r;
-
- if (ttm->state != tt_unpopulated)
- return 0;
-
- rdev = radeon_get_rdev(ttm->bdev);
-#if __OS_HAS_AGP
- if (rdev->flags & RADEON_IS_AGP) {
- return ttm_agp_tt_populate(ttm);
- }
-#endif
-
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, rdev->dev);
- }
-#endif
-
- r = ttm_pool_populate(ttm);
- if (r) {
- return r;
- }
-
- for (i = 0; i < ttm->num_pages; i++) {
- gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
- while (--i) {
- pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- gtt->ttm.dma_address[i] = 0;
- }
- ttm_pool_unpopulate(ttm);
- return -EFAULT;
- }
- }
- return 0;
-}
-
-static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
-{
- struct radeon_device *rdev;
- struct radeon_ttm_tt *gtt = (void *)ttm;
- unsigned i;
-
- rdev = radeon_get_rdev(ttm->bdev);
-#if __OS_HAS_AGP
- if (rdev->flags & RADEON_IS_AGP) {
- ttm_agp_tt_unpopulate(ttm);
- return;
- }
-#endif
-
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
- return;
- }
-#endif
-
- for (i = 0; i < ttm->num_pages; i++) {
- if (gtt->ttm.dma_address[i]) {
- pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
-
- ttm_pool_unpopulate(ttm);
-}
-
static struct ttm_bo_driver radeon_bo_driver = {
- .ttm_tt_create = &radeon_ttm_tt_create,
- .ttm_tt_populate = &radeon_ttm_tt_populate,
- .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
+ .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
.evict_flags = &radeon_evict_flags,
@@ -851,6 +657,124 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
}
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_backend {
+ struct ttm_backend backend;
+ struct radeon_device *rdev;
+ unsigned long num_pages;
+ struct page **pages;
+ struct page *dummy_read_page;
+ dma_addr_t *dma_addrs;
+ bool populated;
+ bool bound;
+ unsigned offset;
+};
+
+static int radeon_ttm_backend_populate(struct ttm_backend *backend,
+ unsigned long num_pages,
+ struct page **pages,
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
+{
+ struct radeon_ttm_backend *gtt;
+
+ gtt = container_of(backend, struct radeon_ttm_backend, backend);
+ gtt->pages = pages;
+ gtt->dma_addrs = dma_addrs;
+ gtt->num_pages = num_pages;
+ gtt->dummy_read_page = dummy_read_page;
+ gtt->populated = true;
+ return 0;
+}
+
+static void radeon_ttm_backend_clear(struct ttm_backend *backend)
+{
+ struct radeon_ttm_backend *gtt;
+
+ gtt = container_of(backend, struct radeon_ttm_backend, backend);
+ gtt->pages = NULL;
+ gtt->dma_addrs = NULL;
+ gtt->num_pages = 0;
+ gtt->dummy_read_page = NULL;
+ gtt->populated = false;
+ gtt->bound = false;
+}
+
+
+static int radeon_ttm_backend_bind(struct ttm_backend *backend,
+ struct ttm_mem_reg *bo_mem)
+{
+ struct radeon_ttm_backend *gtt;
+ int r;
+
+ gtt = container_of(backend, struct radeon_ttm_backend, backend);
+ gtt->offset = bo_mem->start << PAGE_SHIFT;
+ if (!gtt->num_pages) {
+ WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ gtt->num_pages, bo_mem, backend);
+ }
+ r = radeon_gart_bind(gtt->rdev, gtt->offset,
+ gtt->num_pages, gtt->pages, gtt->dma_addrs);
+ if (r) {
+ DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+ gtt->num_pages, gtt->offset);
+ return r;
+ }
+ gtt->bound = true;
+ return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
+{
+ struct radeon_ttm_backend *gtt;
+
+ gtt = container_of(backend, struct radeon_ttm_backend, backend);
+ radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
+ gtt->bound = false;
+ return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
+{
+ struct radeon_ttm_backend *gtt;
+
+ gtt = container_of(backend, struct radeon_ttm_backend, backend);
+ if (gtt->bound) {
+ radeon_ttm_backend_unbind(backend);
+ }
+ kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+ .populate = &radeon_ttm_backend_populate,
+ .clear = &radeon_ttm_backend_clear,
+ .bind = &radeon_ttm_backend_bind,
+ .unbind = &radeon_ttm_backend_unbind,
+ .destroy = &radeon_ttm_backend_destroy,
+};
+
+struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
+{
+ struct radeon_ttm_backend *gtt;
+
+ gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
+ if (gtt == NULL) {
+ return NULL;
+ }
+ gtt->backend.bdev = &rdev->mman.bdev;
+ gtt->backend.flags = 0;
+ gtt->backend.func = &radeon_backend_func;
+ gtt->rdev = rdev;
+ gtt->pages = NULL;
+ gtt->num_pages = 0;
+ gtt->dummy_read_page = NULL;
+ gtt->populated = false;
+ gtt->bound = false;
+ return &gtt->backend;
+}
+
#define RADEON_DEBUGFS_MEM_TYPES 2
#if defined(CONFIG_DEBUG_FS)
@@ -873,8 +797,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
unsigned i;
for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -896,17 +820,8 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].name = radeon_mem_types_names[i];
radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
radeon_mem_types_list[i].driver_features = 0;
- radeon_mem_types_list[i++].data = NULL;
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
- radeon_mem_types_list[i].driver_features = 0;
- radeon_mem_types_list[i++].data = NULL;
- }
-#endif
- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
+ radeon_mem_types_list[i].data = NULL;
+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
#endif
return 0;
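
The radeon_ttm.c hunks above revert from the ttm_tt-embedded backend (ttm_tt_create/populate/unpopulate on the bo_driver) to the 3.0-era stand-alone struct ttm_backend whose function table carries populate, clear, bind, unbind and destroy. A rough self-contained model of that older callback lifecycle follows; the struct layouts and call order are illustrative stand-ins, not the real TTM declarations:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct backend;

/* illustrative stand-in for the 3.0-era ttm_backend_func table */
struct backend_funcs {
    int  (*populate)(struct backend *be, unsigned long num_pages);
    void (*clear)(struct backend *be);
    int  (*bind)(struct backend *be, unsigned long gpu_offset);
    int  (*unbind)(struct backend *be);
    void (*destroy)(struct backend *be);
};

struct backend {
    const struct backend_funcs *func;
    unsigned long num_pages;
    unsigned long offset;
    bool populated, bound;
};

static int be_populate(struct backend *be, unsigned long n)
{
    be->num_pages = n; be->populated = true; return 0;
}
static void be_clear(struct backend *be)
{
    be->num_pages = 0; be->populated = be->bound = false;
}
static int be_bind(struct backend *be, unsigned long off)
{
    be->offset = off; be->bound = true; return 0;
}
static int be_unbind(struct backend *be) { be->bound = false; return 0; }
static void be_destroy(struct backend *be)
{
    if (be->bound)              /* mirrors radeon_ttm_backend_destroy() */
        be->func->unbind(be);
    free(be);
}

static const struct backend_funcs funcs = {
    be_populate, be_clear, be_bind, be_unbind, be_destroy,
};

int main(void)
{
    struct backend *be = calloc(1, sizeof(*be));

    be->func = &funcs;
    /* the order TTM drives the table over a buffer's lifetime */
    be->func->populate(be, 16);
    be->func->bind(be, 0x100000);
    be->func->unbind(be);
    be->func->clear(be);
    be->func->destroy(be);
    puts("lifecycle complete");
    return 0;
}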
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index aea63c4..0aa8e85 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -1,8 +1,5 @@
cayman 0x9400
0x0000802C GRBM_GFX_INDEX
-0x000084FC CP_STRMOUT_CNTL
-0x000085F0 CP_COHER_CNTL
-0x000085F4 CP_COHER_SIZE
0x000088B0 VGT_VTX_VECT_EJECT_REG
0x000088C4 VGT_CACHE_INVALIDATION
0x000088D4 VGT_GS_VERTEX_REUSE
@@ -80,6 +77,7 @@ cayman 0x9400
0x0002802C DB_DEPTH_CLEAR
0x00028030 PA_SC_SCREEN_SCISSOR_TL
0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x0002805C DB_DEPTH_SLICE
0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
@@ -208,8 +206,8 @@ cayman 0x9400
0x00028344 PA_SC_VPORT_ZMAX_14
0x00028348 PA_SC_VPORT_ZMIN_15
0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028350 SX_MISC
0x00028354 SX_SURFACE_SYNC
-0x0002835C SX_SCATTER_EXPORT_SIZE
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
0x00028388 SQ_VTX_SEMANTIC_2
@@ -434,7 +432,6 @@ cayman 0x9400
0x00028700 SPI_STACK_MGMT
0x00028704 SPI_WAVE_MGMT_1
0x00028708 SPI_WAVE_MGMT_2
-0x00028720 GDS_ADDR_BASE
0x00028724 GDS_ADDR_SIZE
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
@@ -513,13 +510,6 @@ cayman 0x9400
0x00028AC0 DB_SRESULTS_COMPARE_STATE0
0x00028AC4 DB_SRESULTS_COMPARE_STATE1
0x00028AC8 DB_PRELOAD_CONTROL
-0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
-0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
-0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
-0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
-0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
-0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
-0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
0x00028B38 VGT_GS_MAX_VERT_OUT
0x00028B54 VGT_SHADER_STAGES_EN
0x00028B58 VGT_LS_HS_CONFIG
@@ -559,18 +549,6 @@ cayman 0x9400
0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3
0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0
0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1
-0x00028C78 CB_COLOR0_DIM
-0x00028CB4 CB_COLOR1_DIM
-0x00028CF0 CB_COLOR2_DIM
-0x00028D2C CB_COLOR3_DIM
-0x00028D68 CB_COLOR4_DIM
-0x00028DA4 CB_COLOR5_DIM
-0x00028DE0 CB_COLOR6_DIM
-0x00028E1C CB_COLOR7_DIM
-0x00028E58 CB_COLOR8_DIM
-0x00028E74 CB_COLOR9_DIM
-0x00028E90 CB_COLOR10_DIM
-0x00028EAC CB_COLOR11_DIM
0x00028C8C CB_COLOR0_CLEAR_WORD0
0x00028C90 CB_COLOR0_CLEAR_WORD1
0x00028C94 CB_COLOR0_CLEAR_WORD2
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 77c3720..0e28cae 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -4,9 +4,6 @@ evergreen 0x9400
0x00008044 WAIT_UNTIL_POLL_CNTL
0x00008048 WAIT_UNTIL_POLL_MASK
0x0000804c WAIT_UNTIL_POLL_REFDATA
-0x000084FC CP_STRMOUT_CNTL
-0x000085F0 CP_COHER_CNTL
-0x000085F4 CP_COHER_SIZE
0x000088B0 VGT_VTX_VECT_EJECT_REG
0x000088C4 VGT_CACHE_INVALIDATION
0x000088D4 VGT_GS_VERTEX_REUSE
@@ -47,7 +44,6 @@ evergreen 0x9400
0x00008E28 SQ_STATIC_THREAD_MGMT_3
0x00008E2C SQ_LDS_RESOURCE_MGMT
0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
-0x00009014 SX_MEMORY_EXPORT_SIZE
0x00009100 SPI_CONFIG_CNTL
0x0000913C SPI_CONFIG_CNTL_1
0x00009508 TA_CNTL_AUX
@@ -96,6 +92,7 @@ evergreen 0x9400
0x0002802C DB_DEPTH_CLEAR
0x00028030 PA_SC_SCREEN_SCISSOR_TL
0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x0002805C DB_DEPTH_SLICE
0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
@@ -224,6 +221,7 @@ evergreen 0x9400
0x00028344 PA_SC_VPORT_ZMAX_14
0x00028348 PA_SC_VPORT_ZMIN_15
0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028350 SX_MISC
0x00028354 SX_SURFACE_SYNC
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
@@ -444,9 +442,7 @@ evergreen 0x9400
0x000286EC SPI_COMPUTE_NUM_THREAD_X
0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
-0x00028720 GDS_ADDR_BASE
0x00028724 GDS_ADDR_SIZE
-0x00028728 GDS_ORDERED_WAVE_PER_SE
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
0x00028788 CB_BLEND2_CONTROL
@@ -523,13 +519,6 @@ evergreen 0x9400
0x00028AC0 DB_SRESULTS_COMPARE_STATE0
0x00028AC4 DB_SRESULTS_COMPARE_STATE1
0x00028AC8 DB_PRELOAD_CONTROL
-0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
-0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
-0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
-0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
-0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
-0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
-0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
0x00028B38 VGT_GS_MAX_VERT_OUT
0x00028B54 VGT_SHADER_STAGES_EN
0x00028B58 VGT_LS_HS_CONFIG
@@ -562,18 +551,6 @@ evergreen 0x9400
0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
0x00028C3C PA_SC_AA_MASK
-0x00028C78 CB_COLOR0_DIM
-0x00028CB4 CB_COLOR1_DIM
-0x00028CF0 CB_COLOR2_DIM
-0x00028D2C CB_COLOR3_DIM
-0x00028D68 CB_COLOR4_DIM
-0x00028DA4 CB_COLOR5_DIM
-0x00028DE0 CB_COLOR6_DIM
-0x00028E1C CB_COLOR7_DIM
-0x00028E58 CB_COLOR8_DIM
-0x00028E74 CB_COLOR9_DIM
-0x00028E90 CB_COLOR10_DIM
-0x00028EAC CB_COLOR11_DIM
0x00028C8C CB_COLOR0_CLEAR_WORD0
0x00028C90 CB_COLOR0_CLEAR_WORD1
0x00028C94 CB_COLOR0_CLEAR_WORD2
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 626c24e..ea49752 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -3,9 +3,6 @@ r600 0x9400
0x00028230 R7xx_PA_SC_EDGERULE
0x000286C8 R7xx_SPI_THREAD_GROUPING
0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
-0x00008490 CP_STRMOUT_CNTL
-0x000085F0 CP_COHER_CNTL
-0x000085F4 CP_COHER_SIZE
0x000088C4 VGT_CACHE_INVALIDATION
0x00028A50 VGT_ENHANCE
0x000088CC VGT_ES_PER_GS
@@ -41,13 +38,6 @@ r600 0x9400
0x00028AB4 VGT_REUSE_OFF
0x00028AB8 VGT_VTX_CNT_EN
0x000088B0 VGT_VTX_VECT_EJECT_REG
-0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
-0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
-0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
-0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
-0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
-0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
-0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
0x00028810 PA_CL_CLIP_CNTL
0x00008A14 PA_CL_ENHANCE
0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
@@ -438,8 +428,7 @@ r600 0x9400
0x00028638 SPI_VS_OUT_ID_9
0x00028438 SX_ALPHA_REF
0x00028410 SX_ALPHA_TEST_CONTROL
-0x00028354 SX_SURFACE_SYNC
-0x00009014 SX_MEMORY_EXPORT_SIZE
+0x00028350 SX_MISC
0x00009604 TC_INVALIDATE
0x00009400 TD_FILTER4
0x00009404 TD_FILTER4_1
@@ -753,6 +742,14 @@ r600 0x9400
0x00028114 CB_COLOR5_MASK
0x00028118 CB_COLOR6_MASK
0x0002811C CB_COLOR7_MASK
+0x00028080 CB_COLOR0_VIEW
+0x00028084 CB_COLOR1_VIEW
+0x00028088 CB_COLOR2_VIEW
+0x0002808C CB_COLOR3_VIEW
+0x00028090 CB_COLOR4_VIEW
+0x00028094 CB_COLOR5_VIEW
+0x00028098 CB_COLOR6_VIEW
+0x0002809C CB_COLOR7_VIEW
0x00028808 CB_COLOR_CONTROL
0x0002842C CB_FOG_BLUE
0x00028428 CB_FOG_GREEN
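
The reg_srcs files changed above are tables of register offsets that the radeon command-stream checker accepts from userspace; this revert drops entries such as the VGT_STRMOUT_* and CP_COHER_* groups from the safe lists. Conceptually the checker only needs a membership test over the offsets — a hypothetical sketch of such a lookup, not the kernel's actual generated tables:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* a few offsets from the r600 list above, kept sorted; real lists
 * are generated from the reg_srcs files at build time */
static const uint32_t safe_regs[] = {
    0x00028350, /* SX_MISC */
    0x00028410, /* SX_ALPHA_TEST_CONTROL */
    0x00028438, /* SX_ALPHA_REF */
};

static int cmp_u32(const void *a, const void *b)
{
    uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;
    return (x > y) - (x < y);
}

static bool reg_is_safe(uint32_t offset)
{
    return bsearch(&offset, safe_regs,
                   sizeof(safe_regs) / sizeof(safe_regs[0]),
                   sizeof(safe_regs[0]), cmp_u32) != NULL;
}

int main(void)
{
    printf("0x28350 safe? %d\n", reg_is_safe(0x00028350)); /* 1 */
    printf("0x28354 safe? %d\n", reg_is_safe(0x00028354)); /* 0: dropped above */
    return 0;
}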
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index aba2093..317eac1 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -77,7 +77,7 @@ int rs400_gart_init(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.ptr) {
+ if (rdev->gart.table.ram.ptr) {
WARN(1, "RS400 GART already initialized\n");
return 0;
}
@@ -185,9 +185,6 @@ int rs400_gart_enable(struct radeon_device *rdev)
/* Enable gart */
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
rs400_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(rdev->mc.gtt_size >> 20),
- (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -215,7 +212,6 @@ void rs400_gart_fini(struct radeon_device *rdev)
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
uint32_t entry;
- u32 *gtt = rdev->gart.ptr;
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
@@ -225,7 +221,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
((upper_32_bits(addr) & 0xff) << 4) |
RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
entry = cpu_to_le32(entry);
- gtt[i] = entry;
+ rdev->gart.table.ram.ptr[i] = entry;
return 0;
}
@@ -413,12 +409,6 @@ static int rs400_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -428,25 +418,16 @@ static int rs400_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
-
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
-
return 0;
}
int rs400_resume(struct radeon_device *rdev)
{
- int r;
-
 /* Make sure the GART is not working */
rs400_gart_disable(rdev);
/* Resume clock before doing reset */
@@ -465,18 +446,11 @@ int rs400_resume(struct radeon_device *rdev)
r300_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
-
- rdev->accel_working = true;
- r = rs400_startup(rdev);
- if (r) {
- rdev->accel_working = false;
- }
- return r;
+ return rs400_startup(rdev);
}
int rs400_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -555,14 +529,7 @@ int rs400_init(struct radeon_device *rdev)
if (r)
return r;
r300_set_reg_safe(rdev);
-
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rs400_startup(rdev);
if (r) {
 /* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d25cf86..2026c2d 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,25 +46,6 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
-void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
-{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
- int i;
-
- if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)
- break;
- udelay(1);
- }
- }
-}
-
void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
/* enable the pflip int */
@@ -194,7 +175,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
/* set pcie lanes */
if ((rdev->flags & RADEON_IS_PCIE) &&
!(rdev->flags & RADEON_IS_IGP) &&
- rdev->asic->pm.set_pcie_lanes &&
+ rdev->asic->set_pcie_lanes &&
(ps->pcie_lanes !=
rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
radeon_set_pcie_lanes(rdev,
@@ -341,6 +322,16 @@ void rs600_hpd_fini(struct radeon_device *rdev)
}
}
+void rs600_bm_disable(struct radeon_device *rdev)
+{
+ u16 tmp;
+
+ /* disable bus mastering */
+ pci_read_config_word(rdev->pdev, 0x4, &tmp);
+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+ mdelay(1);
+}
+
int rs600_asic_reset(struct radeon_device *rdev)
{
struct rv515_mc_save save;
@@ -364,8 +355,7 @@ int rs600_asic_reset(struct radeon_device *rdev)
WREG32(RADEON_CP_RB_CNTL, tmp);
pci_save_state(rdev->pdev);
/* disable bus mastering */
- pci_clear_master(rdev->pdev);
- mdelay(1);
+ rs600_bm_disable(rdev);
/* reset GA+VAP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
@@ -429,7 +419,7 @@ int rs600_gart_init(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.robj) {
+ if (rdev->gart.table.vram.robj) {
WARN(1, "RS600 GART already initialized\n");
return 0;
}
@@ -447,7 +437,7 @@ static int rs600_gart_enable(struct radeon_device *rdev)
u32 tmp;
int r, i;
- if (rdev->gart.robj == NULL) {
+ if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -500,9 +490,6 @@ static int rs600_gart_enable(struct radeon_device *rdev)
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
rs600_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(rdev->mc.gtt_size >> 20),
- (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -510,12 +497,20 @@ static int rs600_gart_enable(struct radeon_device *rdev)
void rs600_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
+ int r;
/* FIXME: disable out of gart access */
WREG32_MC(R_000100_MC_PT0_CNTL, 0);
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
- radeon_gart_table_vram_unpin(rdev);
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (r == 0) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
}
void rs600_gart_fini(struct radeon_device *rdev)
@@ -533,7 +528,7 @@ void rs600_gart_fini(struct radeon_device *rdev)
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
- void __iomem *ptr = (void *)rdev->gart.ptr;
+ void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
@@ -541,7 +536,7 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
addr = addr & 0xFFFFFFFFFFFFF000ULL;
addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
- writeq(addr, ptr + (i * 8));
+ writeq(addr, ((void __iomem *)ptr) + (i * 8));
return 0;
}
@@ -559,7 +554,7 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
}
if (rdev->irq.gui_idle) {
@@ -652,7 +647,7 @@ int rs600_irq_process(struct radeon_device *rdev)
while (status || rdev->irq.stat_regs.r500.disp_int) {
/* SW interrupt */
if (G_000044_SW_INT(status)) {
- radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_process(rdev);
}
/* GUI idle */
if (G_000040_GUI_IDLE(status)) {
@@ -857,12 +852,6 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -872,21 +861,15 @@ static int rs600_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
-
- r = r600_audio_init(rdev);
+ r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = r600_audio_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "failed initializing audio\n");
return r;
}
@@ -895,8 +878,6 @@ static int rs600_startup(struct radeon_device *rdev)
int rs600_resume(struct radeon_device *rdev)
{
- int r;
-
 /* Make sure the GART is not working */
rs600_gart_disable(rdev);
/* Resume clock before doing reset */
@@ -913,18 +894,11 @@ int rs600_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
-
- rdev->accel_working = true;
- r = rs600_startup(rdev);
- if (r) {
- rdev->accel_working = false;
- }
- return r;
+ return rs600_startup(rdev);
}
int rs600_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -1005,14 +979,7 @@ int rs600_init(struct radeon_device *rdev)
if (r)
return r;
rs600_set_safe_registers(rdev);
-
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rs600_startup(rdev);
if (r) {
 /* Something went wrong with the accel init, stop accel */
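
rs600_bm_disable(), restored in the rs600.c hunk above, is an open-coded equivalent of pci_clear_master(): offset 0x4 is the PCI command register and the 0xFFFB mask clears bit 2, Bus Master Enable. A user-space illustration of the same bit manipulation, with a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define PCI_COMMAND        0x04   /* config-space offset used above */
#define PCI_COMMAND_MASTER 0x0004 /* bit 2: bus mastering */

int main(void)
{
    uint16_t cmd = 0x0007;        /* hypothetical I/O + memory + master */

    cmd &= ~PCI_COMMAND_MASTER;   /* same effect as the 0xFFFB mask */
    printf("command register at 0x%02x: 0x%04x\n", PCI_COMMAND, cmd);
    return 0;
}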
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index f2c3b9d..a9049ed 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -31,7 +31,7 @@
#include "atom.h"
#include "rs690d.h"
-int rs690_mc_wait_for_idle(struct radeon_device *rdev)
+static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
uint32_t tmp;
@@ -621,12 +621,6 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -636,21 +630,15 @@ static int rs690_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
-
- r = r600_audio_init(rdev);
+ r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = r600_audio_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "failed initializing audio\n");
return r;
}
@@ -659,8 +647,6 @@ static int rs690_startup(struct radeon_device *rdev)
int rs690_resume(struct radeon_device *rdev)
{
- int r;
-
 /* Make sure the GART is not working */
rs400_gart_disable(rdev);
/* Resume clock before doing reset */
@@ -677,18 +663,11 @@ int rs690_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
-
- rdev->accel_working = true;
- r = rs690_startup(rdev);
- if (r) {
- rdev->accel_working = false;
- }
- return r;
+ return rs690_startup(rdev);
}
int rs690_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -770,14 +749,7 @@ int rs690_init(struct radeon_device *rdev)
if (r)
return r;
rs600_set_safe_registers(rdev);
-
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rs690_startup(rdev);
if (r) {
 /* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ea18e58..d5f45b4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -53,46 +53,46 @@ void rv515_debugfs(struct radeon_device *rdev)
}
}
-void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+void rv515_ring_start(struct radeon_device *rdev)
{
int r;
- r = radeon_ring_lock(rdev, ring, 64);
+ r = radeon_ring_lock(rdev, 64);
if (r) {
return;
}
- radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
+ radeon_ring_write(rdev,
ISYNC_ANY2D_IDLE3D |
ISYNC_ANY3D_IDLE2D |
ISYNC_WAIT_IDLEGUI |
ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
- radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
- radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
- radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
- radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
- radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
- radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
- radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
- radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
- radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
- radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
- radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
- radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+ radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
+ radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
+ radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
+ radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
+ radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
+ radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+ radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
+ radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
+ radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
+ radeon_ring_write(rdev,
((6 << MS_X0_SHIFT) |
(6 << MS_Y0_SHIFT) |
(6 << MS_X1_SHIFT) |
@@ -101,8 +101,8 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
(6 << MS_Y2_SHIFT) |
(6 << MSBD0_Y_SHIFT) |
(6 << MSBD0_X_SHIFT)));
- radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
- radeon_ring_write(ring,
+ radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
+ radeon_ring_write(rdev,
((6 << MS_X3_SHIFT) |
(6 << MS_Y3_SHIFT) |
(6 << MS_X4_SHIFT) |
@@ -110,15 +110,15 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
(6 << MS_X5_SHIFT) |
(6 << MS_Y5_SHIFT) |
(6 << MSBD1_SHIFT)));
- radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
- radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
- radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
- radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
- radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
- radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
- radeon_ring_write(ring, PACKET0(0x20C8, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
+ radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
+ radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
+ radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
+ radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
+ radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
+ radeon_ring_write(rdev, PACKET0(0x20C8, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_unlock_commit(rdev);
}
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
@@ -379,12 +379,6 @@ static int rv515_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -394,15 +388,9 @@ static int rv515_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
-
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = r100_ib_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
@@ -410,8 +398,6 @@ static int rv515_startup(struct radeon_device *rdev)
int rv515_resume(struct radeon_device *rdev)
{
- int r;
-
 /* Make sure the GART is not working */
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -429,13 +415,7 @@ int rv515_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
-
- rdev->accel_working = true;
- r = rv515_startup(rdev);
- if (r) {
- rdev->accel_working = false;
- }
- return r;
+ return rv515_startup(rdev);
}
int rv515_suspend(struct radeon_device *rdev)
@@ -531,14 +511,7 @@ int rv515_init(struct radeon_device *rdev)
if (r)
return r;
rv515_set_safe_registers(rdev);
-
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rv515_startup(rdev);
if (r) {
 /* Something went wrong with the accel init, stop accel */
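
The rv515_ring_start() hunk above drops the ring-object parameter, reverting to the single-CP API where the ring state lives on rdev. The PACKET0(reg, n) words it emits are CP type-0 packets; a rough sketch of that encoding follows (field layout assumed from the classic radeon type-0 format — type in bits 31:30, dword count in bits 29:16, register index in the low 16 bits — treat the details as an assumption, not a quote from the driver):

#include <stdint.h>
#include <stdio.h>

/* hypothetical type-0 CP packet encoder: type [31:30] = 0,
 * count [29:16] = n extra dwords, base index [15:0] = reg >> 2 */
static uint32_t packet0(uint32_t reg, uint32_t n)
{
    return ((n & 0x3FFF) << 16) | ((reg >> 2) & 0xFFFF);
}

int main(void)
{
    /* e.g. a single write to the register at offset 0x20C8 */
    printf("PACKET0(0x20C8, 0) = 0x%08x\n", packet0(0x20C8, 0));
    return 0;
}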
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c73ea54..51d20aa 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -129,7 +129,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
u32 tmp;
int r, i;
- if (rdev->gart.robj == NULL) {
+ if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -168,9 +168,6 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
r600_pcie_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(rdev->mc.gtt_size >> 20),
- (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
@@ -178,7 +175,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
+ int i, r;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -198,7 +195,14 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
- radeon_gart_table_vram_unpin(rdev);
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
}
void rv770_pcie_gart_fini(struct radeon_device *rdev)
@@ -282,7 +286,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
- WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
@@ -359,7 +363,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
void r700_cp_fini(struct radeon_device *rdev)
{
r700_cp_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_fini(rdev);
}
/*
@@ -732,7 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
(cc_rb_backend_disable >> 16));
rdev->config.rv770.tile_config = gb_tiling_config;
- rdev->config.rv770.backend_map = backend_map;
gb_tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, gb_tiling_config);
@@ -959,6 +962,54 @@ static void rv770_gpu_init(struct radeon_device *rdev)
}
+static int rv770_vram_scratch_init(struct radeon_device *rdev)
+{
+ int r;
+ u64 gpu_addr;
+
+ if (rdev->vram_scratch.robj == NULL) {
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
+ PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->vram_scratch.robj);
+ if (r) {
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->vram_scratch.robj,
+ RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->vram_scratch.robj,
+ (void **)&rdev->vram_scratch.ptr);
+ if (r)
+ radeon_bo_unpin(rdev->vram_scratch.robj);
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+ return r;
+}
+
+static void rv770_vram_scratch_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->vram_scratch.robj == NULL) {
+ return;
+ }
+ r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->vram_scratch.robj);
+ radeon_bo_unpin(rdev->vram_scratch.robj);
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+ }
+ radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_bf, size_af;
@@ -1045,7 +1096,6 @@ int rv770_mc_init(struct radeon_device *rdev)
static int rv770_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -1059,10 +1109,6 @@ static int rv770_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
@@ -1071,12 +1117,14 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
}
-
+ r = rv770_vram_scratch_init(rdev);
+ if (r)
+ return r;
rv770_gpu_init(rdev);
r = r600_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
+ rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
@@ -1085,12 +1133,6 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1100,9 +1142,7 @@ static int rv770_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
- r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
r = rv770_cp_load_microcode(rdev);
@@ -1112,17 +1152,6 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "IB test failed (%d).\n", r);
- rdev->accel_working = false;
- return r;
- }
-
return 0;
}
@@ -1137,11 +1166,15 @@ int rv770_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
DRM_ERROR("r600 startup failed on resume\n");
- rdev->accel_working = false;
+ return r;
+ }
+
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
@@ -1157,16 +1190,23 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
+ int r;
+
r600_audio_fini(rdev);
- radeon_ib_pool_suspend(rdev);
- r600_blit_suspend(rdev);
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->cp.ready = false;
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
-
+ /* unpin shaders bo */
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+ }
return 0;
}
@@ -1234,8 +1274,8 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1244,24 +1284,30 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ } else {
+ r = r600_ib_test(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ }
+ }
r = r600_audio_init(rdev);
if (r) {
@@ -1278,12 +1324,11 @@ void rv770_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
- r600_vram_scratch_fini(rdev);
+ rv770_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
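
rv770_vram_scratch_init(), re-added above, follows the usual radeon BO ladder — create, reserve, pin, kmap — and unwinds in reverse on failure: a kmap failure unpins before the unconditional unreserve, while fini kunmaps before unpinning. A small stand-alone sketch of that unwind discipline with stubbed resources (the step names are stand-ins, not real API calls):

#include <stdio.h>

static int step(const char *name, int fail)
{
    printf("%s: %s\n", name, fail ? "failed" : "ok");
    return fail ? -1 : 0;
}

static int scratch_init(int fail_kmap)
{
    if (step("reserve", 0))
        return -1;
    if (step("pin", 0)) {
        step("unreserve", 0);
        return -1;
    }
    if (step("kmap", fail_kmap))
        step("unpin", 0);       /* undo the pin before dropping the lock */
    step("unreserve", 0);       /* unconditional, as in the hunk above */
    return fail_kmap ? -1 : 0;  /* success keeps the pin and the mapping */
}

int main(void)
{
    scratch_init(1);
    return 0;
}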
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 89afe0b..6464490 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -23,8 +23,6 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/module.h>
-
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"
@@ -35,17 +33,6 @@ static struct pci_device_id pciidlist[] = {
savage_PCI_IDS
};
-static const struct file_operations savage_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
-};
-
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
@@ -57,7 +44,17 @@ static struct drm_driver driver = {
.reclaim_buffers = savage_reclaim_buffers,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
- .fops = &savage_driver_fops,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+ },
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
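
The savage_drv.c hunk above trades a file-scope static const struct file_operations, referenced by pointer, for an operations table embedded directly in struct drm_driver — the 3.0-era spelling. A tiny stand-alone illustration of the two spellings with designated initializers (the miniature types are invented for the example):

#include <stdio.h>

struct fops { int (*open)(void); };
struct driver_a { const struct fops *fops; };   /* newer: by pointer */
struct driver_b { struct fops fops; };          /* older: embedded   */

static int my_open(void) { return 0; }

static const struct fops shared_fops = { .open = my_open };
static struct driver_a a = { .fops = &shared_fops };
static struct driver_b b = { .fops = { .open = my_open } };

int main(void)
{
    printf("a: %d, b: %d\n", a.fops->open(), b.fops.open());
    return 0;
}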
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 031aaaf..8a3e315 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -1057,8 +1057,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
DRM_ERROR("indexed drawing command extends "
"beyond end of command buffer\n");
DMA_FLUSH();
- ret = -EINVAL;
- goto done;
+ return -EINVAL;
}
/* fall through */
case SAVAGE_CMD_DMA_PRIM:
@@ -1077,7 +1076,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
cmdbuf->vb_stride,
cmdbuf->nbox, cmdbuf->box_addr);
if (ret != 0)
- goto done;
+ return ret;
first_draw_cmd = NULL;
}
}
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 30d98d1..46d5be6 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -25,8 +25,6 @@
*
*/
-#include <linux/module.h>
-
#include "drmP.h"
#include "sis_drm.h"
#include "sis_drv.h"
@@ -40,8 +38,7 @@ static struct pci_device_id pciidlist[] = {
static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_sis_private_t *dev_priv;
-
- pci_set_master(dev->pdev);
+ int ret;
dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL);
if (dev_priv == NULL)
@@ -49,69 +46,43 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
- idr_init(&dev->object_name_idr);
+ ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
+ if (ret)
+ kfree(dev_priv);
- return 0;
+ return ret;
}
static int sis_driver_unload(struct drm_device *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
- idr_remove_all(&dev_priv->object_idr);
- idr_destroy(&dev_priv->object_idr);
-
+ drm_sman_takedown(&dev_priv->sman);
kfree(dev_priv);
return 0;
}
-static const struct file_operations sis_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
-};
-
-static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
-{
- struct sis_file_private *file_priv;
-
- DRM_DEBUG_DRIVER("\n");
- file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
- if (!file_priv)
- return -ENOMEM;
-
- file->driver_priv = file_priv;
-
- INIT_LIST_HEAD(&file_priv->obj_list);
-
- return 0;
-}
-
-void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
-{
- struct sis_file_private *file_priv = file->driver_priv;
-
- kfree(file_priv);
-}
-
static struct drm_driver driver = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
.load = sis_driver_load,
.unload = sis_driver_unload,
- .open = sis_driver_open,
- .postclose = sis_driver_postclose,
.dma_quiescent = sis_idle,
.reclaim_buffers = NULL,
.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
.ioctls = sis_ioctls,
- .fops = &sis_driver_fops,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+ },
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 573758b..ef940ba 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -44,25 +44,22 @@ enum sis_family {
SIS_CHIP_315 = 1,
};
-#include "drm_mm.h"
+#include "drm_sman.h"
#define SIS_BASE (dev_priv->mmio)
-#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg)
-#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val)
+#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg);
+#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val);
typedef struct drm_sis_private {
drm_local_map_t *mmio;
unsigned int idle_fault;
+ struct drm_sman sman;
unsigned int chipset;
int vram_initialized;
int agp_initialized;
unsigned long vram_offset;
unsigned long agp_offset;
- struct drm_mm vram_mm;
- struct drm_mm agp_mm;
- /** Mapping of userspace keys to mm objects */
- struct idr object_idr;
} drm_sis_private_t;
extern int sis_idle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index dd4a316..7fe2b63 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -41,18 +41,40 @@
#define AGP_TYPE 1
-struct sis_memblock {
- struct drm_mm_node mm_node;
- struct sis_memreq req;
- struct list_head owner_list;
-};
-
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
/* fb management via fb device */
#define SIS_MM_ALIGN_SHIFT 0
#define SIS_MM_ALIGN_MASK 0
+static void *sis_sman_mm_allocate(void *private, unsigned long size,
+ unsigned alignment)
+{
+ struct sis_memreq req;
+
+ req.size = size;
+ sis_malloc(&req);
+ if (req.size == 0)
+ return NULL;
+ else
+ return (void *)(unsigned long)~req.offset;
+}
+
+static void sis_sman_mm_free(void *private, void *ref)
+{
+ sis_free(~((unsigned long)ref));
+}
+
+static void sis_sman_mm_destroy(void *private)
+{
+ ;
+}
+
+static unsigned long sis_sman_mm_offset(void *private, void *ref)
+{
+ return ~((unsigned long)ref);
+}
+
#else /* CONFIG_FB_SIS[_MODULE] */
#define SIS_MM_ALIGN_SHIFT 4
@@ -64,11 +86,30 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_fb_t *fb = data;
+ int ret;
mutex_lock(&dev->struct_mutex);
- /* Unconditionally init the drm_mm, even though we don't use it when the
- * fb sis driver is available - make cleanup easier. */
- drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> SIS_MM_ALIGN_SHIFT);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+ {
+ struct drm_sman_mm sman_mm;
+ sman_mm.private = (void *)0xFFFFFFFF;
+ sman_mm.allocate = sis_sman_mm_allocate;
+ sman_mm.free = sis_sman_mm_free;
+ sman_mm.destroy = sis_sman_mm_destroy;
+ sman_mm.offset = sis_sman_mm_offset;
+ ret =
+ drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
+ }
+#else
+ ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
+ fb->size >> SIS_MM_ALIGN_SHIFT);
+#endif
+
+ if (ret) {
+ DRM_ERROR("VRAM memory manager initialisation error\n");
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
dev_priv->vram_initialized = 1;
dev_priv->vram_offset = fb->offset;
@@ -79,15 +120,13 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
return 0;
}
-static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
void *data, int pool)
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t *mem = data;
- int retval = 0, user_key;
- struct sis_memblock *item;
- struct sis_file_private *file_priv = file->driver_priv;
- unsigned long offset;
+ int retval = 0;
+ struct drm_memblock_item *item;
mutex_lock(&dev->struct_mutex);
@@ -99,68 +138,25 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
return -EINVAL;
}
- item = kzalloc(sizeof(*item), GFP_KERNEL);
- if (!item) {
- retval = -ENOMEM;
- goto fail_alloc;
- }
-
mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
- if (pool == AGP_TYPE) {
- retval = drm_mm_insert_node(&dev_priv->agp_mm,
- &item->mm_node,
- mem->size, 0);
- offset = item->mm_node.start;
- } else {
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
- item->req.size = mem->size;
- sis_malloc(&item->req);
- if (item->req.size == 0)
- retval = -ENOMEM;
- offset = item->req.offset;
-#else
- retval = drm_mm_insert_node(&dev_priv->vram_mm,
- &item->mm_node,
- mem->size, 0);
- offset = item->mm_node.start;
-#endif
- }
- if (retval)
- goto fail_alloc;
+ item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
+ (unsigned long)file_priv);
-again:
- if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
+ mutex_unlock(&dev->struct_mutex);
+ if (item) {
+ mem->offset = ((pool == 0) ?
+ dev_priv->vram_offset : dev_priv->agp_offset) +
+ (item->mm->
+ offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
+ mem->free = item->user_hash.key;
+ mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+ } else {
+ mem->offset = 0;
+ mem->size = 0;
+ mem->free = 0;
retval = -ENOMEM;
- goto fail_idr;
}
- retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
- if (retval == -EAGAIN)
- goto again;
- if (retval)
- goto fail_idr;
-
- list_add(&item->owner_list, &file_priv->obj_list);
- mutex_unlock(&dev->struct_mutex);
-
- mem->offset = ((pool == 0) ?
- dev_priv->vram_offset : dev_priv->agp_offset) +
- (offset << SIS_MM_ALIGN_SHIFT);
- mem->free = user_key;
- mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
-
- return 0;
-
-fail_idr:
- drm_mm_remove_node(&item->mm_node);
-fail_alloc:
- kfree(item);
- mutex_unlock(&dev->struct_mutex);
-
- mem->offset = 0;
- mem->size = 0;
- mem->free = 0;
-
DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
mem->offset);
@@ -171,28 +167,14 @@ static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *fil
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t *mem = data;
- struct sis_memblock *obj;
+ int ret;
mutex_lock(&dev->struct_mutex);
- obj = idr_find(&dev_priv->object_idr, mem->free);
- if (obj == NULL) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- idr_remove(&dev_priv->object_idr, mem->free);
- list_del(&obj->owner_list);
- if (drm_mm_node_allocated(&obj->mm_node))
- drm_mm_remove_node(&obj->mm_node);
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
- else
- sis_free(obj->req.offset);
-#endif
- kfree(obj);
+ ret = drm_sman_free_key(&dev_priv->sman, mem->free);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("free = 0x%lx\n", mem->free);
- return 0;
+ return ret;
}
static int sis_fb_alloc(struct drm_device *dev, void *data,
@@ -206,10 +188,18 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_agp_t *agp = data;
+ int ret;
dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
- drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> SIS_MM_ALIGN_SHIFT);
+ ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
+ agp->size >> SIS_MM_ALIGN_SHIFT);
+
+ if (ret) {
+ DRM_ERROR("AGP memory manager initialisation error\n");
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
dev_priv->agp_initialized = 1;
dev_priv->agp_offset = agp->offset;
@@ -303,26 +293,20 @@ void sis_lastclose(struct drm_device *dev)
return;
mutex_lock(&dev->struct_mutex);
- if (dev_priv->vram_initialized) {
- drm_mm_takedown(&dev_priv->vram_mm);
- dev_priv->vram_initialized = 0;
- }
- if (dev_priv->agp_initialized) {
- drm_mm_takedown(&dev_priv->agp_mm);
- dev_priv->agp_initialized = 0;
- }
+ drm_sman_cleanup(&dev_priv->sman);
+ dev_priv->vram_initialized = 0;
+ dev_priv->agp_initialized = 0;
dev_priv->mmio = NULL;
mutex_unlock(&dev->struct_mutex);
}
void sis_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file)
+ struct drm_file *file_priv)
{
- struct sis_file_private *file_priv = file->driver_priv;
- struct sis_memblock *entry, *next;
+ drm_sis_private_t *dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
- if (list_empty(&file_priv->obj_list)) {
+ if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
mutex_unlock(&dev->struct_mutex);
return;
}
@@ -330,18 +314,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
if (dev->driver->dma_quiescent)
dev->driver->dma_quiescent(dev);
-
- list_for_each_entry_safe(entry, next, &file_priv->obj_list,
- owner_list) {
- list_del(&entry->owner_list);
- if (drm_mm_node_allocated(&entry->mm_node))
- drm_mm_remove_node(&entry->mm_node);
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
- else
- sis_free(entry->req.offset);
-#endif
- kfree(entry);
- }
+ drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
mutex_unlock(&dev->struct_mutex);
return;
}
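Note: the SiS hunks above revert the allocator from per-device drm_mm ranges plus an idr handle table back to the old drm_sman simple memory manager. Below is a minimal sketch of the restored pattern, built only from the calls visible in the hunks (drm_sman_set_range, drm_sman_alloc, item->user_hash.key); it is not compilable standalone, since the types come from the old drm_sman.h, and locking and most error paths are trimmed.

    /* Sketch, not the driver code itself: the drm_sman allocation
     * pattern this revert restores. The owner key lets
     * drm_sman_owner_cleanup() reclaim everything a file handle
     * leaked on close. */
    static int pool_init(struct drm_sman *sman, unsigned int pool,
                         unsigned long size_in_blocks)
    {
            /* One call replaces a per-pool drm_mm_init(). */
            return drm_sman_set_range(sman, pool, 0, size_in_blocks);
    }

    static long pool_alloc(struct drm_sman *sman, unsigned int pool,
                           unsigned long blocks, struct drm_file *owner)
    {
            struct drm_memblock_item *item;

            item = drm_sman_alloc(sman, pool, blocks, 0,
                                  (unsigned long)owner);
            if (!item)
                    return -ENOMEM;
            /* user_hash.key is the handle userspace later passes back
             * to the free ioctl (drm_sman_free_key above). */
            return item->user_hash.key;
    }

The design trade-off is visible in the diff: drm_sman bundles the range allocator, the handle table, and per-owner cleanup into one object, so sis_reclaim_buffers_locked() shrinks to a single drm_sman_owner_cleanup() call.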
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 1613c78..8bf9881 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -30,8 +30,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include <linux/module.h>
-
#include "drmP.h"
#include "tdfx_drv.h"
@@ -41,21 +39,20 @@ static struct pci_device_id pciidlist[] = {
tdfx_PCI_IDS
};
-static const struct file_operations tdfx_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
-};
-
static struct drm_driver driver = {
.driver_features = DRIVER_USE_MTRR,
.reclaim_buffers = drm_core_reclaim_buffers,
- .fops = &tdfx_driver_fops,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+ },
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
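Note: the tdfx hunk above moves the file_operations table back inside struct drm_driver instead of referencing a separate static table through a .fops pointer. A cut-down sketch of the embedded form for comparison; kernel context assumed, and the remaining name/desc/date fields are as in the hunk:

    static struct drm_driver example_driver = {
            .driver_features = DRIVER_USE_MTRR,
            .reclaim_buffers = drm_core_reclaim_buffers,
            .fops = {
                    .owner          = THIS_MODULE,
                    .open           = drm_open,
                    .release        = drm_release,
                    .unlocked_ioctl = drm_ioctl,
            },
            /* name, desc, date, version fields follow as above */
    };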
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 794ff67..30ad133 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -7,8 +7,7 @@ config DRM_VMWGFX
select FB_CFB_IMAGEBLIT
select DRM_TTM
help
- Choose this option if you would like to run 3D acceleration
- in a VMware virtual machine.
- This is a KMS enabled DRM driver for the VMware SVGA2
- virtual hardware.
- The compiled module will be called "vmwgfx.ko".
+ KMS enabled DRM driver for SVGA2 virtual hardware.
+
+	  If unsure, say N. The compiled module will be
+	  called vmwgfx.ko.

diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 586869c..c9281a1 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,7 +4,6 @@ ccflags-y := -Iinclude/drm
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
- vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
- vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
+ vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d0e085e..77cb453 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -57,8 +57,7 @@ typedef enum {
SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
- SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
- SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
+ SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS65_B1,
} SVGA3dHardwareVersion;
/*
@@ -68,8 +67,7 @@ typedef enum {
typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
#define SVGA3D_NUM_CLIPPLANES 6
#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
-#define SVGA3D_MAX_CONTEXT_IDS 256
-#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
+
/*
* Surface formats.
@@ -81,91 +79,76 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
*/
typedef enum SVGA3dSurfaceFormat {
- SVGA3D_FORMAT_INVALID = 0,
+ SVGA3D_FORMAT_INVALID = 0,
- SVGA3D_X8R8G8B8 = 1,
- SVGA3D_A8R8G8B8 = 2,
+ SVGA3D_X8R8G8B8 = 1,
+ SVGA3D_A8R8G8B8 = 2,
- SVGA3D_R5G6B5 = 3,
- SVGA3D_X1R5G5B5 = 4,
- SVGA3D_A1R5G5B5 = 5,
- SVGA3D_A4R4G4B4 = 6,
+ SVGA3D_R5G6B5 = 3,
+ SVGA3D_X1R5G5B5 = 4,
+ SVGA3D_A1R5G5B5 = 5,
+ SVGA3D_A4R4G4B4 = 6,
- SVGA3D_Z_D32 = 7,
- SVGA3D_Z_D16 = 8,
- SVGA3D_Z_D24S8 = 9,
- SVGA3D_Z_D15S1 = 10,
+ SVGA3D_Z_D32 = 7,
+ SVGA3D_Z_D16 = 8,
+ SVGA3D_Z_D24S8 = 9,
+ SVGA3D_Z_D15S1 = 10,
- SVGA3D_LUMINANCE8 = 11,
- SVGA3D_LUMINANCE4_ALPHA4 = 12,
- SVGA3D_LUMINANCE16 = 13,
- SVGA3D_LUMINANCE8_ALPHA8 = 14,
+ SVGA3D_LUMINANCE8 = 11,
+ SVGA3D_LUMINANCE4_ALPHA4 = 12,
+ SVGA3D_LUMINANCE16 = 13,
+ SVGA3D_LUMINANCE8_ALPHA8 = 14,
- SVGA3D_DXT1 = 15,
- SVGA3D_DXT2 = 16,
- SVGA3D_DXT3 = 17,
- SVGA3D_DXT4 = 18,
- SVGA3D_DXT5 = 19,
+ SVGA3D_DXT1 = 15,
+ SVGA3D_DXT2 = 16,
+ SVGA3D_DXT3 = 17,
+ SVGA3D_DXT4 = 18,
+ SVGA3D_DXT5 = 19,
- SVGA3D_BUMPU8V8 = 20,
- SVGA3D_BUMPL6V5U5 = 21,
- SVGA3D_BUMPX8L8V8U8 = 22,
- SVGA3D_BUMPL8V8U8 = 23,
+ SVGA3D_BUMPU8V8 = 20,
+ SVGA3D_BUMPL6V5U5 = 21,
+ SVGA3D_BUMPX8L8V8U8 = 22,
+ SVGA3D_BUMPL8V8U8 = 23,
- SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
- SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
+ SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
+ SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
- SVGA3D_A2R10G10B10 = 26,
+ SVGA3D_A2R10G10B10 = 26,
/* signed formats */
- SVGA3D_V8U8 = 27,
- SVGA3D_Q8W8V8U8 = 28,
- SVGA3D_CxV8U8 = 29,
+ SVGA3D_V8U8 = 27,
+ SVGA3D_Q8W8V8U8 = 28,
+ SVGA3D_CxV8U8 = 29,
/* mixed formats */
- SVGA3D_X8L8V8U8 = 30,
- SVGA3D_A2W10V10U10 = 31,
+ SVGA3D_X8L8V8U8 = 30,
+ SVGA3D_A2W10V10U10 = 31,
- SVGA3D_ALPHA8 = 32,
+ SVGA3D_ALPHA8 = 32,
/* Single- and dual-component floating point formats */
- SVGA3D_R_S10E5 = 33,
- SVGA3D_R_S23E8 = 34,
- SVGA3D_RG_S10E5 = 35,
- SVGA3D_RG_S23E8 = 36,
+ SVGA3D_R_S10E5 = 33,
+ SVGA3D_R_S23E8 = 34,
+ SVGA3D_RG_S10E5 = 35,
+ SVGA3D_RG_S23E8 = 36,
/*
* Any surface can be used as a buffer object, but SVGA3D_BUFFER is
* the most efficient format to use when creating new surfaces
* expressly for index or vertex data.
*/
+ SVGA3D_BUFFER = 37,
- SVGA3D_BUFFER = 37,
-
- SVGA3D_Z_D24X8 = 38,
+ SVGA3D_Z_D24X8 = 38,
- SVGA3D_V16U16 = 39,
+ SVGA3D_V16U16 = 39,
- SVGA3D_G16R16 = 40,
- SVGA3D_A16B16G16R16 = 41,
+ SVGA3D_G16R16 = 40,
+ SVGA3D_A16B16G16R16 = 41,
/* Packed Video formats */
- SVGA3D_UYVY = 42,
- SVGA3D_YUY2 = 43,
-
- /* Planar video formats */
- SVGA3D_NV12 = 44,
-
- /* Video format with alpha */
- SVGA3D_AYUV = 45,
-
- SVGA3D_BC4_UNORM = 108,
- SVGA3D_BC5_UNORM = 111,
-
- /* Advanced D3D9 depth formats. */
- SVGA3D_Z_DF16 = 118,
- SVGA3D_Z_DF24 = 119,
- SVGA3D_Z_D24S8_INT = 120,
+ SVGA3D_UYVY = 42,
+ SVGA3D_YUY2 = 43,
SVGA3D_FORMAT_MAX
} SVGA3dSurfaceFormat;
@@ -431,20 +414,10 @@ typedef enum {
SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
- SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
- SVGA3D_RS_LINEAA = 98, /* SVGA3dBool */
- SVGA3D_RS_LINEWIDTH = 99, /* float */
SVGA3D_RS_MAX
} SVGA3dRenderStateName;
typedef enum {
- SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
- SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
- SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
- SVGA3D_TRANSPARENCYANTIALIAS_MAX
-} SVGA3dTransparencyAntialiasType;
-
-typedef enum {
SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
@@ -755,10 +728,10 @@ typedef enum {
SVGA3D_TEX_FILTER_NEAREST = 1,
SVGA3D_TEX_FILTER_LINEAR = 2,
SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
- SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
- SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
- SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
- SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
+ SVGA3D_TEX_FILTER_FLATCUBIC = 4, // Deprecated, not implemented
+ SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, // Deprecated, not implemented
+ SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, // Not currently implemented
+ SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, // Not currently implemented
SVGA3D_TEX_FILTER_MAX
} SVGA3dTextureFilter;
@@ -826,19 +799,19 @@ typedef enum {
typedef enum {
SVGA3D_DECLUSAGE_POSITION = 0,
- SVGA3D_DECLUSAGE_BLENDWEIGHT, /* 1 */
- SVGA3D_DECLUSAGE_BLENDINDICES, /* 2 */
- SVGA3D_DECLUSAGE_NORMAL, /* 3 */
- SVGA3D_DECLUSAGE_PSIZE, /* 4 */
- SVGA3D_DECLUSAGE_TEXCOORD, /* 5 */
- SVGA3D_DECLUSAGE_TANGENT, /* 6 */
- SVGA3D_DECLUSAGE_BINORMAL, /* 7 */
- SVGA3D_DECLUSAGE_TESSFACTOR, /* 8 */
- SVGA3D_DECLUSAGE_POSITIONT, /* 9 */
- SVGA3D_DECLUSAGE_COLOR, /* 10 */
- SVGA3D_DECLUSAGE_FOG, /* 11 */
- SVGA3D_DECLUSAGE_DEPTH, /* 12 */
- SVGA3D_DECLUSAGE_SAMPLE, /* 13 */
+ SVGA3D_DECLUSAGE_BLENDWEIGHT, // 1
+ SVGA3D_DECLUSAGE_BLENDINDICES, // 2
+ SVGA3D_DECLUSAGE_NORMAL, // 3
+ SVGA3D_DECLUSAGE_PSIZE, // 4
+ SVGA3D_DECLUSAGE_TEXCOORD, // 5
+ SVGA3D_DECLUSAGE_TANGENT, // 6
+ SVGA3D_DECLUSAGE_BINORMAL, // 7
+ SVGA3D_DECLUSAGE_TESSFACTOR, // 8
+ SVGA3D_DECLUSAGE_POSITIONT, // 9
+ SVGA3D_DECLUSAGE_COLOR, // 10
+ SVGA3D_DECLUSAGE_FOG, // 11
+ SVGA3D_DECLUSAGE_DEPTH, // 12
+ SVGA3D_DECLUSAGE_SAMPLE, // 13
SVGA3D_DECLUSAGE_MAX
} SVGA3dDeclUsage;
@@ -846,10 +819,10 @@ typedef enum {
SVGA3D_DECLMETHOD_DEFAULT = 0,
SVGA3D_DECLMETHOD_PARTIALU,
SVGA3D_DECLMETHOD_PARTIALV,
- SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
+ SVGA3D_DECLMETHOD_CROSSUV, // Normal
SVGA3D_DECLMETHOD_UV,
- SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
- SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
+ SVGA3D_DECLMETHOD_LOOKUP, // Lookup a displacement map
+ SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
} SVGA3dDeclMethod;
typedef enum {
@@ -957,6 +930,7 @@ typedef enum {
} SVGA3dCubeFace;
typedef enum {
+ SVGA3D_SHADERTYPE_COMPILED_DX8 = 0,
SVGA3D_SHADERTYPE_VS = 1,
SVGA3D_SHADERTYPE_PS = 2,
SVGA3D_SHADERTYPE_MAX
@@ -994,18 +968,12 @@ typedef enum {
} SVGA3dTransferType;
/*
- * The maximum number of vertex arrays we're guaranteed to support in
+ * The maximum number vertex arrays we're guaranteed to support in
* SVGA_3D_CMD_DRAWPRIMITIVES.
*/
#define SVGA3D_MAX_VERTEX_ARRAYS 32
/*
- * The maximum number of primitive ranges we're guaranteed to support
- * in SVGA_3D_CMD_DRAWPRIMITIVES.
- */
-#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
-
-/*
* Identifiers for commands in the command FIFO.
*
* IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
@@ -1022,7 +990,7 @@ typedef enum {
#define SVGA_3D_CMD_LEGACY_BASE 1000
#define SVGA_3D_CMD_BASE 1040
-#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0 /* Deprecated */
+#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0
#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
@@ -1040,7 +1008,7 @@ typedef enum {
#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
-#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 /* Deprecated */
+#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 // Deprecated
#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
@@ -1050,13 +1018,9 @@ typedef enum {
#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
-#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 /* Deprecated */
+#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 // Deprecated
#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
-#define SVGA_3D_CMD_SURFACE_DEFINE_V2 SVGA_3D_CMD_BASE + 30
-#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31
-#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40
-#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41
-#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42
+#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 30
#define SVGA_3D_CMD_FUTURE_MAX 2000
@@ -1067,9 +1031,9 @@ typedef enum {
typedef struct {
union {
struct {
- uint16 function; /* SVGA3dFogFunction */
- uint8 type; /* SVGA3dFogType */
- uint8 base; /* SVGA3dFogBase */
+ uint16 function; // SVGA3dFogFunction
+ uint8 type; // SVGA3dFogType
+ uint8 base; // SVGA3dFogBase
};
uint32 uintValue;
};
@@ -1145,8 +1109,6 @@ typedef enum {
SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
- SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
- SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
} SVGA3dSurfaceFlags;
typedef
@@ -1159,12 +1121,6 @@ struct {
uint32 sid;
SVGA3dSurfaceFlags surfaceFlags;
SVGA3dSurfaceFormat format;
- /*
- * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
- * structures must have the same value of numMipLevels field.
- * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
- * numMipLevels set to 0.
- */
SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
/*
* Followed by an SVGA3dSize structure for each mip level in each face.
@@ -1179,31 +1135,6 @@ struct {
typedef
struct {
- uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
- SVGA3dSurfaceFormat format;
- /*
- * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
- * structures must have the same value of numMipLevels field.
- * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
- * numMipLevels set to 0.
- */
- SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
- uint32 multisampleCount;
- SVGA3dTextureFilter autogenFilter;
- /*
- * Followed by an SVGA3dSize structure for each mip level in each face.
- *
- * A note on surface sizes: Sizes are always specified in pixels,
- * even if the true surface size is not a multiple of the minimum
- * block size of the surface's format. For example, a 3x3x1 DXT1
- * compressed texture would actually be stored as a 4x4x1 image in
- * memory.
- */
-} SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
-
-typedef
-struct {
uint32 sid;
} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
@@ -1543,12 +1474,10 @@ struct {
* SVGA3dCmdDrawPrimitives structure. In order,
* they are:
*
- * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
- * SVGA3D_MAX_VERTEX_ARRAYS;
- * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
- * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
+ * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
+ * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
* 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
- * the frequency divisor for the corresponding vertex decl).
+	 *	the frequency divisor for the corresponding vertex decl)
*/
} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */
@@ -1742,12 +1671,6 @@ struct {
/* Clipping: zero or more SVGASignedRects follow */
} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
-typedef
-struct {
- uint32 sid;
- SVGA3dTextureFilter filter;
-} SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
-
/*
* Capability query index.
@@ -1851,32 +1774,6 @@ typedef enum {
SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
- SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
- SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
- SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
- SVGA3D_DEVCAP_SUPERSAMPLE = 73,
- SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
- SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
- SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
-
- /*
- * This is the maximum number of SVGA context IDs that the guest
- * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
- */
- SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
-
- /*
- * This is the maximum number of SVGA surface IDs that the guest
- * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
- */
- SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
-
- SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
- SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
-
- SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM = 82,
- SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
/*
* Don't add new caps into the previous section; the values in this
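Note: the version hunk above drops SVGA3D_HWVERSION_WS8_B1 and pins SVGA3D_HWVERSION_CURRENT back to WS65_B1, i.e. version 2.0. Assuming the usual upstream definition of SVGA3D_MAKE_HWVERSION (major in the high 16 bits, minor in the low bits; the macro itself is not shown in this hunk), the resulting value can be checked in plain C:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed macro layout; copied from the upstream svga3d_reg.h
     * definition, which is outside the hunks shown here. */
    #define SVGA3D_MAKE_HWVERSION(major, minor) \
            (((uint32_t)(major) << 16) | ((uint32_t)(minor) & 0xFF))

    int main(void)
    {
            /* After the revert: WS65_B1, i.e. 0x00020000. */
            printf("HWVERSION_CURRENT = 0x%08x\n",
                   (unsigned)SVGA3D_MAKE_HWVERSION(2, 0));
            return 0;
    }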
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
index 8e8d968..7b85e9b 100644
--- a/drivers/gpu/drm/vmwgfx/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/svga_escape.h
@@ -75,7 +75,7 @@
*/
#define SVGA_ESCAPE_VMWARE_HINT 0x00030000
-#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 /* Deprecated */
+#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 // Deprecated
typedef
struct {
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
index f38416f..f753d73 100644
--- a/drivers/gpu/drm/vmwgfx/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/svga_overlay.h
@@ -38,9 +38,9 @@
* Video formats we support
*/
-#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
-#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
-#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
+#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
+#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
+#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
typedef enum {
SVGA_OVERLAY_FORMAT_INVALID = 0,
@@ -68,7 +68,7 @@ struct SVGAEscapeVideoSetRegs {
uint32 streamId;
} header;
- /* May include zero or more items. */
+ // May include zero or more items.
struct {
uint32 registerId;
uint32 value;
@@ -134,12 +134,12 @@ struct {
*/
static inline bool
-VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */
- uint32 *width, /* IN / OUT */
- uint32 *height, /* IN / OUT */
- uint32 *size, /* OUT */
- uint32 *pitches, /* OUT (optional) */
- uint32 *offsets) /* OUT (optional) */
+VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN
+ uint32 *width, // IN / OUT
+ uint32 *height, // IN / OUT
+ uint32 *size, // OUT
+ uint32 *pitches, // OUT (optional)
+ uint32 *offsets) // OUT (optional)
{
int tmp;
@@ -198,4 +198,4 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */
return true;
}
-#endif /* _SVGA_OVERLAY_H_ */
+#endif // _SVGA_OVERLAY_H_
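Note: the VMWARE_FOURCC_* constants above encode four ASCII characters little-endian, with the first character in the least significant byte. A small self-contained check; fourcc() is a hypothetical helper, not part of the header:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical helper mirroring the FOURCC byte layout above. */
    static uint32_t fourcc(char a, char b, char c, char d)
    {
            return (uint32_t)(unsigned char)a |
                   ((uint32_t)(unsigned char)b << 8) |
                   ((uint32_t)(unsigned char)c << 16) |
                   ((uint32_t)(unsigned char)d << 24);
    }

    int main(void)
    {
            /* Prints 0x32315659, matching VMWARE_FOURCC_YV12 above. */
            printf("YV12 = 0x%08x\n",
                   (unsigned)fourcc('Y', 'V', '1', '2'));
            return 0;
    }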
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 01f63cb..1b96c2e 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -39,15 +39,6 @@
#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
/*
- * SVGA_REG_ENABLE bit definitions.
- */
-#define SVGA_REG_ENABLE_DISABLE 0
-#define SVGA_REG_ENABLE_ENABLE 1
-#define SVGA_REG_ENABLE_HIDE 2
-#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\
- SVGA_REG_ENABLE_HIDE)
-
-/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
* cursor bypass mode. This is still supported, but no new guest
* drivers should use it.
@@ -167,9 +158,7 @@ enum {
SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
- SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
- SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
- SVGA_REG_TOP = 48, /* Must be 1 more than the last register */
+ SVGA_REG_TOP = 46, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -276,7 +265,7 @@ enum {
* possible.
*/
#define SVGA_GMR_NULL ((uint32) -1)
-#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) /* Guest Framebuffer (GFB) */
+#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) // Guest Framebuffer (GFB)
typedef
struct SVGAGuestMemDescriptor {
@@ -317,35 +306,13 @@ struct SVGAGMRImageFormat {
struct {
uint32 bitsPerPixel : 8;
uint32 colorDepth : 8;
- uint32 reserved : 16; /* Must be zero */
+ uint32 reserved : 16; // Must be zero
};
uint32 value;
};
} SVGAGMRImageFormat;
-typedef
-struct SVGAGuestImage {
- SVGAGuestPtr ptr;
-
- /*
- * A note on interpretation of pitch: This value of pitch is the
- * number of bytes between vertically adjacent image
- * blocks. Normally this is the number of bytes between the first
- * pixel of two adjacent scanlines. With compressed textures,
- * however, this may represent the number of bytes between
- * compression blocks rather than between rows of pixels.
- *
- * XXX: Compressed textures currently must be tightly packed in guest memory.
- *
- * If the image is 1-dimensional, pitch is ignored.
- *
- * If 'pitch' is zero, the SVGA3D device calculates a pitch value
- * assuming each row of blocks is tightly packed.
- */
- uint32 pitch;
-} SVGAGuestImage;
-
/*
* SVGAColorBGRX --
*
@@ -361,7 +328,7 @@ struct SVGAColorBGRX {
uint32 b : 8;
uint32 g : 8;
uint32 r : 8;
- uint32 x : 8; /* Unused */
+ uint32 x : 8; // Unused
};
uint32 value;
@@ -403,34 +370,23 @@ struct SVGASignedPoint {
* Note the holes in the bitfield. Missing bits have been deprecated,
* and must not be reused. Those capabilities will never be reported
* by new versions of the SVGA device.
- *
- * SVGA_CAP_GMR2 --
- * Provides asynchronous commands to define and remap guest memory
- * regions. Adds device registers SVGA_REG_GMRS_MAX_PAGES and
- * SVGA_REG_MEMORY_SIZE.
- *
- * SVGA_CAP_SCREEN_OBJECT_2 --
- * Allow screen object support, and require backing stores from the
- * guest for each screen object.
*/
#define SVGA_CAP_NONE 0x00000000
#define SVGA_CAP_RECT_COPY 0x00000002
#define SVGA_CAP_CURSOR 0x00000020
-#define SVGA_CAP_CURSOR_BYPASS 0x00000040 /* Legacy (Use Cursor Bypass 3 instead) */
-#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 /* Legacy (Use Cursor Bypass 3 instead) */
+#define SVGA_CAP_CURSOR_BYPASS 0x00000040 // Legacy (Use Cursor Bypass 3 instead)
+#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 // Legacy (Use Cursor Bypass 3 instead)
#define SVGA_CAP_8BIT_EMULATION 0x00000100
#define SVGA_CAP_ALPHA_CURSOR 0x00000200
#define SVGA_CAP_3D 0x00004000
#define SVGA_CAP_EXTENDED_FIFO 0x00008000
-#define SVGA_CAP_MULTIMON 0x00010000 /* Legacy multi-monitor support */
+#define SVGA_CAP_MULTIMON 0x00010000 // Legacy multi-monitor support
#define SVGA_CAP_PITCHLOCK 0x00020000
#define SVGA_CAP_IRQMASK 0x00040000
-#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 /* Legacy multi-monitor support */
+#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 // Legacy multi-monitor support
#define SVGA_CAP_GMR 0x00100000
#define SVGA_CAP_TRACES 0x00200000
-#define SVGA_CAP_GMR2 0x00400000
-#define SVGA_CAP_SCREEN_OBJECT_2 0x00800000
/*
@@ -475,7 +431,7 @@ enum {
SVGA_FIFO_CAPABILITIES = 4,
SVGA_FIFO_FLAGS,
- /* Valid with SVGA_FIFO_CAP_FENCE: */
+ // Valid with SVGA_FIFO_CAP_FENCE:
SVGA_FIFO_FENCE,
/*
@@ -488,47 +444,33 @@ enum {
* extended FIFO.
*/
- /* Valid if exists (i.e. if extended FIFO enabled): */
+ // Valid if exists (i.e. if extended FIFO enabled):
SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */
- /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
+ // Valid with SVGA_FIFO_CAP_PITCHLOCK:
SVGA_FIFO_PITCHLOCK,
- /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
+ // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */
SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */
SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */
SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */
SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
- /* Valid with SVGA_FIFO_CAP_RESERVE: */
+ // Valid with SVGA_FIFO_CAP_RESERVE:
SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */
/*
- * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
+ * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
*
* By default this is SVGA_ID_INVALID, to indicate that the cursor
* coordinates are specified relative to the virtual root. If this
* is set to a specific screen ID, cursor position is reinterpreted
- * as a signed offset relative to that screen's origin.
+ * as a signed offset relative to that screen's origin. This is the
+ * only way to place the cursor on a non-rooted screen.
*/
SVGA_FIFO_CURSOR_SCREEN_ID,
/*
- * Valid with SVGA_FIFO_CAP_DEAD
- *
- * An arbitrary value written by the host, drivers should not use it.
- */
- SVGA_FIFO_DEAD,
-
- /*
- * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
- *
- * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
- * on platforms that can enforce graphics resource limits.
- */
- SVGA_FIFO_3D_HWVERSION_REVISED,
-
- /*
* XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
* registers, but this must be done carefully and with judicious use of
* capability bits, since comparisons based on SVGA_FIFO_MIN aren't
@@ -566,7 +508,7 @@ enum {
* sets SVGA_FIFO_MIN high enough to leave room for them.
*/
- /* Valid if register exists: */
+ // Valid if register exists:
SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */
@@ -767,37 +709,6 @@ enum {
*
* - When a screen is resized, either using Screen Object commands or
* legacy multimon registers, its contents are preserved.
- *
- * SVGA_FIFO_CAP_GMR2 --
- *
- * Provides new commands to define and remap guest memory regions (GMR).
- *
- * New 2D commands:
- * DEFINE_GMR2, REMAP_GMR2.
- *
- * SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
- *
- * Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
- * This register may replace SVGA_FIFO_3D_HWVERSION on platforms
- * that enforce graphics resource limits. This allows the platform
- * to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
- * drivers that do not limit their resources.
- *
- * Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
- * are codependent (and thus we use a single capability bit).
- *
- * SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
- *
- * Modifies the DEFINE_SCREEN command to include a guest provided
- * backing store in GMR memory and the bytesPerLine for the backing
- * store. This capability requires the use of a backing store when
- * creating screen objects. However if SVGA_FIFO_CAP_SCREEN_OBJECT
- * is present then backing stores are optional.
- *
- * SVGA_FIFO_CAP_DEAD --
- *
- * Drivers should not use this cap bit. This cap bit can not be
- * reused since some hosts already expose it.
*/
#define SVGA_FIFO_CAP_NONE 0
@@ -809,10 +720,6 @@ enum {
#define SVGA_FIFO_CAP_ESCAPE (1<<5)
#define SVGA_FIFO_CAP_RESERVE (1<<6)
#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7)
-#define SVGA_FIFO_CAP_GMR2 (1<<8)
-#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED SVGA_FIFO_CAP_GMR2
-#define SVGA_FIFO_CAP_SCREEN_OBJECT_2 (1<<9)
-#define SVGA_FIFO_CAP_DEAD (1<<10)
/*
@@ -823,7 +730,7 @@ enum {
#define SVGA_FIFO_FLAG_NONE 0
#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0)
-#define SVGA_FIFO_FLAG_RESERVED (1<<31) /* Internal use only */
+#define SVGA_FIFO_FLAG_RESERVED (1<<31) // Internal use only
/*
* FIFO reservation sentinel value
@@ -856,22 +763,22 @@ enum {
SVGA_VIDEO_DATA_OFFSET,
SVGA_VIDEO_FORMAT,
SVGA_VIDEO_COLORKEY,
- SVGA_VIDEO_SIZE, /* Deprecated */
+ SVGA_VIDEO_SIZE, // Deprecated
SVGA_VIDEO_WIDTH,
SVGA_VIDEO_HEIGHT,
SVGA_VIDEO_SRC_X,
SVGA_VIDEO_SRC_Y,
SVGA_VIDEO_SRC_WIDTH,
SVGA_VIDEO_SRC_HEIGHT,
- SVGA_VIDEO_DST_X, /* Signed int32 */
- SVGA_VIDEO_DST_Y, /* Signed int32 */
+ SVGA_VIDEO_DST_X, // Signed int32
+ SVGA_VIDEO_DST_Y, // Signed int32
SVGA_VIDEO_DST_WIDTH,
SVGA_VIDEO_DST_HEIGHT,
SVGA_VIDEO_PITCH_1,
SVGA_VIDEO_PITCH_2,
SVGA_VIDEO_PITCH_3,
- SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
- SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */
+ SVGA_VIDEO_DATA_GMRID, // Optional, defaults to SVGA_GMR_FRAMEBUFFER
+ SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID)
SVGA_VIDEO_NUM_REGS
};
@@ -922,51 +829,15 @@ typedef struct SVGAOverlayUnit {
* compatibility. New flags can be added, and the struct may grow,
* but existing fields must retain their meaning.
*
- * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields of
- * a SVGAGuestPtr that is used to back the screen contents. This
- * memory must come from the GFB. The guest is not allowed to
- * access the memory and doing so will have undefined results. The
- * backing store is required to be page aligned and the size is
- * padded to the next page boundry. The number of pages is:
- * (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE
- *
- * The pitch in the backingStore is required to be at least large
- * enough to hold a 32bbp scanline. It is recommended that the
- * driver pad bytesPerLine for a potential performance win.
- *
- * The cloneCount field is treated as a hint from the guest that
- * the user wants this display to be cloned, countCount times. A
- * value of zero means no cloning should happen.
- */
-
-#define SVGA_SCREEN_MUST_BE_SET (1 << 0) /* Must be set or results undefined */
-#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
-#define SVGA_SCREEN_IS_PRIMARY (1 << 1) /* Guest considers this screen to be 'primary' */
-#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */
-
-/*
- * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is
- * deactivated the base layer is defined to lose all contents and
- * become black. When a screen is deactivated the backing store is
- * optional. When set backingPtr and bytesPerLine will be ignored.
*/
-#define SVGA_SCREEN_DEACTIVATE (1 << 3)
-/*
- * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When this flag is set
- * the screen contents will be outputted as all black to the user
- * though the base layer contents is preserved. The screen base layer
- * can still be read and written to like normal though the no visible
- * effect will be seen by the user. When the flag is changed the
- * screen will be blanked or redrawn to the current contents as needed
- * without any extra commands from the driver. This flag only has an
- * effect when the screen is not deactivated.
- */
-#define SVGA_SCREEN_BLANKING (1 << 4)
+#define SVGA_SCREEN_HAS_ROOT (1 << 0) // Screen is present in the virtual coord space
+#define SVGA_SCREEN_IS_PRIMARY (1 << 1) // Guest considers this screen to be 'primary'
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) // Guest is running a fullscreen app here
typedef
struct SVGAScreenObject {
- uint32 structSize; /* sizeof(SVGAScreenObject) */
+ uint32 structSize; // sizeof(SVGAScreenObject)
uint32 id;
uint32 flags;
struct {
@@ -976,14 +847,7 @@ struct SVGAScreenObject {
struct {
int32 x;
int32 y;
- } root;
-
- /*
- * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
- * with SVGA_FIFO_CAP_SCREEN_OBJECT.
- */
- SVGAGuestImage backingStore;
- uint32 cloneCount;
+ } root; // Only used if SVGA_SCREEN_HAS_ROOT is set.
} SVGAScreenObject;
@@ -1021,8 +885,6 @@ typedef enum {
SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38,
SVGA_CMD_ANNOTATION_FILL = 39,
SVGA_CMD_ANNOTATION_COPY = 40,
- SVGA_CMD_DEFINE_GMR2 = 41,
- SVGA_CMD_REMAP_GMR2 = 42,
SVGA_CMD_MAX
} SVGAFifoCmdId;
@@ -1058,7 +920,7 @@ typedef enum {
*/
typedef
-struct SVGAFifoCmdUpdate {
+struct {
uint32 x;
uint32 y;
uint32 width;
@@ -1077,7 +939,7 @@ struct SVGAFifoCmdUpdate {
*/
typedef
-struct SVGAFifoCmdRectCopy {
+struct {
uint32 srcX;
uint32 srcY;
uint32 destX;
@@ -1101,14 +963,14 @@ struct SVGAFifoCmdRectCopy {
*/
typedef
-struct SVGAFifoCmdDefineCursor {
- uint32 id; /* Reserved, must be zero. */
+struct {
+ uint32 id; // Reserved, must be zero.
uint32 hotspotX;
uint32 hotspotY;
uint32 width;
uint32 height;
- uint32 andMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */
- uint32 xorMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */
+ uint32 andMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
+ uint32 xorMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
/*
* Followed by scanline data for AND mask, then XOR mask.
* Each scanline is padded to a 32-bit boundary.
@@ -1130,8 +992,8 @@ struct SVGAFifoCmdDefineCursor {
*/
typedef
-struct SVGAFifoCmdDefineAlphaCursor {
- uint32 id; /* Reserved, must be zero. */
+struct {
+ uint32 id; // Reserved, must be zero.
uint32 hotspotX;
uint32 hotspotY;
uint32 width;
@@ -1153,7 +1015,7 @@ struct SVGAFifoCmdDefineAlphaCursor {
*/
typedef
-struct SVGAFifoCmdUpdateVerbose {
+struct {
uint32 x;
uint32 y;
uint32 width;
@@ -1178,13 +1040,13 @@ struct SVGAFifoCmdUpdateVerbose {
#define SVGA_ROP_COPY 0x03
typedef
-struct SVGAFifoCmdFrontRopFill {
- uint32 color; /* In the same format as the GFB */
+struct {
+ uint32 color; // In the same format as the GFB
uint32 x;
uint32 y;
uint32 width;
uint32 height;
- uint32 rop; /* Must be SVGA_ROP_COPY */
+ uint32 rop; // Must be SVGA_ROP_COPY
} SVGAFifoCmdFrontRopFill;
@@ -1221,7 +1083,7 @@ struct {
*/
typedef
-struct SVGAFifoCmdEscape {
+struct {
uint32 nsid;
uint32 size;
/* followed by 'size' bytes of data */
@@ -1251,12 +1113,12 @@ struct SVGAFifoCmdEscape {
* registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
struct {
- SVGAScreenObject screen; /* Variable-length according to version */
+ SVGAScreenObject screen; // Variable-length according to version
} SVGAFifoCmdDefineScreen;
@@ -1267,7 +1129,7 @@ struct {
* re-use.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
@@ -1320,7 +1182,7 @@ struct {
* GMRFB.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
@@ -1357,7 +1219,7 @@ struct {
* SVGA_CMD_ANNOTATION_* commands for details.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
@@ -1405,7 +1267,7 @@ struct {
* the time any subsequent FENCE commands are reached.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
@@ -1440,7 +1302,7 @@ struct {
* user's display is being remoted over a network connection.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
@@ -1472,7 +1334,7 @@ struct {
* undefined.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
*/
typedef
@@ -1481,72 +1343,4 @@ struct {
uint32 srcScreenId;
} SVGAFifoCmdAnnotationCopy;
-
-/*
- * SVGA_CMD_DEFINE_GMR2 --
- *
- * Define guest memory region v2. See the description of GMRs above.
- *
- * Availability:
- * SVGA_CAP_GMR2
- */
-
-typedef
-struct {
- uint32 gmrId;
- uint32 numPages;
-} SVGAFifoCmdDefineGMR2;
-
-
-/*
- * SVGA_CMD_REMAP_GMR2 --
- *
- * Remap guest memory region v2. See the description of GMRs above.
- *
- * This command allows guest to modify a portion of an existing GMR by
- * invalidating it or reassigning it to different guest physical pages.
- * The pages are identified by physical page number (PPN). The pages
- * are assumed to be pinned and valid for DMA operations.
- *
- * Description of command flags:
- *
- * SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR.
- * The PPN list must not overlap with the remap region (this can be
- * handled trivially by referencing a separate GMR). If flag is
- * disabled, PPN list is appended to SVGARemapGMR command.
- *
- * SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise
- * it is in PPN32 format.
- *
- * SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry.
- * A single PPN can be used to invalidate a portion of a GMR or
- * map it to to a single guest scratch page.
- *
- * Availability:
- * SVGA_CAP_GMR2
- */
-
-typedef enum {
- SVGA_REMAP_GMR2_PPN32 = 0,
- SVGA_REMAP_GMR2_VIA_GMR = (1 << 0),
- SVGA_REMAP_GMR2_PPN64 = (1 << 1),
- SVGA_REMAP_GMR2_SINGLE_PPN = (1 << 2),
-} SVGARemapGMR2Flags;
-
-typedef
-struct {
- uint32 gmrId;
- SVGARemapGMR2Flags flags;
- uint32 offsetPages; /* offset in pages to begin remap */
- uint32 numPages; /* number of pages to remap */
- /*
- * Followed by additional data depending on SVGARemapGMR2Flags.
- *
- * If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows.
- * Otherwise an array of page descriptors in PPN32 or PPN64 format
- * (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag
- * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
- */
-} SVGAFifoCmdRemapGMR2;
-
#endif
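Note: with SVGA_CAP_GMR2 and SVGA_CAP_SCREEN_OBJECT_2 removed by the hunks above, drivers probe the remaining capability bits the same way vmw_print_capabilities() does further down in this commit. A standalone sketch using two bit values copied from the svga_reg.h hunk:

    #include <stdio.h>
    #include <stdint.h>

    /* Values copied from the svga_reg.h hunk above. */
    #define SVGA_CAP_GMR    0x00100000
    #define SVGA_CAP_TRACES 0x00200000

    static void print_caps(uint32_t caps)
    {
            /* Mirrors the style of vmw_print_capabilities() below. */
            if (caps & SVGA_CAP_GMR)
                    printf(" GMR.\n");
            if (caps & SVGA_CAP_TRACES)
                    printf(" Traces.\n");
    }

    int main(void)
    {
            print_caps(SVGA_CAP_GMR | SVGA_CAP_TRACES);
            return 0;
    }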
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 1e2c0fb..87e43e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -28,7 +28,6 @@
#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
-#include "ttm/ttm_page_alloc.h"
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED;
@@ -43,10 +42,6 @@ static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED;
-static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
- TTM_PL_FLAG_CACHED |
- TTM_PL_FLAG_NO_EVICT;
-
struct ttm_placement vmw_vram_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -61,11 +56,6 @@ static uint32_t vram_gmr_placement_flags[] = {
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};
-static uint32_t gmr_vram_placement_flags[] = {
- VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
- TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
-};
-
struct ttm_placement vmw_vram_gmr_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -75,20 +65,6 @@ struct ttm_placement vmw_vram_gmr_placement = {
.busy_placement = &gmr_placement_flags
};
-static uint32_t vram_gmr_ne_placement_flags[] = {
- TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
- VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
-};
-
-struct ttm_placement vmw_vram_gmr_ne_placement = {
- .fpfn = 0,
- .lpfn = 0,
- .num_placement = 2,
- .placement = vram_gmr_ne_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &gmr_ne_placement_flags
-};
-
struct ttm_placement vmw_vram_sys_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -116,87 +92,85 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags
};
-static uint32_t evictable_placement_flags[] = {
- TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
- TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
- VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+struct vmw_ttm_backend {
+ struct ttm_backend backend;
+ struct page **pages;
+ unsigned long num_pages;
+ struct vmw_private *dev_priv;
+ int gmr_id;
};
-struct ttm_placement vmw_evictable_placement = {
- .fpfn = 0,
- .lpfn = 0,
- .num_placement = 3,
- .placement = evictable_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags
-};
+static int vmw_ttm_populate(struct ttm_backend *backend,
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
+{
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
-struct ttm_placement vmw_srf_placement = {
- .fpfn = 0,
- .lpfn = 0,
- .num_placement = 1,
- .num_busy_placement = 2,
- .placement = &gmr_placement_flags,
- .busy_placement = gmr_vram_placement_flags
-};
+ vmw_be->pages = pages;
+ vmw_be->num_pages = num_pages;
-struct vmw_ttm_tt {
- struct ttm_tt ttm;
- struct vmw_private *dev_priv;
- int gmr_id;
-};
+ return 0;
+}
-static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
- struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
vmw_be->gmr_id = bo_mem->start;
- return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
- ttm->num_pages, vmw_be->gmr_id);
+ return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
+ vmw_be->num_pages, vmw_be->gmr_id);
}
-static int vmw_ttm_unbind(struct ttm_tt *ttm)
+static int vmw_ttm_unbind(struct ttm_backend *backend)
{
- struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
return 0;
}
-static void vmw_ttm_destroy(struct ttm_tt *ttm)
+static void vmw_ttm_clear(struct ttm_backend *backend)
+{
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
+
+ vmw_be->pages = NULL;
+ vmw_be->num_pages = 0;
+}
+
+static void vmw_ttm_destroy(struct ttm_backend *backend)
{
- struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
- ttm_tt_fini(ttm);
kfree(vmw_be);
}
static struct ttm_backend_func vmw_ttm_func = {
+ .populate = vmw_ttm_populate,
+ .clear = vmw_ttm_clear,
.bind = vmw_ttm_bind,
.unbind = vmw_ttm_unbind,
.destroy = vmw_ttm_destroy,
};
-struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
{
- struct vmw_ttm_tt *vmw_be;
+ struct vmw_ttm_backend *vmw_be;
vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
return NULL;
- vmw_be->ttm.func = &vmw_ttm_func;
+ vmw_be->backend.func = &vmw_ttm_func;
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
- if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
- kfree(vmw_be);
- return NULL;
- }
-
- return &vmw_be->ttm;
+ return &vmw_be->backend;
}
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -300,45 +274,43 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
static void *vmw_sync_obj_ref(void *sync_obj)
{
-
- return (void *)
- vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
+ return sync_obj;
}
static void vmw_sync_obj_unref(void **sync_obj)
{
- vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
+ *sync_obj = NULL;
}
static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
- vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
+ struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+ mutex_unlock(&dev_priv->hw_mutex);
return 0;
}
static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
- unsigned long flags = (unsigned long) sync_arg;
- return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
- (uint32_t) flags);
+ struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
+ uint32_t sequence = (unsigned long) sync_obj;
+ return vmw_fence_signaled(dev_priv, sequence);
}
static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
- unsigned long flags = (unsigned long) sync_arg;
+ struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
+ uint32_t sequence = (unsigned long) sync_obj;
- return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
- (uint32_t) flags,
- lazy, interruptible,
- VMW_FENCE_WAIT_TIMEOUT);
+ return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
}
struct ttm_bo_driver vmw_bo_driver = {
- .ttm_tt_create = &vmw_ttm_tt_create,
- .ttm_tt_populate = &ttm_pool_populate,
- .ttm_tt_unpopulate = &ttm_pool_unpopulate,
+ .create_ttm_backend_entry = vmw_ttm_backend_init,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.evict_flags = vmw_evict_flags,
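Note: the vmwgfx_buffer.c hunks above revert from the newer ttm_tt interface to the older ttm_backend callbacks, and swap fence-object sync objects for raw 32-bit sequence numbers. A caller-side sketch of the restored populate/bind lifecycle, using only the ttm_backend_func hooks listed in the hunk; kernel context assumed and error handling trimmed, so this is illustrative rather than compilable on its own:

    /* Sketch only: how a TTM caller drives the restored backend hooks
     * (populate -> bind, with clear on failure; unbind/destroy at
     * teardown). */
    static int example_bind(struct ttm_backend *be, struct page **pages,
                            unsigned long num_pages,
                            struct ttm_mem_reg *mem)
    {
            int ret;

            ret = be->func->populate(be, num_pages, pages, NULL, NULL);
            if (ret)
                    return ret;

            /* vmw_ttm_bind() above takes the GMR id from mem->start. */
            ret = be->func->bind(be, mem);
            if (ret)
                    be->func->clear(be); /* drop page refs on failure */
            return ret;
    }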
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
deleted file mode 100644
index 3fa884d..0000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ /dev/null
@@ -1,322 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "ttm/ttm_placement.h"
-
-#include "drmP.h"
-#include "vmwgfx_drv.h"
-
-
-/**
- * vmw_dmabuf_to_placement - Validate a buffer to placement.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @pin: Pin buffer if true.
- * @interruptible: Use interruptible wait.
- *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- struct ttm_placement *placement,
- bool interruptible)
-{
- struct vmw_master *vmaster = dev_priv->active_master;
- struct ttm_buffer_object *bo = &buf->base;
- int ret;
-
- ret = ttm_write_lock(&vmaster->lock, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
-
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
- if (unlikely(ret != 0))
- goto err;
-
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
-
- ttm_bo_unreserve(bo);
-
-err:
- ttm_write_unlock(&vmaster->lock);
- return ret;
-}
-
-/**
- * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
- *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @pin: Pin buffer if true.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible)
-{
- struct vmw_master *vmaster = dev_priv->active_master;
- struct ttm_buffer_object *bo = &buf->base;
- struct ttm_placement *placement;
- int ret;
-
- ret = ttm_write_lock(&vmaster->lock, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
-
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
- if (unlikely(ret != 0))
- goto err;
-
- /**
- * Put BO in VRAM if there is space, otherwise as a GMR.
- * If there is no space in VRAM and GMR ids are all used up,
- * start evicting GMRs to make room. If the DMA buffer can't be
- * used as a GMR, this will return -ENOMEM.
- */
-
- if (pin)
- placement = &vmw_vram_gmr_ne_placement;
- else
- placement = &vmw_vram_gmr_placement;
-
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
- if (likely(ret == 0) || ret == -ERESTARTSYS)
- goto err_unreserve;
-
-
- /**
- * If that failed, try VRAM again, this time evicting
- * previous contents.
- */
-
- if (pin)
- placement = &vmw_vram_ne_placement;
- else
- placement = &vmw_vram_placement;
-
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
-
-err_unreserve:
- ttm_bo_unreserve(bo);
-err:
- ttm_write_unlock(&vmaster->lock);
- return ret;
-}
-
-/**
- * vmw_dmabuf_to_vram - Move a buffer to vram.
- *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @pin: Pin buffer in vram if true.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible)
-{
- struct ttm_placement *placement;
-
- if (pin)
- placement = &vmw_vram_ne_placement;
- else
- placement = &vmw_vram_placement;
-
- return vmw_dmabuf_to_placement(dev_priv, buf,
- placement,
- interruptible);
-}
-
-/**
- * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
- *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @pin: Pin buffer in vram if true.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible)
-{
- struct vmw_master *vmaster = dev_priv->active_master;
- struct ttm_buffer_object *bo = &buf->base;
- struct ttm_placement placement;
- int ret = 0;
-
- if (pin)
- placement = vmw_vram_ne_placement;
- else
- placement = vmw_vram_placement;
- placement.lpfn = bo->num_pages;
-
- ret = ttm_write_lock(&vmaster->lock, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
-
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
- if (unlikely(ret != 0))
- goto err_unlock;
-
- /* Is this buffer already in vram but not at the start of it? */
- if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start < bo->num_pages &&
- bo->mem.start > 0)
- (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
- false, false);
-
- ret = ttm_bo_validate(bo, &placement, interruptible, false, false);
-
- /* For some reason we didn't up at the start of vram */
- WARN_ON(ret == 0 && bo->offset != 0);
-
- ttm_bo_unreserve(bo);
-err_unlock:
- ttm_write_unlock(&vmaster->lock);
-
- return ret;
-}
-
-
-/**
- * vmw_dmabuf_upin - Unpin the buffer given buffer, does not move the buffer.
- *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to unpin.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- /*
- * We could in theory early out if the buffer is
- * unpinned but we need to lock and reserve the buffer
- * anyways so we don't gain much by that.
- */
- return vmw_dmabuf_to_placement(dev_priv, buf,
- &vmw_evictable_placement,
- interruptible);
-}
-
-
-/**
- * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
- * of a buffer.
- *
- * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
- * @ptr: SVGAGuestPtr returning the result.
- */
-void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
- SVGAGuestPtr *ptr)
-{
- if (bo->mem.mem_type == TTM_PL_VRAM) {
- ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
- ptr->offset = bo->offset;
- } else {
- ptr->gmrId = bo->mem.start;
- ptr->offset = 0;
- }
-}
-
-
-/**
- * vmw_bo_pin - Pin or unpin a buffer object without moving it.
- *
- * @bo: The buffer object. Must be reserved, and present either in VRAM
- * or GMR memory.
- * @pin: Whether to pin or unpin.
- *
- */
-void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
-{
- uint32_t pl_flags;
- struct ttm_placement placement;
- uint32_t old_mem_type = bo->mem.mem_type;
- int ret;
-
- BUG_ON(!atomic_read(&bo->reserved));
- BUG_ON(old_mem_type != TTM_PL_VRAM &&
- old_mem_type != VMW_PL_FLAG_GMR);
-
- pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
- if (pin)
- pl_flags |= TTM_PL_FLAG_NO_EVICT;
-
- memset(&placement, 0, sizeof(placement));
- placement.num_placement = 1;
- placement.placement = &pl_flags;
-
- ret = ttm_bo_validate(bo, &placement, false, true, true);
-
- BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index eb78beb..55d4b29 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -24,7 +24,6 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-#include <linux/module.h>
#include "drmP.h"
#include "vmwgfx_drv.h"
@@ -38,10 +37,6 @@
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0
-#define VMW_MIN_INITIAL_WIDTH 800
-#define VMW_MIN_INITIAL_HEIGHT 600
-
-
/**
* Fully encoded drm commands. Might move to vmw_drm.h
*/
@@ -87,31 +82,17 @@
#define DRM_IOCTL_VMW_EXECBUF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
struct drm_vmw_execbuf_arg)
-#define DRM_IOCTL_VMW_GET_3D_CAP \
- DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
- struct drm_vmw_get_3d_cap_arg)
+#define DRM_IOCTL_VMW_FIFO_DEBUG \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \
+ struct drm_vmw_fifo_debug_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
struct drm_vmw_fence_wait_arg)
-#define DRM_IOCTL_VMW_FENCE_SIGNALED \
- DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
- struct drm_vmw_fence_signaled_arg)
-#define DRM_IOCTL_VMW_FENCE_UNREF \
- DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
- struct drm_vmw_fence_arg)
-#define DRM_IOCTL_VMW_FENCE_EVENT \
- DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
- struct drm_vmw_fence_event_arg)
-#define DRM_IOCTL_VMW_PRESENT \
- DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
- struct drm_vmw_present_arg)
-#define DRM_IOCTL_VMW_PRESENT_READBACK \
- DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
- struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
- DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
struct drm_vmw_update_layout_arg)
+
/**
* The core DRM version of this macro doesn't account for
* DRM_COMMAND_BASE.
@@ -154,28 +135,12 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
- vmw_fence_obj_signaled_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_FENCE_EVENT,
- vmw_fence_event_ioctl,
- DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
+ VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
+ DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
DRM_AUTH | DRM_UNLOCKED),
-
-	/* These allow direct access to the framebuffers; mark as master only */
- VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
- DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
- vmw_present_readback_ioctl,
- DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
- vmw_kms_update_layout_ioctl,
- DRM_MASTER | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
};
static struct pci_device_id vmw_pci_id_list[] = {
@@ -225,78 +190,8 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" GMR.\n");
if (capabilities & SVGA_CAP_TRACES)
DRM_INFO(" Traces.\n");
- if (capabilities & SVGA_CAP_GMR2)
- DRM_INFO(" GMR2.\n");
- if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
- DRM_INFO(" Screen Object 2.\n");
-}
-
-
-/**
- * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at
- * the start of a buffer object.
- *
- * @dev_priv: The device private structure.
- *
- * This function will idle the buffer using an uninterruptible wait, then
- * map the first page and initialize a pending occlusion query result
- * structure. Finally, it will unmap the buffer.
- *
- * TODO: Since we're only mapping a single page, we should optimize the map
- * to use kmap_atomic / iomap_atomic.
- */
-static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
-{
- struct ttm_bo_kmap_obj map;
- volatile SVGA3dQueryResult *result;
- bool dummy;
- int ret;
- struct ttm_bo_device *bdev = &dev_priv->bdev;
- struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
-
- ttm_bo_reserve(bo, false, false, false, 0);
- spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bdev->fence_lock);
- if (unlikely(ret != 0))
- (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
- 10*HZ);
-
- ret = ttm_bo_kmap(bo, 0, 1, &map);
- if (likely(ret == 0)) {
- result = ttm_kmap_obj_virtual(&map, &dummy);
- result->totalSize = sizeof(*result);
- result->state = SVGA3D_QUERYSTATE_PENDING;
- result->result32 = 0xff;
- ttm_bo_kunmap(&map);
- } else
- DRM_ERROR("Dummy query buffer map failed.\n");
- ttm_bo_unreserve(bo);
-}
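
The TODO above suggests kmap_atomic for the single-page map; a minimal sketch of that idea, assuming the one-argument kmap_atomic() API and a page in system memory (VRAM would need iomap_atomic instead); not part of this patch:

/* Sketch only; assumes system placement of the dummy query bo. */
struct page *page = bo->ttm->pages[0];
volatile SVGA3dQueryResult *result = kmap_atomic(page);

result->totalSize = sizeof(*result);
result->state = SVGA3D_QUERYSTATE_PENDING;
result->result32 = 0xff;
kunmap_atomic((void *)result);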
-
-
-/**
- * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
- *
- * @dev_priv: A device private structure.
- *
- * This function creates a small buffer object that holds the query
- * result for dummy queries emitted as query barriers.
- * No interruptible waits are done within this function.
- *
- * Returns an error if bo creation fails.
- */
-static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
-{
- return ttm_bo_create(&dev_priv->bdev,
- PAGE_SIZE,
- ttm_bo_type_device,
- &vmw_vram_sys_placement,
- 0, 0, false, NULL,
- &dev_priv->dummy_query_bo);
}
-
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
@@ -306,42 +201,16 @@ static int vmw_request_device(struct vmw_private *dev_priv)
DRM_ERROR("Unable to initialize FIFO.\n");
return ret;
}
- vmw_fence_fifo_up(dev_priv->fman);
- ret = vmw_dummy_query_bo_create(dev_priv);
- if (unlikely(ret != 0))
- goto out_no_query_bo;
- vmw_dummy_query_bo_prepare(dev_priv);
return 0;
-
-out_no_query_bo:
- vmw_fence_fifo_down(dev_priv->fman);
- vmw_fifo_release(dev_priv, &dev_priv->fifo);
- return ret;
}
static void vmw_release_device(struct vmw_private *dev_priv)
{
- /*
- * Previous destructions should've released
- * the pinned bo.
- */
-
- BUG_ON(dev_priv->pinned_bo != NULL);
-
- ttm_bo_unref(&dev_priv->dummy_query_bo);
- vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
-/**
- * Increase the 3d resource refcount.
- * If the count was previously zero, initialize the fifo, switching to svga
- * mode. Note that the master holds a ref as well, and may request an
- * explicit switch to svga mode if fb is not running, using @unhide_svga.
- */
-int vmw_3d_resource_inc(struct vmw_private *dev_priv,
- bool unhide_svga)
+int vmw_3d_resource_inc(struct vmw_private *dev_priv)
{
int ret = 0;
@@ -350,83 +219,25 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
ret = vmw_request_device(dev_priv);
if (unlikely(ret != 0))
--dev_priv->num_3d_resources;
- } else if (unhide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
- vmw_write(dev_priv, SVGA_REG_ENABLE,
- vmw_read(dev_priv, SVGA_REG_ENABLE) &
- ~SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
}
-
mutex_unlock(&dev_priv->release_mutex);
return ret;
}
-/**
- * Decrease the 3d resource refcount.
- * If the count reaches zero, disable the fifo, switching to vga mode.
- * Note that the master holds a refcount as well, and may request an
- * explicit switch to vga mode when it releases its refcount to account
- * for the situation of an X server vt switch to VGA with 3d resources
- * active.
- */
-void vmw_3d_resource_dec(struct vmw_private *dev_priv,
- bool hide_svga)
+
+void vmw_3d_resource_dec(struct vmw_private *dev_priv)
{
int32_t n3d;
mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv);
- else if (hide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
- vmw_write(dev_priv, SVGA_REG_ENABLE,
- vmw_read(dev_priv, SVGA_REG_ENABLE) |
- SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
- }
-
n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex);
BUG_ON(n3d < 0);
}
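
A hedged sketch of the intended pairing of the refcount helpers above (hypothetical caller, not from this patch):

int ret = vmw_3d_resource_inc(dev_priv);
if (unlikely(ret != 0))
	return ret;		/* fifo could not be brought up */
/* ... use fifo / 3D state ... */
vmw_3d_resource_dec(dev_priv);	/* last reference releases the fifo */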
-/**
- * Sets the initial_[width|height] fields on the given vmw_private.
- *
- * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
- * clamping the values against the fb_max_[width|height] fields and
- * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
- * If the values appear to be invalid, set them to
- * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
- */
-static void vmw_get_initial_size(struct vmw_private *dev_priv)
-{
- uint32_t width;
- uint32_t height;
-
- width = vmw_read(dev_priv, SVGA_REG_WIDTH);
- height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
-
- width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
- height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
-
- if (width > dev_priv->fb_max_width ||
- height > dev_priv->fb_max_height) {
-
- /*
- * This is a host error and shouldn't occur.
- */
-
- width = VMW_MIN_INITIAL_WIDTH;
- height = VMW_MIN_INITIAL_HEIGHT;
- }
-
- dev_priv->initial_width = width;
- dev_priv->initial_height = height;
-}
-
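
Note the policy implemented above: dimensions below the minimum are raised, but a dimension beyond fb_max_* resets both to the minimum instead of clamping. For contrast, a plain clamp (NOT what the removed code does) would read:

width  = clamp_t(uint32_t, width,  VMW_MIN_INITIAL_WIDTH,
		 dev_priv->fb_max_width);
height = clamp_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT,
		 dev_priv->fb_max_height);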
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct vmw_private *dev_priv;
@@ -440,11 +251,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
memset(dev_priv, 0, sizeof(*dev_priv));
- pci_set_master(dev->pdev);
-
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
- dev_priv->last_read_seqno = (uint32_t) -100;
+ dev_priv->last_read_sequence = (uint32_t) -100;
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
@@ -455,10 +264,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->init_mutex);
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
- dev_priv->fence_queue_waiters = 0;
+ atomic_set(&dev_priv->fence_queue_waiters, 0);
atomic_set(&dev_priv->fifo_queue_waiters, 0);
- INIT_LIST_HEAD(&dev_priv->surface_lru);
- dev_priv->used_memory_size = 0;
dev_priv->io_start = pci_resource_start(dev->pdev, 0);
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
@@ -479,13 +286,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
- dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
- dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
- dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
- dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
-
- vmw_get_initial_size(dev_priv);
-
if (dev_priv->capabilities & SVGA_CAP_GMR) {
dev_priv->max_gmr_descriptors =
vmw_read(dev_priv,
@@ -493,19 +293,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->max_gmr_ids =
vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
}
- if (dev_priv->capabilities & SVGA_CAP_GMR2) {
- dev_priv->max_gmr_pages =
- vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
- dev_priv->memory_size =
- vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
- dev_priv->memory_size -= dev_priv->vram_size;
- } else {
- /*
- * An arbitrary limit of 512MiB on surface
- * memory. But all HWV8 hardware supports GMR2.
- */
- dev_priv->memory_size = 512*1024*1024;
- }
+
+ dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
+ dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+ dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
+ dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
mutex_unlock(&dev_priv->hw_mutex);
@@ -517,12 +309,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("Max GMR descriptors is %u\n",
(unsigned)dev_priv->max_gmr_descriptors);
}
- if (dev_priv->capabilities & SVGA_CAP_GMR2) {
- DRM_INFO("Max number of GMR pages is %u\n",
- (unsigned)dev_priv->max_gmr_pages);
- DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
- (unsigned)dev_priv->memory_size / 1024);
- }
DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
dev_priv->vram_start, dev_priv->vram_size / 1024);
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
@@ -609,34 +395,22 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_device;
}
}
-
- dev_priv->fman = vmw_fence_manager_init(dev_priv);
- if (unlikely(dev_priv->fman == NULL))
- goto out_no_fman;
-
- /* Need to start the fifo to check if we can do screen objects */
- ret = vmw_3d_resource_inc(dev_priv, true);
- if (unlikely(ret != 0))
- goto out_no_fifo;
- vmw_kms_save_vga(dev_priv);
-
- /* Start kms and overlay systems, needs fifo. */
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
vmw_overlay_init(dev_priv);
-
- /* 3D Depends on Screen Objects being used. */
- DRM_INFO("Detected %sdevice 3D availability.\n",
- vmw_fifo_have_3d(dev_priv) ?
- "" : "no ");
-
- /* We might be done with the fifo now */
if (dev_priv->enable_fb) {
+ ret = vmw_3d_resource_inc(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_no_fifo;
+ vmw_kms_save_vga(dev_priv);
vmw_fb_init(dev_priv);
+ DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
+ "Detected device 3D availability.\n" :
+ "Detected no device 3D availability.\n");
} else {
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
+ DRM_INFO("Delayed 3D detection since we're not "
+ "running the device in SVGA mode yet.\n");
}
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
@@ -653,19 +427,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
return 0;
out_no_irq:
- if (dev_priv->enable_fb)
- vmw_fb_close(dev_priv);
- vmw_overlay_close(dev_priv);
- vmw_kms_close(dev_priv);
-out_no_kms:
- /* We still have a 3D resource reference held */
if (dev_priv->enable_fb) {
+ vmw_fb_close(dev_priv);
vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_3d_resource_dec(dev_priv);
}
out_no_fifo:
- vmw_fence_manager_takedown(dev_priv->fman);
-out_no_fman:
+ vmw_overlay_close(dev_priv);
+ vmw_kms_close(dev_priv);
+out_no_kms:
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -698,18 +468,15 @@ static int vmw_driver_unload(struct drm_device *dev)
unregister_pm_notifier(&dev_priv->pm_nb);
- if (dev_priv->ctx.cmd_bounce)
- vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
drm_irq_uninstall(dev_priv->dev);
if (dev_priv->enable_fb) {
vmw_fb_close(dev_priv);
vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_3d_resource_dec(dev_priv);
}
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
- vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -733,15 +500,6 @@ static int vmw_driver_unload(struct drm_device *dev)
return 0;
}
-static void vmw_preclose(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- struct vmw_private *dev_priv = vmw_priv(dev);
-
- vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
-}
-
static void vmw_postclose(struct drm_device *dev,
struct drm_file *file_priv)
{
@@ -764,7 +522,6 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp == NULL))
return ret;
- INIT_LIST_HEAD(&vmw_fp->fence_events);
vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
@@ -890,7 +647,7 @@ static int vmw_master_set(struct drm_device *dev,
int ret = 0;
if (!dev_priv->enable_fb) {
- ret = vmw_3d_resource_inc(dev_priv, true);
+ ret = vmw_3d_resource_inc(dev_priv);
if (unlikely(ret != 0))
return ret;
vmw_kms_save_vga(dev_priv);
@@ -932,7 +689,7 @@ out_no_active_lock:
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
+ vmw_3d_resource_dec(dev_priv);
}
return ret;
}
@@ -953,7 +710,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_kms_idle_workqueues(vmaster);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -970,7 +727,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
+ vmw_3d_resource_dec(dev_priv);
}
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1005,7 +762,6 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* This empties VRAM and unbinds all GMR bindings.
* Buffer contents is moved to swappable memory.
*/
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
ttm_bo_swapout_all(&dev_priv->bdev);
break;
@@ -1080,7 +836,7 @@ static int vmw_pm_prepare(struct device *kdev)
*/
dev_priv->suspended = true;
if (dev_priv->enable_fb)
- vmw_3d_resource_dec(dev_priv, true);
+ vmw_3d_resource_dec(dev_priv);
if (dev_priv->num_3d_resources != 0) {
@@ -1088,7 +844,7 @@ static int vmw_pm_prepare(struct device *kdev)
"while 3D resources are active.\n");
if (dev_priv->enable_fb)
- vmw_3d_resource_inc(dev_priv, true);
+ vmw_3d_resource_inc(dev_priv);
dev_priv->suspended = false;
return -EBUSY;
}
@@ -1112,7 +868,7 @@ static void vmw_pm_complete(struct device *kdev)
* start fifo.
*/
if (dev_priv->enable_fb)
- vmw_3d_resource_inc(dev_priv, false);
+ vmw_3d_resource_inc(dev_priv);
dev_priv->suspended = false;
}
@@ -1124,21 +880,6 @@ static const struct dev_pm_ops vmw_pm_ops = {
.resume = vmw_pm_resume,
};
-static const struct file_operations vmwgfx_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = vmw_unlocked_ioctl,
- .mmap = vmw_mmap,
- .poll = vmw_fops_poll,
- .read = vmw_fops_read,
- .fasync = drm_fasync,
-#if defined(CONFIG_COMPAT)
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET,
@@ -1151,8 +892,6 @@ static struct drm_driver driver = {
.irq_uninstall = vmw_irq_uninstall,
.irq_handler = vmw_irq_handler,
.get_vblank_counter = vmw_get_vblank_counter,
- .enable_vblank = vmw_enable_vblank,
- .disable_vblank = vmw_disable_vblank,
.reclaim_buffers_locked = NULL,
.ioctls = vmw_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
@@ -1162,9 +901,20 @@ static struct drm_driver driver = {
.master_set = vmw_master_set,
.master_drop = vmw_master_drop,
.open = vmw_driver_open,
- .preclose = vmw_preclose,
.postclose = vmw_postclose,
- .fops = &vmwgfx_driver_fops,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = vmw_unlocked_ioctl,
+ .mmap = vmw_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+ },
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
.date = VMWGFX_DRIVER_DATE,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d0f2c07..10fc01f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -38,31 +38,23 @@
#include "ttm/ttm_lock.h"
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"
-#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20120209"
-#define VMWGFX_DRIVER_MAJOR 2
+#define VMWGFX_DRIVER_DATE "20100927"
+#define VMWGFX_DRIVER_MAJOR 1
#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
-#define VMWGFX_MAX_VALIDATIONS 2048
+#define VMWGFX_MAX_GMRS 2048
#define VMWGFX_MAX_DISPLAYS 16
-#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
-#define VMW_RES_CONTEXT ttm_driver_type0
-#define VMW_RES_SURFACE ttm_driver_type1
-#define VMW_RES_STREAM ttm_driver_type2
-#define VMW_RES_FENCE ttm_driver_type3
-
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
- struct list_head fence_events;
};
struct vmw_dma_buffer {
@@ -80,11 +72,9 @@ struct vmw_resource {
int id;
enum ttm_object_type res_type;
bool avail;
- void (*remove_from_lists) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
void (*res_free) (struct vmw_resource *res);
- struct list_head validate_head;
- struct list_head query_head; /* Protected by the cmdbuf mutex */
+
/* TODO is a generic snooper needed? */
#if 0
void (*snoop)(struct vmw_resource *res,
@@ -100,12 +90,8 @@ struct vmw_cursor_snooper {
uint32_t *image;
};
-struct vmw_framebuffer;
-struct vmw_surface_offset;
-
struct vmw_surface {
struct vmw_resource res;
- struct list_head lru_head; /* Protected by the resource lock */
uint32_t flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
@@ -116,12 +102,9 @@ struct vmw_surface {
	/* TODO so far just an extra pointer */
struct vmw_cursor_snooper snooper;
- struct ttm_buffer_object *backup;
- struct vmw_surface_offset *offsets;
- uint32_t backup_size;
};
-struct vmw_marker_queue {
+struct vmw_fence_queue {
struct list_head head;
struct timespec lag;
struct timespec lag_time;
@@ -132,12 +115,16 @@ struct vmw_fifo_state {
unsigned long reserved_size;
__le32 *dynamic_buffer;
__le32 *static_buffer;
+ __le32 *last_buffer;
+ uint32_t last_data_size;
+ uint32_t last_buffer_size;
+ bool last_buffer_add;
unsigned long static_buffer_size;
bool using_bounce_buffer;
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
- struct vmw_marker_queue marker_queue;
+ struct vmw_fence_queue fence_queue;
};
struct vmw_relocation {
@@ -149,8 +136,6 @@ struct vmw_sw_context{
struct ida bo_list;
uint32_t last_cid;
bool cid_valid;
-	bool kernel; /**< is the call made from the kernel */
- struct vmw_resource *cur_ctx;
uint32_t last_sid;
uint32_t sid_translation;
bool sid_valid;
@@ -158,16 +143,8 @@ struct vmw_sw_context{
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
- struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
+ struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
uint32_t cur_val_buf;
- uint32_t *cmd_bounce;
- uint32_t cmd_bounce_size;
- struct list_head resource_list;
- uint32_t fence_flags;
- struct list_head query_list;
- struct ttm_buffer_object *cur_query_bo;
- uint32_t cur_query_cid;
- bool query_cid_valid;
};
struct vmw_legacy_display;
@@ -203,15 +180,11 @@ struct vmw_private {
uint32_t mmio_size;
uint32_t fb_max_width;
uint32_t fb_max_height;
- uint32_t initial_width;
- uint32_t initial_height;
__le32 __iomem *mmio_virt;
int mmio_mtrr;
uint32_t capabilities;
uint32_t max_gmr_descriptors;
uint32_t max_gmr_ids;
- uint32_t max_gmr_pages;
- uint32_t memory_size;
bool has_gmr;
struct mutex hw_mutex;
@@ -222,7 +195,12 @@ struct vmw_private {
struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
uint32_t vga_width;
uint32_t vga_height;
+ uint32_t vga_depth;
uint32_t vga_bpp;
+ uint32_t vga_pseudo;
+ uint32_t vga_red_mask;
+ uint32_t vga_green_mask;
+ uint32_t vga_blue_mask;
uint32_t vga_bpl;
uint32_t vga_pitchlock;
@@ -234,7 +212,6 @@ struct vmw_private {
void *fb_info;
struct vmw_legacy_display *ldu_priv;
- struct vmw_screen_object_display *sou_priv;
struct vmw_overlay *overlay_priv;
/*
@@ -263,16 +240,13 @@ struct vmw_private {
* Fencing and IRQs.
*/
- atomic_t marker_seq;
+ atomic_t fence_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
- int fence_queue_waiters; /* Protected by hw_mutex */
- int goal_queue_waiters; /* Protected by hw_mutex */
+ atomic_t fence_queue_waiters;
atomic_t fifo_queue_waiters;
- uint32_t last_read_seqno;
+ uint32_t last_read_sequence;
spinlock_t irq_lock;
- struct vmw_fence_manager *fman;
- uint32_t irq_mask;
/*
* Device state
@@ -311,26 +285,6 @@ struct vmw_private {
struct mutex release_mutex;
uint32_t num_3d_resources;
-
- /*
- * Query processing. These members
- * are protected by the cmdbuf mutex.
- */
-
- struct ttm_buffer_object *dummy_query_bo;
- struct ttm_buffer_object *pinned_bo;
- uint32_t query_cid;
- bool dummy_query_bo_pinned;
-
- /*
- * Surface swapping. The "surface_lru" list is protected by the
- * resource lock in order to be able to destroy a surface and take
- * it off the lru atomically. "used_memory_size" is currently
- * protected by the cmdbuf mutex for simplicity.
- */
-
- struct list_head surface_lru;
- uint32_t used_memory_size;
};
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -365,8 +319,8 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
return val;
}
-int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
-void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
+int vmw_3d_resource_inc(struct vmw_private *dev_priv);
+void vmw_3d_resource_dec(struct vmw_private *dev_priv);
/**
* GMR utilities - vmwgfx_gmr.c
@@ -391,13 +345,7 @@ extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res);
-extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_dma_buffer **out_buf);
+ int id);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,
@@ -415,8 +363,6 @@ extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
extern int vmw_surface_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
@@ -432,6 +378,10 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t id, struct vmw_dma_buffer **out);
+extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo);
+extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -440,30 +390,7 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t *inout_id,
struct vmw_resource **out);
-extern void vmw_resource_unreserve(struct list_head *list);
-/**
- * DMA buffer helper routines - vmwgfx_dmabuf.c
- */
-extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- struct ttm_placement *placement,
- bool interruptible);
-extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible);
-extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible);
-extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- bool pin, bool interruptible);
-extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- bool interruptible);
-extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
- SVGAGuestPtr *ptr);
-extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -471,16 +398,8 @@ extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
+extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_present_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern unsigned int vmw_fops_poll(struct file *filp,
- struct poll_table_struct *wait);
-extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset);
/**
* Fifo utilities - vmwgfx_fifo.c
@@ -493,12 +412,11 @@ extern void vmw_fifo_release(struct vmw_private *dev_priv,
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
- uint32_t *seqno);
+ uint32_t *sequence);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
+extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
-extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
- uint32_t cid);
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -516,10 +434,7 @@ extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
-extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_evictable_placement;
-extern struct ttm_placement vmw_srf_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
@@ -529,71 +444,45 @@ extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_execbuf_process(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- void __user *user_commands,
- void *kernel_commands,
- uint32_t command_size,
- uint64_t throttle_us,
- struct drm_vmw_fence_rep __user
- *user_fence_rep,
- struct vmw_fence_obj **out_fence);
-
-extern void
-vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
- bool only_on_cid_match, uint32_t cid);
-
-extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- struct vmw_fence_obj **p_fence,
- uint32_t *p_handle);
-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
- struct vmw_fpriv *vmw_fp,
- int ret,
- struct drm_vmw_fence_rep __user
- *user_fence_rep,
- struct vmw_fence_obj *fence,
- uint32_t fence_handle);
/**
 * IRQs and waiting - vmwgfx_irq.c
*/
extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
-extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
- uint32_t seqno, bool interruptible,
- unsigned long timeout);
+extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
+ uint32_t sequence, bool interruptible,
+ unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
-extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
- uint32_t seqno);
+extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
+ uint32_t sequence);
+extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
bool lazy,
bool fifo_idle,
- uint32_t seqno,
+ uint32_t sequence,
bool interruptible,
unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv,
+extern void vmw_update_sequence(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state);
-extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+
/**
- * Rudimentary fence-like objects currently used only for throttling -
- * vmwgfx_marker.c
+ * Rudimentary fence objects currently used only for throttling -
+ * vmwgfx_fence.c
*/
-extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
-extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
-extern int vmw_marker_push(struct vmw_marker_queue *queue,
- uint32_t seqno);
-extern int vmw_marker_pull(struct vmw_marker_queue *queue,
- uint32_t signaled_seqno);
+extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
+extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
+extern int vmw_fence_push(struct vmw_fence_queue *queue,
+ uint32_t sequence);
+extern int vmw_fence_pull(struct vmw_fence_queue *queue,
+ uint32_t signaled_sequence);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
- struct vmw_marker_queue *queue, uint32_t us);
+ struct vmw_fence_queue *queue, uint32_t us);
/**
* Kernel framebuffer - vmwgfx_fb.c
@@ -619,31 +508,16 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
SVGA3dCmdHeader *header);
-int vmw_kms_write_svga(struct vmw_private *vmw_priv,
- unsigned width, unsigned height, unsigned pitch,
- unsigned bpp, unsigned depth);
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ unsigned width, unsigned height, unsigned pitch,
+			unsigned bpp, unsigned depth);
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
-int vmw_enable_vblank(struct drm_device *dev, int crtc);
-void vmw_disable_vblank(struct drm_device *dev, int crtc);
-int vmw_kms_present(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct vmw_surface *surface,
- uint32_t sid, int32_t destX, int32_t destY,
- struct drm_vmw_rect *clips,
- uint32_t num_clips);
-int vmw_kms_readback(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_vmw_rect *clips,
- uint32_t num_clips);
-int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
/**
* Overlay control - vmwgfx_overlay.c
@@ -702,8 +576,4 @@ static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer
return NULL;
}
-static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
-{
- return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
-}
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 4acced4..41b95ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -44,71 +44,10 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
return 0;
}
-static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_resource **p_res)
-{
- struct vmw_resource *res = *p_res;
-
- if (list_empty(&res->validate_head)) {
- list_add_tail(&res->validate_head, &sw_context->resource_list);
- *p_res = NULL;
- } else
- vmw_resource_unreference(p_res);
-}
-
-/**
- * vmw_bo_to_validate_list - add a bo to a validate list
- *
- * @sw_context: The software context used for this command submission batch.
- * @bo: The buffer object to add.
- * @fence_flags: Fence flags to be or'ed with any other fence flags for
- * this buffer on this submission batch.
- * @p_val_node: If non-NULL, will be updated with the validate node number
- * on return.
- *
- * Returns -EINVAL if the limit of number of buffer objects per command
- * submission is reached.
- */
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct ttm_buffer_object *bo,
- uint32_t fence_flags,
- uint32_t *p_val_node)
-{
- uint32_t val_node;
- struct ttm_validate_buffer *val_buf;
-
- val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-
- if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
- DRM_ERROR("Max number of DMA buffers per submission"
- " exceeded.\n");
- return -EINVAL;
- }
-
- val_buf = &sw_context->val_bufs[val_node];
- if (unlikely(val_node == sw_context->cur_val_buf)) {
- val_buf->new_sync_obj_arg = NULL;
- val_buf->bo = ttm_bo_reference(bo);
- list_add_tail(&val_buf->head, &sw_context->validate_nodes);
- ++sw_context->cur_val_buf;
- }
-
- val_buf->new_sync_obj_arg = (void *)
- ((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
- sw_context->fence_flags |= fence_flags;
-
- if (p_val_node)
- *p_val_node = val_node;
-
- return 0;
-}
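
A hedged sketch of a call site for the removed helper above (names illustrative, not part of this patch):

uint32_t val_node;
int ret = vmw_bo_to_validate_list(sw_context, bo,
				  DRM_VMW_FENCE_FLAG_EXEC, &val_node);
if (unlikely(ret != 0))
	return ret;	/* more than VMWGFX_MAX_VALIDATIONS buffers queued */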
-
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource *ctx;
-
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
__le32 cid;
@@ -119,8 +58,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
return 0;
- ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
- &ctx);
+ ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use context %u\n",
(unsigned) cmd->cid);
@@ -129,8 +67,6 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
sw_context->last_cid = cmd->cid;
sw_context->cid_valid = true;
- sw_context->cur_ctx = ctx;
- vmw_resource_to_validate_list(sw_context, &ctx);
return 0;
}
@@ -139,45 +75,29 @@ static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
uint32_t *sid)
{
- struct vmw_surface *srf;
- int ret;
- struct vmw_resource *res;
-
if (*sid == SVGA3D_INVALID_ID)
return 0;
- if (likely((sw_context->sid_valid &&
- *sid == sw_context->last_sid))) {
- *sid = sw_context->sid_translation;
- return 0;
- }
-
- ret = vmw_user_surface_lookup_handle(dev_priv,
- sw_context->tfile,
- *sid, &srf);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could ot find or use surface 0x%08x "
- "address 0x%08lx\n",
- (unsigned int) *sid,
- (unsigned long) sid);
- return ret;
- }
+ if (unlikely((!sw_context->sid_valid ||
+ *sid != sw_context->last_sid))) {
+ int real_id;
+ int ret = vmw_surface_check(dev_priv, sw_context->tfile,
+ *sid, &real_id);
- ret = vmw_surface_validate(dev_priv, srf);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Could not validate surface.\n");
- vmw_surface_unreference(&srf);
- return ret;
- }
-
- sw_context->last_sid = *sid;
- sw_context->sid_valid = true;
- sw_context->sid_translation = srf->res.id;
- *sid = sw_context->sid_translation;
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could ot find or use surface 0x%08x "
+ "address 0x%08lx\n",
+ (unsigned int) *sid,
+ (unsigned long) sid);
+ return ret;
+ }
- res = &srf->res;
- vmw_resource_to_validate_list(sw_context, &res);
+ sw_context->last_sid = *sid;
+ sw_context->sid_valid = true;
+ *sid = real_id;
+ sw_context->sid_translation = real_id;
+ } else
+ *sid = sw_context->sid_translation;
return 0;
}
@@ -246,12 +166,6 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
} *cmd;
cmd = container_of(header, struct vmw_sid_cmd, header);
-
- if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
- return -EPERM;
- }
-
return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}
@@ -264,179 +178,10 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
SVGA3dCmdPresent body;
} *cmd;
-
cmd = container_of(header, struct vmw_sid_cmd, header);
-
- if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
- return -EPERM;
- }
-
return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}
-/**
- * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
- *
- * @dev_priv: The device private structure.
- * @cid: The hardware context for the next query.
- * @new_query_bo: The new buffer holding query results.
- * @sw_context: The software context used for this command submission.
- *
- * This function checks whether @new_query_bo is suitable for holding
- * query results, and if another buffer currently is pinned for query
- * results. If so, the function prepares the state of @sw_context for
- * switching pinned buffers after successful submission of the current
- * command batch. It also checks whether we're using a new query context.
- * In that case, it makes sure we emit a query barrier for the old
- * context before the current query buffer is fenced.
- */
-static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- uint32_t cid,
- struct ttm_buffer_object *new_query_bo,
- struct vmw_sw_context *sw_context)
-{
- int ret;
- bool add_cid = false;
- uint32_t cid_to_add;
-
- if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
-
- if (unlikely(new_query_bo->num_pages > 4)) {
- DRM_ERROR("Query buffer too large.\n");
- return -EINVAL;
- }
-
- if (unlikely(sw_context->cur_query_bo != NULL)) {
- BUG_ON(!sw_context->query_cid_valid);
- add_cid = true;
- cid_to_add = sw_context->cur_query_cid;
- ret = vmw_bo_to_validate_list(sw_context,
- sw_context->cur_query_bo,
- DRM_VMW_FENCE_FLAG_EXEC,
- NULL);
- if (unlikely(ret != 0))
- return ret;
- }
- sw_context->cur_query_bo = new_query_bo;
-
- ret = vmw_bo_to_validate_list(sw_context,
- dev_priv->dummy_query_bo,
- DRM_VMW_FENCE_FLAG_EXEC,
- NULL);
- if (unlikely(ret != 0))
- return ret;
-
- }
-
- if (unlikely(cid != sw_context->cur_query_cid &&
- sw_context->query_cid_valid)) {
- add_cid = true;
- cid_to_add = sw_context->cur_query_cid;
- }
-
- sw_context->cur_query_cid = cid;
- sw_context->query_cid_valid = true;
-
- if (add_cid) {
- struct vmw_resource *ctx = sw_context->cur_ctx;
-
- if (list_empty(&ctx->query_head))
- list_add_tail(&ctx->query_head,
- &sw_context->query_list);
- ret = vmw_bo_to_validate_list(sw_context,
- dev_priv->dummy_query_bo,
- DRM_VMW_FENCE_FLAG_EXEC,
- NULL);
- if (unlikely(ret != 0))
- return ret;
- }
- return 0;
-}
-
-
-/**
- * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
- *
- * @dev_priv: The device private structure.
- * @sw_context: The software context used for this command submission batch.
- *
- * This function will check if we're switching query buffers, and will then,
- * if no other query waits are issued this command submission batch,
- * issue a dummy occlusion query wait used as a query barrier. When the fence
- * object following that query wait has signaled, we are sure that all
- * preceding queries have finished, and the old query buffer can be unpinned.
- * However, since both the new query buffer and the old one are fenced with
- * that fence, we can do an asynchronous unpin now, and be sure that the
- * old query buffer won't be moved until the fence has signaled.
- *
- * As mentioned above, both the new and the old query buffers need to be fenced
- * using a sequence emitted *after* calling this function.
- */
-static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context)
-{
-
- struct vmw_resource *ctx, *next_ctx;
- int ret;
-
- /*
- * The validate list should still hold references to all
- * contexts here.
- */
-
- list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
- query_head) {
- list_del_init(&ctx->query_head);
-
- BUG_ON(list_empty(&ctx->validate_head));
-
- ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
-
- if (unlikely(ret != 0))
- DRM_ERROR("Out of fifo space for dummy query.\n");
- }
-
- if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
- if (dev_priv->pinned_bo) {
- vmw_bo_pin(dev_priv->pinned_bo, false);
- ttm_bo_unref(&dev_priv->pinned_bo);
- }
-
- vmw_bo_pin(sw_context->cur_query_bo, true);
-
- /*
- * We pin also the dummy_query_bo buffer so that we
- * don't need to validate it when emitting
- * dummy queries in context destroy paths.
- */
-
- vmw_bo_pin(dev_priv->dummy_query_bo, true);
- dev_priv->dummy_query_bo_pinned = true;
-
- dev_priv->query_cid = sw_context->cur_query_cid;
- dev_priv->pinned_bo =
- ttm_bo_reference(sw_context->cur_query_bo);
- }
-}
-
-/**
- * vmw_query_switch_backoff - clear query barrier list
- * @sw_context: The sw context used for this submission batch.
- *
- * This function is used as part of an error path, where a previously
- * set up list of query barriers needs to be cleared.
- *
- */
-static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
-{
- struct list_head *list, *next;
-
- list_for_each_safe(list, next, &sw_context->query_list) {
- list_del_init(list);
- }
-}
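
The backoff above uses list_del_init() rather than list_del(): the node is left self-linked, so later list_empty() checks on ctx->query_head stay valid. A minimal illustration:

list_del_init(&ctx->query_head);
BUG_ON(!list_empty(&ctx->query_head));	/* holds after del_init */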
-
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
@@ -446,6 +191,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
+ uint32_t cur_validate_node;
+ struct ttm_validate_buffer *val_buf;
int ret;
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
@@ -465,11 +212,22 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
- ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
- &reloc->index);
- if (unlikely(ret != 0))
+ cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
+ if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
+ DRM_ERROR("Max number of DMA buffers per submission"
+ " exceeded.\n");
+ ret = -EINVAL;
goto out_no_reloc;
+ }
+ reloc->index = cur_validate_node;
+ if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
+ val_buf = &sw_context->val_bufs[cur_validate_node];
+ val_buf->bo = ttm_bo_reference(bo);
+ val_buf->new_sync_obj_arg = (void *) dev_priv;
+ list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+ ++sw_context->cur_val_buf;
+ }
*vmw_bo_p = vmw_bo;
return 0;
@@ -501,11 +259,8 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
- &vmw_bo->base, sw_context);
-
vmw_dmabuf_unreference(&vmw_bo);
- return ret;
+ return 0;
}
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
@@ -518,7 +273,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery q;
} *cmd;
int ret;
- struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -532,19 +286,10 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
return ret;
vmw_dmabuf_unreference(&vmw_bo);
-
- /*
- * This wait will act as a barrier for previous waits for this
- * context.
- */
-
- ctx = sw_context->cur_ctx;
- if (!list_empty(&ctx->query_head))
- list_del_init(&ctx->query_head);
-
return 0;
}
+
static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -557,7 +302,6 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
- struct vmw_resource *res;
cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -574,28 +318,18 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
goto out_no_reloc;
}
- ret = vmw_surface_validate(dev_priv, srf);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Culd not validate surface.\n");
- goto out_no_validate;
- }
-
- /*
+ /**
* Patch command stream with device SID.
*/
+
cmd->dma.host.sid = srf->res.id;
vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
-
- vmw_dmabuf_unreference(&vmw_bo);
-
- res = &srf->res;
- vmw_resource_to_validate_list(sw_context, &res);
-
- return 0;
-
-out_no_validate:
+ /**
+ * FIXME: May deadlock here when called from the
+ * command parsing code.
+ */
vmw_surface_unreference(&srf);
+
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
return ret;
@@ -685,71 +419,6 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
return 0;
}
-static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- void *buf)
-{
- struct vmw_dma_buffer *vmw_bo;
- int ret;
-
- struct {
- uint32_t header;
- SVGAFifoCmdDefineGMRFB body;
- } *cmd = buf;
-
- ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->body.ptr,
- &vmw_bo);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_dmabuf_unreference(&vmw_bo);
-
- return ret;
-}
-
-static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- void *buf, uint32_t *size)
-{
- uint32_t size_remaining = *size;
- uint32_t cmd_id;
-
- cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
- switch (cmd_id) {
- case SVGA_CMD_UPDATE:
- *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
- break;
- case SVGA_CMD_DEFINE_GMRFB:
- *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
- break;
- case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
- *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
- break;
- case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
- *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
- break;
- default:
- DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
- return -EINVAL;
- }
-
- if (*size > size_remaining) {
- DRM_ERROR("Invalid SVGA command (size mismatch):"
- " %u.\n", cmd_id);
- return -EINVAL;
- }
-
- if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
- return -EPERM;
- }
-
- if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
- return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
-
- return 0;
-}
typedef int (*vmw_cmd_func) (struct vmw_private *,
struct vmw_sw_context *,
@@ -802,11 +471,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
int ret;
- cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
- /* Handle any none 3D commands */
- if (unlikely(cmd_id < SVGA_CMD_MAX))
- return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
-
+ cmd_id = ((uint32_t *)buf)[0];
+ if (cmd_id == SVGA_CMD_UPDATE) {
+ *size = 5 << 2;
+ return 0;
+ }
cmd_id = le32_to_cpu(header->id);
*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
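
The `5 << 2` in the replacement code is the byte size of an SVGA_CMD_UPDATE packet: one 32-bit command id plus the four 32-bit SVGAFifoCmdUpdate fields (x, y, width, height). The removed helper computed the same 20 bytes symbolically:

*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);	/* 4 + 16 = 5 << 2 */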
@@ -831,8 +500,7 @@ out_err:
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- void *buf,
- uint32_t size)
+ void *buf, uint32_t size)
{
int32_t cur_size = size;
int ret;
@@ -882,11 +550,7 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
struct ttm_validate_buffer *entry, *next;
- struct vmw_resource *res, *res_next;
- /*
- * Drop references to DMA buffers held during command submission.
- */
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
head) {
list_del(&entry->head);
@@ -895,16 +559,6 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
-
- /*
- * Drop references to resources held during command submission.
- */
- vmw_resource_unreserve(&sw_context->resource_list);
- list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
- validate_head) {
- list_del_init(&res->validate_head);
- vmw_resource_unreference(&res);
- }
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -912,16 +566,6 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
{
int ret;
-
- /*
- * Don't validate pinned buffers.
- */
-
- if (bo == dev_priv->pinned_bo ||
- (bo == dev_priv->dummy_query_bo &&
- dev_priv->dummy_query_bo_pinned))
- return 0;
-
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
@@ -958,209 +602,57 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
return 0;
}
-static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
- uint32_t size)
-{
- if (likely(sw_context->cmd_bounce_size >= size))
- return 0;
-
- if (sw_context->cmd_bounce_size == 0)
- sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
-
- while (sw_context->cmd_bounce_size < size) {
- sw_context->cmd_bounce_size =
- PAGE_ALIGN(sw_context->cmd_bounce_size +
- (sw_context->cmd_bounce_size >> 1));
- }
-
- if (sw_context->cmd_bounce != NULL)
- vfree(sw_context->cmd_bounce);
-
- sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
-
- if (sw_context->cmd_bounce == NULL) {
- DRM_ERROR("Failed to allocate command bounce buffer.\n");
- sw_context->cmd_bounce_size = 0;
- return -ENOMEM;
- }
-
- return 0;
-}
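
The removed resize loop grows the bounce buffer by roughly 1.5x per step, page-aligned. Starting from VMWGFX_CMD_BOUNCE_INIT_SIZE (32768) with 4 KiB pages, the sequence runs:

/* size = PAGE_ALIGN(size + (size >> 1)) until size >= request: */
/* 32768 -> 49152 -> 73728 -> 110592 -> ... */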
-
-/**
- * vmw_execbuf_fence_commands - create and submit a command stream fence
- *
- * Creates a fence object and submits a command stream marker.
- * If this fails for some reason, we sync the fifo and return NULL.
- * It is then safe to fence buffers with a NULL pointer.
- *
- * If @p_handle is not NULL, @file_priv must also not be NULL. Creates
- * a userspace handle if @p_handle is not NULL, otherwise not.
- */
-
-int vmw_execbuf_fence_commands(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- struct vmw_fence_obj **p_fence,
- uint32_t *p_handle)
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- uint32_t sequence;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+ struct drm_vmw_fence_rep fence_rep;
+ struct drm_vmw_fence_rep __user *user_fence_rep;
int ret;
- bool synced = false;
+ void *user_cmd;
+ void *cmd;
+ uint32_t sequence;
+ struct vmw_sw_context *sw_context = &dev_priv->ctx;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
- /* p_handle implies file_priv. */
- BUG_ON(p_handle != NULL && file_priv == NULL);
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
- ret = vmw_fifo_send_fence(dev_priv, &sequence);
+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
if (unlikely(ret != 0)) {
- DRM_ERROR("Fence submission error. Syncing.\n");
- synced = true;
- }
-
- if (p_handle != NULL)
- ret = vmw_user_fence_create(file_priv, dev_priv->fman,
- sequence,
- DRM_VMW_FENCE_FLAG_EXEC,
- p_fence, p_handle);
- else
- ret = vmw_fence_create(dev_priv->fman, sequence,
- DRM_VMW_FENCE_FLAG_EXEC,
- p_fence);
-
- if (unlikely(ret != 0 && !synced)) {
- (void) vmw_fallback_wait(dev_priv, false, false,
- sequence, false,
- VMW_FENCE_WAIT_TIMEOUT);
- *p_fence = NULL;
+ ret = -ERESTARTSYS;
+ goto out_no_cmd_mutex;
}
- return 0;
-}
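
A hedged sketch of a kernel-internal caller of the removed helper (hypothetical; no user-space handle wanted, so both file_priv and p_handle are NULL, which the BUG_ON permits):

struct vmw_fence_obj *fence;
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
/* On failure the fifo was synced and fence is NULL, which the
 * comment above documents as safe to fence buffers with. */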
-
-/**
- * vmw_execbuf_copy_fence_user - copy fence object information to
- * user-space.
- *
- * @dev_priv: Pointer to a vmw_private struct.
- * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
- * @ret: Return value from fence object creation.
- * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
- * which the information should be copied.
- * @fence: Pointer to the fence object.
- * @fence_handle: User-space fence handle.
- *
- * This function copies fence information to user-space. If copying fails,
- * the user-space struct drm_vmw_fence_rep::error member is hopefully
- * left untouched, and if it's preloaded with -EFAULT by user-space,
- * the error will hopefully be detected.
- * Also if copying fails, user-space will be unable to signal the fence
- * object so we wait for it immediately, and then unreference the
- * user-space reference.
- */
-void
-vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
- struct vmw_fpriv *vmw_fp,
- int ret,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj *fence,
- uint32_t fence_handle)
-{
- struct drm_vmw_fence_rep fence_rep;
-
- if (user_fence_rep == NULL)
- return;
-
- memset(&fence_rep, 0, sizeof(fence_rep));
-
- fence_rep.error = ret;
- if (ret == 0) {
- BUG_ON(fence == NULL);
-
- fence_rep.handle = fence_handle;
- fence_rep.seqno = fence->seqno;
- vmw_update_seqno(dev_priv, &dev_priv->fifo);
- fence_rep.passed_seqno = dev_priv->last_read_seqno;
+ cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving fifo space for commands.\n");
+ ret = -ENOMEM;
+ goto out_unlock;
}
- /*
- * copy_to_user errors will be detected by user space not
- * seeing fence_rep::error filled in. Typically
- * user-space would have pre-set that member to -EFAULT.
- */
- ret = copy_to_user(user_fence_rep, &fence_rep,
- sizeof(fence_rep));
+ user_cmd = (void __user *)(unsigned long)arg->commands;
+ ret = copy_from_user(cmd, user_cmd, arg->command_size);
- /*
- * User-space lost the fence object. We need to sync
- * and unreference the handle.
- */
- if (unlikely(ret != 0) && (fence_rep.error == 0)) {
- ttm_ref_object_base_unref(vmw_fp->tfile,
- fence_handle, TTM_REF_USAGE);
- DRM_ERROR("Fence copy error. Syncing.\n");
- (void) vmw_fence_obj_wait(fence, fence->signal_mask,
- false, false,
- VMW_FENCE_WAIT_TIMEOUT);
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
+ DRM_ERROR("Failed copying commands.\n");
+ goto out_commit;
}
-}
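
A hedged sketch of the user-space side of the convention the comment above relies on (hypothetical snippet using libdrm; field names per drm_vmw_execbuf_arg):

struct drm_vmw_fence_rep rep;
memset(&rep, 0, sizeof(rep));
rep.error = -EFAULT;	/* survives if the kernel's copy_to_user fails */
arg.fence_rep = (unsigned long)&rep;
drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
if (rep.error != 0)
	;	/* fence creation or the copy-back failed */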
-
-int vmw_execbuf_process(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- void __user *user_commands,
- void *kernel_commands,
- uint32_t command_size,
- uint64_t throttle_us,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj **out_fence)
-{
- struct vmw_sw_context *sw_context = &dev_priv->ctx;
- struct vmw_fence_obj *fence = NULL;
- uint32_t handle;
- void *cmd;
- int ret;
-
- ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
- if (unlikely(ret != 0))
- return -ERESTARTSYS;
-
- if (kernel_commands == NULL) {
- sw_context->kernel = false;
-
- ret = vmw_resize_cmd_bounce(sw_context, command_size);
- if (unlikely(ret != 0))
- goto out_unlock;
-
-
- ret = copy_from_user(sw_context->cmd_bounce,
- user_commands, command_size);
-
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- DRM_ERROR("Failed copying commands.\n");
- goto out_unlock;
- }
- kernel_commands = sw_context->cmd_bounce;
- } else
- sw_context->kernel = true;
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
sw_context->cid_valid = false;
sw_context->sid_valid = false;
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
- sw_context->fence_flags = 0;
- INIT_LIST_HEAD(&sw_context->query_list);
- INIT_LIST_HEAD(&sw_context->resource_list);
- sw_context->cur_query_bo = dev_priv->pinned_bo;
- sw_context->cur_query_cid = dev_priv->query_cid;
- sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
INIT_LIST_HEAD(&sw_context->validate_nodes);
- ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
- command_size);
+ ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
if (unlikely(ret != 0))
goto out_err;
-
ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
@@ -1171,212 +663,57 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_apply_relocations(sw_context);
- if (throttle_us) {
- ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
- throttle_us);
+ if (arg->throttle_us) {
+ ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
+ arg->throttle_us);
if (unlikely(ret != 0))
- goto out_throttle;
+ goto out_err;
}
- cmd = vmw_fifo_reserve(dev_priv, command_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving fifo space for commands.\n");
- ret = -ENOMEM;
- goto out_throttle;
- }
+ vmw_fifo_commit(dev_priv, arg->command_size);
- memcpy(cmd, kernel_commands, command_size);
- vmw_fifo_commit(dev_priv, command_size);
+ ret = vmw_fifo_send_fence(dev_priv, &sequence);
+
+ ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
+ (void *)(unsigned long) sequence);
+ vmw_clear_validations(sw_context);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
- vmw_query_bo_switch_commit(dev_priv, sw_context);
- ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
- &fence,
- (user_fence_rep) ? &handle : NULL);
/*
* This error is harmless, because if fence submission fails,
- * vmw_fifo_send_fence will sync. The error will be propagated to
- * user-space in @fence_rep
+ * vmw_fifo_send_fence will sync.
*/
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
- ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
- (void *) fence);
+ fence_rep.error = ret;
+ fence_rep.fence_seq = (uint64_t) sequence;
+ fence_rep.pad64 = 0;
- vmw_clear_validations(sw_context);
- vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
- user_fence_rep, fence, handle);
-
- /* Don't unreference when handing fence out */
- if (unlikely(out_fence != NULL)) {
- *out_fence = fence;
- fence = NULL;
- } else if (likely(fence != NULL)) {
- vmw_fence_obj_unreference(&fence);
- }
+ user_fence_rep = (struct drm_vmw_fence_rep __user *)
+ (unsigned long)arg->fence_rep;
- mutex_unlock(&dev_priv->cmdbuf_mutex);
- return 0;
+ /*
+ * copy_to_user errors will be detected by user space not
+ * seeing fence_rep::error filled in.
+ */
+
+ ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
+ vmw_kms_cursor_post_execbuf(dev_priv);
+ ttm_read_unlock(&vmaster->lock);
+ return 0;
out_err:
vmw_free_relocations(sw_context);
-out_throttle:
- vmw_query_switch_backoff(sw_context);
ttm_eu_backoff_reservation(&sw_context->validate_nodes);
vmw_clear_validations(sw_context);
+out_commit:
+ vmw_fifo_commit(dev_priv, 0);
out_unlock:
mutex_unlock(&dev_priv->cmdbuf_mutex);
- return ret;
-}
-
-/**
- * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
- *
- * @dev_priv: The device private structure.
- *
- * This function is called to idle the fifo and unpin the query buffer
- * if the normal way to do this hits an error, which should typically be
- * extremely rare.
- */
-static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
-{
- DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
-
- (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
- vmw_bo_pin(dev_priv->pinned_bo, false);
- vmw_bo_pin(dev_priv->dummy_query_bo, false);
- dev_priv->dummy_query_bo_pinned = false;
-}
-
-
-/**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
- * query bo.
- *
- * @dev_priv: The device private structure.
- * @only_on_cid_match: Only flush and unpin if the current active query cid
- * matches @cid.
- * @cid: Optional context id to match.
- *
- * This function should be used to unpin the pinned query bo, or
- * as a query barrier when we need to make sure that all queries have
- * finished before the next fifo command. (For example on hardware
- * context destructions where the hardware may otherwise leak unfinished
- * queries).
- *
- * This function does not return any failure codes, but make attempts
- * to do safe unpinning in case of errors.
- *
- * The function will synchronize on the previous query barrier, and will
- * thus not finish until that barrier has executed.
- */
-void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
- bool only_on_cid_match, uint32_t cid)
-{
- int ret = 0;
- struct list_head validate_list;
- struct ttm_validate_buffer pinned_val, query_val;
- struct vmw_fence_obj *fence;
-
- mutex_lock(&dev_priv->cmdbuf_mutex);
-
- if (dev_priv->pinned_bo == NULL)
- goto out_unlock;
-
- if (only_on_cid_match && cid != dev_priv->query_cid)
- goto out_unlock;
-
- INIT_LIST_HEAD(&validate_list);
-
- pinned_val.new_sync_obj_arg = (void *)(unsigned long)
- DRM_VMW_FENCE_FLAG_EXEC;
- pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
- list_add_tail(&pinned_val.head, &validate_list);
-
- query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
- query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
- list_add_tail(&query_val.head, &validate_list);
-
- do {
- ret = ttm_eu_reserve_buffers(&validate_list);
- } while (ret == -ERESTARTSYS);
-
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
- goto out_no_reserve;
- }
-
- ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
- goto out_no_emit;
- }
-
- vmw_bo_pin(dev_priv->pinned_bo, false);
- vmw_bo_pin(dev_priv->dummy_query_bo, false);
- dev_priv->dummy_query_bo_pinned = false;
-
- (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
-
- ttm_bo_unref(&query_val.bo);
- ttm_bo_unref(&pinned_val.bo);
- ttm_bo_unref(&dev_priv->pinned_bo);
-
-out_unlock:
- mutex_unlock(&dev_priv->cmdbuf_mutex);
- return;
-
-out_no_emit:
- ttm_eu_backoff_reservation(&validate_list);
-out_no_reserve:
- ttm_bo_unref(&query_val.bo);
- ttm_bo_unref(&pinned_val.bo);
- ttm_bo_unref(&dev_priv->pinned_bo);
- mutex_unlock(&dev_priv->cmdbuf_mutex);
-}
-
-
-int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- int ret;
-
- /*
- * This will allow us to extend the ioctl argument while
- * maintaining backwards compatibility:
- * We take different code paths depending on the value of
- * arg->version.
- */
-
- if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
- DRM_ERROR("Incorrect execbuf version.\n");
- DRM_ERROR("You're running outdated experimental "
- "vmwgfx user-space drivers.");
- return -EINVAL;
- }
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = vmw_execbuf_process(file_priv, dev_priv,
- (void __user *)(unsigned long)arg->commands,
- NULL, arg->command_size, arg->throttle_us,
- (void __user *)(unsigned long)arg->fence_rep,
- NULL);
-
- if (unlikely(ret != 0))
- goto out_unlock;
-
- vmw_kms_cursor_post_execbuf(dev_priv);
-
-out_unlock:
+out_no_cmd_mutex:
ttm_read_unlock(&vmaster->lock);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b19a7ba..bfab60c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -158,14 +158,10 @@ static int vmw_fb_set_par(struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
- int ret;
-
- ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
- info->fix.line_length,
- par->bpp, par->depth);
- if (ret)
- return ret;
+ vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+ info->fix.line_length,
+ par->bpp, par->depth);
if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
/* TODO check if pitch and offset changes */
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
@@ -409,20 +405,24 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
struct fb_info *info;
unsigned initial_width, initial_height;
unsigned fb_width, fb_height;
- unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+ unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
int ret;
- fb_bpp = 32;
+ /* XXX These shouldn't be hardcoded. */
+ initial_width = 800;
+ initial_height = 600;
+
+ fb_bbp = 32;
fb_depth = 24;
	/* XXX These shouldn't be hardcoded either. */
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
- initial_width = min(vmw_priv->initial_width, fb_width);
- initial_height = min(vmw_priv->initial_height, fb_height);
+ initial_width = min(fb_width, initial_width);
+ initial_height = min(fb_height, initial_height);
- fb_pitch = fb_width * fb_bpp / 8;
+ fb_pitch = fb_width * fb_bbp / 8;
fb_size = fb_pitch * fb_height;
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
@@ -437,7 +437,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
par = info->par;
par->vmw_priv = vmw_priv;
par->depth = fb_depth;
- par->bpp = fb_bpp;
+ par->bpp = fb_bbp;
par->vmalloc = NULL;
par->max_width = fb_width;
par->max_height = fb_height;
@@ -509,7 +509,19 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->var.xres = initial_width;
info->var.yres = initial_height;
- /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+#if 0
+ info->pixmap.size = 64*1024;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+#else
+ info->pixmap.size = 0;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+#endif
info->apertures = alloc_apertures(1);
if (!info->apertures) {
@@ -576,6 +588,58 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
return 0;
}
+int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *vmw_bo)
+{
+ struct ttm_buffer_object *bo = &vmw_bo->base;
+ int ret = 0;
+
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
+ ttm_bo_unreserve(bo);
+
+ return ret;
+}
+
+int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *vmw_bo)
+{
+ struct ttm_buffer_object *bo = &vmw_bo->base;
+ struct ttm_placement ne_placement = vmw_vram_ne_placement;
+ int ret = 0;
+
+ ne_placement.lpfn = bo->num_pages;
+
+	/* interruptible? */
+ ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err_unlock;
+
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.start < bo->num_pages &&
+ bo->mem.start > 0)
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
+ false, false);
+
+ ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
+
+	/* Could probably BUG_ON() here; WARN_ON() is the safer choice. */
+ WARN_ON(bo->offset != 0);
+
+ ttm_bo_unreserve(bo);
+err_unlock:
+ ttm_write_unlock(&vmw_priv->active_master->lock);
+
+ return ret;
+}
+
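/*
 * Informal note on the placement dance above: vmw_vram_ne_placement
 * ("ne" = no-evict) with lpfn set to bo->num_pages only accepts VRAM
 * pages in [0, num_pages), so a successful validate necessarily leaves
 * the buffer at offset 0. The preliminary move to vmw_sys_placement
 * clears the way when the buffer already sits in low VRAM but not at
 * the very start.
 */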
int vmw_fb_off(struct vmw_private *vmw_priv)
{
struct fb_info *info;
@@ -597,7 +661,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
par->bo_ptr = NULL;
ttm_bo_kunmap(&par->map);
- vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
+ vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
return 0;
}
@@ -623,7 +687,7 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
	/* Make sure that all overlays are stopped when we take over */
vmw_overlay_stop_all(vmw_priv);
- ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
+ ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("could not move buffer to start of VRAM\n");
goto err_no_buffer;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index f2fb8f1..61eacc1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,1130 +25,149 @@
*
**************************************************************************/
-#include "drmP.h"
-#include "vmwgfx_drv.h"
-
-#define VMW_FENCE_WRAP (1 << 31)
-struct vmw_fence_manager {
- int num_fence_objects;
- struct vmw_private *dev_priv;
- spinlock_t lock;
- struct list_head fence_list;
- struct work_struct work;
- u32 user_fence_size;
- u32 fence_size;
- u32 event_fence_action_size;
- bool fifo_down;
- struct list_head cleanup_list;
- uint32_t pending_actions[VMW_ACTION_MAX];
- struct mutex goal_irq_mutex;
- bool goal_irq_on; /* Protected by @goal_irq_mutex */
- bool seqno_valid; /* Protected by @lock, and may not be set to true
- without the @goal_irq_mutex held. */
-};
-
-struct vmw_user_fence {
- struct ttm_base_object base;
- struct vmw_fence_obj fence;
-};
-
-/**
- * struct vmw_event_fence_action - fence action that delivers a drm event.
- *
- * @e: A struct drm_pending_event that controls the event delivery.
- * @action: A struct vmw_fence_action to hook up to a fence.
- * @fence: A referenced pointer to the fence to keep it alive while @action
- * hangs on it.
- * @dev: Pointer to a struct drm_device so we can access the event stuff.
- * @kref: Both @e and @action has destructors, so we need to refcount.
- * @size: Size accounted for this object.
- * @tv_sec: If non-null, the variable pointed to will be assigned
- * current time tv_sec val when the fence signals.
- * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
- * be assigned the current time tv_usec val when the fence signals.
- */
-struct vmw_event_fence_action {
- struct vmw_fence_action action;
- struct list_head fpriv_head;
-
- struct drm_pending_event *event;
- struct vmw_fence_obj *fence;
- struct drm_device *dev;
+#include "vmwgfx_drv.h"
- uint32_t *tv_sec;
- uint32_t *tv_usec;
+struct vmw_fence {
+ struct list_head head;
+ uint32_t sequence;
+ struct timespec submitted;
};
-/**
- * Note on fencing subsystem usage of irqs:
- * Typically the vmw_fences_update function is called
- *
- * a) When a new fence seqno has been submitted by the fifo code.
- * b) On-demand when we have waiters. Sleeping waiters will switch on the
- * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
- * irq is received. When the last fence waiter is gone, that IRQ is masked
- * away.
- *
- * In situations where there are no waiters and we don't submit any new fences,
- * fence objects may not be signaled. This is perfectly OK, since there are
- * no consumers of the signaled data, but that is NOT ok when there are fence
- * actions attached to a fence. The fencing subsystem then makes use of the
- * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
- * which has an action attached, and each time vmw_fences_update is called,
- * the subsystem makes sure the fence goal seqno is updated.
- *
- * The fence goal seqno irq is on as long as there are unsignaled fence
- * objects with actions attached to them.
- */
-
-static void vmw_fence_obj_destroy_locked(struct kref *kref)
+void vmw_fence_queue_init(struct vmw_fence_queue *queue)
{
- struct vmw_fence_obj *fence =
- container_of(kref, struct vmw_fence_obj, kref);
-
- struct vmw_fence_manager *fman = fence->fman;
- unsigned int num_fences;
-
- list_del_init(&fence->head);
- num_fences = --fman->num_fence_objects;
- spin_unlock_irq(&fman->lock);
- if (fence->destroy)
- fence->destroy(fence);
- else
- kfree(fence);
-
- spin_lock_irq(&fman->lock);
+ INIT_LIST_HEAD(&queue->head);
+ queue->lag = ns_to_timespec(0);
+ getrawmonotonic(&queue->lag_time);
+ spin_lock_init(&queue->lock);
}
-
-/**
- * Execute signal actions on fences recently signaled.
- * This is done from a workqueue so we don't have to execute
- * signal actions from atomic context.
- */
-
-static void vmw_fence_work_func(struct work_struct *work)
+void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
{
- struct vmw_fence_manager *fman =
- container_of(work, struct vmw_fence_manager, work);
- struct list_head list;
- struct vmw_fence_action *action, *next_action;
- bool seqno_valid;
-
- do {
- INIT_LIST_HEAD(&list);
- mutex_lock(&fman->goal_irq_mutex);
-
- spin_lock_irq(&fman->lock);
- list_splice_init(&fman->cleanup_list, &list);
- seqno_valid = fman->seqno_valid;
- spin_unlock_irq(&fman->lock);
-
- if (!seqno_valid && fman->goal_irq_on) {
- fman->goal_irq_on = false;
- vmw_goal_waiter_remove(fman->dev_priv);
- }
- mutex_unlock(&fman->goal_irq_mutex);
+ struct vmw_fence *fence, *next;
- if (list_empty(&list))
- return;
-
- /*
- * At this point, only we should be able to manipulate the
- * list heads of the actions we have on the private list.
- * hence fman::lock not held.
- */
-
- list_for_each_entry_safe(action, next_action, &list, head) {
- list_del_init(&action->head);
- if (action->cleanup)
- action->cleanup(action);
- }
- } while (1);
-}
-
-struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
-{
- struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
-
- if (unlikely(fman == NULL))
- return NULL;
-
- fman->dev_priv = dev_priv;
- spin_lock_init(&fman->lock);
- INIT_LIST_HEAD(&fman->fence_list);
- INIT_LIST_HEAD(&fman->cleanup_list);
- INIT_WORK(&fman->work, &vmw_fence_work_func);
- fman->fifo_down = true;
- fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
- fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
- fman->event_fence_action_size =
- ttm_round_pot(sizeof(struct vmw_event_fence_action));
- mutex_init(&fman->goal_irq_mutex);
-
- return fman;
-}
-
-void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
-{
- unsigned long irq_flags;
- bool lists_empty;
-
- (void) cancel_work_sync(&fman->work);
-
- spin_lock_irqsave(&fman->lock, irq_flags);
- lists_empty = list_empty(&fman->fence_list) &&
- list_empty(&fman->cleanup_list);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
-
- BUG_ON(!lists_empty);
- kfree(fman);
-}
-
-static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
- struct vmw_fence_obj *fence,
- u32 seqno,
- uint32_t mask,
- void (*destroy) (struct vmw_fence_obj *fence))
-{
- unsigned long irq_flags;
- unsigned int num_fences;
- int ret = 0;
-
- fence->seqno = seqno;
- INIT_LIST_HEAD(&fence->seq_passed_actions);
- fence->fman = fman;
- fence->signaled = 0;
- fence->signal_mask = mask;
- kref_init(&fence->kref);
- fence->destroy = destroy;
- init_waitqueue_head(&fence->queue);
-
- spin_lock_irqsave(&fman->lock, irq_flags);
- if (unlikely(fman->fifo_down)) {
- ret = -EBUSY;
- goto out_unlock;
- }
- list_add_tail(&fence->head, &fman->fence_list);
- num_fences = ++fman->num_fence_objects;
-
-out_unlock:
- spin_unlock_irqrestore(&fman->lock, irq_flags);
- return ret;
-
-}
-
-struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
-{
- if (unlikely(fence == NULL))
- return NULL;
-
- kref_get(&fence->kref);
- return fence;
-}
-
-/**
- * vmw_fence_obj_unreference
- *
- * Note that this function may not be entered with disabled irqs since
- * it may re-enable them in the destroy function.
- *
- */
-void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
-{
- struct vmw_fence_obj *fence = *fence_p;
- struct vmw_fence_manager *fman;
-
- if (unlikely(fence == NULL))
- return;
-
- fman = fence->fman;
- *fence_p = NULL;
- spin_lock_irq(&fman->lock);
- BUG_ON(atomic_read(&fence->kref.refcount) == 0);
- kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
- spin_unlock_irq(&fman->lock);
-}
-
-void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
- struct list_head *list)
-{
- struct vmw_fence_action *action, *next_action;
-
- list_for_each_entry_safe(action, next_action, list, head) {
- list_del_init(&action->head);
- fman->pending_actions[action->type]--;
- if (action->seq_passed != NULL)
- action->seq_passed(action);
-
- /*
- * Add the cleanup action to the cleanup list so that
- * it will be performed by a worker task.
- */
-
- list_add_tail(&action->head, &fman->cleanup_list);
- }
-}
-
-/**
- * vmw_fence_goal_new_locked - Figure out a new device fence goal
- * seqno if needed.
- *
- * @fman: Pointer to a fence manager.
- * @passed_seqno: The seqno the device currently signals as passed.
- *
- * This function should be called with the fence manager lock held.
- * It is typically called when we have a new passed_seqno, and
- * we might need to update the fence goal. It checks to see whether
- * the current fence goal has already passed, and, in that case,
- * scans through all unsignaled fences to get the next fence object with an
- * action attached, and sets the seqno of that fence as a new fence goal.
- *
- * returns true if the device goal seqno was updated. False otherwise.
- */
-static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
- u32 passed_seqno)
-{
- u32 goal_seqno;
- __le32 __iomem *fifo_mem;
- struct vmw_fence_obj *fence;
-
- if (likely(!fman->seqno_valid))
- return false;
-
- fifo_mem = fman->dev_priv->mmio_virt;
- goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
- if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
- return false;
-
- fman->seqno_valid = false;
- list_for_each_entry(fence, &fman->fence_list, head) {
- if (!list_empty(&fence->seq_passed_actions)) {
- fman->seqno_valid = true;
- iowrite32(fence->seqno,
- fifo_mem + SVGA_FIFO_FENCE_GOAL);
- break;
- }
- }
-
- return true;
-}
-
-
-/**
- * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
- * needed.
- *
- * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
- * considered as a device fence goal.
- *
- * This function should be called with the fence manager lock held.
- * It is typically called when an action has been attached to a fence to
- * check whether the seqno of that fence should be used for a fence
- * goal interrupt. This is typically needed if the current fence goal is
- * invalid, or has a higher seqno than that of the current fence object.
- *
- * returns true if the device goal seqno was updated. False otherwise.
- */
-static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
-{
- u32 goal_seqno;
- __le32 __iomem *fifo_mem;
-
- if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
- return false;
-
- fifo_mem = fence->fman->dev_priv->mmio_virt;
- goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
- if (likely(fence->fman->seqno_valid &&
- goal_seqno - fence->seqno < VMW_FENCE_WRAP))
- return false;
-
- iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
- fence->fman->seqno_valid = true;
-
- return true;
-}
-
-void vmw_fences_update(struct vmw_fence_manager *fman)
-{
- unsigned long flags;
- struct vmw_fence_obj *fence, *next_fence;
- struct list_head action_list;
- bool needs_rerun;
- uint32_t seqno, new_seqno;
- __le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
-
- seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
-rerun:
- spin_lock_irqsave(&fman->lock, flags);
- list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
- if (seqno - fence->seqno < VMW_FENCE_WRAP) {
- list_del_init(&fence->head);
- fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
- INIT_LIST_HEAD(&action_list);
- list_splice_init(&fence->seq_passed_actions,
- &action_list);
- vmw_fences_perform_actions(fman, &action_list);
- wake_up_all(&fence->queue);
- } else
- break;
- }
-
- needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
-
- if (!list_empty(&fman->cleanup_list))
- (void) schedule_work(&fman->work);
- spin_unlock_irqrestore(&fman->lock, flags);
-
- /*
- * Rerun if the fence goal seqno was updated, and the
- * hardware might have raced with that update, so that
- * we missed a fence_goal irq.
- */
-
- if (unlikely(needs_rerun)) {
- new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
- if (new_seqno != seqno) {
- seqno = new_seqno;
- goto rerun;
- }
- }
-}
-
-bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
- uint32_t flags)
-{
- struct vmw_fence_manager *fman = fence->fman;
- unsigned long irq_flags;
- uint32_t signaled;
-
- spin_lock_irqsave(&fman->lock, irq_flags);
- signaled = fence->signaled;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
-
- flags &= fence->signal_mask;
- if ((signaled & flags) == flags)
- return 1;
-
- if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
- vmw_fences_update(fman);
-
- spin_lock_irqsave(&fman->lock, irq_flags);
- signaled = fence->signaled;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
-
- return ((signaled & flags) == flags);
-}
-
-int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
- uint32_t flags, bool lazy,
- bool interruptible, unsigned long timeout)
-{
- struct vmw_private *dev_priv = fence->fman->dev_priv;
- long ret;
-
- if (likely(vmw_fence_obj_signaled(fence, flags)))
- return 0;
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
- vmw_seqno_waiter_add(dev_priv);
-
- if (interruptible)
- ret = wait_event_interruptible_timeout
- (fence->queue,
- vmw_fence_obj_signaled(fence, flags),
- timeout);
- else
- ret = wait_event_timeout
- (fence->queue,
- vmw_fence_obj_signaled(fence, flags),
- timeout);
-
- vmw_seqno_waiter_remove(dev_priv);
-
- if (unlikely(ret == 0))
- ret = -EBUSY;
- else if (likely(ret > 0))
- ret = 0;
-
- return ret;
-}
-
-void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
-{
- struct vmw_private *dev_priv = fence->fman->dev_priv;
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-}
-
-static void vmw_fence_destroy(struct vmw_fence_obj *fence)
-{
- struct vmw_fence_manager *fman = fence->fman;
-
- kfree(fence);
- /*
- * Free kernel space accounting.
- */
- ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
- fman->fence_size);
-}
-
-int vmw_fence_create(struct vmw_fence_manager *fman,
- uint32_t seqno,
- uint32_t mask,
- struct vmw_fence_obj **p_fence)
-{
- struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
- struct vmw_fence_obj *fence;
- int ret;
-
- ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
- false, false);
- if (unlikely(ret != 0))
- return ret;
-
- fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (unlikely(fence == NULL)) {
- ret = -ENOMEM;
- goto out_no_object;
+ spin_lock(&queue->lock);
+ list_for_each_entry_safe(fence, next, &queue->head, head) {
+ kfree(fence);
}
-
- ret = vmw_fence_obj_init(fman, fence, seqno, mask,
- vmw_fence_destroy);
- if (unlikely(ret != 0))
- goto out_err_init;
-
- *p_fence = fence;
- return 0;
-
-out_err_init:
- kfree(fence);
-out_no_object:
- ttm_mem_global_free(mem_glob, fman->fence_size);
- return ret;
+ spin_unlock(&queue->lock);
}
-
-static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
+int vmw_fence_push(struct vmw_fence_queue *queue,
+ uint32_t sequence)
{
- struct vmw_user_fence *ufence =
- container_of(fence, struct vmw_user_fence, fence);
- struct vmw_fence_manager *fman = fence->fman;
+ struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
- kfree(ufence);
- /*
- * Free kernel space accounting.
- */
- ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
- fman->user_fence_size);
-}
-
-static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct vmw_user_fence *ufence =
- container_of(base, struct vmw_user_fence, base);
- struct vmw_fence_obj *fence = &ufence->fence;
-
- *p_base = NULL;
- vmw_fence_obj_unreference(&fence);
-}
-
-int vmw_user_fence_create(struct drm_file *file_priv,
- struct vmw_fence_manager *fman,
- uint32_t seqno,
- uint32_t mask,
- struct vmw_fence_obj **p_fence,
- uint32_t *p_handle)
-{
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_user_fence *ufence;
- struct vmw_fence_obj *tmp;
- struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
- int ret;
-
- /*
- * Kernel memory space accounting, since this object may
- * be created by a user-space request.
- */
-
- ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
- false, false);
- if (unlikely(ret != 0))
- return ret;
-
- ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
- if (unlikely(ufence == NULL)) {
- ret = -ENOMEM;
- goto out_no_object;
- }
-
- ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
- mask, vmw_user_fence_destroy);
- if (unlikely(ret != 0)) {
- kfree(ufence);
- goto out_no_object;
- }
-
- /*
- * The base object holds a reference which is freed in
- * vmw_user_fence_base_release.
- */
- tmp = vmw_fence_obj_reference(&ufence->fence);
- ret = ttm_base_object_init(tfile, &ufence->base, false,
- VMW_RES_FENCE,
- &vmw_user_fence_base_release, NULL);
-
-
- if (unlikely(ret != 0)) {
- /*
- * Free the base object's reference
- */
- vmw_fence_obj_unreference(&tmp);
- goto out_err;
- }
+ if (unlikely(!fence))
+ return -ENOMEM;
- *p_fence = &ufence->fence;
- *p_handle = ufence->base.hash.key;
+ fence->sequence = sequence;
+ getrawmonotonic(&fence->submitted);
+ spin_lock(&queue->lock);
+ list_add_tail(&fence->head, &queue->head);
+ spin_unlock(&queue->lock);
return 0;
-out_err:
- tmp = &ufence->fence;
- vmw_fence_obj_unreference(&tmp);
-out_no_object:
- ttm_mem_global_free(mem_glob, fman->user_fence_size);
- return ret;
-}
-
-
-/**
- * vmw_fence_fifo_down - signal all unsignaled fence objects.
- */
-
-void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
-{
- unsigned long irq_flags;
- struct list_head action_list;
- int ret;
-
- /*
- * The list may be altered while we traverse it, so always
- * restart when we've released the fman->lock.
- */
-
- spin_lock_irqsave(&fman->lock, irq_flags);
- fman->fifo_down = true;
- while (!list_empty(&fman->fence_list)) {
- struct vmw_fence_obj *fence =
- list_entry(fman->fence_list.prev, struct vmw_fence_obj,
- head);
- kref_get(&fence->kref);
- spin_unlock_irq(&fman->lock);
-
- ret = vmw_fence_obj_wait(fence, fence->signal_mask,
- false, false,
- VMW_FENCE_WAIT_TIMEOUT);
-
- if (unlikely(ret != 0)) {
- list_del_init(&fence->head);
- fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
- INIT_LIST_HEAD(&action_list);
- list_splice_init(&fence->seq_passed_actions,
- &action_list);
- vmw_fences_perform_actions(fman, &action_list);
- wake_up_all(&fence->queue);
- }
-
- spin_lock_irq(&fman->lock);
-
- BUG_ON(!list_empty(&fence->head));
- kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
- }
- spin_unlock_irqrestore(&fman->lock, irq_flags);
-}
-
-void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&fman->lock, irq_flags);
- fman->fifo_down = false;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
}
-
-int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int vmw_fence_pull(struct vmw_fence_queue *queue,
+ uint32_t signaled_sequence)
{
- struct drm_vmw_fence_wait_arg *arg =
- (struct drm_vmw_fence_wait_arg *)data;
- unsigned long timeout;
- struct ttm_base_object *base;
- struct vmw_fence_obj *fence;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- int ret;
- uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
+ struct vmw_fence *fence, *next;
+ struct timespec now;
+ bool updated = false;
- /*
- * 64-bit division not present on 32-bit systems, so do an
- * approximation. (Divide by 1000000).
- */
+ spin_lock(&queue->lock);
+ getrawmonotonic(&now);
- wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
- (wait_timeout >> 26);
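-	/*
-	 * Worked check of the shift approximation: 2^-20 + 2^-24 - 2^-26
-	 * = (64 + 4 - 1) / 2^26 = 67 / 67108864 ~= 1 / 1001624, i.e. within
-	 * roughly 0.2% of a true divide by 10^6.
-	 */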
-
- if (!arg->cookie_valid) {
- arg->cookie_valid = 1;
- arg->kernel_cookie = jiffies + wait_timeout;
- }
-
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Wait invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
-
- fence = &(container_of(base, struct vmw_user_fence, base)->fence);
-
- timeout = jiffies;
- if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
- ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
- 0 : -EBUSY);
- goto out;
+ if (list_empty(&queue->head)) {
+ queue->lag = ns_to_timespec(0);
+ queue->lag_time = now;
+ updated = true;
+ goto out_unlock;
}
- timeout = (unsigned long)arg->kernel_cookie - timeout;
-
- ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);
-
-out:
- ttm_base_object_unref(&base);
-
- /*
- * Optionally unref the fence object.
- */
-
- if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
- return ttm_ref_object_base_unref(tfile, arg->handle,
- TTM_REF_USAGE);
- return ret;
-}
-
-int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_fence_signaled_arg *arg =
- (struct drm_vmw_fence_signaled_arg *) data;
- struct ttm_base_object *base;
- struct vmw_fence_obj *fence;
- struct vmw_fence_manager *fman;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_private *dev_priv = vmw_priv(dev);
+ list_for_each_entry_safe(fence, next, &queue->head, head) {
+ if (signaled_sequence - fence->sequence > (1 << 30))
+ continue;
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Fence signaled invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
+ queue->lag = timespec_sub(now, fence->submitted);
+ queue->lag_time = now;
+ updated = true;
+ list_del(&fence->head);
+ kfree(fence);
}
- fence = &(container_of(base, struct vmw_user_fence, base)->fence);
- fman = fence->fman;
-
- arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
- spin_lock_irq(&fman->lock);
-
- arg->signaled_flags = fence->signaled;
- arg->passed_seqno = dev_priv->last_read_seqno;
- spin_unlock_irq(&fman->lock);
-
- ttm_base_object_unref(&base);
-
- return 0;
-}
-
-
-int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_fence_arg *arg =
- (struct drm_vmw_fence_arg *) data;
-
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->handle,
- TTM_REF_USAGE);
-}
-
-/**
- * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
- *
- * @fman: Pointer to a struct vmw_fence_manager
- * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
- * with pointers to a struct drm_file object about to be closed.
- *
- * This function removes all pending fence events with references to a
- * specific struct drm_file object about to be closed. The caller is required
- * to pass a list of all struct vmw_event_fence_action objects with such
- * events attached. This function is typically called before the
- * struct drm_file object's event management is taken down.
- */
-void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
- struct list_head *event_list)
-{
- struct vmw_event_fence_action *eaction;
- struct drm_pending_event *event;
- unsigned long irq_flags;
-
- while (1) {
- spin_lock_irqsave(&fman->lock, irq_flags);
- if (list_empty(event_list))
- goto out_unlock;
- eaction = list_first_entry(event_list,
- struct vmw_event_fence_action,
- fpriv_head);
- list_del_init(&eaction->fpriv_head);
- event = eaction->event;
- eaction->event = NULL;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
- event->destroy(event);
- }
out_unlock:
- spin_unlock_irqrestore(&fman->lock, irq_flags);
-}
-
-
-/**
- * vmw_event_fence_action_seq_passed
- *
- * @action: The struct vmw_fence_action embedded in a struct
- * vmw_event_fence_action.
- *
- * This function is called when the seqno of the fence where @action is
- * attached has passed. It queues the event on the submitter's event list.
- * This function is always called from atomic context, and may be called
- * from irq context.
- */
-static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
-{
- struct vmw_event_fence_action *eaction =
- container_of(action, struct vmw_event_fence_action, action);
- struct drm_device *dev = eaction->dev;
- struct drm_pending_event *event = eaction->event;
- struct drm_file *file_priv;
- unsigned long irq_flags;
-
- if (unlikely(event == NULL))
- return;
-
- file_priv = event->file_priv;
- spin_lock_irqsave(&dev->event_lock, irq_flags);
-
- if (likely(eaction->tv_sec != NULL)) {
- struct timeval tv;
-
- do_gettimeofday(&tv);
- *eaction->tv_sec = tv.tv_sec;
- *eaction->tv_usec = tv.tv_usec;
- }
-
- list_del_init(&eaction->fpriv_head);
- list_add_tail(&eaction->event->link, &file_priv->event_list);
- eaction->event = NULL;
- wake_up_all(&file_priv->event_wait);
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-}
-
-/**
- * vmw_event_fence_action_cleanup
- *
- * @action: The struct vmw_fence_action embedded in a struct
- * vmw_event_fence_action.
- *
- * This function is the struct vmw_fence_action destructor. It's typically
- * called from a workqueue.
- */
-static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
-{
- struct vmw_event_fence_action *eaction =
- container_of(action, struct vmw_event_fence_action, action);
- struct vmw_fence_manager *fman = eaction->fence->fman;
- unsigned long irq_flags;
+ spin_unlock(&queue->lock);
- spin_lock_irqsave(&fman->lock, irq_flags);
- list_del(&eaction->fpriv_head);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
-
- vmw_fence_obj_unreference(&eaction->fence);
- kfree(eaction);
+ return (updated) ? 0 : -EBUSY;
}
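/*
 * The "(1 << 30)" test above is a wrap-safe ordering check on 32-bit
 * sequence numbers. A minimal sketch of the same predicate as a
 * hypothetical helper (not part of this patch):
 */
static inline bool vmw_seq_signaled(uint32_t signaled, uint32_t seq)
{
	/* True when seq is at or before signaled, modulo u32 wrap. */
	return (signaled - seq) <= (1U << 30);
}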
-
-/**
- * vmw_fence_obj_add_action - Add an action to a fence object.
- *
- * @fence - The fence object.
- * @action - The action to add.
- *
- * Note that the action callbacks may be executed before this function
- * returns.
- */
-void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
- struct vmw_fence_action *action)
+static struct timespec vmw_timespec_add(struct timespec t1,
+ struct timespec t2)
{
- struct vmw_fence_manager *fman = fence->fman;
- unsigned long irq_flags;
- bool run_update = false;
-
- mutex_lock(&fman->goal_irq_mutex);
- spin_lock_irqsave(&fman->lock, irq_flags);
-
- fman->pending_actions[action->type]++;
- if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
- struct list_head action_list;
-
- INIT_LIST_HEAD(&action_list);
- list_add_tail(&action->head, &action_list);
- vmw_fences_perform_actions(fman, &action_list);
- } else {
- list_add_tail(&action->head, &fence->seq_passed_actions);
-
- /*
- * This function may set fman::seqno_valid, so it must
- * be run with the goal_irq_mutex held.
- */
- run_update = vmw_fence_goal_check_locked(fence);
- }
-
- spin_unlock_irqrestore(&fman->lock, irq_flags);
-
- if (run_update) {
- if (!fman->goal_irq_on) {
- fman->goal_irq_on = true;
- vmw_goal_waiter_add(fman->dev_priv);
- }
- vmw_fences_update(fman);
+ t1.tv_sec += t2.tv_sec;
+ t1.tv_nsec += t2.tv_nsec;
+ if (t1.tv_nsec >= 1000000000L) {
+ t1.tv_sec += 1;
+ t1.tv_nsec -= 1000000000L;
}
- mutex_unlock(&fman->goal_irq_mutex);
+ return t1;
}
-/**
- * vmw_event_fence_action_create - Post an event for sending when a fence
- * object seqno has passed.
- *
- * @file_priv: The file connection on which the event should be posted.
- * @fence: The fence object on which to post the event.
- * @event: Event to be posted. This event should've been alloced
- * using k[mz]alloc, and should've been completely initialized.
- * @interruptible: Interruptible waits if possible.
- *
- * As a side effect, the object pointed to by @event may have been
- * freed when this function returns. If this function returns with
- * an error code, the caller needs to free that object.
- */
-
-int vmw_event_fence_action_queue(struct drm_file *file_priv,
- struct vmw_fence_obj *fence,
- struct drm_pending_event *event,
- uint32_t *tv_sec,
- uint32_t *tv_usec,
- bool interruptible)
+static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
{
- struct vmw_event_fence_action *eaction;
- struct vmw_fence_manager *fman = fence->fman;
- struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- unsigned long irq_flags;
-
- eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
- if (unlikely(eaction == NULL))
- return -ENOMEM;
-
- eaction->event = event;
-
- eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
- eaction->action.cleanup = vmw_event_fence_action_cleanup;
- eaction->action.type = VMW_ACTION_EVENT;
+ struct timespec now;
- eaction->fence = vmw_fence_obj_reference(fence);
- eaction->dev = fman->dev_priv->dev;
- eaction->tv_sec = tv_sec;
- eaction->tv_usec = tv_usec;
-
- spin_lock_irqsave(&fman->lock, irq_flags);
- list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
-
- vmw_fence_obj_add_action(fence, &eaction->action);
-
- return 0;
+ spin_lock(&queue->lock);
+ getrawmonotonic(&now);
+ queue->lag = vmw_timespec_add(queue->lag,
+ timespec_sub(now, queue->lag_time));
+ queue->lag_time = now;
+ spin_unlock(&queue->lock);
+ return queue->lag;
}
-struct vmw_event_fence_pending {
- struct drm_pending_event base;
- struct drm_vmw_event_fence event;
-};
-int vmw_event_fence_action_create(struct drm_file *file_priv,
- struct vmw_fence_obj *fence,
- uint32_t flags,
- uint64_t user_data,
- bool interruptible)
+static bool vmw_lag_lt(struct vmw_fence_queue *queue,
+ uint32_t us)
{
- struct vmw_event_fence_pending *event;
- struct drm_device *dev = fence->fman->dev_priv->dev;
- unsigned long irq_flags;
- int ret;
-
- spin_lock_irqsave(&dev->event_lock, irq_flags);
-
- ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
- if (likely(ret == 0))
- file_priv->event_space -= sizeof(event->event);
-
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate event space for this file.\n");
- goto out_no_space;
- }
-
+ struct timespec lag, cond;
- event = kzalloc(sizeof(event->event), GFP_KERNEL);
- if (unlikely(event == NULL)) {
- DRM_ERROR("Failed to allocate an event.\n");
- ret = -ENOMEM;
- goto out_no_event;
- }
-
- event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
- event->event.base.length = sizeof(*event);
- event->event.user_data = user_data;
-
- event->base.event = &event->event.base;
- event->base.file_priv = file_priv;
- event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
-
-
- if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
- ret = vmw_event_fence_action_queue(file_priv, fence,
- &event->base,
- &event->event.tv_sec,
- &event->event.tv_usec,
- interruptible);
- else
- ret = vmw_event_fence_action_queue(file_priv, fence,
- &event->base,
- NULL,
- NULL,
- interruptible);
- if (ret != 0)
- goto out_no_queue;
-
-out_no_queue:
- event->base.destroy(&event->base);
-out_no_event:
- spin_lock_irqsave(&dev->event_lock, irq_flags);
- file_priv->event_space += sizeof(*event);
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-out_no_space:
- return ret;
+ cond = ns_to_timespec((s64) us * 1000);
+ lag = vmw_fifo_lag(queue);
+ return (timespec_compare(&lag, &cond) < 1);
}
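/*
 * timespec_compare() returns a memcmp-style <0/0/>0 result, so the
 * "< 1" above makes vmw_lag_lt() report true when lag <= us; a queue
 * sitting exactly at the threshold counts as caught up.
 */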
-int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int vmw_wait_lag(struct vmw_private *dev_priv,
+ struct vmw_fence_queue *queue, uint32_t us)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_fence_event_arg *arg =
- (struct drm_vmw_fence_event_arg *) data;
- struct vmw_fence_obj *fence = NULL;
- struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- struct drm_vmw_fence_rep __user *user_fence_rep =
- (struct drm_vmw_fence_rep __user *)(unsigned long)
- arg->fence_rep;
- uint32_t handle;
+ struct vmw_fence *fence;
+ uint32_t sequence;
int ret;
- /*
- * Look up an existing fence object,
- * and if user-space wants a new reference,
- * add one.
- */
- if (arg->handle) {
- struct ttm_base_object *base =
- ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
-
- if (unlikely(base == NULL)) {
- DRM_ERROR("Fence event invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
+ while (!vmw_lag_lt(queue, us)) {
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->head))
+ sequence = atomic_read(&dev_priv->fence_seq);
+ else {
+ fence = list_first_entry(&queue->head,
+ struct vmw_fence, head);
+ sequence = fence->sequence;
}
- fence = &(container_of(base, struct vmw_user_fence,
- base)->fence);
- (void) vmw_fence_obj_reference(fence);
+ spin_unlock(&queue->lock);
- if (user_fence_rep != NULL) {
- bool existed;
+ ret = vmw_wait_fence(dev_priv, false, sequence, true,
+ 3*HZ);
- ret = ttm_ref_object_add(vmw_fp->tfile, base,
- TTM_REF_USAGE, &existed);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to reference a fence "
- "object.\n");
- goto out_no_ref_obj;
- }
- handle = base->hash.key;
- }
- ttm_base_object_unref(&base);
- }
-
- /*
- * Create a new fence object.
- */
- if (!fence) {
- ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
- &fence,
- (user_fence_rep) ?
- &handle : NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Fence event failed to create fence.\n");
+ if (unlikely(ret != 0))
return ret;
- }
- }
-
- BUG_ON(fence == NULL);
- if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
- ret = vmw_event_fence_action_create(file_priv, fence,
- arg->flags,
- arg->user_data,
- true);
- else
- ret = vmw_event_fence_action_create(file_priv, fence,
- arg->flags,
- arg->user_data,
- true);
-
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to attach event to fence.\n");
- goto out_no_create;
+ (void) vmw_fence_pull(queue, sequence);
}
-
- vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
- handle);
- vmw_fence_obj_unreference(&fence);
return 0;
-out_no_create:
- if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
-out_no_ref_obj:
- vmw_fence_obj_unreference(&fence);
- return ret;
}
+
+
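/*
 * How the lag bookkeeping fits together (informal summary):
 * vmw_fence_push() timestamps every submitted fence; vmw_fence_pull()
 * resets queue->lag to (now - submitted) of the newest fence that has
 * signaled; vmw_fifo_lag() additionally grows the lag by the wall time
 * elapsed since it was last sampled. vmw_wait_lag() therefore keeps
 * waiting on the oldest outstanding fence until the measured lag drops
 * below the caller's threshold.
 */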
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
deleted file mode 100644
index faf2e78..0000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef _VMWGFX_FENCE_H_
-
-#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
-
-struct vmw_private;
-
-struct vmw_fence_manager;
-
-/**
- *
- *
- */
-enum vmw_action_type {
- VMW_ACTION_EVENT = 0,
- VMW_ACTION_MAX
-};
-
-struct vmw_fence_action {
- struct list_head head;
- enum vmw_action_type type;
- void (*seq_passed) (struct vmw_fence_action *action);
- void (*cleanup) (struct vmw_fence_action *action);
-};
-
-struct vmw_fence_obj {
- struct kref kref;
- u32 seqno;
-
- struct vmw_fence_manager *fman;
- struct list_head head;
- uint32_t signaled;
- uint32_t signal_mask;
- struct list_head seq_passed_actions;
- void (*destroy)(struct vmw_fence_obj *fence);
- wait_queue_head_t queue;
-};
-
-extern struct vmw_fence_manager *
-vmw_fence_manager_init(struct vmw_private *dev_priv);
-
-extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);
-
-extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);
-
-extern struct vmw_fence_obj *
-vmw_fence_obj_reference(struct vmw_fence_obj *fence);
-
-extern void vmw_fences_update(struct vmw_fence_manager *fman);
-
-extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
- uint32_t flags);
-
-extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags,
- bool lazy,
- bool interruptible, unsigned long timeout);
-
-extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
-
-extern int vmw_fence_create(struct vmw_fence_manager *fman,
- uint32_t seqno,
- uint32_t mask,
- struct vmw_fence_obj **p_fence);
-
-extern int vmw_user_fence_create(struct drm_file *file_priv,
- struct vmw_fence_manager *fman,
- uint32_t sequence,
- uint32_t mask,
- struct vmw_fence_obj **p_fence,
- uint32_t *p_handle);
-
-extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
-
-extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
-
-extern int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
- struct list_head *event_list);
-extern int vmw_event_fence_action_queue(struct drm_file *filee_priv,
- struct vmw_fence_obj *fence,
- struct drm_pending_event *event,
- uint32_t *tv_sec,
- uint32_t *tv_usec,
- bool interruptible);
-#endif /* _VMWGFX_FENCE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index a0c2f12..635c0ff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -33,7 +33,6 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
- const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
@@ -42,20 +41,11 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
- hwversion = ioread32(fifo_mem +
- ((fifo->capabilities &
- SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
- SVGA_FIFO_3D_HWVERSION_REVISED :
- SVGA_FIFO_3D_HWVERSION));
-
+ hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
if (hwversion == 0)
return false;
- if (hwversion < SVGA3D_HWVERSION_WS8_B1)
- return false;
-
- /* Non-Screen Object path does not support surfaces */
- if (!dev_priv->sou_priv)
+ if (hwversion < SVGA3D_HWVERSION_WS65_B1)
return false;
return true;
@@ -82,12 +72,22 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
uint32_t max;
uint32_t min;
uint32_t dummy;
+ int ret;
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
return -ENOMEM;
+ fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
+ fifo->last_data_size = 0;
+ fifo->last_buffer_add = false;
+ fifo->last_buffer = vmalloc(fifo->last_buffer_size);
+ if (unlikely(fifo->last_buffer == NULL)) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
fifo->dynamic_buffer = NULL;
fifo->reserved_size = 0;
fifo->using_bounce_buffer = false;
@@ -137,10 +137,14 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
(unsigned int) min,
(unsigned int) fifo->capabilities);
- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
- iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
- vmw_marker_queue_init(&fifo->marker_queue);
+ atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
+ iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
+ vmw_fence_queue_init(&fifo->fence_queue);
return vmw_fifo_send_fence(dev_priv, &dummy);
+out_err:
+ vfree(fifo->static_buffer);
+ fifo->static_buffer = NULL;
+ return ret;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
@@ -166,7 +170,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
- dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
dev_priv->config_done_state);
@@ -176,7 +180,12 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
dev_priv->traces_state);
mutex_unlock(&dev_priv->hw_mutex);
- vmw_marker_queue_takedown(&fifo->marker_queue);
+ vmw_fence_queue_takedown(&fifo->fence_queue);
+
+ if (likely(fifo->last_buffer != NULL)) {
+ vfree(fifo->last_buffer);
+ fifo->last_buffer = NULL;
+ }
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
@@ -253,8 +262,9 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
- vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) |
+ SVGA_IRQFLAG_FIFO_PROGRESS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
@@ -276,8 +286,9 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
mutex_lock(&dev_priv->hw_mutex);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
- dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
- vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) &
+ ~SVGA_IRQFLAG_FIFO_PROGRESS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
@@ -285,16 +296,6 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
return ret;
}
-/**
- * Reserve @bytes number of bytes in the fifo.
- *
- * This function will return NULL (error) on two conditions:
- * If it timeouts waiting for fifo space, or if @bytes is larger than the
- * available fifo space.
- *
- * Returns:
- * Pointer to the fifo, or null on error (possible hardware hang).
- */
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
@@ -465,7 +466,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
mutex_unlock(&fifo_state->fifo_mutex);
}
-int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
@@ -475,16 +476,16 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
- *seqno = atomic_read(&dev_priv->marker_seq);
+ *sequence = atomic_read(&dev_priv->fence_seq);
ret = -ENOMEM;
- (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
+ (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
false, 3*HZ);
goto out_err;
}
do {
- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
- } while (*seqno == 0);
+ *sequence = atomic_add_return(1, &dev_priv->fence_seq);
+ } while (*sequence == 0);
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
@@ -501,68 +502,61 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
cmd_fence = (struct svga_fifo_cmd_fence *)
((unsigned long)fm + sizeof(__le32));
- iowrite32(*seqno, &cmd_fence->fence);
+ iowrite32(*sequence, &cmd_fence->fence);
+ fifo_state->last_buffer_add = true;
vmw_fifo_commit(dev_priv, bytes);
- (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
- vmw_update_seqno(dev_priv, fifo_state);
+ fifo_state->last_buffer_add = false;
+ (void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
+ vmw_update_sequence(dev_priv, fifo_state);
out_err:
return ret;
}
/**
- * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
- *
- * @dev_priv: The device private structure.
- * @cid: The hardware context id used for the query.
- *
- * This function is used to emit a dummy occlusion query with
- * no primitives rendered between query begin and query end.
- * It's used to provide a query barrier, in order to know that when
- * this query is finished, all preceding queries are also finished.
- *
- * A Query results structure should have been initialized at the start
- * of the dev_priv->dummy_query_bo buffer object. And that buffer object
- * must also be either reserved or pinned when this function is called.
- *
- * Returns -ENOMEM on failure to reserve fifo space.
+ * Map the first page of the FIFO read-only to user-space.
*/
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
- uint32_t cid)
-{
- /*
- * A query wait without a preceding query end will
- * actually finish all queries for this cid
- * without writing to the query result structure.
- */
- struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForQuery body;
- } *cmd;
+static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ int ret;
+ unsigned long address = (unsigned long)vmf->virtual_address;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (address != vma->vm_start)
+ return VM_FAULT_SIGBUS;
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of fifo space for dummy query.\n");
- return -ENOMEM;
- }
+ ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
+ if (likely(ret == -EBUSY || ret == 0))
+ return VM_FAULT_NOPAGE;
+ else if (ret == -ENOMEM)
+ return VM_FAULT_OOM;
- cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.cid = cid;
- cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
-
- if (bo->mem.mem_type == TTM_PL_VRAM) {
- cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
- cmd->body.guestResult.offset = bo->offset;
- } else {
- cmd->body.guestResult.gmrId = bo->mem.start;
- cmd->body.guestResult.offset = 0;
- }
+ return VM_FAULT_SIGBUS;
+}
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+static struct vm_operations_struct vmw_fifo_vm_ops = {
+ .fault = vmw_fifo_vm_fault,
+ .open = NULL,
+ .close = NULL
+};
+int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct vmw_private *dev_priv;
+
+ file_priv = filp->private_data;
+ dev_priv = vmw_priv(file_priv->minor->dev);
+
+ if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
+ (vma->vm_end - vma->vm_start) != PAGE_SIZE)
+ return -EINVAL;
+
+ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
+ vma->vm_page_prot);
+ vma->vm_ops = &vmw_fifo_vm_ops;
return 0;
}
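
The mmap path restored above exposes exactly one read-only FIFO page, with its offset advertised through the new DRM_VMW_PARAM_FIFO_OFFSET parameter (see vmwgfx_ioctl.c further down). A minimal user-space sketch, assuming the drm_vmw_getparam_arg layout from vmwgfx_drm.h, libdrm's drmCommandWriteRead(), and /dev/dri/card0 as the vmwgfx node; error handling is abbreviated:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed vmwgfx node */
	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_FIFO_OFFSET };

	if (fd < 0 || drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
					  &arg, sizeof(arg)) != 0)
		return 1;

	/* Exactly one page, read-only: vmw_fifo_mmap() rejects anything else. */
	void *fifo = mmap(NULL, psz, PROT_READ, MAP_SHARED, fd,
			  (off_t)arg.value);
	if (fifo == MAP_FAILED)
		return 1;

	/* Index 0 of the FIFO register page is SVGA_FIFO_MIN. */
	printf("SVGA_FIFO_MIN = %u\n", ((volatile uint32_t *)fifo)[0]);

	munmap(fifo, psz);
	close(fd);
	return 0;
}

The single-page restriction matches the fault handler: any access past vma->vm_start raises SIGBUS.
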
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index f4e7763..de0c594 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,77 +29,6 @@
#include "drmP.h"
#include "ttm/ttm_bo_driver.h"
-#define VMW_PPN_SIZE sizeof(unsigned long)
-
-static int vmw_gmr2_bind(struct vmw_private *dev_priv,
- struct page *pages[],
- unsigned long num_pages,
- int gmr_id)
-{
- SVGAFifoCmdDefineGMR2 define_cmd;
- SVGAFifoCmdRemapGMR2 remap_cmd;
- uint32_t define_size = sizeof(define_cmd) + 4;
- uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
- uint32_t *cmd;
- uint32_t *cmd_orig;
- uint32_t i;
-
- cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
- if (unlikely(cmd == NULL))
- return -ENOMEM;
-
- define_cmd.gmrId = gmr_id;
- define_cmd.numPages = num_pages;
-
- remap_cmd.gmrId = gmr_id;
- remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
- SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
- remap_cmd.offsetPages = 0;
- remap_cmd.numPages = num_pages;
-
- *cmd++ = SVGA_CMD_DEFINE_GMR2;
- memcpy(cmd, &define_cmd, sizeof(define_cmd));
- cmd += sizeof(define_cmd) / sizeof(uint32);
-
- *cmd++ = SVGA_CMD_REMAP_GMR2;
- memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
- cmd += sizeof(remap_cmd) / sizeof(uint32);
-
- for (i = 0; i < num_pages; ++i) {
- if (VMW_PPN_SIZE > 4)
- *cmd = page_to_pfn(*pages++);
- else
- *((uint64_t *)cmd) = page_to_pfn(*pages++);
-
- cmd += VMW_PPN_SIZE / sizeof(*cmd);
- }
-
- vmw_fifo_commit(dev_priv, define_size + remap_size);
-
- return 0;
-}
-
-static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
- int gmr_id)
-{
- SVGAFifoCmdDefineGMR2 define_cmd;
- uint32_t define_size = sizeof(define_cmd) + 4;
- uint32_t *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, define_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("GMR2 unbind failed.\n");
- return;
- }
- define_cmd.gmrId = gmr_id;
- define_cmd.numPages = 0;
-
- *cmd++ = SVGA_CMD_DEFINE_GMR2;
- memcpy(cmd, &define_cmd, sizeof(define_cmd));
-
- vmw_fifo_commit(dev_priv, define_size);
-}
-
/**
* FIXME: Adjust to the ttm lowmem / highmem storage to minimize
* the number of used descriptors.
@@ -241,9 +170,6 @@ int vmw_gmr_bind(struct vmw_private *dev_priv,
struct list_head desc_pages;
int ret;
- if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
- return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);
-
if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
return -EINVAL;
@@ -266,11 +192,6 @@ int vmw_gmr_bind(struct vmw_private *dev_priv,
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
- if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
- vmw_gmr2_unbind(dev_priv, gmr_id);
- return;
- }
-
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
wmb();
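
With the GMR2 fast path dropped, binding falls back to the descriptor-chain code guarded by the FIXME kept above. A hedged sketch of the coalescing that FIXME is about, assuming the SVGAGuestMemDescriptor layout from svga_reg.h; the helper name is illustrative, kernel context (page_to_pfn(), struct page) is assumed, and chaining across descriptor pages is omitted:

static unsigned long example_build_descriptors(SVGAGuestMemDescriptor *desc,
					       struct page **pages,
					       unsigned long num_pages)
{
	unsigned long i, used = 0;

	for (i = 0; i < num_pages; ++i) {
		unsigned long pfn = page_to_pfn(pages[i]);

		if (used > 0 &&
		    desc[used - 1].ppn + desc[used - 1].numPages == pfn) {
			desc[used - 1].numPages++;	/* extend the run */
		} else {
			desc[used].ppn = pfn;		/* start a new run */
			desc[used].numPages = 1;
			used++;
		}
	}
	return used;	/* number of descriptors actually emitted */
}

Folding contiguous pfn runs is why lowmem/highmem placement matters here: the fewer discontiguities, the shorter the chain the device walks.
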
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 5f71715..ac6e0d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -40,8 +40,6 @@ struct vmwgfx_gmrid_man {
spinlock_t lock;
struct ida gmr_ida;
uint32_t max_gmr_ids;
- uint32_t max_gmr_pages;
- uint32_t used_gmr_pages;
};
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
@@ -51,50 +49,33 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
{
struct vmwgfx_gmrid_man *gman =
(struct vmwgfx_gmrid_man *)man->priv;
- int ret = 0;
+ int ret;
int id;
mem->mm_node = NULL;
- spin_lock(&gman->lock);
-
- if (gman->max_gmr_pages > 0) {
- gman->used_gmr_pages += bo->num_pages;
- if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
- goto out_err_locked;
- }
-
do {
- spin_unlock(&gman->lock);
- if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
- ret = -ENOMEM;
- goto out_err;
- }
- spin_lock(&gman->lock);
+ if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
+ return -ENOMEM;
+ spin_lock(&gman->lock);
ret = ida_get_new(&gman->gmr_ida, &id);
+
if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
ida_remove(&gman->gmr_ida, id);
- ret = 0;
- goto out_err_locked;
+ spin_unlock(&gman->lock);
+ return 0;
}
+
+ spin_unlock(&gman->lock);
+
} while (ret == -EAGAIN);
if (likely(ret == 0)) {
mem->mm_node = gman;
mem->start = id;
- mem->num_pages = bo->num_pages;
- } else
- goto out_err_locked;
-
- spin_unlock(&gman->lock);
- return 0;
+ }
-out_err:
- spin_lock(&gman->lock);
-out_err_locked:
- gman->used_gmr_pages -= bo->num_pages;
- spin_unlock(&gman->lock);
return ret;
}
@@ -107,7 +88,6 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
if (mem->mm_node) {
spin_lock(&gman->lock);
ida_remove(&gman->gmr_ida, mem->start);
- gman->used_gmr_pages -= mem->num_pages;
spin_unlock(&gman->lock);
mem->mm_node = NULL;
}
@@ -116,8 +96,6 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
- struct vmw_private *dev_priv =
- container_of(man->bdev, struct vmw_private, bdev);
struct vmwgfx_gmrid_man *gman =
kzalloc(sizeof(*gman), GFP_KERNEL);
@@ -125,8 +103,6 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
return -ENOMEM;
spin_lock_init(&gman->lock);
- gman->max_gmr_pages = dev_priv->max_gmr_pages;
- gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);
gman->max_gmr_ids = p_size;
man->priv = (void *) gman;
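
The loop restored in vmw_gmrid_man_get_node() is the classic pre-ida_simple_get() idiom: preload outside the spinlock, allocate inside it, retry on -EAGAIN. A generic kernel-context sketch of the same pattern (the function name is illustrative; unlike the code above, which reports an over-limit id as success with mem->mm_node left NULL, this version returns -ENOSPC):

static int example_alloc_id(struct ida *ida, spinlock_t *lock,
			    int max_ids, int *id_out)
{
	int ret, id;

	do {
		/* Preallocate outside the lock; 0 means out of memory. */
		if (ida_pre_get(ida, GFP_KERNEL) == 0)
			return -ENOMEM;

		spin_lock(lock);
		ret = ida_get_new(ida, &id);
		if (ret == 0 && id >= max_ids) {
			ida_remove(ida, id);	/* past the limit: give it back */
			spin_unlock(lock);
			return -ENOSPC;
		}
		spin_unlock(lock);
	} while (ret == -EAGAIN);	/* preload consumed by a racer: retry */

	if (ret == 0)
		*id_out = id;
	return ret;
}
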
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 66917c6..570d577 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -27,7 +27,6 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
-#include "vmwgfx_kms.h"
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -46,6 +45,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_3D:
param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
break;
+ case DRM_VMW_PARAM_FIFO_OFFSET:
+ param->value = dev_priv->mmio_start;
+ break;
case DRM_VMW_PARAM_HW_CAPS:
param->value = dev_priv->capabilities;
break;
@@ -55,19 +57,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_MAX_FB_SIZE:
param->value = dev_priv->vram_size;
break;
- case DRM_VMW_PARAM_FIFO_HW_VERSION:
- {
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
- const struct vmw_fifo_state *fifo = &dev_priv->fifo;
-
- param->value =
- ioread32(fifo_mem +
- ((fifo->capabilities &
- SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
- SVGA_FIFO_3D_HWVERSION_REVISED :
- SVGA_FIFO_3D_HWVERSION));
- break;
- }
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
@@ -77,253 +66,25 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
return 0;
}
-
-int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
+int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_vmw_get_3d_cap_arg *arg =
- (struct drm_vmw_get_3d_cap_arg *) data;
struct vmw_private *dev_priv = vmw_priv(dev);
- uint32_t size;
- __le32 __iomem *fifo_mem;
- void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
- void *bounce;
- int ret;
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct drm_vmw_fifo_debug_arg *arg =
+ (struct drm_vmw_fifo_debug_arg *)data;
+ __le32 __user *buffer = (__le32 __user *)
+ (unsigned long)arg->debug_buffer;
- if (unlikely(arg->pad64 != 0)) {
- DRM_ERROR("Illegal GET_3D_CAP argument.\n");
+ if (unlikely(fifo_state->last_buffer == NULL))
return -EINVAL;
- }
-
- size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2;
-
- if (arg->max_size < size)
- size = arg->max_size;
-
- bounce = vmalloc(size);
- if (unlikely(bounce == NULL)) {
- DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
- return -ENOMEM;
- }
-
- fifo_mem = dev_priv->mmio_virt;
- memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
-
- ret = copy_to_user(buffer, bounce, size);
- vfree(bounce);
-
- if (unlikely(ret != 0))
- DRM_ERROR("Failed to report 3D caps info.\n");
-
- return ret;
-}
-
-int vmw_present_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_present_arg *arg =
- (struct drm_vmw_present_arg *)data;
- struct vmw_surface *surface;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- struct drm_vmw_rect __user *clips_ptr;
- struct drm_vmw_rect *clips = NULL;
- struct drm_mode_object *obj;
- struct vmw_framebuffer *vfb;
- uint32_t num_clips;
- int ret;
-
- num_clips = arg->num_clips;
- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
-
- if (unlikely(num_clips == 0))
- return 0;
-
- if (clips_ptr == NULL) {
- DRM_ERROR("Variable clips_ptr must be specified.\n");
- ret = -EINVAL;
- goto out_clips;
- }
-
- clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
- if (clips == NULL) {
- DRM_ERROR("Failed to allocate clip rect list.\n");
- ret = -ENOMEM;
- goto out_clips;
- }
-
- ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
- if (ret) {
- DRM_ERROR("Failed to copy clip rects from userspace.\n");
- ret = -EFAULT;
- goto out_no_copy;
- }
-
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (unlikely(ret != 0)) {
- ret = -ERESTARTSYS;
- goto out_no_mode_mutex;
- }
-
- obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_ERROR("Invalid framebuffer id.\n");
- ret = -EINVAL;
- goto out_no_fb;
- }
- vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- goto out_no_ttm_lock;
-
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
- &surface);
- if (ret)
- goto out_no_surface;
-
- ret = vmw_kms_present(dev_priv, file_priv,
- vfb, surface, arg->sid,
- arg->dest_x, arg->dest_y,
- clips, num_clips);
-
- /* vmw_user_surface_lookup takes one ref so does new_fb */
- vmw_surface_unreference(&surface);
-out_no_surface:
- ttm_read_unlock(&vmaster->lock);
-out_no_ttm_lock:
-out_no_fb:
- mutex_unlock(&dev->mode_config.mutex);
-out_no_mode_mutex:
-out_no_copy:
- kfree(clips);
-out_clips:
- return ret;
-}
-
-int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_present_readback_arg *arg =
- (struct drm_vmw_present_readback_arg *)data;
- struct drm_vmw_fence_rep __user *user_fence_rep =
- (struct drm_vmw_fence_rep __user *)
- (unsigned long)arg->fence_rep;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- struct drm_vmw_rect __user *clips_ptr;
- struct drm_vmw_rect *clips = NULL;
- struct drm_mode_object *obj;
- struct vmw_framebuffer *vfb;
- uint32_t num_clips;
- int ret;
-
- num_clips = arg->num_clips;
- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
-
- if (unlikely(num_clips == 0))
- return 0;
-
- if (clips_ptr == NULL) {
- DRM_ERROR("Argument clips_ptr must be specified.\n");
- ret = -EINVAL;
- goto out_clips;
- }
-
- clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
- if (clips == NULL) {
- DRM_ERROR("Failed to allocate clip rect list.\n");
- ret = -ENOMEM;
- goto out_clips;
- }
-
- ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
- if (ret) {
- DRM_ERROR("Failed to copy clip rects from userspace.\n");
- ret = -EFAULT;
- goto out_no_copy;
- }
-
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (unlikely(ret != 0)) {
- ret = -ERESTARTSYS;
- goto out_no_mode_mutex;
- }
-
- obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- DRM_ERROR("Invalid framebuffer id.\n");
- ret = -EINVAL;
- goto out_no_fb;
- }
-
- vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
- if (!vfb->dmabuf) {
- DRM_ERROR("Framebuffer not dmabuf backed.\n");
- ret = -EINVAL;
- goto out_no_fb;
+ if (arg->debug_buffer_size < fifo_state->last_data_size) {
+ arg->used_size = arg->debug_buffer_size;
+ arg->did_not_fit = 1;
+ } else {
+ arg->used_size = fifo_state->last_data_size;
+ arg->did_not_fit = 0;
}
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- goto out_no_ttm_lock;
-
- ret = vmw_kms_readback(dev_priv, file_priv,
- vfb, user_fence_rep,
- clips, num_clips);
-
- ttm_read_unlock(&vmaster->lock);
-out_no_ttm_lock:
-out_no_fb:
- mutex_unlock(&dev->mode_config.mutex);
-out_no_mode_mutex:
-out_no_copy:
- kfree(clips);
-out_clips:
- return ret;
-}
-
-
-/**
- * vmw_fops_poll - wrapper around the drm_poll function
- *
- * @filp: See the linux fops poll documentation.
- * @wait: See the linux fops poll documentation.
- *
- * Wrapper around the drm_poll function that makes sure the device is
- * processing the fifo if drm_poll decides to wait.
- */
-unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
-{
- struct drm_file *file_priv = filp->private_data;
- struct vmw_private *dev_priv =
- vmw_priv(file_priv->minor->dev);
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
- return drm_poll(filp, wait);
-}
-
-
-/**
- * vmw_fops_read - wrapper around the drm_read function
- *
- * @filp: See the linux fops read documentation.
- * @buffer: See the linux fops read documentation.
- * @count: See the linux fops read documentation.
- * offset: See the linux fops read documentation.
- *
- * Wrapper around the drm_read function that makes sure the device is
- * processing the fifo if drm_read decides to wait.
- */
-ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset)
-{
- struct drm_file *file_priv = filp->private_data;
- struct vmw_private *dev_priv =
- vmw_priv(file_priv->minor->dev);
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
- return drm_read(filp, buffer, count, offset);
+ return copy_to_user(buffer, fifo_state->last_buffer, arg->used_size);
}
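
The restored vmw_fifo_debug_ioctl() implements a simple size handshake: user-space offers a buffer, the kernel reports how much it used and whether the captured command stream fit. A hedged user-space sketch, assuming the drm_vmw_fifo_debug_arg field names visible above and the DRM_VMW_FIFO_DEBUG command index from vmwgfx_drm.h:

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int dump_last_fifo(int fd, void **buf_out, uint32_t *size_out)
{
	struct drm_vmw_fifo_debug_arg arg = { 0 };
	void *buf = malloc(64 * 1024);

	if (buf == NULL)
		return -1;

	arg.debug_buffer = (unsigned long)buf;
	arg.debug_buffer_size = 64 * 1024;

	if (drmCommandWriteRead(fd, DRM_VMW_FIFO_DEBUG,
				&arg, sizeof(arg)) != 0) {
		free(buf);
		return -1;	/* -EINVAL if nothing was captured */
	}

	/* did_not_fit means only debug_buffer_size bytes came back. */
	*buf_out = buf;
	*size_out = arg.used_size;
	return arg.did_not_fit ? 1 : 0;
}

Note that the handler returns the raw copy_to_user() remainder on a partial copy rather than -EFAULT, so a zero ioctl return is the only success indication.
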
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index cabc95f..e92298a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -34,33 +34,26 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
- uint32_t status, masked_status;
+ uint32_t status;
spin_lock(&dev_priv->irq_lock);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- masked_status = status & dev_priv->irq_mask;
spin_unlock(&dev_priv->irq_lock);
- if (likely(status))
- outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-
- if (!masked_status)
- return IRQ_NONE;
-
- if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
- SVGA_IRQFLAG_FENCE_GOAL)) {
- vmw_fences_update(dev_priv->fman);
+ if (status & SVGA_IRQFLAG_ANY_FENCE)
wake_up_all(&dev_priv->fence_queue);
- }
-
- if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
+ if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
+ if (likely(status)) {
+ outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ return IRQ_HANDLED;
+ }
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
-static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
+static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
{
uint32_t busy;
@@ -71,43 +64,43 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
return (busy == 0);
}
-void vmw_update_seqno(struct vmw_private *dev_priv,
+void vmw_update_sequence(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
- uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
- if (dev_priv->last_read_seqno != seqno) {
- dev_priv->last_read_seqno = seqno;
- vmw_marker_pull(&fifo_state->marker_queue, seqno);
- vmw_fences_update(dev_priv->fman);
+ uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+ if (dev_priv->last_read_sequence != sequence) {
+ dev_priv->last_read_sequence = sequence;
+ vmw_fence_pull(&fifo_state->fence_queue, sequence);
}
}
-bool vmw_seqno_passed(struct vmw_private *dev_priv,
- uint32_t seqno)
+bool vmw_fence_signaled(struct vmw_private *dev_priv,
+ uint32_t sequence)
{
struct vmw_fifo_state *fifo_state;
bool ret;
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return true;
fifo_state = &dev_priv->fifo;
- vmw_update_seqno(dev_priv, fifo_state);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ vmw_update_sequence(dev_priv, fifo_state);
+ if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return true;
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
- vmw_fifo_idle(dev_priv, seqno))
+ vmw_fifo_idle(dev_priv, sequence))
return true;
/**
- * Then check if the seqno is higher than what we've actually
+ * Then check if the sequence is higher than what we've actually
* emitted. Then the fence is stale and signaled.
*/
- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
+ ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
> VMW_FENCE_WRAP);
return ret;
@@ -116,7 +109,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
int vmw_fallback_wait(struct vmw_private *dev_priv,
bool lazy,
bool fifo_idle,
- uint32_t seqno,
+ uint32_t sequence,
bool interruptible,
unsigned long timeout)
{
@@ -130,7 +123,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
DEFINE_WAIT(__wait);
wait_condition = (fifo_idle) ? &vmw_fifo_idle :
- &vmw_seqno_passed;
+ &vmw_fence_signaled;
/**
* Block command submission while waiting for idle.
@@ -138,14 +131,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
if (fifo_idle)
down_read(&fifo_state->rwsem);
- signal_seq = atomic_read(&dev_priv->marker_seq);
+ signal_seq = atomic_read(&dev_priv->fence_seq);
ret = 0;
for (;;) {
prepare_to_wait(&dev_priv->fence_queue, &__wait,
(interruptible) ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
- if (wait_condition(dev_priv, seqno))
+ if (wait_condition(dev_priv, sequence))
break;
if (time_after_eq(jiffies, end_jiffies)) {
DRM_ERROR("SVGA device lockup.\n");
@@ -182,110 +175,68 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
return ret;
}
-void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
-{
- mutex_lock(&dev_priv->hw_mutex);
- if (dev_priv->fence_queue_waiters++ == 0) {
- unsigned long irq_flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
- outl(SVGA_IRQFLAG_ANY_FENCE,
- dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
- vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
- }
- mutex_unlock(&dev_priv->hw_mutex);
-}
-
-void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
-{
- mutex_lock(&dev_priv->hw_mutex);
- if (--dev_priv->fence_queue_waiters == 0) {
- unsigned long irq_flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
- dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
- vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
- }
- mutex_unlock(&dev_priv->hw_mutex);
-}
-
-
-void vmw_goal_waiter_add(struct vmw_private *dev_priv)
-{
- mutex_lock(&dev_priv->hw_mutex);
- if (dev_priv->goal_queue_waiters++ == 0) {
- unsigned long irq_flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
- outl(SVGA_IRQFLAG_FENCE_GOAL,
- dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
- vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
- }
- mutex_unlock(&dev_priv->hw_mutex);
-}
-
-void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
-{
- mutex_lock(&dev_priv->hw_mutex);
- if (--dev_priv->goal_queue_waiters == 0) {
- unsigned long irq_flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
- dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
- vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
- }
- mutex_unlock(&dev_priv->hw_mutex);
-}
-
-int vmw_wait_seqno(struct vmw_private *dev_priv,
- bool lazy, uint32_t seqno,
- bool interruptible, unsigned long timeout)
+int vmw_wait_fence(struct vmw_private *dev_priv,
+ bool lazy, uint32_t sequence,
+ bool interruptible, unsigned long timeout)
{
long ret;
+ unsigned long irq_flags;
struct vmw_fifo_state *fifo = &dev_priv->fifo;
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return 0;
- if (likely(vmw_seqno_passed(dev_priv, seqno)))
+ if (likely(vmw_fence_signaled(dev_priv, sequence)))
return 0;
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
- return vmw_fallback_wait(dev_priv, lazy, true, seqno,
+ return vmw_fallback_wait(dev_priv, lazy, true, sequence,
interruptible, timeout);
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
- return vmw_fallback_wait(dev_priv, lazy, false, seqno,
+ return vmw_fallback_wait(dev_priv, lazy, false, sequence,
interruptible, timeout);
- vmw_seqno_waiter_add(dev_priv);
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ outl(SVGA_IRQFLAG_ANY_FENCE,
+ dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) |
+ SVGA_IRQFLAG_ANY_FENCE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
if (interruptible)
ret = wait_event_interruptible_timeout
(dev_priv->fence_queue,
- vmw_seqno_passed(dev_priv, seqno),
+ vmw_fence_signaled(dev_priv, sequence),
timeout);
else
ret = wait_event_timeout
(dev_priv->fence_queue,
- vmw_seqno_passed(dev_priv, seqno),
+ vmw_fence_signaled(dev_priv, sequence),
timeout);
- vmw_seqno_waiter_remove(dev_priv);
-
if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) &
+ ~SVGA_IRQFLAG_ANY_FENCE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
return ret;
}
@@ -322,3 +273,25 @@ void vmw_irq_uninstall(struct drm_device *dev)
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
+
+#define VMW_FENCE_WAIT_TIMEOUT (3 * HZ)
+
+int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_fence_wait_arg *arg =
+ (struct drm_vmw_fence_wait_arg *)data;
+ unsigned long timeout;
+
+ if (!arg->cookie_valid) {
+ arg->cookie_valid = 1;
+ arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
+ }
+
+ timeout = jiffies;
+ if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
+ return -EBUSY;
+
+ timeout = (unsigned long)arg->kernel_cookie - timeout;
+ return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
+}
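
The fence-wait ioctl above keeps its deadline in a user-visible cookie, so a wait that is interrupted and restarted resumes with the remaining time instead of a fresh 3-second budget. A kernel-context sketch of the idiom (names illustrative; jiffies, HZ and time_after_eq() as in the patch):

static long example_remaining_jiffies(u64 *cookie, u32 *cookie_valid)
{
	unsigned long now = jiffies;

	if (!*cookie_valid) {
		*cookie_valid = 1;
		*cookie = now + 3 * HZ;		/* absolute deadline, set once */
	}

	if (time_after_eq(now, (unsigned long)*cookie))
		return -EBUSY;			/* deadline already passed */

	return (unsigned long)*cookie - now;	/* jiffies left to wait */
}

Because the cookie is absolute, repeated -ERESTARTSYS round-trips through user-space cannot extend the total wait.
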
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 2286d47..8a38c91 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -27,47 +27,11 @@
#include "vmwgfx_kms.h"
-
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
-
-struct vmw_clip_rect {
- int x1, x2, y1, y2;
-};
-
-/**
- * Clip @num_rects number of @rects against @clip storing the
- * results in @out_rects and the number of passed rects in @out_num.
- */
-void vmw_clip_cliprects(struct drm_clip_rect *rects,
- int num_rects,
- struct vmw_clip_rect clip,
- SVGASignedRect *out_rects,
- int *out_num)
-{
- int i, k;
-
- for (i = 0, k = 0; i < num_rects; i++) {
- int x1 = max_t(int, clip.x1, rects[i].x1);
- int y1 = max_t(int, clip.y1, rects[i].y1);
- int x2 = min_t(int, clip.x2, rects[i].x2);
- int y2 = min_t(int, clip.y2, rects[i].y2);
-
- if (x1 >= x2)
- continue;
- if (y1 >= y2)
- continue;
-
- out_rects[k].left = x1;
- out_rects[k].top = y1;
- out_rects[k].right = x2;
- out_rects[k].bottom = y2;
- k++;
- }
-
- *out_num = k;
-}
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
@@ -120,43 +84,6 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
return 0;
}
-int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
- u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- void *virtual;
- bool dummy;
- int ret;
-
- kmap_offset = 0;
- kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return -EINVAL;
- }
-
- ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
- hotspotX, hotspotY);
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(&dmabuf->base);
-
- return ret;
-}
-
-
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
@@ -180,26 +107,24 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
struct vmw_dma_buffer *dmabuf = NULL;
int ret;
- /* A lot of the code assumes this */
- if (handle && (width != 64 || height != 64))
- return -EINVAL;
-
if (handle) {
- ret = vmw_user_lookup_handle(dev_priv, tfile,
- handle, &surface, &dmabuf);
- if (ret) {
- DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
- return -EINVAL;
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+ handle, &surface);
+ if (!ret) {
+ if (!surface->snooper.image) {
+ DRM_ERROR("surface not suitable for cursor\n");
+ return -EINVAL;
+ }
+ } else {
+ ret = vmw_user_dmabuf_lookup(tfile,
+ handle, &dmabuf);
+ if (ret) {
+ DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+ return -EINVAL;
+ }
}
}
- /* need to do this before taking down old image */
- if (surface && !surface->snooper.image) {
- DRM_ERROR("surface not suitable for cursor\n");
- vmw_surface_unreference(&surface);
- return -EINVAL;
- }
-
/* takedown old cursor */
if (du->cursor_surface) {
du->cursor_surface->snooper.crtc = NULL;
@@ -218,19 +143,42 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
vmw_cursor_update_image(dev_priv, surface->snooper.image,
64, 64, du->hotspot_x, du->hotspot_y);
} else if (dmabuf) {
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ void *virtual;
+ bool dummy;
+
/* vmw_user_surface_lookup takes one reference */
du->cursor_dmabuf = dmabuf;
- ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
- du->hotspot_x, du->hotspot_y);
+ kmap_offset = 0;
+ kmap_num = (64*64*4) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ vmw_cursor_update_image(dev_priv, virtual, 64, 64,
+ du->hotspot_x, du->hotspot_y);
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(&dmabuf->base);
+
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return 0;
}
- vmw_cursor_update_position(dev_priv, true,
- du->cursor_x + du->hotspot_x,
- du->cursor_y + du->hotspot_y);
+ vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
return 0;
}
@@ -245,8 +193,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
du->cursor_y = y + crtc->y;
vmw_cursor_update_position(dev_priv, shown,
- du->cursor_x + du->hotspot_x,
- du->cursor_y + du->hotspot_y);
+ du->cursor_x, du->cursor_y);
return 0;
}
@@ -267,7 +214,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
- int i, ret;
+ int ret;
cmd = container_of(header, struct vmw_dma_cmd, header);
@@ -289,19 +236,16 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
sizeof(SVGA3dCopyBox);
- if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ if (cmd->dma.guest.pitch != (64 * 4) ||
+ cmd->dma.guest.ptr.offset % PAGE_SIZE ||
box->x != 0 || box->y != 0 || box->z != 0 ||
box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
- box->d != 1 || box_count != 1) {
+ box->w != 64 || box->h != 64 || box->d != 1 ||
+ box_count != 1) {
/* TODO handle non page aligned offsets */
- /* TODO handle more dst & src != 0 */
- /* TODO handle more then one copy */
- DRM_ERROR("Cant snoop dma request for cursor!\n");
- DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
- box->srcx, box->srcy, box->srcz,
- box->x, box->y, box->z,
- box->w, box->h, box->d, box_count,
- cmd->dma.guest.ptr.offset);
+ /* TODO handle partial uploads and pitch != 256 */
+ /* TODO handle more than one copy (size != 64) */
+ DRM_ERROR("lazy programmer, can't handle weird stuff\n");
return;
}
@@ -320,16 +264,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
virtual = ttm_kmap_obj_virtual(&map, &dummy);
- if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
- memcpy(srf->snooper.image, virtual, 64*64*4);
- } else {
- /* Image is unsigned pointer. */
- for (i = 0; i < box->h; i++)
- memcpy(srf->snooper.image + i * 64,
- virtual + i * cmd->dma.guest.pitch,
- box->w * 4);
- }
-
+ memcpy(srf->snooper.image, virtual, 64*64*4);
srf->snooper.age++;
/* we can't call this function from this function since execbuf has
@@ -394,10 +329,41 @@ struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
struct vmw_dma_buffer *buffer;
+ struct delayed_work d_work;
+ struct mutex work_lock;
+ bool present_fs;
struct list_head head;
struct drm_master *master;
};
+/**
+ * vmw_kms_idle_workqueues - Flush workqueues on this master
+ *
+ * @vmaster: Pointer identifying the master, for the surfaces of which
+ * we idle the dirty work queues.
+ *
+ * This function should be called with the ttm lock held in exclusive mode
+ * to idle all dirty work queues before the fifo is taken down.
+ *
+ * The work task may actually requeue itself, but after the flush returns we're
+ * sure that there's nothing to present, since the ttm lock is held in
+ * exclusive mode, so the fifo will never get used.
+ */
+
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
+{
+ struct vmw_framebuffer_surface *entry;
+
+ mutex_lock(&vmaster->fb_surf_mutex);
+ list_for_each_entry(entry, &vmaster->fb_surf, head) {
+ if (cancel_delayed_work_sync(&entry->d_work))
+ (void) entry->d_work.work.func(&entry->d_work.work);
+
+ (void) cancel_delayed_work_sync(&entry->d_work);
+ }
+ mutex_unlock(&vmaster->fb_surf_mutex);
+}
+
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
@@ -409,162 +375,65 @@ void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
list_del(&vfbs->head);
mutex_unlock(&vmaster->fb_surf_mutex);
+ cancel_delayed_work_sync(&vfbs->d_work);
drm_master_put(&vfbs->master);
drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface);
- ttm_base_object_unref(&vfbs->base.user_obj);
kfree(vfbs);
}
-static int do_surface_dirty_sou(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int inc,
- struct vmw_fence_obj **out_fence)
+static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_clip_rect *clips_ptr;
- struct drm_clip_rect *tmp;
- struct drm_crtc *crtc;
- size_t fifo_size;
- int i, num_units;
- int ret = 0; /* silence warning */
- int left, right, top, bottom;
+ struct delayed_work *d_work =
+ container_of(work, struct delayed_work, work);
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(d_work, struct vmw_framebuffer_surface, d_work);
+ struct vmw_surface *surf = vfbs->surface;
+ struct drm_framebuffer *framebuffer = &vfbs->base.base;
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
struct {
SVGA3dCmdHeader header;
- SVGA3dCmdBlitSurfaceToScreen body;
+ SVGA3dCmdPresent body;
+ SVGA3dCopyRect cr;
} *cmd;
- SVGASignedRect *blits;
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
- head) {
- if (crtc->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- BUG_ON(!clips || !num_clips);
-
- tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
- if (unlikely(tmp == NULL)) {
- DRM_ERROR("Temporary cliprect memory alloc failed.\n");
- return -ENOMEM;
- }
-
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
- cmd = kzalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Temporary fifo memory alloc failed.\n");
- ret = -ENOMEM;
- goto out_free_tmp;
- }
-
- /* setup blits pointer */
- blits = (SVGASignedRect *)&cmd[1];
-
- /* initial clip region */
- left = clips->x1;
- right = clips->x2;
- top = clips->y1;
- bottom = clips->y2;
-
- /* skip the first clip rect */
- for (i = 1, clips_ptr = clips + inc;
- i < num_clips; i++, clips_ptr += inc) {
- left = min_t(int, left, (int)clips_ptr->x1);
- right = max_t(int, right, (int)clips_ptr->x2);
- top = min_t(int, top, (int)clips_ptr->y1);
- bottom = max_t(int, bottom, (int)clips_ptr->y2);
- }
- /* only need to do this once */
- memset(cmd, 0, fifo_size);
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
- cmd->body.srcRect.left = left;
- cmd->body.srcRect.right = right;
- cmd->body.srcRect.top = top;
- cmd->body.srcRect.bottom = bottom;
-
- clips_ptr = clips;
- for (i = 0; i < num_clips; i++, clips_ptr += inc) {
- tmp[i].x1 = clips_ptr->x1 - left;
- tmp[i].x2 = clips_ptr->x2 - left;
- tmp[i].y1 = clips_ptr->y1 - top;
- tmp[i].y2 = clips_ptr->y2 - top;
- }
-
- /* do per unit writing, reuse fifo for each */
- for (i = 0; i < num_units; i++) {
- struct vmw_display_unit *unit = units[i];
- struct vmw_clip_rect clip;
- int num;
-
- clip.x1 = left - unit->crtc.x;
- clip.y1 = top - unit->crtc.y;
- clip.x2 = right - unit->crtc.x;
- clip.y2 = bottom - unit->crtc.y;
-
- /* skip any crtcs that misses the clip region */
- if (clip.x1 >= unit->crtc.mode.hdisplay ||
- clip.y1 >= unit->crtc.mode.vdisplay ||
- clip.x2 <= 0 || clip.y2 <= 0)
- continue;
-
- /*
- * In order for the clip rects to be correctly scaled
- * the src and dest rects needs to be the same size.
- */
- cmd->body.destRect.left = clip.x1;
- cmd->body.destRect.right = clip.x2;
- cmd->body.destRect.top = clip.y1;
- cmd->body.destRect.bottom = clip.y2;
-
- /* create a clip rect of the crtc in dest coords */
- clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
- clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
- clip.x1 = 0 - clip.x1;
- clip.y1 = 0 - clip.y1;
-
- /* need to reset sid as it is changed by execbuf */
- cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
- cmd->body.destScreenId = unit->unit;
-
- /* clip and write blits to cmd stream */
- vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
- /* if no cliprects hit skip this */
- if (num == 0)
- continue;
-
- /* only return the last fence */
- if (out_fence && *out_fence)
- vmw_fence_obj_unreference(out_fence);
-
- /* recalculate package length */
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, out_fence);
-
- if (unlikely(ret != 0))
- break;
- }
-
-
- kfree(cmd);
-out_free_tmp:
- kfree(tmp);
+ /**
+ * Strictly, we should take the ttm_lock in read mode before accessing
+ * the fifo, to make sure it is present and up. Instead, we flush all
+ * workqueues under the ttm lock in exclusive mode before taking down
+ * the fifo.
+ */
+ mutex_lock(&vfbs->work_lock);
+ if (!vfbs->present_fs)
+ goto out_unlock;
- return ret;
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
+ goto out_resched;
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
+ cmd->body.sid = cpu_to_le32(surf->res.id);
+ cmd->cr.x = cpu_to_le32(0);
+ cmd->cr.y = cpu_to_le32(0);
+ cmd->cr.srcx = cmd->cr.x;
+ cmd->cr.srcy = cmd->cr.y;
+ cmd->cr.w = cpu_to_le32(framebuffer->width);
+ cmd->cr.h = cpu_to_le32(framebuffer->height);
+ vfbs->present_fs = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+out_resched:
+ /**
+ * Will not re-add if already pending.
+ */
+ schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
+out_unlock:
+ mutex_unlock(&vfbs->work_lock);
}
+
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
@@ -575,20 +444,44 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_surface *surf = vfbs->surface;
struct drm_clip_rect norect;
- int ret, inc = 1;
+ SVGA3dCopyRect *cr;
+ int i, inc = 1;
+ int ret;
- if (unlikely(vfbs->master != file_priv->master))
- return -EINVAL;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdPresent body;
+ SVGA3dCopyRect cr;
+ } *cmd;
- /* Require ScreenObject support for 3D */
- if (!dev_priv->sou_priv)
+ if (unlikely(vfbs->master != file_priv->master))
return -EINVAL;
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
+ if (!num_clips ||
+ !(dev_priv->fifo.capabilities &
+ SVGA_FIFO_CAP_SCREEN_OBJECT)) {
+ int ret;
+
+ mutex_lock(&vfbs->work_lock);
+ vfbs->present_fs = true;
+ ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
+ mutex_unlock(&vfbs->work_lock);
+ if (ret) {
+ /**
+ * No work pending; force an immediate present.
+ */
+ vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
+ }
+ ttm_read_unlock(&vmaster->lock);
+ return 0;
+ }
+
if (!num_clips) {
num_clips = 1;
clips = &norect;
@@ -600,10 +493,29 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
inc = 2; /* skip source rects */
}
- ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
- flags, color,
- clips, num_clips, inc, NULL);
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ ttm_read_unlock(&vmaster->lock);
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
+ cmd->body.sid = cpu_to_le32(surf->res.id);
+
+ for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
+ cr->x = cpu_to_le16(clips->x1);
+ cr->y = cpu_to_le16(clips->y1);
+ cr->srcx = cr->x;
+ cr->srcy = cr->y;
+ cr->w = cpu_to_le16(clips->x2 - clips->x1);
+ cr->h = cpu_to_le16(clips->y2 - clips->y1);
+ }
+ vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
ttm_read_unlock(&vmaster->lock);
return 0;
}
@@ -628,18 +540,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- /* 3D is only supported on HWv8 hosts which supports screen objects */
- if (!dev_priv->sou_priv)
- return -ENOSYS;
-
/*
* Sanity checks.
*/
- /* Surface must be marked as a scanout. */
- if (unlikely(!surface->scanout))
- return -EINVAL;
-
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
surface->sizes[0].width < mode_cmd->width ||
@@ -663,9 +567,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
case 15:
format = SVGA3D_A1R5G5B5;
break;
- case 8:
- format = SVGA3D_LUMINANCE8;
- break;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
@@ -694,15 +595,18 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
/* XXX get the first 3 from the surface info */
vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbs->base.base.pitches[0] = mode_cmd->pitch;
+ vfbs->base.base.pitch = mode_cmd->pitch;
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
+ vfbs->base.pin = &vmw_surface_dmabuf_pin;
+ vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
vfbs->surface = surface;
- vfbs->base.user_handle = mode_cmd->handle;
vfbs->master = drm_master_get(file_priv->master);
+ mutex_init(&vfbs->work_lock);
mutex_lock(&vmaster->fb_surf_mutex);
+ INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
list_add_tail(&vfbs->head, &vmaster->fb_surf);
mutex_unlock(&vmaster->fb_surf_mutex);
@@ -737,33 +641,48 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_dmabuf_unreference(&vfbd->buffer);
- ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
}
-static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment)
+int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
{
- size_t fifo_size;
- int i;
-
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ struct drm_clip_rect norect;
+ int ret;
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
+ int i, increment = 1;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (!num_clips) {
+ num_clips = 1;
+ clips = &norect;
+ norect.x1 = norect.y1 = 0;
+ norect.x2 = framebuffer->width;
+ norect.y2 = framebuffer->height;
+ } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ num_clips /= 2;
+ increment = 2;
+ }
- fifo_size = sizeof(*cmd) * num_clips;
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
+ ttm_read_unlock(&vmaster->lock);
return -ENOMEM;
}
- memset(cmd, 0, fifo_size);
for (i = 0; i < num_clips; i++, clips += increment) {
cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
cmd[i].body.x = cpu_to_le32(clips->x1);
@@ -772,201 +691,57 @@ static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
}
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
+ ttm_read_unlock(&vmaster->lock);
+
return 0;
}
-static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer)
+static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
+ .destroy = vmw_framebuffer_dmabuf_destroy,
+ .dirty = vmw_framebuffer_dmabuf_dirty,
+ .create_handle = vmw_framebuffer_create_handle,
+};
+
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
{
- int depth = framebuffer->base.depth;
- size_t fifo_size;
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_framebuffer_surface *vfbs =
+ vmw_framebuffer_to_vfbs(&vfb->base);
+ unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
int ret;
- struct {
- uint32_t header;
- SVGAFifoCmdDefineGMRFB body;
- } *cmd;
-
- /* Emulate RGBA support, contrary to svga_reg.h this is not
- * supported by hosts. This is only a problem if we are reading
- * this value later and expecting what we uploaded back.
- */
- if (depth == 32)
- depth = 24;
-
- fifo_size = sizeof(*cmd);
- cmd = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
+ vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
+ if (unlikely(vfbs->buffer == NULL))
return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- cmd->header = SVGA_CMD_DEFINE_GMRFB;
- cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
- cmd->body.format.colorDepth = depth;
- cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = framebuffer->base.pitches[0];
- cmd->body.ptr.gmrId = framebuffer->user_handle;
- cmd->body.ptr.offset = 0;
-
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, NULL);
-
- kfree(cmd);
- return ret;
-}
-
-static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment,
- struct vmw_fence_obj **out_fence)
-{
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_clip_rect *clips_ptr;
- int i, k, num_units, ret;
- struct drm_crtc *crtc;
- size_t fifo_size;
-
- struct {
- uint32_t header;
- SVGAFifoCmdBlitGMRFBToScreen body;
- } *blits;
-
- ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
+ vmw_overlay_pause_all(dev_priv);
+ ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
+ &vmw_vram_ne_placement,
+ false, &vmw_dmabuf_bo_free);
+ vmw_overlay_resume_all(dev_priv);
if (unlikely(ret != 0))
- return ret; /* define_gmrfb prints warnings */
-
- fifo_size = sizeof(*blits) * num_clips;
- blits = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(blits == NULL)) {
- DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
- return -ENOMEM;
- }
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- for (k = 0; k < num_units; k++) {
- struct vmw_display_unit *unit = units[k];
- int hit_num = 0;
-
- clips_ptr = clips;
- for (i = 0; i < num_clips; i++, clips_ptr += increment) {
- int clip_x1 = clips_ptr->x1 - unit->crtc.x;
- int clip_y1 = clips_ptr->y1 - unit->crtc.y;
- int clip_x2 = clips_ptr->x2 - unit->crtc.x;
- int clip_y2 = clips_ptr->y2 - unit->crtc.y;
- int move_x, move_y;
-
- /* skip any crtcs that misses the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
- continue;
-
- /* clip size to crtc size */
- clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
- clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
-
- /* translate both src and dest to bring clip into screen */
- move_x = min_t(int, clip_x1, 0);
- move_y = min_t(int, clip_y1, 0);
-
- /* actual translate done here */
- blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
- blits[hit_num].body.destScreenId = unit->unit;
- blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
- blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
- blits[hit_num].body.destRect.left = clip_x1 - move_x;
- blits[hit_num].body.destRect.top = clip_y1 - move_y;
- blits[hit_num].body.destRect.right = clip_x2;
- blits[hit_num].body.destRect.bottom = clip_y2;
- hit_num++;
- }
-
- /* no clips hit the crtc */
- if (hit_num == 0)
- continue;
-
- /* only return the last fence */
- if (out_fence && *out_fence)
- vmw_fence_obj_unreference(out_fence);
-
- fifo_size = sizeof(*blits) * hit_num;
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
- fifo_size, 0, NULL, out_fence);
-
- if (unlikely(ret != 0))
- break;
- }
-
- kfree(blits);
+ vfbs->buffer = NULL;
return ret;
}
-int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
- struct drm_file *file_priv,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
- struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- struct vmw_framebuffer_dmabuf *vfbd =
- vmw_framebuffer_to_vfbd(framebuffer);
- struct drm_clip_rect norect;
- int ret, increment = 1;
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
+ struct ttm_buffer_object *bo;
+ struct vmw_framebuffer_surface *vfbs =
+ vmw_framebuffer_to_vfbs(&vfb->base);
- if (!num_clips) {
- num_clips = 1;
- clips = &norect;
- norect.x1 = norect.y1 = 0;
- norect.x2 = framebuffer->width;
- norect.y2 = framebuffer->height;
- } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
- num_clips /= 2;
- increment = 2;
- }
+ if (unlikely(vfbs->buffer == NULL))
+ return 0;
- if (dev_priv->ldu_priv) {
- ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
- flags, color,
- clips, num_clips, increment);
- } else {
- ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
- flags, color,
- clips, num_clips, increment, NULL);
- }
+ bo = &vfbs->buffer->base;
+ ttm_bo_unref(&bo);
+ vfbs->buffer = NULL;
- ttm_read_unlock(&vmaster->lock);
- return ret;
+ return 0;
}
-static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
- .destroy = vmw_framebuffer_dmabuf_destroy,
- .dirty = vmw_framebuffer_dmabuf_dirty,
- .create_handle = vmw_framebuffer_create_handle,
-};
-
-/**
- * Pin the dmabuffer to the start of vram.
- */
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -974,12 +749,10 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
vmw_framebuffer_to_vfbd(&vfb->base);
int ret;
- /* This code should not be used with screen objects */
- BUG_ON(dev_priv->sou_priv);
vmw_overlay_pause_all(dev_priv);
- ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
+ ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
vmw_overlay_resume_all(dev_priv);
@@ -999,7 +772,7 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
return 0;
}
- return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
+ return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
@@ -1021,33 +794,6 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
return -EINVAL;
}
- /* Limited framebuffer color depth support for screen objects */
- if (dev_priv->sou_priv) {
- switch (mode_cmd->depth) {
- case 32:
- case 24:
- /* Only support 32 bpp for 32 and 24 depth fbs */
- if (mode_cmd->bpp == 32)
- break;
-
- DRM_ERROR("Invalid color depth/bbp: %d %d\n",
- mode_cmd->depth, mode_cmd->bpp);
- return -EINVAL;
- case 16:
- case 15:
- /* Only support 16 bpp for 16 and 15 depth fbs */
- if (mode_cmd->bpp == 16)
- break;
-
- DRM_ERROR("Invalid color depth/bbp: %d %d\n",
- mode_cmd->depth, mode_cmd->bpp);
- return -EINVAL;
- default:
- DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
- return -EINVAL;
- }
- }
-
vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
if (!vfbd) {
ret = -ENOMEM;
@@ -1065,17 +811,13 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
}
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbd->base.base.pitches[0] = mode_cmd->pitch;
+ vfbd->base.base.pitch = mode_cmd->pitch;
vfbd->base.base.depth = mode_cmd->depth;
vfbd->base.base.width = mode_cmd->width;
vfbd->base.base.height = mode_cmd->height;
- if (!dev_priv->sou_priv) {
- vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
- vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
- }
- vfbd->base.dmabuf = true;
+ vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
+ vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
vfbd->buffer = dmabuf;
- vfbd->base.user_handle = mode_cmd->handle;
*out = &vfbd->base;
return 0;
@@ -1094,336 +836,85 @@ out_err1:
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd2 *mode_cmd2)
+ struct drm_mode_fb_cmd *mode_cmd)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
- struct ttm_base_object *user_obj;
- struct drm_mode_fb_cmd mode_cmd;
+ u64 required_size;
int ret;
- mode_cmd.width = mode_cmd2->width;
- mode_cmd.height = mode_cmd2->height;
- mode_cmd.pitch = mode_cmd2->pitches[0];
- mode_cmd.handle = mode_cmd2->handles[0];
- drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
- &mode_cmd.bpp);
-
/**
* This code should be conditioned on Screen Objects not being used.
* If screen objects are used, we can allocate a GMR to hold the
* requested framebuffer.
*/
- if (!vmw_kms_validate_mode_vram(dev_priv,
- mode_cmd.pitch,
- mode_cmd.height)) {
+ required_size = mode_cmd->pitch * mode_cmd->height;
+ if (unlikely(required_size > (u64) dev_priv->vram_size)) {
DRM_ERROR("VRAM size is too small for requested mode.\n");
- return ERR_PTR(-ENOMEM);
- }
-
- /*
- * Take a reference on the user object of the resource
- * backing the kms fb. This ensures that user-space handle
- * lookups on that resource will always work as long as
- * it's registered with a kms framebuffer. This is important,
- * since vmw_execbuf_process identifies resources in the
- * command stream using user-space handles.
- */
-
- user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
- if (unlikely(user_obj == NULL)) {
- DRM_ERROR("Could not locate requested kms frame buffer.\n");
- return ERR_PTR(-ENOENT);
+ return NULL;
}
/**
* End conditioned code.
*/
- /* returns either a dmabuf or surface */
- ret = vmw_user_lookup_handle(dev_priv, tfile,
- mode_cmd.handle,
- &surface, &bo);
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+ mode_cmd->handle, &surface);
if (ret)
- goto err_out;
-
- /* Create the new framebuffer depending one what we got back */
- if (bo)
- ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
- &mode_cmd);
- else if (surface)
- ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
- surface, &vfb, &mode_cmd);
- else
- BUG();
-
-err_out:
- /* vmw_user_lookup_handle takes one ref so does new_fb */
- if (bo)
- vmw_dmabuf_unreference(&bo);
- if (surface)
- vmw_surface_unreference(&surface);
-
- if (ret) {
- DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- ttm_base_object_unref(&user_obj);
- return ERR_PTR(ret);
- } else
- vfb->user_obj = user_obj;
-
- return &vfb->base;
-}
-
-static struct drm_mode_config_funcs vmw_kms_funcs = {
- .fb_create = vmw_kms_fb_create,
-};
-
-int vmw_kms_present(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct vmw_surface *surface,
- uint32_t sid,
- int32_t destX, int32_t destY,
- struct drm_vmw_rect *clips,
- uint32_t num_clips)
-{
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_clip_rect *tmp;
- struct drm_crtc *crtc;
- size_t fifo_size;
- int i, k, num_units;
- int ret = 0; /* silence warning */
- int left, right, top, bottom;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdBlitSurfaceToScreen body;
- } *cmd;
- SVGASignedRect *blits;
+ goto try_dmabuf;
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->fb != &vfb->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
+ if (!surface->scanout)
+ goto err_not_scanout;
- BUG_ON(surface == NULL);
- BUG_ON(!clips || !num_clips);
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
+ &vfb, mode_cmd);
- tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
- if (unlikely(tmp == NULL)) {
- DRM_ERROR("Temporary cliprect memory alloc failed.\n");
- return -ENOMEM;
- }
+ /* vmw_user_surface_lookup takes one ref so does new_fb */
+ vmw_surface_unreference(&surface);
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
- cmd = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- ret = -ENOMEM;
- goto out_free_tmp;
+ if (ret) {
+ DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+ return ERR_PTR(ret);
}
+ return &vfb->base;
- left = clips->x;
- right = clips->x + clips->w;
- top = clips->y;
- bottom = clips->y + clips->h;
+try_dmabuf:
+ DRM_INFO("%s: trying buffer\n", __func__);
- for (i = 1; i < num_clips; i++) {
- left = min_t(int, left, (int)clips[i].x);
- right = max_t(int, right, (int)clips[i].x + clips[i].w);
- top = min_t(int, top, (int)clips[i].y);
- bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
+ ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
+ if (ret) {
+ DRM_ERROR("failed to find buffer: %i\n", ret);
+ return ERR_PTR(-ENOENT);
}
- /* only need to do this once */
- memset(cmd, 0, fifo_size);
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
+ ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+ mode_cmd);
- blits = (SVGASignedRect *)&cmd[1];
+ /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
+ vmw_dmabuf_unreference(&bo);
- cmd->body.srcRect.left = left;
- cmd->body.srcRect.right = right;
- cmd->body.srcRect.top = top;
- cmd->body.srcRect.bottom = bottom;
-
- for (i = 0; i < num_clips; i++) {
- tmp[i].x1 = clips[i].x - left;
- tmp[i].x2 = clips[i].x + clips[i].w - left;
- tmp[i].y1 = clips[i].y - top;
- tmp[i].y2 = clips[i].y + clips[i].h - top;
+ if (ret) {
+ DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+ return ERR_PTR(ret);
}
- for (k = 0; k < num_units; k++) {
- struct vmw_display_unit *unit = units[k];
- struct vmw_clip_rect clip;
- int num;
-
- clip.x1 = left + destX - unit->crtc.x;
- clip.y1 = top + destY - unit->crtc.y;
- clip.x2 = right + destX - unit->crtc.x;
- clip.y2 = bottom + destY - unit->crtc.y;
-
- /* skip any crtcs that misses the clip region */
- if (clip.x1 >= unit->crtc.mode.hdisplay ||
- clip.y1 >= unit->crtc.mode.vdisplay ||
- clip.x2 <= 0 || clip.y2 <= 0)
- continue;
-
- /*
- * In order for the clip rects to be correctly scaled
- * the src and dest rects needs to be the same size.
- */
- cmd->body.destRect.left = clip.x1;
- cmd->body.destRect.right = clip.x2;
- cmd->body.destRect.top = clip.y1;
- cmd->body.destRect.bottom = clip.y2;
-
- /* create a clip rect of the crtc in dest coords */
- clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
- clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
- clip.x1 = 0 - clip.x1;
- clip.y1 = 0 - clip.y1;
-
- /* need to reset sid as it is changed by execbuf */
- cmd->body.srcImage.sid = sid;
- cmd->body.destScreenId = unit->unit;
-
- /* clip and write blits to cmd stream */
- vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
- /* if no cliprects hit skip this */
- if (num == 0)
- continue;
-
- /* recalculate package length */
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, NULL);
-
- if (unlikely(ret != 0))
- break;
- }
+ return &vfb->base;
- kfree(cmd);
-out_free_tmp:
- kfree(tmp);
+err_not_scanout:
+ DRM_ERROR("surface not marked as scanout\n");
+ /* vmw_user_surface_lookup takes one ref */
+ vmw_surface_unreference(&surface);
- return ret;
+ return ERR_PTR(-EINVAL);
}
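/*
 * Editor's note: the restored lookup order above is surface first,
 * buffer second. A handle that resolves to neither fails with -ENOENT,
 * and a surface without the scanout flag is rejected with -EINVAL
 * rather than falling through to the buffer path.
 */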
-int vmw_kms_readback(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_vmw_rect *clips,
- uint32_t num_clips)
-{
- struct vmw_framebuffer_dmabuf *vfbd =
- vmw_framebuffer_to_vfbd(&vfb->base);
- struct vmw_dma_buffer *dmabuf = vfbd->buffer;
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_crtc *crtc;
- size_t fifo_size;
- int i, k, ret, num_units, blits_pos;
-
- struct {
- uint32_t header;
- SVGAFifoCmdDefineGMRFB body;
- } *cmd;
- struct {
- uint32_t header;
- SVGAFifoCmdBlitScreenToGMRFB body;
- } *blits;
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->fb != &vfb->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- BUG_ON(dmabuf == NULL);
- BUG_ON(!clips || !num_clips);
-
- /* take a safe guess at fifo size */
- fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
- cmd = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- cmd->header = SVGA_CMD_DEFINE_GMRFB;
- cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
- cmd->body.format.colorDepth = vfb->base.depth;
- cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = vfb->base.pitches[0];
- cmd->body.ptr.gmrId = vfb->user_handle;
- cmd->body.ptr.offset = 0;
-
- blits = (void *)&cmd[1];
- blits_pos = 0;
- for (i = 0; i < num_units; i++) {
- struct drm_vmw_rect *c = clips;
- for (k = 0; k < num_clips; k++, c++) {
- /* transform clip coords to crtc origin based coords */
- int clip_x1 = c->x - units[i]->crtc.x;
- int clip_x2 = c->x - units[i]->crtc.x + c->w;
- int clip_y1 = c->y - units[i]->crtc.y;
- int clip_y2 = c->y - units[i]->crtc.y + c->h;
- int dest_x = c->x;
- int dest_y = c->y;
-
- /* compensate for clipping, we negate
- * a negative number and add that.
- */
- if (clip_x1 < 0)
- dest_x += -clip_x1;
- if (clip_y1 < 0)
- dest_y += -clip_y1;
-
- /* clip */
- clip_x1 = max(clip_x1, 0);
- clip_y1 = max(clip_y1, 0);
- clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
- clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
-
- /* and cull any rects that misses the crtc */
- if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
- clip_y1 >= units[i]->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
- continue;
-
- blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
- blits[blits_pos].body.srcScreenId = units[i]->unit;
- blits[blits_pos].body.destOrigin.x = dest_x;
- blits[blits_pos].body.destOrigin.y = dest_y;
-
- blits[blits_pos].body.srcRect.left = clip_x1;
- blits[blits_pos].body.srcRect.top = clip_y1;
- blits[blits_pos].body.srcRect.right = clip_x2;
- blits[blits_pos].body.srcRect.bottom = clip_y2;
- blits_pos++;
- }
- }
- /* reset size here and use calculated exact size from loops */
- fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
-
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
- 0, user_fence_rep, NULL);
-
- kfree(cmd);
-
- return ret;
-}
+static struct drm_mode_config_funcs vmw_kms_funcs = {
+ .fb_create = vmw_kms_fb_create,
+};
int vmw_kms_init(struct vmw_private *dev_priv)
{
@@ -1438,9 +929,7 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
- ret = vmw_kms_init_screen_object_display(dev_priv);
- if (ret) /* Fallback */
- (void)vmw_kms_init_legacy_display_system(dev_priv);
+ ret = vmw_kms_init_legacy_display_system(dev_priv);
return 0;
}
@@ -1453,10 +942,7 @@ int vmw_kms_close(struct vmw_private *dev_priv)
* drm_encoder_cleanup which takes the lock we deadlock.
*/
drm_mode_config_cleanup(dev_priv->dev);
- if (dev_priv->sou_priv)
- vmw_kms_close_screen_object_display(dev_priv);
- else
- vmw_kms_close_legacy_display_system(dev_priv);
+ vmw_kms_close_legacy_display_system(dev_priv);
return 0;
}
@@ -1501,9 +987,9 @@ out:
return ret;
}
-int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
			unsigned bpp, unsigned depth)
{
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
@@ -1511,15 +997,11 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
-
- if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
- DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
- depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
- return -EINVAL;
- }
-
- return 0;
+	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
}
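/*
 * Editor's illustration, not part of the patch: the three mask writes
 * above describe an XRGB8888 layout (red 0x00ff0000, green 0x0000ff00,
 * blue 0x000000ff). The legacy display unit later in this diff passes
 * a drm_framebuffer's geometry straight through:
 *
 *	vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
 *			   fb->bits_per_pixel, fb->depth);
 */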
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
@@ -1529,7 +1011,12 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
+ vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
+ vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
+ vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
+ vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
+ vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_priv->vga_pitchlock =
vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
@@ -1578,7 +1065,12 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
+ vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
vmw_priv->vga_pitchlock);
@@ -1603,376 +1095,6 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
return 0;
}
-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height)
-{
- return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
-}
-
-
-/**
- * Function called by DRM code called with vbl_lock held.
- */
-u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
-{
- return 0;
-}
-
-/**
- * Function called by DRM code called with vbl_lock held.
- */
-int vmw_enable_vblank(struct drm_device *dev, int crtc)
-{
- return -ENOSYS;
-}
-
-/**
- * Function called by DRM code called with vbl_lock held.
- */
-void vmw_disable_vblank(struct drm_device *dev, int crtc)
-{
-}
-
-
-/*
- * Small shared kms functions.
- */
-
-int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
- struct drm_vmw_rect *rects)
-{
- struct drm_device *dev = dev_priv->dev;
- struct vmw_display_unit *du;
- struct drm_connector *con;
-
- mutex_lock(&dev->mode_config.mutex);
-
-#if 0
- {
- unsigned int i;
-
- DRM_INFO("%s: new layout ", __func__);
- for (i = 0; i < num; i++)
- DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
- rects[i].w, rects[i].h);
- DRM_INFO("\n");
- }
-#endif
-
- list_for_each_entry(con, &dev->mode_config.connector_list, head) {
- du = vmw_connector_to_du(con);
- if (num > du->unit) {
- du->pref_width = rects[du->unit].w;
- du->pref_height = rects[du->unit].h;
- du->pref_active = true;
- du->gui_x = rects[du->unit].x;
- du->gui_y = rects[du->unit].y;
- } else {
- du->pref_width = 800;
- du->pref_height = 600;
- du->pref_active = false;
- }
- con->status = vmw_du_connector_detect(con, true);
- }
-
- mutex_unlock(&dev->mode_config.mutex);
-
- return 0;
-}
-
-int vmw_du_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct drm_framebuffer *old_fb = crtc->fb;
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
- struct drm_file *file_priv = event->base.file_priv;
- struct vmw_fence_obj *fence = NULL;
- struct drm_clip_rect clips;
- int ret;
-
- /* require ScreenObject support for page flipping */
- if (!dev_priv->sou_priv)
- return -ENOSYS;
-
- if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
- return -EINVAL;
-
- crtc->fb = fb;
-
- /* do a full screen dirty update */
- clips.x1 = clips.y1 = 0;
- clips.x2 = fb->width;
- clips.y2 = fb->height;
-
- if (vfb->dmabuf)
- ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
- 0, 0, &clips, 1, 1, &fence);
- else
- ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
- 0, 0, &clips, 1, 1, &fence);
-
-
- if (ret != 0)
- goto out_no_fence;
- if (!fence) {
- ret = -EINVAL;
- goto out_no_fence;
- }
-
- ret = vmw_event_fence_action_queue(file_priv, fence,
- &event->base,
- &event->event.tv_sec,
- &event->event.tv_usec,
- true);
-
- /*
- * No need to hold on to this now. The only cleanup
- * we need to do if we fail is unref the fence.
- */
- vmw_fence_obj_unreference(&fence);
-
- if (vmw_crtc_to_du(crtc)->is_implicit)
- vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
-
- return ret;
-
-out_no_fence:
- crtc->fb = old_fb;
- return ret;
-}
-
-
-void vmw_du_crtc_save(struct drm_crtc *crtc)
-{
-}
-
-void vmw_du_crtc_restore(struct drm_crtc *crtc)
-{
-}
-
-void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
- u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- int i;
-
- for (i = 0; i < size; i++) {
- DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
- r[i], g[i], b[i]);
- vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
- vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
- vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
- }
-}
-
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
-{
-}
-
-void vmw_du_connector_save(struct drm_connector *connector)
-{
-}
-
-void vmw_du_connector_restore(struct drm_connector *connector)
-{
-}
-
-enum drm_connector_status
-vmw_du_connector_detect(struct drm_connector *connector, bool force)
-{
- uint32_t num_displays;
- struct drm_device *dev = connector->dev;
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_display_unit *du = vmw_connector_to_du(connector);
-
- mutex_lock(&dev_priv->hw_mutex);
- num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
- mutex_unlock(&dev_priv->hw_mutex);
-
- return ((vmw_connector_to_du(connector)->unit < num_displays &&
- du->pref_active) ?
- connector_status_connected : connector_status_disconnected);
-}
-
-static struct drm_display_mode vmw_kms_connector_builtin[] = {
- /* 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1152x864@75Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@60Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
- 1472, 1664, 0, 768, 771, 778, 798, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@60Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
- 1480, 1680, 0, 800, 803, 809, 831, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x960@60Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
- 1488, 1800, 0, 960, 961, 964, 1000, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@60Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1360x768@60Hz */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
- 1536, 1792, 0, 768, 771, 777, 795, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@60Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@60Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
- 1672, 1904, 0, 900, 903, 909, 934, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@60Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@60Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@60Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1853x1392@60Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@60Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@60Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@60Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* Terminate */
- { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
-};
-
-/**
- * vmw_guess_mode_timing - Provide fake timings for a
- * 60Hz vrefresh mode.
- *
- * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
- * members filled in.
- */
-static void vmw_guess_mode_timing(struct drm_display_mode *mode)
-{
- mode->hsync_start = mode->hdisplay + 50;
- mode->hsync_end = mode->hsync_start + 50;
- mode->htotal = mode->hsync_end + 50;
-
- mode->vsync_start = mode->vdisplay + 50;
- mode->vsync_end = mode->vsync_start + 50;
- mode->vtotal = mode->vsync_end + 50;
-
- mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
- mode->vrefresh = drm_mode_vrefresh(mode);
-}
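/*
 * Editor's note on the removed helper above: the clock formula is
 * "htotal * vtotal * 60Hz" expressed in kHz, written as "/ 100 * 6"
 * presumably to keep the intermediate product small. Worked example
 * for a 1024x768 request: htotal = 1174 and vtotal = 918, so
 * clock = 1174 * 918 / 100 * 6 = 64662 kHz, and drm_mode_vrefresh()
 * then recovers 64662000 / (1174 * 918), which is roughly 60.
 */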
-
-
-int vmw_du_connector_fill_modes(struct drm_connector *connector,
- uint32_t max_width, uint32_t max_height)
-{
- struct vmw_display_unit *du = vmw_connector_to_du(connector);
- struct drm_device *dev = connector->dev;
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_display_mode *mode = NULL;
- struct drm_display_mode *bmode;
- struct drm_display_mode prefmode = { DRM_MODE("preferred",
- DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
- };
- int i;
-
- /* Add preferred mode */
- {
- mode = drm_mode_duplicate(dev, &prefmode);
- if (!mode)
- return 0;
- mode->hdisplay = du->pref_width;
- mode->vdisplay = du->pref_height;
- vmw_guess_mode_timing(mode);
-
- if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
- mode->vdisplay)) {
- drm_mode_probed_add(connector, mode);
- } else {
- drm_mode_destroy(dev, mode);
- mode = NULL;
- }
-
- if (du->pref_mode) {
- list_del_init(&du->pref_mode->head);
- drm_mode_destroy(dev, du->pref_mode);
- }
-
- /* mode might be null here, this is intended */
- du->pref_mode = mode;
- }
-
- for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
- bmode = &vmw_kms_connector_builtin[i];
- if (bmode->hdisplay > max_width ||
- bmode->vdisplay > max_height)
- continue;
-
- if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
- bmode->vdisplay))
- continue;
-
- mode = drm_mode_duplicate(dev, bmode);
- if (!mode)
- return 0;
- mode->vrefresh = drm_mode_vrefresh(mode);
-
- drm_mode_probed_add(connector, mode);
- }
-
- /* Move the prefered mode first, help apps pick the right mode. */
- if (du->pref_mode)
- list_move(&du->pref_mode->head, &connector->probed_modes);
-
- drm_mode_connector_list_update(connector);
-
- return 1;
-}
-
-int vmw_du_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- return 0;
-}
-
-
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1984,8 +1106,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_rect *rects;
unsigned rects_size;
int ret;
- int i;
- struct drm_mode_config *mode_config = &dev->mode_config;
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
@@ -1993,13 +1113,12 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
if (!arg->num_outputs) {
struct drm_vmw_rect def_rect = {0, 0, 800, 600};
- vmw_du_update_layout(dev_priv, 1, &def_rect);
+ vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
goto out_unlock;
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
- rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
- GFP_KERNEL);
+ rects = kzalloc(rects_size, GFP_KERNEL);
if (unlikely(!rects)) {
ret = -ENOMEM;
goto out_unlock;
@@ -2013,18 +1132,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
goto out_free;
}
- for (i = 0; i < arg->num_outputs; ++i) {
- if (rects[i].x < 0 ||
- rects[i].y < 0 ||
- rects[i].x + rects[i].w > mode_config->max_width ||
- rects[i].y + rects[i].h > mode_config->max_height) {
- DRM_ERROR("Invalid GUI layout.\n");
- ret = -EINVAL;
- goto out_free;
- }
- }
-
- vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
+ vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);
out_free:
kfree(rects);
@@ -2032,3 +1140,15 @@ out_unlock:
ttm_read_unlock(&vmaster->lock);
return ret;
}
+
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+ uint32_t pitch,
+ uint32_t height)
+{
+ return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+}
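/*
 * Editor's sketch, not part of the patch: a hypothetical caller showing
 * how a width/bpp pair becomes the pitch argument. Everything here
 * except vmw_kms_validate_mode_vram() and struct vmw_private is made
 * up for illustration.
 */
static bool example_fb_fits_vram(struct vmw_private *dev_priv,
				 uint32_t width, uint32_t height,
				 uint32_t bytes_per_pixel)
{
	/* Pitch of a tightly packed scanline, in bytes. */
	uint32_t pitch = width * bytes_per_pixel;

	return vmw_kms_validate_mode_vram(dev_priv, pitch, height);
}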
+
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8184bc5..8a398a0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -29,11 +29,8 @@
#define VMWGFX_KMS_H_
#include "drmP.h"
-#include "drm_crtc_helper.h"
#include "vmwgfx_drv.h"
-#define VMWGFX_NUM_DISPLAY_UNITS 8
-
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
@@ -48,9 +45,6 @@ struct vmw_framebuffer {
struct drm_framebuffer base;
int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb);
- bool dmabuf;
- struct ttm_base_object *user_obj;
- uint32_t user_handle;
};
@@ -63,14 +57,9 @@ struct vmw_framebuffer {
int vmw_cursor_update_image(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
-int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
- u32 width, u32 height,
- u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y);
-
/**
* Base class display unit.
*
@@ -94,73 +83,22 @@ struct vmw_display_unit {
int hotspot_y;
unsigned unit;
-
- /*
- * Prefered mode tracking.
- */
- unsigned pref_width;
- unsigned pref_height;
- bool pref_active;
- struct drm_display_mode *pref_mode;
-
- /*
- * Gui positioning
- */
- int gui_x;
- int gui_y;
- bool is_implicit;
};
-#define vmw_crtc_to_du(x) \
- container_of(x, struct vmw_display_unit, crtc)
-#define vmw_connector_to_du(x) \
- container_of(x, struct vmw_display_unit, connector)
-
-
/*
* Shared display unit functions - vmwgfx_kms.c
*/
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
-int vmw_du_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event);
-void vmw_du_crtc_save(struct drm_crtc *crtc);
-void vmw_du_crtc_restore(struct drm_crtc *crtc);
-void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
- u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size);
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
-void vmw_du_connector_save(struct drm_connector *connector);
-void vmw_du_connector_restore(struct drm_connector *connector);
-enum drm_connector_status
-vmw_du_connector_detect(struct drm_connector *connector, bool force);
-int vmw_du_connector_fill_modes(struct drm_connector *connector,
- uint32_t max_width, uint32_t max_height);
-int vmw_du_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val);
-
/*
* Legacy display unit functions - vmwgfx_ldu.c
*/
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
-
-/*
- * Screen Objects display functions - vmwgfx_scrn.c
- */
-int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
-int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
-int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
+int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects);
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
-
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 070fb23..b3a2cd5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -27,6 +27,7 @@
#include "vmwgfx_kms.h"
+#define VMWGFX_LDU_NUM_DU 8
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
@@ -50,6 +51,11 @@ struct vmw_legacy_display {
struct vmw_legacy_display_unit {
struct vmw_display_unit base;
+ unsigned pref_width;
+ unsigned pref_height;
+ bool pref_active;
+ struct drm_display_mode *pref_mode;
+
struct list_head active;
};
@@ -65,6 +71,20 @@ static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
* Legacy Display Unit CRTC functions
*/
+static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
+{
+}
+
+static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
+{
+}
+
+static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size)
+{
+}
+
static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
{
vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
@@ -74,10 +94,9 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
- struct vmw_display_unit *du = NULL;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
- int i = 0, ret;
+ int i = 0;
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
@@ -95,15 +114,17 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
return 0;
fb = entry->base.crtc.fb;
- return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
- fb->bits_per_pixel, fb->depth);
+ vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+ fb->bits_per_pixel, fb->depth);
+
+ return 0;
}
if (!list_empty(&lds->active)) {
entry = list_entry(lds->active.next, typeof(*entry), active);
fb = entry->base.crtc.fb;
- vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
+ vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
fb->bits_per_pixel, fb->depth);
}
@@ -130,25 +151,6 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
lds->last_num_active = lds->num_active;
-
- /* Find the first du with a cursor. */
- list_for_each_entry(entry, &lds->active, active) {
- du = &entry->base;
-
- if (!du->cursor_dmabuf)
- continue;
-
- ret = vmw_cursor_update_dmabuf(dev_priv,
- du->cursor_dmabuf,
- 64, 64,
- du->hotspot_x,
- du->hotspot_y);
- if (ret == 0)
- break;
-
- DRM_ERROR("Could not update cursor image\n");
- }
-
return 0;
}
@@ -263,7 +265,9 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
vmw_ldu_del_active(dev_priv, ldu);
- return vmw_ldu_commit_list(dev_priv);
+ vmw_ldu_commit_list(dev_priv);
+
+ return 0;
}
@@ -288,20 +292,21 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
vmw_ldu_add_active(dev_priv, ldu, vfb);
- return vmw_ldu_commit_list(dev_priv);
+ vmw_ldu_commit_list(dev_priv);
+
+ return 0;
}
static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
- .save = vmw_du_crtc_save,
- .restore = vmw_du_crtc_restore,
+ .save = vmw_ldu_crtc_save,
+ .restore = vmw_ldu_crtc_restore,
.cursor_set = vmw_du_crtc_cursor_set,
.cursor_move = vmw_du_crtc_cursor_move,
- .gamma_set = vmw_du_crtc_gamma_set,
+ .gamma_set = vmw_ldu_crtc_gamma_set,
.destroy = vmw_ldu_crtc_destroy,
.set_config = vmw_ldu_crtc_set_config,
};
-
/*
* Legacy Display Unit encoder functions
*/
@@ -319,18 +324,183 @@ static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
* Legacy Display Unit connector functions
*/
+static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
+{
+}
+
+static void vmw_ldu_connector_save(struct drm_connector *connector)
+{
+}
+
+static void vmw_ldu_connector_restore(struct drm_connector *connector)
+{
+}
+
+static enum drm_connector_status
+ vmw_ldu_connector_detect(struct drm_connector *connector,
+ bool force)
+{
+ if (vmw_connector_to_ldu(connector)->pref_active)
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+static const struct drm_display_mode vmw_ldu_connector_builtin[] = {
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* Terminate */
+ { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
+};
+
+static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
+ uint32_t max_width, uint32_t max_height)
+{
+ struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode prefmode = { DRM_MODE("preferred",
+ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
+ int i;
+
+ /* Add preferred mode */
+ {
+ mode = drm_mode_duplicate(dev, &prefmode);
+ if (!mode)
+ return 0;
+ mode->hdisplay = ldu->pref_width;
+ mode->vdisplay = ldu->pref_height;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+ mode->vdisplay)) {
+ drm_mode_probed_add(connector, mode);
+
+ if (ldu->pref_mode) {
+ list_del_init(&ldu->pref_mode->head);
+ drm_mode_destroy(dev, ldu->pref_mode);
+ }
+
+ ldu->pref_mode = mode;
+ }
+ }
+
+ for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
+ const struct drm_display_mode *bmode;
+
+ bmode = &vmw_ldu_connector_builtin[i];
+ if (bmode->hdisplay > max_width ||
+ bmode->vdisplay > max_height)
+ continue;
+
+ if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
+ bmode->vdisplay))
+ continue;
+
+ mode = drm_mode_duplicate(dev, bmode);
+ if (!mode)
+ return 0;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ drm_mode_connector_list_update(connector);
+
+ return 1;
+}
+
+static int vmw_ldu_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
static void vmw_ldu_connector_destroy(struct drm_connector *connector)
{
vmw_ldu_destroy(vmw_connector_to_ldu(connector));
}
static struct drm_connector_funcs vmw_legacy_connector_funcs = {
- .dpms = vmw_du_connector_dpms,
- .save = vmw_du_connector_save,
- .restore = vmw_du_connector_restore,
- .detect = vmw_du_connector_detect,
- .fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
+ .dpms = vmw_ldu_connector_dpms,
+ .save = vmw_ldu_connector_save,
+ .restore = vmw_ldu_connector_restore,
+ .detect = vmw_ldu_connector_detect,
+ .fill_modes = vmw_ldu_connector_fill_modes,
+ .set_property = vmw_ldu_connector_set_property,
.destroy = vmw_ldu_connector_destroy,
};
@@ -353,26 +523,23 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
INIT_LIST_HEAD(&ldu->active);
- ldu->base.pref_active = (unit == 0);
- ldu->base.pref_width = dev_priv->initial_width;
- ldu->base.pref_height = dev_priv->initial_height;
- ldu->base.pref_mode = NULL;
- ldu->base.is_implicit = true;
+ ldu->pref_active = (unit == 0);
+ ldu->pref_width = 800;
+ ldu->pref_height = 600;
+ ldu->pref_mode = NULL;
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
- connector->status = vmw_du_connector_detect(connector, true);
+ DRM_MODE_CONNECTOR_LVDS);
+ connector->status = vmw_ldu_connector_detect(connector, true);
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL);
+ DRM_MODE_ENCODER_LVDS);
drm_mode_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
- drm_mode_crtc_set_gamma_size(crtc, 256);
-
drm_connector_attach_property(connector,
dev->mode_config.dirty_info_property,
1);
@@ -383,7 +550,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- int i, ret;
+ int i;
+ int ret;
if (dev_priv->ldu_priv) {
DRM_INFO("ldu system already on\n");
@@ -391,6 +559,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
}
dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
+
if (!dev_priv->ldu_priv)
return -ENOMEM;
@@ -399,31 +568,18 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
dev_priv->ldu_priv->last_num_active = 0;
dev_priv->ldu_priv->fb = NULL;
- /* for old hardware without multimon only enable one display */
- if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
- ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
- else
- ret = drm_vblank_init(dev, 1);
- if (ret != 0)
- goto err_free;
-
- ret = drm_mode_create_dirty_info_property(dev);
- if (ret != 0)
- goto err_vblank_cleanup;
+ drm_mode_create_dirty_info_property(dev_priv->dev);
- if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
- for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
+ if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
+ for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
vmw_ldu_init(dev_priv, i);
- else
+ ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
+ } else {
+ /* for old hardware without multimon only enable one display */
vmw_ldu_init(dev_priv, 0);
+ ret = drm_vblank_init(dev, 1);
+ }
- return 0;
-
-err_vblank_cleanup:
- drm_vblank_cleanup(dev);
-err_free:
- kfree(dev_priv->ldu_priv);
- dev_priv->ldu_priv = NULL;
return ret;
}
@@ -431,14 +587,52 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ drm_vblank_cleanup(dev);
if (!dev_priv->ldu_priv)
return -ENOSYS;
- drm_vblank_cleanup(dev);
-
BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
kfree(dev_priv->ldu_priv);
return 0;
}
+
+int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
+ struct drm_vmw_rect *rects)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_legacy_display_unit *ldu;
+ struct drm_connector *con;
+ int i;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+#if 0
+ DRM_INFO("%s: new layout ", __func__);
+ for (i = 0; i < (int)num; i++)
+ DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
+ rects[i].w, rects[i].h);
+ DRM_INFO("\n");
+#else
+ (void)i;
+#endif
+
+ list_for_each_entry(con, &dev->mode_config.connector_list, head) {
+ ldu = vmw_connector_to_ldu(con);
+ if (num > ldu->base.unit) {
+ ldu->pref_width = rects[ldu->base.unit].w;
+ ldu->pref_height = rects[ldu->base.unit].h;
+ ldu->pref_active = true;
+ } else {
+ ldu->pref_width = 800;
+ ldu->pref_height = 600;
+ ldu->pref_active = false;
+ }
+ con->status = vmw_ldu_connector_detect(con, true);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
deleted file mode 100644
index 8a8725c..0000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/**************************************************************************
- *
- * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#include "vmwgfx_drv.h"
-
-struct vmw_marker {
- struct list_head head;
- uint32_t seqno;
- struct timespec submitted;
-};
-
-void vmw_marker_queue_init(struct vmw_marker_queue *queue)
-{
- INIT_LIST_HEAD(&queue->head);
- queue->lag = ns_to_timespec(0);
- getrawmonotonic(&queue->lag_time);
- spin_lock_init(&queue->lock);
-}
-
-void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
-{
- struct vmw_marker *marker, *next;
-
- spin_lock(&queue->lock);
- list_for_each_entry_safe(marker, next, &queue->head, head) {
- kfree(marker);
- }
- spin_unlock(&queue->lock);
-}
-
-int vmw_marker_push(struct vmw_marker_queue *queue,
- uint32_t seqno)
-{
- struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
-
- if (unlikely(!marker))
- return -ENOMEM;
-
- marker->seqno = seqno;
- getrawmonotonic(&marker->submitted);
- spin_lock(&queue->lock);
- list_add_tail(&marker->head, &queue->head);
- spin_unlock(&queue->lock);
-
- return 0;
-}
-
-int vmw_marker_pull(struct vmw_marker_queue *queue,
- uint32_t signaled_seqno)
-{
- struct vmw_marker *marker, *next;
- struct timespec now;
- bool updated = false;
-
- spin_lock(&queue->lock);
- getrawmonotonic(&now);
-
- if (list_empty(&queue->head)) {
- queue->lag = ns_to_timespec(0);
- queue->lag_time = now;
- updated = true;
- goto out_unlock;
- }
-
- list_for_each_entry_safe(marker, next, &queue->head, head) {
- if (signaled_seqno - marker->seqno > (1 << 30))
- continue;
-
- queue->lag = timespec_sub(now, marker->submitted);
- queue->lag_time = now;
- updated = true;
- list_del(&marker->head);
- kfree(marker);
- }
-
-out_unlock:
- spin_unlock(&queue->lock);
-
- return (updated) ? 0 : -EBUSY;
-}
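/*
 * Editor's note on the removed code above: the "(1 << 30)" test is a
 * serial-number comparison in unsigned 32-bit arithmetic.
 * signaled_seqno - marker->seqno is small for markers that have already
 * been signaled, but wraps to a huge value (top bits set) for markers
 * still in flight, so those are skipped rather than popped, even across
 * seqno wraparound.
 */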
-
-static struct timespec vmw_timespec_add(struct timespec t1,
- struct timespec t2)
-{
- t1.tv_sec += t2.tv_sec;
- t1.tv_nsec += t2.tv_nsec;
- if (t1.tv_nsec >= 1000000000L) {
- t1.tv_sec += 1;
- t1.tv_nsec -= 1000000000L;
- }
-
- return t1;
-}
-
-static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
-{
- struct timespec now;
-
- spin_lock(&queue->lock);
- getrawmonotonic(&now);
- queue->lag = vmw_timespec_add(queue->lag,
- timespec_sub(now, queue->lag_time));
- queue->lag_time = now;
- spin_unlock(&queue->lock);
- return queue->lag;
-}
-
-
-static bool vmw_lag_lt(struct vmw_marker_queue *queue,
- uint32_t us)
-{
- struct timespec lag, cond;
-
- cond = ns_to_timespec((s64) us * 1000);
- lag = vmw_fifo_lag(queue);
- return (timespec_compare(&lag, &cond) < 1);
-}
-
-int vmw_wait_lag(struct vmw_private *dev_priv,
- struct vmw_marker_queue *queue, uint32_t us)
-{
- struct vmw_marker *marker;
- uint32_t seqno;
- int ret;
-
- while (!vmw_lag_lt(queue, us)) {
- spin_lock(&queue->lock);
- if (list_empty(&queue->head))
- seqno = atomic_read(&dev_priv->marker_seq);
- else {
- marker = list_first_entry(&queue->head,
- struct vmw_marker, head);
- seqno = marker->seqno;
- }
- spin_unlock(&queue->lock);
-
- ret = vmw_wait_seqno(dev_priv, false, seqno, true,
- 3*HZ);
-
- if (unlikely(ret != 0))
- return ret;
-
- (void) vmw_marker_pull(queue, seqno);
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 14399ee..f1a52f9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -87,6 +87,48 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
}
/**
+ * Pin or unpin a buffer in vram.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to pin or unpin.
+ * @pin: Pin buffer in vram if true.
+ * @interruptible: Use interruptible wait.
+ *
+ * Takes the current master's ttm lock in read.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible)
+{
+ struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_placement *overlay_placement = &vmw_vram_placement;
+ int ret;
+
+ ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err;
+
+ if (pin)
+ overlay_placement = &vmw_vram_ne_placement;
+
+ ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_read_unlock(&dev_priv->active_master->lock);
+
+ return ret;
+}
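/*
 * Editor's sketch, not part of the patch: the pin/unpin pairing the
 * overlay callers below rely on. example_show_overlay() and
 * "stream_buf" are hypothetical; the error contract (-ERESTARTSYS only
 * when interruptible, and a forced unpin merely dropping NO_EVICT so
 * it cannot hit -ENOMEM) comes from the function above and the
 * callers' comments.
 */
static int example_show_overlay(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *stream_buf)
{
	int ret;

	/* Pin interruptibly before the hardware scans the buffer out. */
	ret = vmw_dmabuf_pin_in_vram(dev_priv, stream_buf, true, true);
	if (ret)
		return ret;

	/* ... send the put command; the buffer stays pinned while the
	 * stream runs ... */

	/* On stop: a forced unpin only removes NO_EVICT, so no -ENOMEM. */
	return vmw_dmabuf_pin_in_vram(dev_priv, stream_buf, false, false);
}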
+
+/**
* Send put command to hw.
*
* Returns
@@ -97,80 +139,68 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
- struct vmw_escape_video_flush *flush;
- size_t fifo_size;
- bool have_so = dev_priv->sou_priv ? true : false;
- int i, num_items;
- SVGAGuestPtr ptr;
-
struct {
struct vmw_escape_header escape;
struct {
- uint32_t cmdType;
- uint32_t streamId;
- } header;
+ struct {
+ uint32_t cmdType;
+ uint32_t streamId;
+ } header;
+ struct {
+ uint32_t registerId;
+ uint32_t value;
+ } items[SVGA_VIDEO_PITCH_3 + 1];
+ } body;
+ struct vmw_escape_video_flush flush;
} *cmds;
- struct {
- uint32_t registerId;
- uint32_t value;
- } *items;
-
- /* defines are a index needs + 1 */
- if (have_so)
- num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
- else
- num_items = SVGA_VIDEO_PITCH_3 + 1;
-
- fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
+ uint32_t offset;
+ int i, ret;
- cmds = vmw_fifo_reserve(dev_priv, fifo_size);
- /* hardware has hung, can't do anything here */
- if (!cmds)
- return -ENOMEM;
+ for (;;) {
+ cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
+ if (cmds)
+ break;
- items = (typeof(items))&cmds[1];
- flush = (struct vmw_escape_video_flush *)&items[num_items];
-
- /* the size is header + number of items */
- fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));
-
- cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
- cmds->header.streamId = arg->stream_id;
-
- /* the IDs are neatly numbered */
- for (i = 0; i < num_items; i++)
- items[i].registerId = i;
-
- vmw_bo_get_guest_ptr(&buf->base, &ptr);
- ptr.offset += arg->offset;
-
- items[SVGA_VIDEO_ENABLED].value = true;
- items[SVGA_VIDEO_FLAGS].value = arg->flags;
- items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
- items[SVGA_VIDEO_FORMAT].value = arg->format;
- items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
- items[SVGA_VIDEO_SIZE].value = arg->size;
- items[SVGA_VIDEO_WIDTH].value = arg->width;
- items[SVGA_VIDEO_HEIGHT].value = arg->height;
- items[SVGA_VIDEO_SRC_X].value = arg->src.x;
- items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
- items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
- items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
- items[SVGA_VIDEO_DST_X].value = arg->dst.x;
- items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
- items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
- items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
- items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
- items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
- items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
- if (have_so) {
- items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId;
- items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
+ ret = vmw_fallback_wait(dev_priv, false, true, 0,
+ interruptible, 3*HZ);
+ if (interruptible && ret == -ERESTARTSYS)
+ return ret;
+ else
+ BUG_ON(ret != 0);
}
- fill_flush(flush, arg->stream_id);
+ fill_escape(&cmds->escape, sizeof(cmds->body));
+ cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+ cmds->body.header.streamId = arg->stream_id;
+
+ for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
+ cmds->body.items[i].registerId = i;
+
+ offset = buf->base.offset + arg->offset;
+
+ cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
+ cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
+ cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
+ cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
+ cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
+ cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
+ cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
+ cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
+ cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
+ cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
+ cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
+ cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
+ cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
+ cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
+ cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
+ cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
+ cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
+ cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
+ cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
+
+ fill_flush(&cmds->flush, arg->stream_id);
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_fifo_commit(dev_priv, sizeof(*cmds));
return 0;
}
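/*
 * Editor's note: with the command rebuilt as one fixed-size struct,
 * reserve and commit sizes are both sizeof(*cmds) by construction, and
 * fill_escape() sizes the escape payload from sizeof(cmds->body); the
 * replaced code had to recompute fifo_size from a variable item count.
 */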
@@ -218,25 +248,6 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
}
/**
- * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
- *
- * With the introduction of screen objects buffers could now be
- * used with GMRs instead of being locked to vram.
- */
-static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool inter)
-{
- if (!pin)
- return vmw_dmabuf_unpin(dev_priv, buf, inter);
-
- if (!dev_priv->sou_priv)
- return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
-
- return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
-}
-
-/**
* Stop or pause a stream.
*
* If the stream is paused the no evict flag is removed from the buffer
@@ -268,8 +279,8 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
return ret;
/* We just remove the NO_EVICT flag so no -ENOMEM */
- ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
- interruptible);
+ ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
+ interruptible);
if (interruptible && ret == -ERESTARTSYS)
return ret;
else
@@ -331,7 +342,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
/* We don't start the old stream if we are interrupted.
* Might return -ENOMEM if it can't fit the buffer in vram.
*/
- ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
+ ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
if (ret)
return ret;
@@ -340,8 +351,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
/* This one needs to happen no matter what. We only remove
* the NO_EVICT flag so this is safe from -ENOMEM.
*/
- BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
- != 0);
+ BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
return ret;
}
@@ -575,10 +585,11 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
return -ENOSYS;
}
- overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+ overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return -ENOMEM;
+ memset(overlay, 0, sizeof(*overlay));
mutex_init(&overlay->mutex);
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
overlay->stream[i].buf = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a37abb5..5408b1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -31,6 +31,10 @@
#include "ttm/ttm_placement.h"
#include "drmP.h"
+#define VMW_RES_CONTEXT ttm_driver_type0
+#define VMW_RES_SURFACE ttm_driver_type1
+#define VMW_RES_STREAM ttm_driver_type2
+
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
@@ -39,7 +43,6 @@ struct vmw_user_context {
struct vmw_user_surface {
struct ttm_base_object base;
struct vmw_surface srf;
- uint32_t size;
};
struct vmw_user_dma_buffer {
@@ -62,17 +65,6 @@ struct vmw_user_stream {
struct vmw_stream stream;
};
-struct vmw_surface_offset {
- uint32_t face;
- uint32_t mip;
- uint32_t bo_offset;
-};
-
-
-static uint64_t vmw_user_context_size;
-static uint64_t vmw_user_surface_size;
-static uint64_t vmw_user_stream_size;
-
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
@@ -92,36 +84,13 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
return res;
}
-
-/**
- * vmw_resource_release_id - release a resource id to the id manager.
- *
- * @res: Pointer to the resource.
- *
- * Release the resource id to the resource id manager and set it to -1
- */
-static void vmw_resource_release_id(struct vmw_resource *res)
-{
- struct vmw_private *dev_priv = res->dev_priv;
-
- write_lock(&dev_priv->resource_lock);
- if (res->id != -1)
- idr_remove(res->idr, res->id);
- res->id = -1;
- write_unlock(&dev_priv->resource_lock);
-}
-
static void vmw_resource_release(struct kref *kref)
{
struct vmw_resource *res =
container_of(kref, struct vmw_resource, kref);
struct vmw_private *dev_priv = res->dev_priv;
- int id = res->id;
- struct idr *idr = res->idr;
- res->avail = false;
- if (res->remove_from_lists != NULL)
- res->remove_from_lists(res);
+ idr_remove(res->idr, res->id);
write_unlock(&dev_priv->resource_lock);
if (likely(res->hw_destroy != NULL))
@@ -133,9 +102,6 @@ static void vmw_resource_release(struct kref *kref)
kfree(res);
write_lock(&dev_priv->resource_lock);
-
- if (id != -1)
- idr_remove(idr, id);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
@@ -149,29 +115,28 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
write_unlock(&dev_priv->resource_lock);
}
-
-/**
- * vmw_resource_alloc_id - release a resource id to the id manager.
- *
- * @dev_priv: Pointer to the device private structure.
- * @res: Pointer to the resource.
- *
- * Allocate the lowest free resource from the resource manager, and set
- * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
- */
-static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
- struct vmw_resource *res)
+static int vmw_resource_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ struct idr *idr,
+ enum ttm_object_type obj_type,
+ void (*res_free) (struct vmw_resource *res))
{
int ret;
- BUG_ON(res->id != -1);
+ kref_init(&res->kref);
+ res->hw_destroy = NULL;
+ res->res_free = res_free;
+ res->res_type = obj_type;
+ res->idr = idr;
+ res->avail = false;
+ res->dev_priv = dev_priv;
do {
- if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
+ if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
return -ENOMEM;
write_lock(&dev_priv->resource_lock);
- ret = idr_get_new_above(res->idr, res, 1, &res->id);
+ ret = idr_get_new_above(idr, res, 1, &res->id);
write_unlock(&dev_priv->resource_lock);
} while (ret == -EAGAIN);
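
The do/while loop above is the classic pre-3.9 idr allocation idiom: idr_pre_get() preallocates nodes outside the lock, and idr_get_new_above() returns -EAGAIN when a racing allocator consumed the preallocation. A minimal sketch under those old-API assumptions; my_idr, my_lock and ptr are placeholder names:

	int id, ret;

	do {
		if (idr_pre_get(&my_idr, GFP_KERNEL) == 0)
			return -ENOMEM;			/* preallocation failed */
		spin_lock(&my_lock);
		/* start ids at 1 so that 0 never names a valid object */
		ret = idr_get_new_above(&my_idr, ptr, 1, &id);
		spin_unlock(&my_lock);
	} while (ret == -EAGAIN);			/* lost the race, retry */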
@@ -179,33 +144,6 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
return ret;
}
-
-static int vmw_resource_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- struct idr *idr,
- enum ttm_object_type obj_type,
- bool delay_id,
- void (*res_free) (struct vmw_resource *res),
- void (*remove_from_lists)
- (struct vmw_resource *res))
-{
- kref_init(&res->kref);
- res->hw_destroy = NULL;
- res->res_free = res_free;
- res->remove_from_lists = remove_from_lists;
- res->res_type = obj_type;
- res->idr = idr;
- res->avail = false;
- res->dev_priv = dev_priv;
- INIT_LIST_HEAD(&res->query_head);
- INIT_LIST_HEAD(&res->validate_head);
- res->id = -1;
- if (delay_id)
- return 0;
- else
- return vmw_resource_alloc_id(dev_priv, res);
-}
-
/**
* vmw_resource_activate
*
@@ -260,12 +198,8 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyContext body;
- } *cmd;
+ } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
@@ -277,7 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
cmd->body.cid = cpu_to_le32(res->id);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_3d_resource_dec(dev_priv);
}
static int vmw_context_init(struct vmw_private *dev_priv,
@@ -292,17 +226,14 @@ static int vmw_context_init(struct vmw_private *dev_priv,
} *cmd;
ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
- VMW_RES_CONTEXT, false, res_free, NULL);
+ VMW_RES_CONTEXT, res_free);
if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a resource id.\n");
- goto out_early;
- }
-
- if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
- DRM_ERROR("Out of hw context ids.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
+ if (res_free == NULL)
+ kfree(res);
+ else
+ res_free(res);
+ return ret;
}
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
@@ -317,16 +248,9 @@ static int vmw_context_init(struct vmw_private *dev_priv,
cmd->body.cid = cpu_to_le32(res->id);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
+ (void) vmw_3d_resource_inc(dev_priv);
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
-
-out_early:
- if (res_free == NULL)
- kfree(res);
- else
- res_free(res);
- return ret;
}
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
@@ -349,11 +273,8 @@ static void vmw_user_context_free(struct vmw_resource *res)
{
struct vmw_user_context *ctx =
container_of(res, struct vmw_user_context, res);
- struct vmw_private *dev_priv = res->dev_priv;
kfree(ctx);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
}
/**
@@ -407,56 +328,23 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_context *ctx;
+ struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
-
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by the maximum number of contexts anyway.
- */
-
- if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_context_size,
- false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for context"
- " creation.\n");
- goto out_unlock;
- }
-
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(ctx == NULL)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
- ret = -ENOMEM;
- goto out_unlock;
- }
+ if (unlikely(ctx == NULL))
+ return -ENOMEM;
res = &ctx->res;
ctx->base.shareable = false;
ctx->base.tfile = NULL;
- /*
- * From here on, the destructor takes over resource freeing.
- */
-
ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
if (unlikely(ret != 0))
- goto out_unlock;
+ return ret;
tmp = vmw_resource_reference(&ctx->res);
ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
@@ -470,16 +358,13 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
arg->cid = res->id;
out_err:
vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
return ret;
}
int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res)
+ int id)
{
struct vmw_resource *res;
int ret = 0;
@@ -491,8 +376,6 @@ int vmw_context_check(struct vmw_private *dev_priv,
container_of(res, struct vmw_user_context, res);
if (ctx->base.tfile != tfile && !ctx->base.shareable)
ret = -EPERM;
- if (p_res)
- *p_res = vmw_resource_reference(res);
} else
ret = -EINVAL;
read_unlock(&dev_priv->resource_lock);
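
vmw_context_check() follows the driver's usual lookup pattern: find the resource in the idr under the read side of resource_lock, then require that the caller's file owns the object or that the object is shareable. A condensed sketch of that pattern, reconstructed from the fields visible in the hunks above:

	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		ctx = container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;		/* neither owner nor shareable */
	} else {
		ret = -EINVAL;			/* no such context id */
	}
	read_unlock(&dev_priv->resource_lock);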
@@ -500,638 +383,102 @@ int vmw_context_check(struct vmw_private *dev_priv,
return ret;
}
-struct vmw_bpp {
- uint8_t bpp;
- uint8_t s_bpp;
-};
-
-/*
- * Size table for the supported SVGA3D surface formats. Each entry
- * consists of two values: the bpp value and the s_bpp value, which is
- * short for "stride bits per pixel". The values are given in such a way that the
- * minimum stride for the image is calculated using
- *
- * min_stride = w*s_bpp
- *
- * and the total memory requirement for the image is
- *
- * h*min_stride*bpp/s_bpp
- *
- */
-static const struct vmw_bpp vmw_sf_bpp[] = {
- [SVGA3D_FORMAT_INVALID] = {0, 0},
- [SVGA3D_X8R8G8B8] = {32, 32},
- [SVGA3D_A8R8G8B8] = {32, 32},
- [SVGA3D_R5G6B5] = {16, 16},
- [SVGA3D_X1R5G5B5] = {16, 16},
- [SVGA3D_A1R5G5B5] = {16, 16},
- [SVGA3D_A4R4G4B4] = {16, 16},
- [SVGA3D_Z_D32] = {32, 32},
- [SVGA3D_Z_D16] = {16, 16},
- [SVGA3D_Z_D24S8] = {32, 32},
- [SVGA3D_Z_D15S1] = {16, 16},
- [SVGA3D_LUMINANCE8] = {8, 8},
- [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
- [SVGA3D_LUMINANCE16] = {16, 16},
- [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
- [SVGA3D_DXT1] = {4, 16},
- [SVGA3D_DXT2] = {8, 32},
- [SVGA3D_DXT3] = {8, 32},
- [SVGA3D_DXT4] = {8, 32},
- [SVGA3D_DXT5] = {8, 32},
- [SVGA3D_BUMPU8V8] = {16, 16},
- [SVGA3D_BUMPL6V5U5] = {16, 16},
- [SVGA3D_BUMPX8L8V8U8] = {32, 32},
- [SVGA3D_ARGB_S10E5] = {16, 16},
- [SVGA3D_ARGB_S23E8] = {32, 32},
- [SVGA3D_A2R10G10B10] = {32, 32},
- [SVGA3D_V8U8] = {16, 16},
- [SVGA3D_Q8W8V8U8] = {32, 32},
- [SVGA3D_CxV8U8] = {16, 16},
- [SVGA3D_X8L8V8U8] = {32, 32},
- [SVGA3D_A2W10V10U10] = {32, 32},
- [SVGA3D_ALPHA8] = {8, 8},
- [SVGA3D_R_S10E5] = {16, 16},
- [SVGA3D_R_S23E8] = {32, 32},
- [SVGA3D_RG_S10E5] = {16, 16},
- [SVGA3D_RG_S23E8] = {32, 32},
- [SVGA3D_BUFFER] = {8, 8},
- [SVGA3D_Z_D24X8] = {32, 32},
- [SVGA3D_V16U16] = {32, 32},
- [SVGA3D_G16R16] = {32, 32},
- [SVGA3D_A16B16G16R16] = {64, 64},
- [SVGA3D_UYVY] = {12, 12},
- [SVGA3D_YUY2] = {12, 12},
- [SVGA3D_NV12] = {12, 8},
- [SVGA3D_AYUV] = {32, 32},
- [SVGA3D_BC4_UNORM] = {4, 16},
- [SVGA3D_BC5_UNORM] = {8, 32},
- [SVGA3D_Z_DF16] = {16, 16},
- [SVGA3D_Z_DF24] = {24, 24},
- [SVGA3D_Z_D24S8_INT] = {32, 32}
-};
-
/**
* Surface management.
*/
-struct vmw_surface_dma {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA body;
- SVGA3dCopyBox cb;
- SVGA3dCmdSurfaceDMASuffix suffix;
-};
-
-struct vmw_surface_define {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineSurface body;
-};
-
-struct vmw_surface_destroy {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroySurface body;
-};
-
-
-/**
- * vmw_surface_dma_size - Compute fifo size for a dma command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface dma command for backup or
- * restoration of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
-{
- return srf->num_sizes * sizeof(struct vmw_surface_dma);
-}
-
-
-/**
- * vmw_surface_define_size - Compute fifo size for a surface define command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface define command for the definition
- * of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
-{
- return sizeof(struct vmw_surface_define) + srf->num_sizes *
- sizeof(SVGA3dSize);
-}
-
-
-/**
- * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
- *
- * Computes the required size for a surface destroy command for the destruction
- * of a hw surface.
- */
-static inline uint32_t vmw_surface_destroy_size(void)
-{
- return sizeof(struct vmw_surface_destroy);
-}
-
-/**
- * vmw_surface_destroy_encode - Encode a surface_destroy command.
- *
- * @id: The surface id
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_destroy_encode(uint32_t id,
- void *cmd_space)
-{
- struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
- cmd_space;
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.sid = id;
-}
-
-/**
- * vmw_surface_define_encode - Encode a surface_define command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_define_encode(const struct vmw_surface *srf,
- void *cmd_space)
-{
- struct vmw_surface_define *cmd = (struct vmw_surface_define *)
- cmd_space;
- struct drm_vmw_size *src_size;
- SVGA3dSize *cmd_size;
- uint32_t cmd_len;
- int i;
-
- cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
- cmd->header.size = cmd_len;
- cmd->body.sid = srf->res.id;
- cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = cpu_to_le32(srf->format);
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- cmd->body.face[i].numMipLevels = srf->mip_levels[i];
-
- cmd += 1;
- cmd_size = (SVGA3dSize *) cmd;
- src_size = srf->sizes;
-
- for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
- cmd_size->width = src_size->width;
- cmd_size->height = src_size->height;
- cmd_size->depth = src_size->depth;
- }
-}
-
-
-/**
- * vmw_surface_dma_encode - Encode a surface_dma command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
- * should be placed or read from.
- * @to_surface: Boolean whether to DMA to the surface or from the surface.
- */
-static void vmw_surface_dma_encode(struct vmw_surface *srf,
- void *cmd_space,
- const SVGAGuestPtr *ptr,
- bool to_surface)
-{
- uint32_t i;
- uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
- uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
- struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
-
- for (i = 0; i < srf->num_sizes; ++i) {
- SVGA3dCmdHeader *header = &cmd->header;
- SVGA3dCmdSurfaceDMA *body = &cmd->body;
- SVGA3dCopyBox *cb = &cmd->cb;
- SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
- const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
- const struct drm_vmw_size *cur_size = &srf->sizes[i];
-
- header->id = SVGA_3D_CMD_SURFACE_DMA;
- header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
-
- body->guest.ptr = *ptr;
- body->guest.ptr.offset += cur_offset->bo_offset;
- body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
- body->host.sid = srf->res.id;
- body->host.face = cur_offset->face;
- body->host.mipmap = cur_offset->mip;
- body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
- SVGA3D_READ_HOST_VRAM);
- cb->x = 0;
- cb->y = 0;
- cb->z = 0;
- cb->srcx = 0;
- cb->srcy = 0;
- cb->srcz = 0;
- cb->w = cur_size->width;
- cb->h = cur_size->height;
- cb->d = cur_size->depth;
-
- suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = body->guest.pitch*cur_size->height*
- cur_size->depth*bpp / stride_bpp;
- suffix->flags.discard = 0;
- suffix->flags.unsynchronized = 0;
- suffix->flags.reserved = 0;
- ++cmd;
- }
-};
-
-
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
- struct vmw_surface *srf;
- void *cmd;
-
- if (res->id != -1) {
-
- cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
-
- vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroySurface body;
+ } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- /*
- * used_memory_size_atomic, or separate lock
- * to avoid taking dev_priv::cmdbuf_mutex in
- * the destroy path.
- */
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
- mutex_lock(&dev_priv->cmdbuf_mutex);
- srf = container_of(res, struct vmw_surface, res);
- dev_priv->used_memory_size -= srf->backup_size;
- mutex_unlock(&dev_priv->cmdbuf_mutex);
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.sid = cpu_to_le32(res->id);
- }
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_3d_resource_dec(dev_priv);
}
void vmw_surface_res_free(struct vmw_resource *res)
{
struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
- if (srf->backup)
- ttm_bo_unref(&srf->backup);
- kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
kfree(srf);
}
-
-/**
- * vmw_surface_do_validate - make a surface available to the device.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * If the surface doesn't have a hw id, allocate one, and optionally
- * DMA the backed up surface contents to the device.
- *
- * Returns -EBUSY if there weren't sufficient device resources to
- * complete the validation. Retry after freeing up resources.
- *
- * May return other errors if the kernel is out of guest resources.
- */
-int vmw_surface_do_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
+int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res))
{
- struct vmw_resource *res = &srf->res;
- struct list_head val_list;
- struct ttm_validate_buffer val_buf;
- uint32_t submit_size;
- uint8_t *cmd;
int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineSurface body;
+ } *cmd;
+ SVGA3dSize *cmd_size;
+ struct vmw_resource *res = &srf->res;
+ struct drm_vmw_size *src_size;
+ size_t submit_size;
+ uint32_t cmd_len;
+ int i;
- if (likely(res->id != -1))
- return 0;
-
- if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
- dev_priv->memory_size))
- return -EBUSY;
-
- /*
- * Reserve- and validate the backup DMA bo.
- */
-
- if (srf->backup) {
- INIT_LIST_HEAD(&val_list);
- val_buf.bo = ttm_bo_reference(srf->backup);
- val_buf.new_sync_obj_arg = (void *)((unsigned long)
- DRM_VMW_FENCE_FLAG_EXEC);
- list_add_tail(&val_buf.head, &val_list);
- ret = ttm_eu_reserve_buffers(&val_list);
- if (unlikely(ret != 0))
- goto out_no_reserve;
-
- ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
- true, false, false);
- if (unlikely(ret != 0))
- goto out_no_validate;
- }
-
- /*
- * Alloc id for the resource.
- */
+ BUG_ON(res_free == NULL);
+ ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
+ VMW_RES_SURFACE, res_free);
- ret = vmw_resource_alloc_id(dev_priv, res);
if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a surface id.\n");
- goto out_no_id;
- }
- if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
- ret = -EBUSY;
- goto out_no_fifo;
+ res_free(res);
+ return ret;
}
-
- /*
- * Encode surface define- and dma commands.
- */
-
- submit_size = vmw_surface_define_size(srf);
- if (srf->backup)
- submit_size += vmw_surface_dma_size(srf);
+ submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
+ cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "validation.\n");
- ret = -ENOMEM;
- goto out_no_fifo;
- }
-
- vmw_surface_define_encode(srf, cmd);
- if (srf->backup) {
- SVGAGuestPtr ptr;
-
- cmd += vmw_surface_define_size(srf);
- vmw_bo_get_guest_ptr(srf->backup, &ptr);
- vmw_surface_dma_encode(srf, cmd, &ptr, true);
- }
-
- vmw_fifo_commit(dev_priv, submit_size);
-
- /*
- * Create a fence object and fence the backup buffer.
- */
-
- if (srf->backup) {
- struct vmw_fence_obj *fence;
-
- (void) vmw_execbuf_fence_commands(NULL, dev_priv,
- &fence, NULL);
- ttm_eu_fence_buffer_objects(&val_list, fence);
- if (likely(fence != NULL))
- vmw_fence_obj_unreference(&fence);
- ttm_bo_unref(&val_buf.bo);
- ttm_bo_unref(&srf->backup);
+ DRM_ERROR("Fifo reserve failed for create surface.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
}
- /*
- * Surface memory usage accounting.
- */
-
- dev_priv->used_memory_size += srf->backup_size;
-
- return 0;
-
-out_no_fifo:
- vmw_resource_release_id(res);
-out_no_id:
-out_no_validate:
- if (srf->backup)
- ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
- if (srf->backup)
- ttm_bo_unref(&val_buf.bo);
- return ret;
-}
-
-/**
- * vmw_surface_evict - Evict a hw surface.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface
- *
- * DMA the contents of a hw surface to a backup guest buffer object,
- * and destroy the hw surface, releasing its id.
- */
-int vmw_surface_evict(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- struct vmw_resource *res = &srf->res;
- struct list_head val_list;
- struct ttm_validate_buffer val_buf;
- uint32_t submit_size;
- uint8_t *cmd;
- int ret;
- struct vmw_fence_obj *fence;
- SVGAGuestPtr ptr;
-
- BUG_ON(res->id == -1);
-
- /*
- * Create a surface backup buffer object.
- */
-
- if (!srf->backup) {
- ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
- ttm_bo_type_device,
- &vmw_srf_placement, 0, 0, true,
- NULL, &srf->backup);
- if (unlikely(ret != 0))
- return ret;
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
+ cmd->header.size = cpu_to_le32(cmd_len);
+ cmd->body.sid = cpu_to_le32(res->id);
+ cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
+ cmd->body.format = cpu_to_le32(srf->format);
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ cmd->body.face[i].numMipLevels =
+ cpu_to_le32(srf->mip_levels[i]);
}
- /*
- * Reserve- and validate the backup DMA bo.
- */
-
- INIT_LIST_HEAD(&val_list);
- val_buf.bo = ttm_bo_reference(srf->backup);
- val_buf.new_sync_obj_arg = (void *)(unsigned long)
- DRM_VMW_FENCE_FLAG_EXEC;
- list_add_tail(&val_buf.head, &val_list);
- ret = ttm_eu_reserve_buffers(&val_list);
- if (unlikely(ret != 0))
- goto out_no_reserve;
-
- ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
- true, false, false);
- if (unlikely(ret != 0))
- goto out_no_validate;
-
-
- /*
- * Encode the dma- and surface destroy commands.
- */
+ cmd += 1;
+ cmd_size = (SVGA3dSize *) cmd;
+ src_size = srf->sizes;
- submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "eviction.\n");
- ret = -ENOMEM;
- goto out_no_fifo;
+ for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+ cmd_size->width = cpu_to_le32(src_size->width);
+ cmd_size->height = cpu_to_le32(src_size->height);
+ cmd_size->depth = cpu_to_le32(src_size->depth);
}
- vmw_bo_get_guest_ptr(srf->backup, &ptr);
- vmw_surface_dma_encode(srf, cmd, &ptr, false);
- cmd += vmw_surface_dma_size(srf);
- vmw_surface_destroy_encode(res->id, cmd);
vmw_fifo_commit(dev_priv, submit_size);
-
- /*
- * Surface memory usage accounting.
- */
-
- dev_priv->used_memory_size -= srf->backup_size;
-
- /*
- * Create a fence object and fence the DMA buffer.
- */
-
- (void) vmw_execbuf_fence_commands(NULL, dev_priv,
- &fence, NULL);
- ttm_eu_fence_buffer_objects(&val_list, fence);
- if (likely(fence != NULL))
- vmw_fence_obj_unreference(&fence);
- ttm_bo_unref(&val_buf.bo);
-
- /*
- * Release the surface ID.
- */
-
- vmw_resource_release_id(res);
-
- return 0;
-
-out_no_fifo:
-out_no_validate:
- if (srf->backup)
- ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
- ttm_bo_unref(&val_buf.bo);
- ttm_bo_unref(&srf->backup);
- return ret;
-}
-
-
-/**
- * vmw_surface_validate - make a surface available to the device, evicting
- * other surfaces if needed.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * Try to validate a surface and if it fails due to limited device resources,
- * repeatedly try to evict other surfaces until the request can be
- * accommodated.
- *
- * May return errors if out of resources.
- */
-int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- int ret;
- struct vmw_surface *evict_srf;
-
- do {
- write_lock(&dev_priv->resource_lock);
- list_del_init(&srf->lru_head);
- write_unlock(&dev_priv->resource_lock);
-
- ret = vmw_surface_do_validate(dev_priv, srf);
- if (likely(ret != -EBUSY))
- break;
-
- write_lock(&dev_priv->resource_lock);
- if (list_empty(&dev_priv->surface_lru)) {
- DRM_ERROR("Out of device memory for surfaces.\n");
- ret = -EBUSY;
- write_unlock(&dev_priv->resource_lock);
- break;
- }
-
- evict_srf = vmw_surface_reference
- (list_first_entry(&dev_priv->surface_lru,
- struct vmw_surface,
- lru_head));
- list_del_init(&evict_srf->lru_head);
-
- write_unlock(&dev_priv->resource_lock);
- (void) vmw_surface_evict(dev_priv, evict_srf);
-
- vmw_surface_unreference(&evict_srf);
-
- } while (1);
-
- if (unlikely(ret != 0 && srf->res.id != -1)) {
- write_lock(&dev_priv->resource_lock);
- list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
- write_unlock(&dev_priv->resource_lock);
- }
-
- return ret;
-}
-
-
-/**
- * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
- *
- * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
- *
- * As part of the resource destruction, remove the surface from any
- * lookup lists.
- */
-static void vmw_surface_remove_from_lists(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
- list_del_init(&srf->lru_head);
-}
-
-int vmw_surface_init(struct vmw_private *dev_priv,
- struct vmw_surface *srf,
- void (*res_free) (struct vmw_resource *res))
-{
- int ret;
- struct vmw_resource *res = &srf->res;
-
- BUG_ON(res_free == NULL);
- INIT_LIST_HEAD(&srf->lru_head);
- ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
- VMW_RES_SURFACE, true, res_free,
- vmw_surface_remove_from_lists);
-
- if (unlikely(ret != 0))
- res_free(res);
-
- /*
- * The surface won't be visible to hardware until a
- * surface validate.
- */
-
- (void) vmw_3d_resource_inc(dev_priv, false);
+ (void) vmw_3d_resource_inc(dev_priv);
vmw_resource_activate(res, vmw_hw_surface_destroy);
- return ret;
+ return 0;
}
static void vmw_user_surface_free(struct vmw_resource *res)
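
The rewritten vmw_surface_init() above emits a variable-length SURFACE_DEFINE command: a header/body pair followed by one SVGA3dSize per mip level. The two size computations are easy to confuse, so as a sketch of the distinction (names as in the hunk):

	/* bytes reserved in the FIFO: header + body + trailing size array */
	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);

	/* bytes the header advertises as following it: body + size array,
	 * i.e. everything except the header itself */
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);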
@@ -1139,81 +486,12 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
- struct vmw_private *dev_priv = srf->res.dev_priv;
- uint32_t size = user_srf->size;
- if (srf->backup)
- ttm_bo_unref(&srf->backup);
- kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
kfree(user_srf);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_resource_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @list: list of resources to unreserve.
- *
- * Currently only surfaces are considered: this function traverses the
- * resource list, and each resource that is a surface is put back on
- * the device's surface LRU list, so that it can be evicted if necessary.
- */
-void vmw_resource_unreserve(struct list_head *list)
-{
- struct vmw_resource *res;
- struct vmw_surface *srf;
- rwlock_t *lock = NULL;
-
- list_for_each_entry(res, list, validate_head) {
-
- if (res->res_free != &vmw_surface_res_free &&
- res->res_free != &vmw_user_surface_free)
- continue;
-
- if (unlikely(lock == NULL)) {
- lock = &res->dev_priv->resource_lock;
- write_lock(lock);
- }
-
- srf = container_of(res, struct vmw_surface, res);
- list_del_init(&srf->lru_head);
- list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
- }
-
- if (lock != NULL)
- write_unlock(lock);
}
-/**
- * Helper function that looks up either a surface or a dmabuf.
- *
- * The pointers pointed at by out_surf and out_buf need to be NULL.
- */
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_dma_buffer **out_buf)
-{
- int ret;
-
- BUG_ON(*out_surf || *out_buf);
-
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
- if (!ret)
- return 0;
-
- ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
- return ret;
-}
-
-
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle, struct vmw_surface **out)
@@ -1278,7 +556,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf;
+ struct vmw_user_surface *user_srf =
+ kmalloc(sizeof(*user_srf), GFP_KERNEL);
struct vmw_surface *srf;
struct vmw_resource *res;
struct vmw_resource *tmp;
@@ -1289,51 +568,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct drm_vmw_size __user *user_sizes;
int ret;
- int i, j;
- uint32_t cur_bo_offset;
- struct drm_vmw_size *cur_size;
- struct vmw_surface_offset *cur_offset;
- uint32_t stride_bpp;
- uint32_t bpp;
- uint32_t num_sizes;
- uint32_t size;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
-
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
-
- num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- num_sizes += req->mip_levels[i];
-
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
- return -EINVAL;
-
- size = vmw_user_surface_size + 128 +
- ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
- ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface"
- " creation.\n");
- goto out_unlock;
- }
+ int i;
- user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
- ret = -ENOMEM;
- goto out_no_user_srf;
- }
+ if (unlikely(user_srf == NULL))
+ return -ENOMEM;
srf = &user_srf->srf;
res = &srf->res;
@@ -1341,22 +579,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->flags = req->flags;
srf->format = req->format;
srf->scanout = req->scanout;
- srf->backup = NULL;
-
memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
- srf->num_sizes = num_sizes;
- user_srf->size = size;
+ srf->num_sizes = 0;
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ srf->num_sizes += srf->mip_levels[i];
- srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
- goto out_no_sizes;
+ if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+ DRM_VMW_MAX_MIP_LEVELS) {
+ ret = -EINVAL;
+ goto out_err0;
}
- srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
- GFP_KERNEL);
+
+ srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
if (unlikely(srf->sizes == NULL)) {
ret = -ENOMEM;
- goto out_no_offsets;
+ goto out_err0;
}
user_sizes = (struct drm_vmw_size __user *)(unsigned long)
@@ -1366,31 +603,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->num_sizes * sizeof(*srf->sizes));
if (unlikely(ret != 0)) {
ret = -EFAULT;
- goto out_no_copy;
- }
-
- cur_bo_offset = 0;
- cur_offset = srf->offsets;
- cur_size = srf->sizes;
-
- bpp = vmw_sf_bpp[srf->format].bpp;
- stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
-
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
- for (j = 0; j < srf->mip_levels[i]; ++j) {
- uint32_t stride =
- (cur_size->width * stride_bpp + 7) >> 3;
-
- cur_offset->face = i;
- cur_offset->mip = j;
- cur_offset->bo_offset = cur_bo_offset;
- cur_bo_offset += stride * cur_size->height *
- cur_size->depth * bpp / stride_bpp;
- ++cur_offset;
- ++cur_size;
- }
+ goto out_err1;
}
- srf->backup_size = cur_bo_offset;
if (srf->scanout &&
srf->num_sizes == 1 &&
@@ -1398,12 +612,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->sizes[0].height == 64 &&
srf->format == SVGA3D_A8R8G8B8) {
- /* allocate image area and clear it */
- srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
- if (!srf->snooper.image) {
+ srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+ /* clear the image */
+ if (srf->snooper.image) {
+ memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+ } else {
DRM_ERROR("Failed to allocate cursor_image\n");
ret = -ENOMEM;
- goto out_no_copy;
+ goto out_err1;
}
} else {
srf->snooper.image = NULL;
@@ -1420,7 +636,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
if (unlikely(ret != 0))
- goto out_unlock;
+ return ret;
tmp = vmw_resource_reference(&srf->res);
ret = ttm_base_object_init(tfile, &user_srf->base,
@@ -1430,7 +646,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
vmw_resource_unreference(&res);
- goto out_unlock;
+ return ret;
}
rep->sid = user_srf->base.hash.key;
@@ -1438,19 +654,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
DRM_ERROR("Created bad Surface ID.\n");
vmw_resource_unreference(&res);
-
- ttm_read_unlock(&vmaster->lock);
return 0;
-out_no_copy:
- kfree(srf->offsets);
-out_no_offsets:
+out_err1:
kfree(srf->sizes);
-out_no_sizes:
+out_err0:
kfree(user_srf);
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
return ret;
}
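
One convention worth spelling out in the ioctl above: copy_from_user() returns the number of bytes it could not copy, not an errno, which is why any nonzero return is translated to -EFAULT. A one-line sketch with dst, user_ptr and len as placeholders:

	if (copy_from_user(dst, user_ptr, len) != 0)
		return -EFAULT;		/* nonzero: len wasn't fully copied */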
@@ -1540,10 +748,29 @@ out_bad_surface:
/**
* Buffer management.
*/
+
+static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
+ unsigned long num_pages)
+{
+ static size_t bo_user_size = ~0;
+
+ size_t page_array_size =
+ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
+
+ if (unlikely(bo_user_size == ~0)) {
+ bo_user_size = glob->ttm_bo_extra_size +
+ ttm_round_pot(sizeof(struct vmw_dma_buffer));
+ }
+
+ return bo_user_size + page_array_size;
+}
+
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ struct ttm_bo_global *glob = bo->glob;
+ ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_bo);
}
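
vmw_dmabuf_acc_size() above sizes the page-pointer array with the standard round-up-to-a-page idiom; since PAGE_MASK is ~(PAGE_SIZE - 1), the expression rounds up to the next page boundary. A sketch with nbytes as a placeholder:

	size_t rounded = (nbytes + PAGE_SIZE - 1) & PAGE_MASK;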
@@ -1554,12 +781,24 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
void (*bo_free) (struct ttm_buffer_object *bo))
{
struct ttm_bo_device *bdev = &dev_priv->bdev;
+ struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
size_t acc_size;
int ret;
BUG_ON(!bo_free);
- acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+ acc_size =
+ vmw_dmabuf_acc_size(bdev->glob,
+ (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ if (unlikely(ret != 0)) {
+ /* we must free the bo here, as
+ * ttm_buffer_object_init frees it on error as well */
+ bo_free(&vmw_bo->base);
+ return ret;
+ }
+
memset(vmw_bo, 0, sizeof(*vmw_bo));
INIT_LIST_HEAD(&vmw_bo->validate_list);
@@ -1574,7 +813,9 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+ struct ttm_bo_global *glob = bo->glob;
+ ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_user_bo);
}
@@ -1731,7 +972,7 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
int ret;
ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
- VMW_RES_STREAM, false, res_free, NULL);
+ VMW_RES_STREAM, res_free);
if (unlikely(ret != 0)) {
if (res_free == NULL)
@@ -1761,11 +1002,8 @@ static void vmw_user_stream_free(struct vmw_resource *res)
{
struct vmw_user_stream *stream =
container_of(res, struct vmw_user_stream, stream.res);
- struct vmw_private *dev_priv = res->dev_priv;
kfree(stream);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_stream_size);
}
/**
@@ -1819,56 +1057,23 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_stream *stream;
+ struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by the maximum number of streams anyway.
- */
-
- if (unlikely(vmw_user_stream_size == 0))
- vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_stream_size,
- false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for stream"
- " creation.\n");
- goto out_unlock;
- }
-
-
- stream = kmalloc(sizeof(*stream), GFP_KERNEL);
- if (unlikely(stream == NULL)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_stream_size);
- ret = -ENOMEM;
- goto out_unlock;
- }
+ if (unlikely(stream == NULL))
+ return -ENOMEM;
res = &stream->stream.res;
stream->base.shareable = false;
stream->base.tfile = NULL;
- /*
- * From here on, the destructor takes over resource freeing.
- */
-
ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
if (unlikely(ret != 0))
- goto out_unlock;
+ return ret;
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -1882,8 +1087,6 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
arg->stream_id = res->id;
out_err:
vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
return ret;
}
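
The claim/define ioctls above share one reference-counting convention around ttm_base_object_init(): an extra reference (tmp) is taken for the base object before registering it, and the lookup reference is always dropped on the way out, leaving exactly one reference owned by the base object. A condensed sketch; release_fn stands in for the release callback, whose name is not shown in these hunks:

	tmp = vmw_resource_reference(res);	/* ref owned by the base object */
	ret = ttm_base_object_init(tfile, &stream->base, false,
				   VMW_RES_STREAM, release_fn, NULL);
	if (unlikely(ret != 0))
		vmw_resource_unreference(&tmp);	/* registration failed, undo */

	vmw_resource_unreference(&res);		/* always drop the lookup ref */
	return ret;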
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
deleted file mode 100644
index 6deaf2f..0000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ /dev/null
@@ -1,571 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "vmwgfx_kms.h"
-
-
-#define vmw_crtc_to_sou(x) \
- container_of(x, struct vmw_screen_object_unit, base.crtc)
-#define vmw_encoder_to_sou(x) \
- container_of(x, struct vmw_screen_object_unit, base.encoder)
-#define vmw_connector_to_sou(x) \
- container_of(x, struct vmw_screen_object_unit, base.connector)
-
-struct vmw_screen_object_display {
- unsigned num_implicit;
-
- struct vmw_framebuffer *implicit_fb;
-};
-
-/**
- * Display unit using screen objects.
- */
-struct vmw_screen_object_unit {
- struct vmw_display_unit base;
-
- unsigned long buffer_size; /**< Size of allocated buffer */
- struct vmw_dma_buffer *buffer; /**< Backing store buffer */
-
- bool defined;
- bool active_implicit;
-};
-
-static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
-{
- vmw_display_unit_cleanup(&sou->base);
- kfree(sou);
-}
-
-
-/*
- * Screen Object Display Unit CRTC functions
- */
-
-static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
-{
- vmw_sou_destroy(vmw_crtc_to_sou(crtc));
-}
-
-static void vmw_sou_del_active(struct vmw_private *vmw_priv,
- struct vmw_screen_object_unit *sou)
-{
- struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
-
- if (sou->active_implicit) {
- if (--(ld->num_implicit) == 0)
- ld->implicit_fb = NULL;
- sou->active_implicit = false;
- }
-}
-
-static void vmw_sou_add_active(struct vmw_private *vmw_priv,
- struct vmw_screen_object_unit *sou,
- struct vmw_framebuffer *vfb)
-{
- struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
-
- BUG_ON(!ld->num_implicit && ld->implicit_fb);
-
- if (!sou->active_implicit && sou->base.is_implicit) {
- ld->implicit_fb = vfb;
- sou->active_implicit = true;
- ld->num_implicit++;
- }
-}
-
-/**
- * Send the fifo command to create a screen.
- */
-static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
- struct vmw_screen_object_unit *sou,
- uint32_t x, uint32_t y,
- struct drm_display_mode *mode)
-{
- size_t fifo_size;
-
- struct {
- struct {
- uint32_t cmdType;
- } header;
- SVGAScreenObject obj;
- } *cmd;
-
- BUG_ON(!sou->buffer);
-
- fifo_size = sizeof(*cmd);
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- /* The hardware has hung, nothing we can do about it here. */
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
- cmd->obj.structSize = sizeof(SVGAScreenObject);
- cmd->obj.id = sou->base.unit;
- cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
- (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
- cmd->obj.size.width = mode->hdisplay;
- cmd->obj.size.height = mode->vdisplay;
- if (sou->base.is_implicit) {
- cmd->obj.root.x = x;
- cmd->obj.root.y = y;
- } else {
- cmd->obj.root.x = sou->base.gui_x;
- cmd->obj.root.y = sou->base.gui_y;
- }
-
- /* Ok to assume that buffer is pinned in vram */
- vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
- cmd->obj.backingStore.pitch = mode->hdisplay * 4;
-
- vmw_fifo_commit(dev_priv, fifo_size);
-
- sou->defined = true;
-
- return 0;
-}
-
-/**
- * Send the fifo command to destroy a screen.
- */
-static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
- struct vmw_screen_object_unit *sou)
-{
- size_t fifo_size;
- int ret;
-
- struct {
- struct {
- uint32_t cmdType;
- } header;
- SVGAFifoCmdDestroyScreen body;
- } *cmd;
-
- /* no need to do anything */
- if (unlikely(!sou->defined))
- return 0;
-
- fifo_size = sizeof(*cmd);
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- /* the hardware has hung, nothing we can do about it here */
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
- cmd->body.screenId = sou->base.unit;
-
- vmw_fifo_commit(dev_priv, fifo_size);
-
- /* Force sync */
- ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
- if (unlikely(ret != 0))
- DRM_ERROR("Failed to sync with HW");
- else
- sou->defined = false;
-
- return ret;
-}
-
-/**
- * Free the backing store.
- */
-static void vmw_sou_backing_free(struct vmw_private *dev_priv,
- struct vmw_screen_object_unit *sou)
-{
- struct ttm_buffer_object *bo;
-
- if (unlikely(sou->buffer == NULL))
- return;
-
- bo = &sou->buffer->base;
- ttm_bo_unref(&bo);
- sou->buffer = NULL;
- sou->buffer_size = 0;
-}
-
-/**
- * Allocate the backing store for the buffer.
- */
-static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
- struct vmw_screen_object_unit *sou,
- unsigned long size)
-{
- int ret;
-
- if (sou->buffer_size == size)
- return 0;
-
- if (sou->buffer)
- vmw_sou_backing_free(dev_priv, sou);
-
- sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
- if (unlikely(sou->buffer == NULL))
- return -ENOMEM;
-
- /* After we have allocated the backing store we might not be able
- * to resume the overlays; this is preferred to failing the alloc.
- */
- vmw_overlay_pause_all(dev_priv);
- ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
- &vmw_vram_ne_placement,
- false, &vmw_dmabuf_bo_free);
- vmw_overlay_resume_all(dev_priv);
-
- if (unlikely(ret != 0))
- sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
- else
- sou->buffer_size = size;
-
- return ret;
-}
-
-static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
-{
- struct vmw_private *dev_priv;
- struct vmw_screen_object_unit *sou;
- struct drm_connector *connector;
- struct drm_display_mode *mode;
- struct drm_encoder *encoder;
- struct vmw_framebuffer *vfb;
- struct drm_framebuffer *fb;
- struct drm_crtc *crtc;
- int ret = 0;
-
- if (!set)
- return -EINVAL;
-
- if (!set->crtc)
- return -EINVAL;
-
- /* get the sou */
- crtc = set->crtc;
- sou = vmw_crtc_to_sou(crtc);
- vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
- dev_priv = vmw_priv(crtc->dev);
-
- if (set->num_connectors > 1) {
- DRM_ERROR("to many connectors\n");
- return -EINVAL;
- }
-
- if (set->num_connectors == 1 &&
- set->connectors[0] != &sou->base.connector) {
- DRM_ERROR("connector doesn't match %p %p\n",
- set->connectors[0], &sou->base.connector);
- return -EINVAL;
- }
-
- /* sou only supports one fb active at a time */
- if (sou->base.is_implicit &&
- dev_priv->sou_priv->implicit_fb && vfb &&
- !(dev_priv->sou_priv->num_implicit == 1 &&
- sou->active_implicit) &&
- dev_priv->sou_priv->implicit_fb != vfb) {
- DRM_ERROR("Multiple framebuffers not supported\n");
- return -EINVAL;
- }
-
- /* since they always map one to one these are safe */
- connector = &sou->base.connector;
- encoder = &sou->base.encoder;
-
- /* should we turn the crtc off */
- if (set->num_connectors == 0 || !set->mode || !set->fb) {
- ret = vmw_sou_fifo_destroy(dev_priv, sou);
- /* the hardware has hung, don't do anything more */
- if (unlikely(ret != 0))
- return ret;
-
- connector->encoder = NULL;
- encoder->crtc = NULL;
- crtc->fb = NULL;
- crtc->x = 0;
- crtc->y = 0;
-
- vmw_sou_del_active(dev_priv, sou);
-
- vmw_sou_backing_free(dev_priv, sou);
-
- return 0;
- }
-
-
- /* we now know we want to set a mode */
- mode = set->mode;
- fb = set->fb;
-
- if (set->x + mode->hdisplay > fb->width ||
- set->y + mode->vdisplay > fb->height) {
- DRM_ERROR("set outside of framebuffer\n");
- return -EINVAL;
- }
-
- vmw_fb_off(dev_priv);
-
- if (mode->hdisplay != crtc->mode.hdisplay ||
- mode->vdisplay != crtc->mode.vdisplay) {
- /* no need to check if depth is different, because backing
- * store depth is forced to 4 by the device.
- */
-
- ret = vmw_sou_fifo_destroy(dev_priv, sou);
- /* the hardware has hung, don't do anything more */
- if (unlikely(ret != 0))
- return ret;
-
- vmw_sou_backing_free(dev_priv, sou);
- }
-
- if (!sou->buffer) {
- /* forced to depth 4 by the device */
- size_t size = mode->hdisplay * mode->vdisplay * 4;
- ret = vmw_sou_backing_alloc(dev_priv, sou, size);
- if (unlikely(ret != 0))
- return ret;
- }
-
- ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
- if (unlikely(ret != 0)) {
- /*
- * We are in a bit of a situation here: the hardware has
- * hung and we may or may not have a buffer hanging off
- * the screen object. The best thing to do is nothing if
- * we were defined; if not, just turn the crtc off.
- * Not what userspace wants, but it will have to cope.
- */
- if (sou->defined)
- return ret;
-
- connector->encoder = NULL;
- encoder->crtc = NULL;
- crtc->fb = NULL;
- crtc->x = 0;
- crtc->y = 0;
-
- return ret;
- }
-
- vmw_sou_add_active(dev_priv, sou, vfb);
-
- connector->encoder = encoder;
- encoder->crtc = crtc;
- crtc->mode = *mode;
- crtc->fb = fb;
- crtc->x = set->x;
- crtc->y = set->y;
-
- return 0;
-}
-
-static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
- .save = vmw_du_crtc_save,
- .restore = vmw_du_crtc_restore,
- .cursor_set = vmw_du_crtc_cursor_set,
- .cursor_move = vmw_du_crtc_cursor_move,
- .gamma_set = vmw_du_crtc_gamma_set,
- .destroy = vmw_sou_crtc_destroy,
- .set_config = vmw_sou_crtc_set_config,
- .page_flip = vmw_du_page_flip,
-};
-
-/*
- * Screen Object Display Unit encoder functions
- */
-
-static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
-{
- vmw_sou_destroy(vmw_encoder_to_sou(encoder));
-}
-
-static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
- .destroy = vmw_sou_encoder_destroy,
-};
-
-/*
- * Screen Object Display Unit connector functions
- */
-
-static void vmw_sou_connector_destroy(struct drm_connector *connector)
-{
- vmw_sou_destroy(vmw_connector_to_sou(connector));
-}
-
-static struct drm_connector_funcs vmw_legacy_connector_funcs = {
- .dpms = vmw_du_connector_dpms,
- .save = vmw_du_connector_save,
- .restore = vmw_du_connector_restore,
- .detect = vmw_du_connector_detect,
- .fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
- .destroy = vmw_sou_connector_destroy,
-};
-
-static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
-{
- struct vmw_screen_object_unit *sou;
- struct drm_device *dev = dev_priv->dev;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct drm_crtc *crtc;
-
- sou = kzalloc(sizeof(*sou), GFP_KERNEL);
- if (!sou)
- return -ENOMEM;
-
- sou->base.unit = unit;
- crtc = &sou->base.crtc;
- encoder = &sou->base.encoder;
- connector = &sou->base.connector;
-
- sou->active_implicit = false;
-
- sou->base.pref_active = (unit == 0);
- sou->base.pref_width = dev_priv->initial_width;
- sou->base.pref_height = dev_priv->initial_height;
- sou->base.pref_mode = NULL;
- sou->base.is_implicit = true;
-
- drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
- connector->status = vmw_du_connector_detect(connector, true);
-
- drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL);
- drm_mode_connector_attach_encoder(connector, encoder);
- encoder->possible_crtcs = (1 << unit);
- encoder->possible_clones = 0;
-
- drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
-
- drm_mode_crtc_set_gamma_size(crtc, 256);
-
- drm_connector_attach_property(connector,
- dev->mode_config.dirty_info_property,
- 1);
-
- return 0;
-}
-
-int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- int i, ret;
-
- if (dev_priv->sou_priv) {
- DRM_INFO("sou system already on\n");
- return -EINVAL;
- }
-
- if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) {
- DRM_INFO("Not using screen objects,"
- " missing cap SCREEN_OBJECT_2\n");
- return -ENOSYS;
- }
-
- ret = -ENOMEM;
- dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
- if (unlikely(!dev_priv->sou_priv))
- goto err_no_mem;
-
- dev_priv->sou_priv->num_implicit = 0;
- dev_priv->sou_priv->implicit_fb = NULL;
-
- ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
- if (unlikely(ret != 0))
- goto err_free;
-
- ret = drm_mode_create_dirty_info_property(dev);
- if (unlikely(ret != 0))
- goto err_vblank_cleanup;
-
- for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
- vmw_sou_init(dev_priv, i);
-
- DRM_INFO("Screen objects system initialized\n");
-
- return 0;
-
-err_vblank_cleanup:
- drm_vblank_cleanup(dev);
-err_free:
- kfree(dev_priv->sou_priv);
- dev_priv->sou_priv = NULL;
-err_no_mem:
- return ret;
-}
-
-int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- if (!dev_priv->sou_priv)
- return -ENOSYS;
-
- drm_vblank_cleanup(dev);
-
- kfree(dev_priv->sou_priv);
-
- return 0;
-}
-
-/**
- * Returns whether this unit can be page flipped.
- * Must be called with the mode_config mutex held.
- */
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
-
- if (!sou->base.is_implicit)
- return true;
-
- if (dev_priv->sou_priv->num_implicit != 1)
- return false;
-
- return true;
-}
-
-/**
- * Update the implicit fb to the current fb of this crtc.
- * Must be called with the mode_config mutex held.
- */
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
-
- BUG_ON(!sou->base.is_implicit);
-
- dev_priv->sou_priv->implicit_fb =
- vmw_framebuffer_to_vfb(sou->base.crtc.fb);
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index d3c11f5..1e8eedd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -34,8 +34,9 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
struct vmw_private *dev_priv;
if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
- DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
- return -EINVAL;
+ if (vmw_fifo_mmap(filp, vma) == 0)
+ return 0;
+ return drm_mmap(filp, vma);
}
file_priv = filp->private_data;