From c6da2cfeb05178a11c6d062a06f8078150ee492f Mon Sep 17 00:00:00 2001 From: codeworkx Date: Sat, 2 Jun 2012 13:09:29 +0200 Subject: samsung update 1 --- drivers/gpu/drm/radeon/radeon.h | 797 +++++++++++++++++++++++++++------------- 1 file changed, 544 insertions(+), 253 deletions(-) (limited to 'drivers/gpu/drm/radeon/radeon.h') diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 59d72d0..d2870a0 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -60,7 +60,7 @@ * are considered as fatal) */ -#include +#include #include #include #include @@ -103,10 +103,25 @@ extern int radeon_msi; #define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2) /* RADEON_IB_POOL_SIZE must be a power of 2 */ #define RADEON_IB_POOL_SIZE 16 -#define RADEON_DEBUGFS_MAX_NUM_FILES 32 +#define RADEON_DEBUGFS_MAX_COMPONENTS 32 #define RADEONFB_CONN_LIMIT 4 #define RADEON_BIOS_NUM_SCRATCH 8 +/* max number of rings */ +#define RADEON_NUM_RINGS 3 + +/* internal ring indices */ +/* r1xx+ has gfx CP ring */ +#define RADEON_RING_TYPE_GFX_INDEX 0 + +/* cayman has 2 compute CP rings */ +#define CAYMAN_RING_TYPE_CP1_INDEX 1 +#define CAYMAN_RING_TYPE_CP2_INDEX 2 + +/* hardcode those limit for now */ +#define RADEON_VA_RESERVED_SIZE (8 << 20) +#define RADEON_IB_VM_MAX_SIZE (64 << 10) + /* * Errata workarounds. */ @@ -142,6 +157,47 @@ bool radeon_get_bios(struct radeon_device *rdev); /* + * Mutex which allows recursive locking from the same process. + */ +struct radeon_mutex { + struct mutex mutex; + struct task_struct *owner; + int level; +}; + +static inline void radeon_mutex_init(struct radeon_mutex *mutex) +{ + mutex_init(&mutex->mutex); + mutex->owner = NULL; + mutex->level = 0; +} + +static inline void radeon_mutex_lock(struct radeon_mutex *mutex) +{ + if (mutex_trylock(&mutex->mutex)) { + /* The mutex was unlocked before, so it's ours now */ + mutex->owner = current; + } else if (mutex->owner != current) { + /* Another process locked the mutex, take it */ + mutex_lock(&mutex->mutex); + mutex->owner = current; + } + /* Otherwise the mutex was already locked by this process */ + + mutex->level++; +} + +static inline void radeon_mutex_unlock(struct radeon_mutex *mutex) +{ + if (--mutex->level > 0) + return; + + mutex->owner = NULL; + mutex_unlock(&mutex->mutex); +} + + +/* * Dummy page */ struct radeon_dummy_page { @@ -186,20 +242,24 @@ extern int rv6xx_get_temp(struct radeon_device *rdev); extern int rv770_get_temp(struct radeon_device *rdev); extern int evergreen_get_temp(struct radeon_device *rdev); extern int sumo_get_temp(struct radeon_device *rdev); +extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, + unsigned *bankh, unsigned *mtaspect, + unsigned *tile_split); /* * Fences. */ struct radeon_fence_driver { uint32_t scratch_reg; + uint64_t gpu_addr; + volatile uint32_t *cpu_addr; atomic_t seq; uint32_t last_seq; unsigned long last_jiffies; unsigned long last_timeout; wait_queue_head_t queue; - rwlock_t lock; struct list_head created; - struct list_head emited; + struct list_head emitted; struct list_head signaled; bool initialized; }; @@ -210,21 +270,26 @@ struct radeon_fence { struct list_head list; /* protected by radeon_fence.lock */ uint32_t seq; - bool emited; + bool emitted; bool signaled; + /* RB, DMA, etc. 
*/ + int ring; + struct radeon_semaphore *semaphore; }; +int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); int radeon_fence_driver_init(struct radeon_device *rdev); void radeon_fence_driver_fini(struct radeon_device *rdev); -int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence); +int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring); int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void radeon_fence_process(struct radeon_device *rdev); +void radeon_fence_process(struct radeon_device *rdev, int ring); bool radeon_fence_signaled(struct radeon_fence *fence); int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); -int radeon_fence_wait_next(struct radeon_device *rdev); -int radeon_fence_wait_last(struct radeon_device *rdev); +int radeon_fence_wait_next(struct radeon_device *rdev, int ring); +int radeon_fence_wait_last(struct radeon_device *rdev, int ring); struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); void radeon_fence_unref(struct radeon_fence **fence); +int radeon_fence_count_emitted(struct radeon_device *rdev, int ring); /* * Tiling registers @@ -246,6 +311,21 @@ struct radeon_mman { bool initialized; }; +/* bo virtual address in a specific vm */ +struct radeon_bo_va { + /* bo list is protected by bo being reserved */ + struct list_head bo_list; + /* vm list is protected by vm mutex */ + struct list_head vm_list; + /* constant after initialization */ + struct radeon_vm *vm; + struct radeon_bo *bo; + uint64_t soffset; + uint64_t eoffset; + uint32_t flags; + bool valid; +}; + struct radeon_bo { /* Protected by gem.mutex */ struct list_head list; @@ -259,6 +339,10 @@ struct radeon_bo { u32 tiling_flags; u32 pitch; int surface_reg; + /* list of all virtual address to which this bo + * is associated to + */ + struct list_head va; /* Constant after initialization */ struct radeon_device *rdev; struct drm_gem_object gem_base; @@ -274,6 +358,48 @@ struct radeon_bo_list { u32 tiling_flags; }; +/* sub-allocation manager, it has to be protected by another lock. + * By conception this is an helper for other part of the driver + * like the indirect buffer or semaphore, which both have their + * locking. + * + * Principe is simple, we keep a list of sub allocation in offset + * order (first entry has offset == 0, last entry has the highest + * offset). + * + * When allocating new object we first check if there is room at + * the end total_size - (last_object_offset + last_object_size) >= + * alloc_size. If so we allocate new object there. + * + * When there is not enough room at the end, we start waiting for + * each sub object until we reach object_offset+object_size >= + * alloc_size, this object then become the sub object we return. + * + * Alignment can't be bigger than page size. + * + * Hole are not considered for allocation to keep things simple. + * Assumption is that there won't be hole (all object on same + * alignment). + */ +struct radeon_sa_manager { + struct radeon_bo *bo; + struct list_head sa_bo; + unsigned size; + uint64_t gpu_addr; + void *cpu_ptr; + uint32_t domain; +}; + +struct radeon_sa_bo; + +/* sub-allocation buffer */ +struct radeon_sa_bo { + struct list_head list; + struct radeon_sa_manager *manager; + unsigned offset; + unsigned size; +}; + /* * GEM objects. 
*/ @@ -288,9 +414,6 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, int alignment, int initial_domain, bool discardable, bool kernel, struct drm_gem_object **obj); -int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, - uint64_t *gpu_addr); -void radeon_gem_object_unpin(struct drm_gem_object *obj); int radeon_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, @@ -303,37 +426,64 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv, uint32_t handle); /* - * GART structures, functions & helpers + * Semaphores. */ -struct radeon_mc; +struct radeon_ring; -struct radeon_gart_table_ram { - volatile uint32_t *ptr; +#define RADEON_SEMAPHORE_BO_SIZE 256 + +struct radeon_semaphore_driver { + rwlock_t lock; + struct list_head bo; }; -struct radeon_gart_table_vram { - struct radeon_bo *robj; - volatile uint32_t *ptr; +struct radeon_semaphore_bo; + +/* everything here is constant */ +struct radeon_semaphore { + struct list_head list; + uint64_t gpu_addr; + uint32_t *cpu_ptr; + struct radeon_semaphore_bo *bo; }; -union radeon_gart_table { - struct radeon_gart_table_ram ram; - struct radeon_gart_table_vram vram; +struct radeon_semaphore_bo { + struct list_head list; + struct radeon_ib *ib; + struct list_head free; + struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8]; + unsigned nused; }; +void radeon_semaphore_driver_fini(struct radeon_device *rdev); +int radeon_semaphore_create(struct radeon_device *rdev, + struct radeon_semaphore **semaphore); +void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, + struct radeon_semaphore *semaphore); +void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, + struct radeon_semaphore *semaphore); +void radeon_semaphore_free(struct radeon_device *rdev, + struct radeon_semaphore *semaphore); + +/* + * GART structures, functions & helpers + */ +struct radeon_mc; + #define RADEON_GPU_PAGE_SIZE 4096 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) #define RADEON_GPU_PAGE_SHIFT 12 +#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK) struct radeon_gart { dma_addr_t table_addr; + struct radeon_bo *robj; + void *ptr; unsigned num_gpu_pages; unsigned num_cpu_pages; unsigned table_size; - union radeon_gart_table table; struct page **pages; dma_addr_t *pages_addr; - bool *ttm_alloced; bool ready; }; @@ -341,6 +491,8 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev); void radeon_gart_table_ram_free(struct radeon_device *rdev); int radeon_gart_table_vram_alloc(struct radeon_device *rdev); void radeon_gart_table_vram_free(struct radeon_device *rdev); +int radeon_gart_table_vram_pin(struct radeon_device *rdev); +void radeon_gart_table_vram_unpin(struct radeon_device *rdev); int radeon_gart_init(struct radeon_device *rdev); void radeon_gart_fini(struct radeon_device *rdev); void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, @@ -348,6 +500,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, int pages, struct page **pagelist, dma_addr_t *dma_addr); +void radeon_gart_restore(struct radeon_device *rdev); /* @@ -438,45 +591,47 @@ union radeon_irq_stat_regs { struct evergreen_irq_stat_regs evergreen; }; +#define RADEON_MAX_HPD_PINS 6 +#define RADEON_MAX_CRTCS 6 +#define RADEON_MAX_HDMI_BLOCKS 2 + struct radeon_irq { bool installed; - bool sw_int; - /* FIXME: use a define max crtc rather than hardcode it */ - bool 
crtc_vblank_int[6]; - bool pflip[6]; + bool sw_int[RADEON_NUM_RINGS]; + bool crtc_vblank_int[RADEON_MAX_CRTCS]; + bool pflip[RADEON_MAX_CRTCS]; wait_queue_head_t vblank_queue; - /* FIXME: use defines for max hpd/dacs */ - bool hpd[6]; + bool hpd[RADEON_MAX_HPD_PINS]; bool gui_idle; bool gui_idle_acked; wait_queue_head_t idle_queue; - /* FIXME: use defines for max HDMI blocks */ - bool hdmi[2]; + bool hdmi[RADEON_MAX_HDMI_BLOCKS]; spinlock_t sw_lock; - int sw_refcount; + int sw_refcount[RADEON_NUM_RINGS]; union radeon_irq_stat_regs stat_regs; - spinlock_t pflip_lock[6]; - int pflip_refcount[6]; + spinlock_t pflip_lock[RADEON_MAX_CRTCS]; + int pflip_refcount[RADEON_MAX_CRTCS]; }; int radeon_irq_kms_init(struct radeon_device *rdev); void radeon_irq_kms_fini(struct radeon_device *rdev); -void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); -void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); +void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring); +void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring); void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); /* - * CP & ring. + * CP & rings. */ + struct radeon_ib { - struct list_head list; + struct radeon_sa_bo sa_bo; unsigned idx; + uint32_t length_dw; uint64_t gpu_addr; - struct radeon_fence *fence; uint32_t *ptr; - uint32_t length_dw; - bool free; + struct radeon_fence *fence; + unsigned vm_id; }; /* @@ -484,20 +639,22 @@ struct radeon_ib { * mutex protects scheduled_ibs, ready, alloc_bm */ struct radeon_ib_pool { - struct mutex mutex; - struct radeon_bo *robj; - struct list_head bogus_ib; - struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; - bool ready; - unsigned head_id; + struct radeon_mutex mutex; + struct radeon_sa_manager sa_manager; + struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; + bool ready; + unsigned head_id; }; -struct radeon_cp { +struct radeon_ring { struct radeon_bo *ring_obj; volatile uint32_t *ring; unsigned rptr; + unsigned rptr_offs; + unsigned rptr_reg; unsigned wptr; unsigned wptr_old; + unsigned wptr_reg; unsigned ring_size; unsigned ring_free_dw; int count_dw; @@ -506,6 +663,61 @@ struct radeon_cp { uint32_t ptr_mask; struct mutex mutex; bool ready; + u32 ptr_reg_shift; + u32 ptr_reg_mask; + u32 nop; +}; + +/* + * VM + */ +struct radeon_vm { + struct list_head list; + struct list_head va; + int id; + unsigned last_pfn; + u64 pt_gpu_addr; + u64 *pt; + struct radeon_sa_bo sa_bo; + struct mutex mutex; + /* last fence for cs using this vm */ + struct radeon_fence *fence; +}; + +struct radeon_vm_funcs { + int (*init)(struct radeon_device *rdev); + void (*fini)(struct radeon_device *rdev); + /* cs mutex must be lock for schedule_ib */ + int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id); + void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm); + void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm); + uint32_t (*page_flags)(struct radeon_device *rdev, + struct radeon_vm *vm, + uint32_t flags); + void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm, + unsigned pfn, uint64_t addr, uint32_t flags); +}; + +struct radeon_vm_manager { + struct list_head lru_vm; + uint32_t use_bitmap; + struct radeon_sa_manager sa_manager; + uint32_t max_pfn; + /* fields constant after init */ + const struct radeon_vm_funcs *funcs; + /* number of VMIDs */ + unsigned nvm; + /* vram base address for page table entry */ + u64 vram_base_offset; + /* is vm enabled? 
*/ + bool enabled; +}; + +/* + * file private structure + */ +struct radeon_fpriv { + struct radeon_vm vm; }; /* @@ -515,6 +727,7 @@ struct r600_ih { struct radeon_bo *ring_obj; volatile uint32_t *ring; unsigned rptr; + unsigned rptr_offs; unsigned wptr; unsigned wptr_old; unsigned ring_size; @@ -524,9 +737,30 @@ struct r600_ih { bool enabled; }; +struct r600_blit_cp_primitives { + void (*set_render_target)(struct radeon_device *rdev, int format, + int w, int h, u64 gpu_addr); + void (*cp_set_surface_sync)(struct radeon_device *rdev, + u32 sync_type, u32 size, + u64 mc_addr); + void (*set_shaders)(struct radeon_device *rdev); + void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr); + void (*set_tex_resource)(struct radeon_device *rdev, + int format, int w, int h, int pitch, + u64 gpu_addr, u32 size); + void (*set_scissors)(struct radeon_device *rdev, int x1, int y1, + int x2, int y2); + void (*draw_auto)(struct radeon_device *rdev); + void (*set_default_state)(struct radeon_device *rdev); +}; + struct r600_blit { struct mutex mutex; struct radeon_bo *shader_obj; + struct r600_blit_cp_primitives primitives; + int max_dim; + int ring_size_common; + int ring_size_per_loop; u64 shader_gpu_addr; u32 vs_offset, ps_offset; u32 state_offset; @@ -535,23 +769,30 @@ struct r600_blit { struct radeon_ib *vb_ib; }; -int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib); +void r600_blit_suspend(struct radeon_device *rdev); + +int radeon_ib_get(struct radeon_device *rdev, int ring, + struct radeon_ib **ib, unsigned size); void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib); +bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib); int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); int radeon_ib_pool_init(struct radeon_device *rdev); void radeon_ib_pool_fini(struct radeon_device *rdev); -int radeon_ib_test(struct radeon_device *rdev); -extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); +int radeon_ib_pool_start(struct radeon_device *rdev); +int radeon_ib_pool_suspend(struct radeon_device *rdev); /* Ring access between begin & end cannot sleep */ -void radeon_ring_free_size(struct radeon_device *rdev); -int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw); -int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); -void radeon_ring_commit(struct radeon_device *rdev); -void radeon_ring_unlock_commit(struct radeon_device *rdev); -void radeon_ring_unlock_undo(struct radeon_device *rdev); -int radeon_ring_test(struct radeon_device *rdev); -int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size); -void radeon_ring_fini(struct radeon_device *rdev); +int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp); +void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); +int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); +int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); +void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp); +void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp); +void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); +int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); +int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size, + unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, + u32 ptr_reg_shift, 
u32 ptr_reg_mask, u32 nop); +void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); /* @@ -568,12 +809,12 @@ struct radeon_cs_reloc { struct radeon_cs_chunk { uint32_t chunk_id; uint32_t length_dw; - int kpage_idx[2]; - uint32_t *kpage[2]; + int kpage_idx[2]; + uint32_t *kpage[2]; uint32_t *kdata; - void __user *user_ptr; - int last_copied_page; - int last_page_index; + void __user *user_ptr; + int last_copied_page; + int last_page_index; }; struct radeon_cs_parser { @@ -594,40 +835,19 @@ struct radeon_cs_parser { /* indices of various chunks */ int chunk_ib_idx; int chunk_relocs_idx; + int chunk_flags_idx; struct radeon_ib *ib; void *track; unsigned family; - int parser_error; + int parser_error; + u32 cs_flags; + u32 ring; + s32 priority; }; extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); extern int radeon_cs_finish_pages(struct radeon_cs_parser *p); - - -static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) -{ - struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; - u32 pg_idx, pg_offset; - u32 idx_value = 0; - int new_page; - - pg_idx = (idx * 4) / PAGE_SIZE; - pg_offset = (idx * 4) % PAGE_SIZE; - - if (ibc->kpage_idx[0] == pg_idx) - return ibc->kpage[0][pg_offset/4]; - if (ibc->kpage_idx[1] == pg_idx) - return ibc->kpage[1][pg_offset/4]; - - new_page = radeon_cs_update_pages(p, pg_idx); - if (new_page < 0) { - p->parser_error = new_page; - return 0; - } - - idx_value = ibc->kpage[new_page][pg_offset/4]; - return idx_value; -} +extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx); struct radeon_cs_packet { unsigned idx; @@ -795,8 +1015,7 @@ struct radeon_pm_clock_info { struct radeon_power_state { enum radeon_pm_state_type type; - /* XXX: use a define for num clock modes */ - struct radeon_pm_clock_info clock_info[8]; + struct radeon_pm_clock_info *clock_info; /* number of valid clock modes in this power state */ int num_clock_modes; struct radeon_pm_clock_info *default_clock_mode; @@ -866,22 +1085,34 @@ struct radeon_pm { struct device *int_hwmon_dev; }; +int radeon_pm_get_type_index(struct radeon_device *rdev, + enum radeon_pm_state_type ps_type, + int instance); /* * Benchmarking */ -void radeon_benchmark(struct radeon_device *rdev); +void radeon_benchmark(struct radeon_device *rdev, int test_number); /* * Testing */ void radeon_test_moves(struct radeon_device *rdev); +void radeon_test_ring_sync(struct radeon_device *rdev, + struct radeon_ring *cpA, + struct radeon_ring *cpB); +void radeon_test_syncing(struct radeon_device *rdev); /* * Debugfs */ +struct radeon_debugfs { + struct drm_info_list *files; + unsigned num_files; +}; + int radeon_debugfs_add_files(struct radeon_device *rdev, struct drm_info_list *files, unsigned nfiles); @@ -897,53 +1128,8 @@ struct radeon_asic { int (*resume)(struct radeon_device *rdev); int (*suspend)(struct radeon_device *rdev); void (*vga_set_state)(struct radeon_device *rdev, bool state); - bool (*gpu_is_lockup)(struct radeon_device *rdev); + bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp); int (*asic_reset)(struct radeon_device *rdev); - void (*gart_tlb_flush)(struct radeon_device *rdev); - int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); - int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); - void (*cp_fini)(struct radeon_device *rdev); - void (*cp_disable)(struct radeon_device *rdev); - void (*cp_commit)(struct radeon_device *rdev); - void (*ring_start)(struct radeon_device *rdev); - int 
(*ring_test)(struct radeon_device *rdev); - void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); - int (*irq_set)(struct radeon_device *rdev); - int (*irq_process)(struct radeon_device *rdev); - u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); - void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence); - int (*cs_parse)(struct radeon_cs_parser *p); - int (*copy_blit)(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence *fence); - int (*copy_dma)(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence *fence); - int (*copy)(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence *fence); - uint32_t (*get_engine_clock)(struct radeon_device *rdev); - void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); - uint32_t (*get_memory_clock)(struct radeon_device *rdev); - void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); - int (*get_pcie_lanes)(struct radeon_device *rdev); - void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); - void (*set_clock_gating)(struct radeon_device *rdev, int enable); - int (*set_surface_reg)(struct radeon_device *rdev, int reg, - uint32_t tiling_flags, uint32_t pitch, - uint32_t offset, uint32_t obj_size); - void (*clear_surface_reg)(struct radeon_device *rdev, int reg); - void (*bandwidth_update)(struct radeon_device *rdev); - void (*hpd_init)(struct radeon_device *rdev); - void (*hpd_fini)(struct radeon_device *rdev); - bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); - void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); /* ioctl hw specific callback. Some hw might want to perform special * operation on specific ioctl. For instance on wait idle some hw * might want to perform and HDP flush through MMIO as it seems that @@ -951,17 +1137,99 @@ struct radeon_asic { * through ring. 
*/ void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); + /* check if 3D engine is idle */ bool (*gui_idle)(struct radeon_device *rdev); + /* wait for mc_idle */ + int (*mc_wait_for_idle)(struct radeon_device *rdev); + /* gart */ + struct { + void (*tlb_flush)(struct radeon_device *rdev); + int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr); + } gart; + /* ring specific callbacks */ + struct { + void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); + int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib); + void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); + void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, + struct radeon_semaphore *semaphore, bool emit_wait); + int (*cs_parse)(struct radeon_cs_parser *p); + void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp); + int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); + int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp); + } ring[RADEON_NUM_RINGS]; + /* irqs */ + struct { + int (*set)(struct radeon_device *rdev); + int (*process)(struct radeon_device *rdev); + } irq; + /* displays */ + struct { + /* display watermarks */ + void (*bandwidth_update)(struct radeon_device *rdev); + /* get frame count */ + u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); + /* wait for vblank */ + void (*wait_for_vblank)(struct radeon_device *rdev, int crtc); + } display; + /* copy functions for bo handling */ + struct { + int (*blit)(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence *fence); + u32 blit_ring_index; + int (*dma)(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence *fence); + u32 dma_ring_index; + /* method used for bo copy */ + int (*copy)(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence *fence); + /* ring used for bo copies */ + u32 copy_ring_index; + } copy; + /* surfaces */ + struct { + int (*set_reg)(struct radeon_device *rdev, int reg, + uint32_t tiling_flags, uint32_t pitch, + uint32_t offset, uint32_t obj_size); + void (*clear_reg)(struct radeon_device *rdev, int reg); + } surface; + /* hotplug detect */ + struct { + void (*init)(struct radeon_device *rdev); + void (*fini)(struct radeon_device *rdev); + bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); + void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); + } hpd; /* power management */ - void (*pm_misc)(struct radeon_device *rdev); - void (*pm_prepare)(struct radeon_device *rdev); - void (*pm_finish)(struct radeon_device *rdev); - void (*pm_init_profile)(struct radeon_device *rdev); - void (*pm_get_dynpm_state)(struct radeon_device *rdev); + struct { + void (*misc)(struct radeon_device *rdev); + void (*prepare)(struct radeon_device *rdev); + void (*finish)(struct radeon_device *rdev); + void (*init_profile)(struct radeon_device *rdev); + void (*get_dynpm_state)(struct radeon_device *rdev); + uint32_t (*get_engine_clock)(struct radeon_device *rdev); + void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); + uint32_t (*get_memory_clock)(struct radeon_device *rdev); + void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); + int (*get_pcie_lanes)(struct radeon_device *rdev); + void (*set_pcie_lanes)(struct radeon_device *rdev, int 
lanes); + void (*set_clock_gating)(struct radeon_device *rdev, int enable); + } pm; /* pageflipping */ - void (*pre_page_flip)(struct radeon_device *rdev, int crtc); - u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); - void (*post_page_flip)(struct radeon_device *rdev, int crtc); + struct { + void (*pre_page_flip)(struct radeon_device *rdev, int crtc); + u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); + void (*post_page_flip)(struct radeon_device *rdev, int crtc); + } pflip; }; /* @@ -1005,6 +1273,7 @@ struct r600_asic { unsigned tiling_npipes; unsigned tiling_group_size; unsigned tile_config; + unsigned backend_map; struct r100_gpu_lockup lockup; }; @@ -1030,6 +1299,7 @@ struct rv770_asic { unsigned tiling_npipes; unsigned tiling_group_size; unsigned tile_config; + unsigned backend_map; struct r100_gpu_lockup lockup; }; @@ -1056,6 +1326,7 @@ struct evergreen_asic { unsigned tiling_npipes; unsigned tiling_group_size; unsigned tile_config; + unsigned backend_map; struct r100_gpu_lockup lockup; }; @@ -1137,18 +1408,22 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +int radeon_gem_va_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -/* VRAM scratch page for HDP bug */ -struct r700_vram_scratch { +/* VRAM scratch page for HDP bug, default vram page */ +struct r600_vram_scratch { struct radeon_bo *robj; volatile uint32_t *ptr; + u64 gpu_addr; }; + /* * Core structure, functions and helpers. 
*/ @@ -1176,7 +1451,7 @@ struct radeon_device { /* Register mmio */ resource_size_t rmmio_base; resource_size_t rmmio_size; - void *rmmio; + void __iomem *rmmio; radeon_rreg_t mc_rreg; radeon_wreg_t mc_wreg; radeon_rreg_t pll_rreg; @@ -1193,18 +1468,17 @@ struct radeon_device { struct radeon_mode_info mode_info; struct radeon_scratch scratch; struct radeon_mman mman; - struct radeon_fence_driver fence_drv; - struct radeon_cp cp; - /* cayman compute rings */ - struct radeon_cp cp1; - struct radeon_cp cp2; + rwlock_t fence_lock; + struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; + struct radeon_semaphore_driver semaphore_drv; + struct radeon_ring ring[RADEON_NUM_RINGS]; struct radeon_ib_pool ib_pool; struct radeon_irq irq; struct radeon_asic *asic; struct radeon_gem gem; struct radeon_pm pm; uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; - struct mutex cs_mutex; + struct radeon_mutex cs_mutex; struct radeon_wb wb; struct radeon_dummy_page dummy_page; bool gpu_lockup; @@ -1218,7 +1492,7 @@ struct radeon_device { const struct firmware *rlc_fw; /* r6/700 RLC firmware */ const struct firmware *mc_fw; /* NI MC firmware */ struct r600_blit r600_blit; - struct r700_vram_scratch vram_scratch; + struct r600_vram_scratch vram_scratch; int msi_enabled; /* msi enabled */ struct r600_ih ih; /* r6/700 interrupt ring */ struct work_struct hotplug_work; @@ -1241,6 +1515,11 @@ struct radeon_device { struct drm_file *cmask_filp; /* i2c buses */ struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS]; + /* debugfs */ + struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS]; + unsigned debugfs_count; + /* virtual memory */ + struct radeon_vm_manager vm_manager; }; int radeon_device_init(struct radeon_device *rdev, @@ -1250,45 +1529,10 @@ int radeon_device_init(struct radeon_device *rdev, void radeon_device_fini(struct radeon_device *rdev); int radeon_gpu_wait_for_idle(struct radeon_device *rdev); -static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) -{ - if (reg < rdev->rmmio_size) - return readl(((void __iomem *)rdev->rmmio) + reg); - else { - writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); - return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); - } -} - -static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) -{ - if (reg < rdev->rmmio_size) - writel(v, ((void __iomem *)rdev->rmmio) + reg); - else { - writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); - writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); - } -} - -static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg) -{ - if (reg < rdev->rio_mem_size) - return ioread32(rdev->rio_mem + reg); - else { - iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX); - return ioread32(rdev->rio_mem + RADEON_MM_DATA); - } -} - -static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v) -{ - if (reg < rdev->rio_mem_size) - iowrite32(v, rdev->rio_mem + reg); - else { - iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX); - iowrite32(v, rdev->rio_mem + RADEON_MM_DATA); - } -} +uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); +void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); +u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); +void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); /* * Cast helper @@ -1298,10 +1542,10 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v) /* * Registers read & write functions. 
*/ -#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) -#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) -#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg)) -#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg)) +#define RREG8(reg) readb((rdev->rmmio) + (reg)) +#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg)) +#define RREG16(reg) readw((rdev->rmmio) + (reg)) +#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg)) #define RREG32(reg) r100_mm_rreg(rdev, (reg)) #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) @@ -1411,19 +1655,18 @@ void radeon_atombios_fini(struct radeon_device *rdev); /* * RING helpers. */ -static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) +#if DRM_DEBUG_CODE == 0 +static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) { -#if DRM_DEBUG_CODE - if (rdev->cp.count_dw <= 0) { - DRM_ERROR("radeon: writting more dword to ring than expected !\n"); - } -#endif - rdev->cp.ring[rdev->cp.wptr++] = v; - rdev->cp.wptr &= rdev->cp.ptr_mask; - rdev->cp.count_dw--; - rdev->cp.ring_free_dw--; + ring->ring[ring->wptr++] = v; + ring->wptr &= ring->ptr_mask; + ring->count_dw--; + ring->ring_free_dw--; } - +#else +/* With debugging this is just too big to inline */ +void radeon_ring_write(struct radeon_ring *ring, uint32_t v); +#endif /* * ASICs macro. @@ -1432,53 +1675,58 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_fini(rdev) (rdev)->asic->fini((rdev)) #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) -#define radeon_cs_parse(p) rdev->asic->cs_parse((p)) +#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p)) #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) -#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev)) +#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp)) #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) -#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) -#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) -#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) -#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) -#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) -#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) -#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) -#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) -#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) -#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence)) -#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) -#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) -#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f)) -#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev)) -#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) -#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) -#define radeon_set_memory_clock(rdev, e) 
(rdev)->asic->set_memory_clock((rdev), (e)) -#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev)) -#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) -#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) -#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) -#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) -#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) -#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev)) -#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) -#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) -#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) +#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) +#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p)) +#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp)) +#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp)) +#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp)) +#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib)) +#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) +#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev)) +#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev)) +#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) +#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence)) +#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) +#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) +#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f)) +#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f)) +#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index +#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index +#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index +#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev)) +#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e)) +#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev)) +#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e)) +#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev)) +#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l)) +#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e)) +#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s))) +#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r))) +#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev)) +#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev)) +#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev)) +#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h)) +#define 
radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h)) #define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev)) -#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev)) -#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev)) -#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) -#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) -#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) -#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc)) -#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base)) -#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc)) +#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev)) +#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev)) +#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev)) +#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev)) +#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev)) +#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc)) +#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base)) +#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc)) +#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc)) +#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev)) /* Common functions */ /* AGP */ extern int radeon_gpu_reset(struct radeon_device *rdev); extern void radeon_agp_disable(struct radeon_device *rdev); -extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); -extern void radeon_gart_restore(struct radeon_device *rdev); extern int radeon_modeset_init(struct radeon_device *rdev); extern void radeon_modeset_fini(struct radeon_device *rdev); extern bool radeon_card_posted(struct radeon_device *rdev); @@ -1502,6 +1750,49 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); /* + * vm + */ +int radeon_vm_manager_init(struct radeon_device *rdev); +void radeon_vm_manager_fini(struct radeon_device *rdev); +int radeon_vm_manager_start(struct radeon_device *rdev); +int radeon_vm_manager_suspend(struct radeon_device *rdev); +int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); +void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); +int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm); +void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); +int radeon_vm_bo_update_pte(struct radeon_device *rdev, + struct radeon_vm *vm, + struct radeon_bo *bo, + struct ttm_mem_reg *mem); +void radeon_vm_bo_invalidate(struct radeon_device *rdev, + struct radeon_bo *bo); +int radeon_vm_bo_add(struct radeon_device *rdev, + struct radeon_vm *vm, + struct radeon_bo *bo, + uint64_t offset, + uint32_t flags); +int radeon_vm_bo_rmv(struct radeon_device *rdev, + struct radeon_vm *vm, + struct radeon_bo *bo); + + +/* + * R600 vram scratch functions + */ +int r600_vram_scratch_init(struct radeon_device *rdev); +void r600_vram_scratch_fini(struct radeon_device *rdev); + +/* + * r600 cs checking helper + */ +unsigned r600_mip_minify(unsigned size, unsigned level); +bool r600_fmt_is_valid_color(u32 format); +bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family); +int 
r600_fmt_get_blocksize(u32 format); int r600_fmt_get_nblocksx(u32 format, u32 w); int r600_fmt_get_nblocksy(u32 format, u32 h); + +/* * r600 functions used by radeon_encoder.c */ extern void r600_hdmi_enable(struct drm_encoder *encoder);
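For readers skimming the interface changes above, the most invasive one is that fences are now bound to a specific ring index instead of the single CP ring. Below is a minimal, hypothetical call-site sketch (not part of this patch; the function name is invented and error handling is abbreviated) showing how the new radeon_fence_create()/radeon_fence_emit()/radeon_fence_wait() signatures declared in this header fit together for the gfx ring. In the real driver the emit step happens inside the ring and IB submission paths rather than in open-coded callers like this.

/* Hypothetical example only: exercises the per-ring fence API declared above. */
static int example_gfx_fence_roundtrip(struct radeon_device *rdev)
{
	struct radeon_fence *fence = NULL;
	int r;

	/* fences are now created against a ring index (gfx CP ring here) */
	r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r)
		return r;

	/* ... commands would be emitted on the ring here ... */

	/* attach the fence to the ring so the GPU signals it when it passes */
	r = radeon_fence_emit(rdev, fence);
	if (r) {
		radeon_fence_unref(&fence);
		return r;
	}

	/* block (uninterruptibly) until the GPU has reached the fence */
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}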