author     Paul Mundt <lethal@linux-sh.org>   2008-07-29 08:09:44 +0900
committer  Paul Mundt <lethal@linux-sh.org>   2008-07-29 08:09:44 +0900
commit     f15cbe6f1a4b4d9df59142fc8e4abb973302cf44 (patch)
tree       774d7b11abaaf33561ab8268bf51ddd9ceb79025 /arch/sh/include/asm/cacheflush.h
parent     25326277d8d1393d1c66240e6255aca780f9e3eb (diff)
sh: migrate to arch/sh/include/
This follows the sparc changes in commit a439fe51a1f8eb087c22dd24d69cebae4a3addac.
Most of the moving about was done following Sam's directions at:
http://marc.info/?l=linux-sh&m=121724823706062&w=2
with subsequent hacking and fixups entirely my fault.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
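For orientation, an editor's sketch (not part of the commit message) of how the familiar includes resolve once the headers live under arch/sh/include/ instead of include/asm-sh/. The cpu-sh4 path is an assumption for illustration; the actual cpu-<family> directory is selected by the kernel configuration:

#include <asm/cacheflush.h>	/* now arch/sh/include/asm/cacheflush.h   */
				/* (previously include/asm-sh/cacheflush.h) */

/*
 * The CPU-family include used inside that header,
 *
 *	#include <cpu/cacheflush.h>
 *
 * resolves against the per-family directories, e.g.
 * arch/sh/include/cpu-sh4/cpu/cacheflush.h on an SH-4 build.
 */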
Diffstat (limited to 'arch/sh/include/asm/cacheflush.h')
-rw-r--r--   arch/sh/include/asm/cacheflush.h   81
1 file changed, 81 insertions, 0 deletions
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
new file mode 100644
index 0000000..09acbc3
--- /dev/null
+++ b/arch/sh/include/asm/cacheflush.h
@@ -0,0 +1,81 @@
+#ifndef __ASM_SH_CACHEFLUSH_H
+#define __ASM_SH_CACHEFLUSH_H
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_CACHE_OFF
+/*
+ * Nothing to do when the cache is disabled, initial flush and explicit
+ * disabling is handled at CPU init time.
+ *
+ * See arch/sh/kernel/cpu/init.c:cache_init().
+ */
+#define p3_cache_init()				do { } while (0)
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_dup_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_dcache_page(page)			do { } while (0)
+#define flush_icache_range(start, end)		do { } while (0)
+#define flush_icache_page(vma,pg)		do { } while (0)
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+#define flush_cache_sigtramp(vaddr)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
+#define __flush_wback_region(start, size)	do { (void)(start); } while (0)
+#define __flush_purge_region(start, size)	do { (void)(start); } while (0)
+#define __flush_invalidate_region(start, size)	do { (void)(start); } while (0)
+#else
+#include <cpu/cacheflush.h>
+
+/*
+ * Consistent DMA requires that the __flush_xxx() primitives must be set
+ * for any of the enabled non-coherent caches (most of the UP CPUs),
+ * regardless of PIPT or VIPT cache configurations.
+ */
+
+/* Flush (write-back only) a region (smaller than a page) */
+extern void __flush_wback_region(void *start, int size);
+/* Flush (write-back & invalidate) a region (smaller than a page) */
+extern void __flush_purge_region(void *start, int size);
+/* Flush (invalidate only) a region (smaller than a page) */
+extern void __flush_invalidate_region(void *start, int size);
+#endif
+
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+	flush_dcache_page(page);
+}
+
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_CACHE_OFF)
+extern void copy_to_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len);
+
+extern void copy_from_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len);
+#else
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
+	do {							\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
+		memcpy(dst, src, len);				\
+		flush_icache_user_range(vma, page, vaddr, len); \
+	} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
+	do {							\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));\
+		memcpy(dst, src, len);				\
+	} while (0)
+#endif
+
+#define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vunmap(start, end)		flush_cache_all()
+
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_CACHEFLUSH_H */
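To make the "consistent DMA" comment in the hunk above concrete, here is a minimal usage sketch pairing the region primitives around a transfer on a non-coherent cache. The buffer and both functions are hypothetical; in-tree drivers normally reach these primitives indirectly through the DMA-mapping layer rather than calling them by hand.

#include <asm/cacheflush.h>

static char example_buf[256];	/* hypothetical non-coherent DMA buffer */

static void example_dma_to_device(void)
{
	/* The CPU filled example_buf: write dirty lines back so the
	 * device reads up-to-date memory. */
	__flush_wback_region(example_buf, sizeof(example_buf));
	/* ... start the device-read transfer here ... */
}

static void example_dma_from_device(void)
{
	/* The device wrote example_buf: invalidate stale lines so
	 * subsequent CPU reads fetch from memory. */
	__flush_invalidate_region(example_buf, sizeof(example_buf));
	/* ... the CPU may now read the received data ... */
}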
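Likewise, the copy_to_user_page() fallback in the #else branch encodes an ordering worth spelling out: flush the user-visible cache page so no stale alias survives, memcpy() through the kernel mapping, then flush the icache range in case the copied bytes are instructions. A hedged caller sketch with hypothetical names, mirroring the ptrace-style pattern used when writing into another task's pages:

#include <linux/mm.h>
#include <asm/cacheflush.h>

/* Hypothetical helper: write 'len' bytes into 'page', which is mapped
 * at user address 'vaddr' in 'vma'; 'kaddr' is the kernel mapping of
 * the page (e.g. obtained via kmap()). */
static void example_poke_user_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long vaddr,
				   void *kaddr, const void *src,
				   unsigned long len)
{
	copy_to_user_page(vma, page, vaddr,
			  kaddr + (vaddr & (PAGE_SIZE - 1)), src, len);
}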