| author | codeworkx <codeworkx@cyanogenmod.com> | 2012-09-17 17:53:57 +0200 |
|---|---|---|
| committer | codeworkx <codeworkx@cyanogenmod.com> | 2012-09-18 16:31:59 +0200 |
| commit | c28265764ec6ad9995eb0c761a376ffc9f141fcd (patch) | |
| tree | 3ad899757480d47deb2be6011509a4243e8e0dc2 | /drivers/base |
| parent | 0ddbcb39c0dc0318f68d858f25a96a074142af2f (diff) | |
| download | kernel_samsung_smdk4412-c28265764ec6ad9995eb0c761a376ffc9f141fcd.zip, kernel_samsung_smdk4412-c28265764ec6ad9995eb0c761a376ffc9f141fcd.tar.gz, kernel_samsung_smdk4412-c28265764ec6ad9995eb0c761a376ffc9f141fcd.tar.bz2 | |
applied patches from i9305 jb sources, updated mali to r3p0
Change-Id: Iec4bc4e2fb59e2cf5b4d25568a644d4e3719565e
Diffstat (limited to 'drivers/base')
| -rw-r--r-- | drivers/base/Kconfig | 115 |
| -rw-r--r-- | drivers/base/Makefile | 4 |
| -rw-r--r-- | drivers/base/core.c | 8 |
| -rw-r--r-- | drivers/base/dma-contiguous.c | 504 |
| -rw-r--r-- | drivers/base/firmware_class.c | 14 |
| -rw-r--r-- | drivers/base/iommu.c | 124 |
| -rw-r--r-- | drivers/base/sw_sync.c | 256 |
| -rw-r--r-- | drivers/base/sync.c | 801 |
8 files changed, 1816 insertions, 10 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 5398ce8..e960312 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -178,4 +178,119 @@ config DMA_SHARED_BUFFER APIs extension; the file's descriptor can then be passed on to other driver. +config SYNC + bool "Synchronization framework" + default n + select ANON_INODES + help + This option enables the framework for synchronization between multiple + drivers. Sync implementations can take advantage of hardware + synchronization built into devices like GPUs. + +config SW_SYNC + bool "Software synchronization objects" + default n + depends on SYNC + help + A sync object driver that uses a 32bit counter to coordinate + syncrhronization. Useful when there is no hardware primitive backing + the synchronization. + +config SW_SYNC_USER + bool "Userspace API for SW_SYNC" + default n + depends on SW_SYNC + help + Provides a user space API to the sw sync object. + *WARNING* improper use of this can result in deadlocking kernel + drivers from userspace. + +config DMA_CMA + bool "Contiguous Memory Allocator (EXPERIMENTAL)" + default n + select MIGRATION + help + This enables the Contiguous Memory Allocator which allows drivers + to allocate big physically-contiguous blocks of memory for use with + hardware components that do not support I/O map nor scatter-gather. + + For more information see <include/linux/dma-contiguous.h>. + If unsure, say "n". + +if DMA_CMA + +config DMA_CMA_DEBUG + bool "NEW CMA debug messages (DEVELOPMENT)" + depends on DEBUG_KERNEL + help + Turns on debug messages in CMA. This produces KERN_DEBUG + messages for every CMA call as well as various messages while + processing calls such as dma_alloc_from_contiguous(). + This option does not affect warning and error messages. + +comment "Default contiguous memory area size:" + +config CMA_SIZE_MBYTES + int "Size in Mega Bytes" + depends on !CMA_SIZE_SEL_PERCENTAGE + default 16 + help + Defines the size (in MiB) of the default memory area for CMA. + +config CMA_SIZE_PERCENTAGE + int "Percentage of total memory" + depends on !CMA_SIZE_SEL_MBYTES + default 10 + help + Defines the size of the default memory area for Contiguous Memory + Allocator as a percentage of the total memory in the system. + +choice + prompt "Selected region size" + default CMA_SIZE_SEL_ABSOLUTE + +config CMA_SIZE_SEL_MBYTES + bool "Use mega bytes value only" + +config CMA_SIZE_SEL_PERCENTAGE + bool "Use percentage value only" + +config CMA_SIZE_SEL_MIN + bool "Use lower value (minimum)" + +config CMA_SIZE_SEL_MAX + bool "Use higher value (maximum)" + +endchoice + +config CMA_ALIGNMENT + int "Maximum PAGE_SIZE order of alignment for contiguous buffers" + range 4 9 + default 8 + help + DMA mapping framework by default aligns all buffers to the smallest + PAGE_SIZE order which is greater than or equal to the requested buffer + size. This works well for buffers up to a few hundreds kilobytes, but + for larger buffers it just a memory waste. With this parameter you can + specify the maximum PAGE_SIZE order for contiguous buffers. Larger + buffers will be aligned only to this specified order. The order is + expressed as a power of two multiplied by the PAGE_SIZE. + + For example, if your system defaults to 4KiB pages, the order value + of 8 means that the buffers will be aligned up to 1MiB only. + + If unsure, leave the default value "8". + +config CMA_AREAS + int "Maximum count of the CMA device-private areas" + default 7 + help + CMA allows to create CMA areas for particular devices. 
This parameter + sets the maximum number of such device private CMA areas in the + system. + + If unsure, leave the default value "7". + +endif + endmenu diff --git a/drivers/base/Makefile b/drivers/base/Makefile index a749d9b..a60d7e2 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -5,6 +5,7 @@ obj-y := core.o sys.o bus.o dd.o syscore.o \ cpu.o firmware.o init.o map.o devres.o \ attribute_container.o transport_class.o obj-$(CONFIG_DEVTMPFS) += devtmpfs.o +obj-$(CONFIG_DMA_CMA) += dma-contiguous.o obj-y += power/ obj-$(CONFIG_HAS_DMA) += dma-mapping.o obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o @@ -20,5 +21,8 @@ obj-$(CONFIG_MODULES) += module.o endif obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o +obj-$(CONFIG_SYNC) += sync.o +obj-$(CONFIG_SW_SYNC) += sw_sync.o + ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/core.c b/drivers/base/core.c index 835ed32..68bd89e 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1743,10 +1743,12 @@ void device_shutdown(void) */ list_del_init(&dev->kobj.entry); spin_unlock(&devices_kset->list_lock); - /* Disable all device's runtime power management */ - pm_runtime_disable(dev); -#if defined(CONFIG_MACH_Q1_BD) || defined(CONFIG_MACH_PX) + /* Don't allow any more runtime suspends */ + pm_runtime_get_noresume(dev); + pm_runtime_barrier(dev); + +#if defined(CONFIG_MACH_Q1_BD) || defined(CONFIG_MACH_PX) || defined(CONFIG_MACH_MIDAS) /* Temporary log to analyze a problem during shutdown */ if (dev->bus && dev->bus->shutdown) { dev_info(dev, "shutdown +: %pF\n", dev->bus->shutdown); diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c new file mode 100644 index 0000000..c3a6e3b --- /dev/null +++ b/drivers/base/dma-contiguous.c @@ -0,0 +1,504 @@ +/* + * Contiguous Memory Allocator for DMA mapping framework + * Copyright (c) 2010-2011 by Samsung Electronics. + * Written by: + * Marek Szyprowski <m.szyprowski@samsung.com> + * Michal Nazarewicz <mina86@mina86.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License or (at your optional) any later version of the license. + */ + +#define pr_fmt(fmt) "cma: " fmt + +#ifdef CONFIG_DMA_CMA_DEBUG +#ifndef DEBUG +# define DEBUG +#endif +#endif + +#include <asm/page.h> +#include <asm/dma-contiguous.h> + +#include <linux/memblock.h> +#include <linux/err.h> +#include <linux/mm.h> +#include <linux/mutex.h> +#include <linux/page-isolation.h> +#include <linux/slab.h> +#include <linux/swap.h> +#include <linux/mm_types.h> +#include <linux/dma-contiguous.h> + +#ifndef SZ_1M +#define SZ_1M (1 << 20) +#endif + +struct cma { + unsigned long base_pfn; + unsigned long count; + unsigned long *bitmap; +}; + +struct cma *dma_contiguous_default_area; + +#ifdef CONFIG_CMA_SIZE_MBYTES +#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES +#else +#define CMA_SIZE_MBYTES 0 +#endif + +/* + * Default global CMA area size can be defined in kernel's .config. + * This is usefull mainly for distro maintainers to create a kernel + * that works correctly for most supported systems. + * The size can be set in bytes or as a percentage of the total memory + * in the system. + * + * Users, who want to set the size of global CMA area for their system + * should use cma= kernel parameter. 
+ */ +static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M; +static long size_cmdline = -1; + +static int __init early_cma(char *p) +{ + pr_debug("%s(%s)\n", __func__, p); + size_cmdline = memparse(p, &p); + return 0; +} +early_param("cma", early_cma); + +#ifdef CONFIG_CMA_SIZE_PERCENTAGE + +static unsigned long __init __maybe_unused cma_early_percent_memory(void) +{ + struct memblock_region *reg; + unsigned long total_pages = 0; + + /* + * We cannot use memblock_phys_mem_size() here, because + * memblock_analyze() has not been called yet. + */ + for_each_memblock(memory, reg) + total_pages += memblock_region_memory_end_pfn(reg) - + memblock_region_memory_base_pfn(reg); + + return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT; +} + +#else + +static inline __maybe_unused unsigned long cma_early_percent_memory(void) +{ + return 0; +} + +#endif + +/** + * dma_contiguous_reserve() - reserve area for contiguous memory handling + * @limit: End address of the reserved memory (optional, 0 for any). + * + * This function reserves memory from early allocator. It should be + * called by arch specific code once the early allocator (memblock or bootmem) + * has been activated and all other subsystems have already allocated/reserved + * memory. + */ +void __init dma_contiguous_reserve(phys_addr_t limit) +{ + unsigned long selected_size = 0; + + pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); + + if (size_cmdline != -1) { + selected_size = size_cmdline; + } else { +#ifdef CONFIG_CMA_SIZE_SEL_MBYTES + selected_size = size_bytes; +#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE) + selected_size = cma_early_percent_memory(); +#elif defined(CONFIG_CMA_SIZE_SEL_MIN) + selected_size = min(size_bytes, cma_early_percent_memory()); +#elif defined(CONFIG_CMA_SIZE_SEL_MAX) + selected_size = max(size_bytes, cma_early_percent_memory()); +#endif + } + + if (selected_size) { + pr_debug("%s: reserving %ld MiB for global area\n", __func__, + selected_size / SZ_1M); + + dma_declare_contiguous(NULL, selected_size, 0, limit); + } +}; + +static DEFINE_MUTEX(cma_mutex); + +static __init int cma_activate_area(unsigned long base_pfn, unsigned long count) +{ + unsigned long pfn = base_pfn; + unsigned i = count >> pageblock_order; + struct zone *zone; + + WARN_ON_ONCE(!pfn_valid(pfn)); + zone = page_zone(pfn_to_page(pfn)); + + do { + unsigned j; + base_pfn = pfn; + for (j = pageblock_nr_pages; j; --j, pfn++) { + WARN_ON_ONCE(!pfn_valid(pfn)); + if (page_zone(pfn_to_page(pfn)) != zone) + return -EINVAL; + } + init_cma_reserved_pageblock(pfn_to_page(base_pfn)); + } while (--i); + return 0; +} + +static __init struct cma *cma_create_area(unsigned long base_pfn, + unsigned long count) +{ + int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); + struct cma *cma; + int ret = -ENOMEM; + + pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count); + + cma = kmalloc(sizeof *cma, GFP_KERNEL); + if (!cma) + return ERR_PTR(-ENOMEM); + + cma->base_pfn = base_pfn; + cma->count = count; + cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + + if (!cma->bitmap) + goto no_mem; + + ret = cma_activate_area(base_pfn, count); + if (ret) + goto error; + + pr_debug("%s: returned %p\n", __func__, (void *)cma); + return cma; + +error: + kfree(cma->bitmap); +no_mem: + kfree(cma); + return ERR_PTR(ret); +} + +static struct cma_reserved { + phys_addr_t start; + unsigned long size; + struct device *dev; +} cma_reserved[MAX_CMA_AREAS] __initdata; +static unsigned cma_reserved_count __initdata; + +static int 
__init cma_init_reserved_areas(void) +{ + struct cma_reserved *r = cma_reserved; + unsigned i = cma_reserved_count; + + pr_debug("%s()\n", __func__); + + for (; i; --i, ++r) { + struct cma *cma; + cma = cma_create_area(PFN_DOWN(r->start), + r->size >> PAGE_SHIFT); + if (!IS_ERR(cma)) + dev_set_cma_area(r->dev, cma); + else + printk(KERN_ERR "%s() cma_create_area error for %ud\n", + __func__, i); + } + return 0; +} +core_initcall(cma_init_reserved_areas); + +/** + * dma_declare_contiguous() - reserve area for contiguous memory handling + * for particular device + * @dev: Pointer to device structure. + * @size: Size of the reserved memory. + * @base: Start address of the reserved memory (optional, 0 for any). + * @limit: End address of the reserved memory (optional, 0 for any). + * + * This function reserves memory for specified device. It should be + * called by board specific code when early allocator (memblock or bootmem) + * is still activate. + */ +int __init dma_declare_contiguous(struct device *dev, unsigned long size, + phys_addr_t base, phys_addr_t limit) +{ + struct cma_reserved *r = &cma_reserved[cma_reserved_count]; + unsigned long alignment; + + pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__, + (unsigned long)size, (unsigned long)base, + (unsigned long)limit); + + /* Sanity checks */ + if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) { + pr_err("Not enough slots for CMA reserved regions!\n"); + return -ENOSPC; + } + + if (!size) + return -EINVAL; + + /* Sanitise input arguments */ + alignment = PAGE_SIZE << max(MAX_ORDER-1, pageblock_order); + base = ALIGN(base, alignment); + size = ALIGN(size, alignment); + limit &= ~(alignment - 1); + + /* Reserve memory */ + if (base) { + if (memblock_is_region_reserved(base, size) || + memblock_reserve(base, size) < 0) { + base = -EBUSY; + goto err; + } + } else { + /* + * Use __memblock_alloc_base() since + * memblock_alloc_base() panic()s. + */ + phys_addr_t addr = __memblock_alloc_base(size, alignment, + limit); + if (!addr) { + base = -ENOMEM; + goto err; + } else if (addr + size > ~(unsigned long)0) { + memblock_free(addr, size); + base = -EINVAL; + goto err; + } else { + base = addr; + } + } + + /* + * Each reserved area must be initialised later, when more kernel + * subsystems (like slab allocator) are available. + */ + r->start = base; + r->size = size; + r->dev = dev; + cma_reserved_count++; + pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M, + (unsigned long)base); + + /* Architecture specific contiguous memory fixup. 
*/ + dma_contiguous_early_fixup(base, size); + return 0; +err: + pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M); + return base; +} + +static struct page *__dma_alloc_from_contiguous(struct device *dev, int count, + unsigned int align) +{ + unsigned long mask, pfn, pageno, start = 0; + struct cma *cma = dev_get_cma_area(dev); + int ret; + + if (!cma || !cma->count) + return NULL; + + if (align > CONFIG_CMA_ALIGNMENT) + align = CONFIG_CMA_ALIGNMENT; + + pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma, + count, align); + + if (!count) + return NULL; + + mask = (1 << align) - 1; + + mutex_lock(&cma_mutex); + + for (;;) { + pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count, + start, count, mask); + if (pageno >= cma->count) { + printk(KERN_ERR "%s : cma->count is %lu, " + "pageno is %lu\n", __func__, + cma->count, pageno); + ret = -ENOMEM; + goto error; + } + + pfn = cma->base_pfn + pageno; + ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA); + if (ret == 0) { + bitmap_set(cma->bitmap, pageno, count); + break; + } else if (ret != -EBUSY && ret != -EAGAIN) { + goto error; + } + pr_debug("%s(): memory range at %p is busy, retrying\n", + __func__, pfn_to_page(pfn)); + /* try again with a bit different memory target */ + start = pageno + mask + 1; + } + + mutex_unlock(&cma_mutex); + + pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn)); + return pfn_to_page(pfn); +error: + pr_err("%s(): returned error (%d)\n", __func__, ret); + mutex_unlock(&cma_mutex); + return NULL; +} + +struct dma_prepare_alloc_ctx { + struct device *dev; + int count; + unsigned int align; + struct work_struct work; + struct list_head node; + struct page *result; +}; + +/* list of asynchronous allocations */ +static LIST_HEAD(dma_prepare_alloc_head); +/* protects dma_prepare_alloc_head */ +static DEFINE_SPINLOCK(dma_prepare_alloc_lock); + +static void do_dma_prepare_alloc(struct work_struct *work) +{ + struct dma_prepare_alloc_ctx *ctx = container_of( + work, struct dma_prepare_alloc_ctx, work); + + ctx->result = __dma_alloc_from_contiguous(ctx->dev, ctx->count, + ctx->align); + printk(KERN_INFO "%s[%d]: alloc %d pages for dev %s %s\n", + __func__, __LINE__, ctx->count, dev_name(ctx->dev), + ctx->result ? "succeeded" : "failed"); +} + +/** + * dma_prepare_alloc_from_contiguous() - prepare allocation of pages in + * an asynchronous thread + * @dev: Pointer to device for which the allocation is performed. + * @count: Requested number of pages. + * @align: Requested alignment of pages (in PAGE_SIZE order). + * + * This function prepares an allocation by scheduling it in a separate work + * thread. Once dma_alloc_from_contiguous is called then it searches + * the dma_prepare_alloc_head list in the first place. + */ +int dma_prepare_alloc_from_contiguous(struct device *dev, int count, + unsigned int align) +{ + struct dma_prepare_alloc_ctx *ctx; + unsigned long flags; + + ctx = kzalloc(sizeof *ctx, GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->dev = dev; + ctx->count = count; + ctx->align = align; + INIT_WORK(&ctx->work, do_dma_prepare_alloc); + + spin_lock_irqsave(&dma_prepare_alloc_lock, flags); + list_add_tail(&ctx->node, &dma_prepare_alloc_head); + spin_unlock_irqrestore(&dma_prepare_alloc_lock, flags); + + queue_work(system_long_wq, &ctx->work); + return 0; +} + +/** + * dma_alloc_from_contiguous() - allocate pages from contiguous area + * @dev: Pointer to device for which the allocation is performed. + * @count: Requested number of pages. 
+ * @align: Requested alignment of pages (in PAGE_SIZE order). + * + * This function allocates memory buffer for specified device. It uses + * device specific contiguous memory area if available or the default + * global one. Requires architecture specific get_dev_cma_area() helper + * function. + */ +struct page *dma_alloc_from_contiguous(struct device *dev, int count, + unsigned int align) +{ + unsigned long flags; + struct list_head *i, *n; + struct dma_prepare_alloc_ctx *ctx = NULL; + struct page *result = NULL; + + /* search for a cached allocation */ + spin_lock_irqsave(&dma_prepare_alloc_lock, flags); + list_for_each_safe(i, n, &dma_prepare_alloc_head) { + ctx = list_entry(i, struct dma_prepare_alloc_ctx, node); + if (ctx->dev == dev && ctx->count == count && + ctx->align == align) { + list_del(i); + break; + } + ctx = NULL; + } + spin_unlock_irqrestore(&dma_prepare_alloc_lock, flags); + + /* If a cached allocation is found then use its result */ + if (ctx) { + flush_work(&ctx->work); + result = ctx->result; + kfree(ctx); + } + /* fallback to sync allocation */ + if (!result) + result = __dma_alloc_from_contiguous(dev, count, align); + return result; +} + +/** + * dma_release_from_contiguous() - release allocated pages + * @dev: Pointer to device for which the pages were allocated. + * @pages: Allocated pages. + * @count: Number of allocated pages. + * + * This function releases memory allocated by dma_alloc_from_contiguous(). + * It returns false when provided pages do not belong to contiguous area and + * true otherwise. + */ +bool dma_release_from_contiguous(struct device *dev, struct page *pages, + int count) +{ + struct cma *cma = dev_get_cma_area(dev); + unsigned long pfn; + + if (!cma || !pages) + return false; + + pr_debug("%s(page %p)\n", __func__, (void *)pages); + + pfn = page_to_pfn(pages); + + if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) { + pr_info("%s : return false\n", __func__); + return false; + } + + VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); + + mutex_lock(&cma_mutex); + bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count); + free_contig_range(pfn, count); + mutex_unlock(&cma_mutex); + + return true; +} diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 06ed6b4..3719c94 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -226,13 +226,13 @@ static ssize_t firmware_loading_store(struct device *dev, int loading = simple_strtol(buf, NULL, 10); int i; + mutex_lock(&fw_lock); + + if (!fw_priv->fw) + goto out; + switch (loading) { case 1: - mutex_lock(&fw_lock); - if (!fw_priv->fw) { - mutex_unlock(&fw_lock); - break; - } firmware_free_data(fw_priv->fw); memset(fw_priv->fw, 0, sizeof(struct firmware)); /* If the pages are not owned by 'struct firmware' */ @@ -243,7 +243,6 @@ static ssize_t firmware_loading_store(struct device *dev, fw_priv->page_array_size = 0; fw_priv->nr_pages = 0; set_bit(FW_STATUS_LOADING, &fw_priv->status); - mutex_unlock(&fw_lock); break; case 0: if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { @@ -274,7 +273,8 @@ static ssize_t firmware_loading_store(struct device *dev, fw_load_abort(fw_priv); break; } - +out: + mutex_unlock(&fw_lock); return count; } diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c new file mode 100644 index 0000000..6e6b6a1 --- /dev/null +++ b/drivers/base/iommu.c @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 
+ * Author: Joerg Roedel <joerg.roedel@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/bug.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/iommu.h> + +static struct iommu_ops *iommu_ops; + +void register_iommu(struct iommu_ops *ops) +{ + if (iommu_ops) + BUG(); + + iommu_ops = ops; +} + +bool iommu_found(void) +{ + return iommu_ops != NULL; +} +EXPORT_SYMBOL_GPL(iommu_found); + +struct iommu_domain *iommu_domain_alloc(void) +{ + struct iommu_domain *domain; + int ret; + + domain = kmalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) + return NULL; + + ret = iommu_ops->domain_init(domain); + if (ret) + goto out_free; + + return domain; + +out_free: + kfree(domain); + + return NULL; +} +EXPORT_SYMBOL_GPL(iommu_domain_alloc); + +void iommu_domain_free(struct iommu_domain *domain) +{ + iommu_ops->domain_destroy(domain); + kfree(domain); +} +EXPORT_SYMBOL_GPL(iommu_domain_free); + +int iommu_attach_device(struct iommu_domain *domain, struct device *dev) +{ + return iommu_ops->attach_dev(domain, dev); +} +EXPORT_SYMBOL_GPL(iommu_attach_device); + +void iommu_detach_device(struct iommu_domain *domain, struct device *dev) +{ + iommu_ops->detach_dev(domain, dev); +} +EXPORT_SYMBOL_GPL(iommu_detach_device); + +phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, + unsigned long iova) +{ + return iommu_ops->iova_to_phys(domain, iova); +} +EXPORT_SYMBOL_GPL(iommu_iova_to_phys); + +int iommu_domain_has_cap(struct iommu_domain *domain, + unsigned long cap) +{ + return iommu_ops->domain_has_cap(domain, cap); +} +EXPORT_SYMBOL_GPL(iommu_domain_has_cap); + +int iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, int gfp_order, int prot) +{ + unsigned long invalid_mask; + size_t size; + + size = 0x1000UL << gfp_order; + invalid_mask = size - 1; + + BUG_ON((iova | paddr) & invalid_mask); + + return iommu_ops->map(domain, iova, paddr, gfp_order, prot); +} +EXPORT_SYMBOL_GPL(iommu_map); + +int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order) +{ + unsigned long invalid_mask; + size_t size; + + size = 0x1000UL << gfp_order; + invalid_mask = size - 1; + + BUG_ON(iova & invalid_mask); + + return iommu_ops->unmap(domain, iova, gfp_order); +} +EXPORT_SYMBOL_GPL(iommu_unmap); diff --git a/drivers/base/sw_sync.c b/drivers/base/sw_sync.c new file mode 100644 index 0000000..21ddf4f --- /dev/null +++ b/drivers/base/sw_sync.c @@ -0,0 +1,256 @@ +/* + * drivers/base/sw_sync.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/sw_sync.h> +#include <linux/syscalls.h> +#include <linux/uaccess.h> + +static int sw_sync_cmp(u32 a, u32 b) +{ + if (a == b) + return 0; + + return ((s32)a - (s32)b) < 0 ? -1 : 1; +} + +struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value) +{ + struct sw_sync_pt *pt; + + pt = (struct sw_sync_pt *) + sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt)); + + pt->value = value; + + return (struct sync_pt *)pt; +} + +static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt->parent; + + return (struct sync_pt *) sw_sync_pt_create(obj, pt->value); +} + +static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt->parent; + + return sw_sync_cmp(obj->value, pt->value) >= 0; +} + +static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b) +{ + struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a; + struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b; + + return sw_sync_cmp(pt_a->value, pt_b->value); +} + +static void sw_sync_print_obj(struct seq_file *s, + struct sync_timeline *sync_timeline) +{ + struct sw_sync_timeline *obj = (struct sw_sync_timeline *)sync_timeline; + + seq_printf(s, "%d", obj->value); +} + +static void sw_sync_print_pt(struct seq_file *s, struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt->parent; + + seq_printf(s, "%d / %d", pt->value, obj->value); +} + +static int sw_sync_fill_driver_data(struct sync_pt *sync_pt, + void *data, int size) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + + if (size < sizeof(pt->value)) + return -ENOMEM; + + memcpy(data, &pt->value, sizeof(pt->value)); + + return sizeof(pt->value); +} + +struct sync_timeline_ops sw_sync_timeline_ops = { + .driver_name = "sw_sync", + .dup = sw_sync_pt_dup, + .has_signaled = sw_sync_pt_has_signaled, + .compare = sw_sync_pt_compare, + .print_obj = sw_sync_print_obj, + .print_pt = sw_sync_print_pt, + .fill_driver_data = sw_sync_fill_driver_data, +}; + + +struct sw_sync_timeline *sw_sync_timeline_create(const char *name) +{ + struct sw_sync_timeline *obj = (struct sw_sync_timeline *) + sync_timeline_create(&sw_sync_timeline_ops, + sizeof(struct sw_sync_timeline), + name); + + return obj; +} + +void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) +{ + obj->value += inc; + + sync_timeline_signal(&obj->obj); +} + + +#ifdef CONFIG_SW_SYNC_USER +/* *WARNING* + * + * improper use of this can result in deadlocking kernel drivers from userspace. 
+ */ + +/* opening sw_sync create a new sync obj */ +int sw_sync_open(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj; + char task_comm[TASK_COMM_LEN]; + + get_task_comm(task_comm, current); + + obj = sw_sync_timeline_create(task_comm); + if (obj == NULL) + return -ENOMEM; + + file->private_data = obj; + + return 0; +} + +int sw_sync_release(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj = file->private_data; + sync_timeline_destroy(&obj->obj); + return 0; +} + +long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, unsigned long arg) +{ + int fd = get_unused_fd(); + int err; + struct sync_pt *pt; + struct sync_fence *fence; + struct sw_sync_create_fence_data data; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + pt = sw_sync_pt_create(obj, data.value); + if (pt == NULL) { + err = -ENOMEM; + goto err; + } + + data.name[sizeof(data.name) - 1] = '\0'; + fence = sync_fence_create(data.name, pt); + if (fence == NULL) { + sync_pt_free(pt); + err = -ENOMEM; + goto err; + } + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + sync_fence_put(fence); + err = -EFAULT; + goto err; + } + + sync_fence_install(fence, fd); + + return 0; + +err: + put_unused_fd(fd); + return err; +} + +long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg) +{ + u32 value; + + if (copy_from_user(&value, (void __user *)arg, sizeof(value))) + return -EFAULT; + + sw_sync_timeline_inc(obj, value); + + return 0; +} + +long sw_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct sw_sync_timeline *obj = file->private_data; + + switch (cmd) { + case SW_SYNC_IOC_CREATE_FENCE: + return sw_sync_ioctl_create_fence(obj, arg); + + case SW_SYNC_IOC_INC: + return sw_sync_ioctl_inc(obj, arg); + + default: + return -ENOTTY; + } +} + +static const struct file_operations sw_sync_fops = { + .owner = THIS_MODULE, + .open = sw_sync_open, + .release = sw_sync_release, + .unlocked_ioctl = sw_sync_ioctl, +}; + +static struct miscdevice sw_sync_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sw_sync", + .fops = &sw_sync_fops, +}; + +int __init sw_sync_device_init(void) +{ + return misc_register(&sw_sync_dev); +} + +void __exit sw_sync_device_remove(void) +{ + misc_deregister(&sw_sync_dev); +} + +module_init(sw_sync_device_init); +module_exit(sw_sync_device_remove); + +#endif /* CONFIG_SW_SYNC_USER */ diff --git a/drivers/base/sync.c b/drivers/base/sync.c new file mode 100644 index 0000000..d6913f8 --- /dev/null +++ b/drivers/base/sync.c @@ -0,0 +1,801 @@ +/* + * drivers/base/sync.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/debugfs.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/poll.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/sync.h> +#include <linux/uaccess.h> + +#include <linux/anon_inodes.h> + +static void sync_fence_signal_pt(struct sync_pt *pt); +static int _sync_pt_has_signaled(struct sync_pt *pt); + +static LIST_HEAD(sync_timeline_list_head); +static DEFINE_SPINLOCK(sync_timeline_list_lock); + +static LIST_HEAD(sync_fence_list_head); +static DEFINE_SPINLOCK(sync_fence_list_lock); + +struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, + int size, const char *name) +{ + struct sync_timeline *obj; + unsigned long flags; + + if (size < sizeof(struct sync_timeline)) + return NULL; + + obj = kzalloc(size, GFP_KERNEL); + if (obj == NULL) + return NULL; + + obj->ops = ops; + strlcpy(obj->name, name, sizeof(obj->name)); + + INIT_LIST_HEAD(&obj->child_list_head); + spin_lock_init(&obj->child_list_lock); + + INIT_LIST_HEAD(&obj->active_list_head); + spin_lock_init(&obj->active_list_lock); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + return obj; +} + +static void sync_timeline_free(struct sync_timeline *obj) +{ + unsigned long flags; + + if (obj->ops->release_obj) + obj->ops->release_obj(obj); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_del(&obj->sync_timeline_list); + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + kfree(obj); +} + +void sync_timeline_destroy(struct sync_timeline *obj) +{ + unsigned long flags; + bool needs_freeing; + + spin_lock_irqsave(&obj->child_list_lock, flags); + obj->destroyed = true; + needs_freeing = list_empty(&obj->child_list_head); + spin_unlock_irqrestore(&obj->child_list_lock, flags); + + if (needs_freeing) + sync_timeline_free(obj); + else + sync_timeline_signal(obj); +} + +static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt) +{ + unsigned long flags; + + pt->parent = obj; + + spin_lock_irqsave(&obj->child_list_lock, flags); + list_add_tail(&pt->child_list, &obj->child_list_head); + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +static void sync_timeline_remove_pt(struct sync_pt *pt) +{ + struct sync_timeline *obj = pt->parent; + unsigned long flags; + bool needs_freeing; + + spin_lock_irqsave(&obj->active_list_lock, flags); + if (!list_empty(&pt->active_list)) + list_del_init(&pt->active_list); + spin_unlock_irqrestore(&obj->active_list_lock, flags); + + spin_lock_irqsave(&obj->child_list_lock, flags); + list_del(&pt->child_list); + needs_freeing = obj->destroyed && list_empty(&obj->child_list_head); + spin_unlock_irqrestore(&obj->child_list_lock, flags); + + if (needs_freeing) + sync_timeline_free(obj); +} + +void sync_timeline_signal(struct sync_timeline *obj) +{ + unsigned long flags; + LIST_HEAD(signaled_pts); + struct list_head *pos, *n; + + spin_lock_irqsave(&obj->active_list_lock, flags); + + list_for_each_safe(pos, n, &obj->active_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, active_list); + + if (_sync_pt_has_signaled(pt)) + list_move(pos, &signaled_pts); + } + + spin_unlock_irqrestore(&obj->active_list_lock, flags); + + list_for_each_safe(pos, n, &signaled_pts) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, active_list); + + list_del_init(pos); + 
sync_fence_signal_pt(pt); + } +} + +struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) +{ + struct sync_pt *pt; + + if (size < sizeof(struct sync_pt)) + return NULL; + + pt = kzalloc(size, GFP_KERNEL); + if (pt == NULL) + return NULL; + + INIT_LIST_HEAD(&pt->active_list); + sync_timeline_add_pt(parent, pt); + + return pt; +} + +void sync_pt_free(struct sync_pt *pt) +{ + if (pt->parent->ops->free_pt) + pt->parent->ops->free_pt(pt); + + sync_timeline_remove_pt(pt); + + kfree(pt); +} + +/* call with pt->parent->active_list_lock held */ +static int _sync_pt_has_signaled(struct sync_pt *pt) +{ + int old_status = pt->status; + + if (!pt->status) + pt->status = pt->parent->ops->has_signaled(pt); + + if (!pt->status && pt->parent->destroyed) + pt->status = -ENOENT; + + if (pt->status != old_status) + pt->timestamp = ktime_get(); + + return pt->status; +} + +static struct sync_pt *sync_pt_dup(struct sync_pt *pt) +{ + return pt->parent->ops->dup(pt); +} + +/* Adds a sync pt to the active queue. Called when added to a fence */ +static void sync_pt_activate(struct sync_pt *pt) +{ + struct sync_timeline *obj = pt->parent; + unsigned long flags; + int err; + + spin_lock_irqsave(&obj->active_list_lock, flags); + + err = _sync_pt_has_signaled(pt); + if (err != 0) + goto out; + + list_add_tail(&pt->active_list, &obj->active_list_head); + +out: + spin_unlock_irqrestore(&obj->active_list_lock, flags); +} + +static int sync_fence_release(struct inode *inode, struct file *file); +static unsigned int sync_fence_poll(struct file *file, poll_table *wait); +static long sync_fence_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); + + +static const struct file_operations sync_fence_fops = { + .release = sync_fence_release, + .poll = sync_fence_poll, + .unlocked_ioctl = sync_fence_ioctl, +}; + +static struct sync_fence *sync_fence_alloc(const char *name) +{ + struct sync_fence *fence; + unsigned long flags; + + fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL); + if (fence == NULL) + return NULL; + + fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops, + fence, 0); + if (fence->file == NULL) + goto err; + + strlcpy(fence->name, name, sizeof(fence->name)); + + INIT_LIST_HEAD(&fence->pt_list_head); + INIT_LIST_HEAD(&fence->waiter_list_head); + spin_lock_init(&fence->waiter_list_lock); + + init_waitqueue_head(&fence->wq); + + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + + return fence; + +err: + kfree(fence); + return NULL; +} + +/* TODO: implement a create which takes more that one sync_pt */ +struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) +{ + struct sync_fence *fence; + + if (pt->fence) + return NULL; + + fence = sync_fence_alloc(name); + if (fence == NULL) + return NULL; + + pt->fence = fence; + list_add(&pt->pt_list, &fence->pt_list_head); + sync_pt_activate(pt); + + return fence; +} + +static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src) +{ + struct list_head *pos; + + list_for_each(pos, &src->pt_list_head) { + struct sync_pt *orig_pt = + container_of(pos, struct sync_pt, pt_list); + struct sync_pt *new_pt = sync_pt_dup(orig_pt); + + if (new_pt == NULL) + return -ENOMEM; + + new_pt->fence = dst; + list_add(&new_pt->pt_list, &dst->pt_list_head); + sync_pt_activate(new_pt); + } + + return 0; +} + +static void sync_fence_free_pts(struct sync_fence *fence) +{ + struct list_head *pos, *n; + 
+ list_for_each_safe(pos, n, &fence->pt_list_head) { + struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + sync_pt_free(pt); + } +} + +struct sync_fence *sync_fence_fdget(int fd) +{ + struct file *file = fget(fd); + + if (file == NULL) + return NULL; + + if (file->f_op != &sync_fence_fops) + goto err; + + return file->private_data; + +err: + fput(file); + return NULL; +} + +void sync_fence_put(struct sync_fence *fence) +{ + fput(fence->file); +} + +void sync_fence_install(struct sync_fence *fence, int fd) +{ + fd_install(fd, fence->file); +} + +static int sync_fence_get_status(struct sync_fence *fence) +{ + struct list_head *pos; + int status = 1; + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + int pt_status = pt->status; + + if (pt_status < 0) { + status = pt_status; + break; + } else if (status == 1) { + status = pt_status; + } + } + + return status; +} + +struct sync_fence *sync_fence_merge(const char *name, + struct sync_fence *a, struct sync_fence *b) +{ + struct sync_fence *fence; + int err; + + fence = sync_fence_alloc(name); + if (fence == NULL) + return NULL; + + err = sync_fence_copy_pts(fence, a); + if (err < 0) + goto err; + + err = sync_fence_copy_pts(fence, b); + if (err < 0) + goto err; + + fence->status = sync_fence_get_status(fence); + + return fence; +err: + sync_fence_free_pts(fence); + kfree(fence); + return NULL; +} + +static void sync_fence_signal_pt(struct sync_pt *pt) +{ + LIST_HEAD(signaled_waiters); + struct sync_fence *fence = pt->fence; + struct list_head *pos; + struct list_head *n; + unsigned long flags; + int status; + + status = sync_fence_get_status(fence); + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + /* + * this should protect against two threads racing on the signaled + * false -> true transition + */ + if (status && !fence->status) { + list_for_each_safe(pos, n, &fence->waiter_list_head) + list_move(pos, &signaled_waiters); + + fence->status = status; + } else { + status = 0; + } + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + + if (status) { + list_for_each_safe(pos, n, &signaled_waiters) { + struct sync_fence_waiter *waiter = + container_of(pos, struct sync_fence_waiter, + waiter_list); + + waiter->callback(fence, waiter->callback_data); + list_del(pos); + kfree(waiter); + } + wake_up(&fence->wq); + } +} + +int sync_fence_wait_async(struct sync_fence *fence, + void (*callback)(struct sync_fence *, void *data), + void *callback_data) +{ + struct sync_fence_waiter *waiter; + unsigned long flags; + int err = 0; + + waiter = kzalloc(sizeof(struct sync_fence_waiter), GFP_KERNEL); + if (waiter == NULL) + return -ENOMEM; + + waiter->callback = callback; + waiter->callback_data = callback_data; + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + + if (fence->status) { + kfree(waiter); + err = fence->status; + goto out; + } + + list_add_tail(&waiter->waiter_list, &fence->waiter_list_head); +out: + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + + return err; +} + +int sync_fence_wait(struct sync_fence *fence, long timeout) +{ + int err; + + if (timeout) { + timeout = msecs_to_jiffies(timeout); + err = wait_event_interruptible_timeout(fence->wq, + fence->status != 0, + timeout); + } else { + err = wait_event_interruptible(fence->wq, fence->status != 0); + } + + if (err < 0) + return err; + + if (fence->status < 0) + return fence->status; + + if (fence->status == 0) + return -ETIME; + + return 0; +} + +static int sync_fence_release(struct 
inode *inode, struct file *file) +{ + struct sync_fence *fence = file->private_data; + unsigned long flags; + + sync_fence_free_pts(fence); + + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_del(&fence->sync_fence_list); + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + + kfree(fence); + + return 0; +} + +static unsigned int sync_fence_poll(struct file *file, poll_table *wait) +{ + struct sync_fence *fence = file->private_data; + + poll_wait(file, &fence->wq, wait); + + if (fence->status == 1) + return POLLIN; + else if (fence->status < 0) + return POLLERR; + else + return 0; +} + +static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg) +{ + __u32 value; + + if (copy_from_user(&value, (void __user *)arg, sizeof(value))) + return -EFAULT; + + return sync_fence_wait(fence, value); +} + +static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) +{ + int fd = get_unused_fd(); + int err; + struct sync_fence *fence2, *fence3; + struct sync_merge_data data; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + fence2 = sync_fence_fdget(data.fd2); + if (fence2 == NULL) { + err = -ENOENT; + goto err_put_fd; + } + + data.name[sizeof(data.name) - 1] = '\0'; + fence3 = sync_fence_merge(data.name, fence, fence2); + if (fence3 == NULL) { + err = -ENOMEM; + goto err_put_fence2; + } + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + err = -EFAULT; + goto err_put_fence3; + } + + sync_fence_install(fence3, fd); + sync_fence_put(fence2); + return 0; + +err_put_fence3: + sync_fence_put(fence3); + +err_put_fence2: + sync_fence_put(fence2); + +err_put_fd: + put_unused_fd(fd); + return err; +} + +int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) +{ + struct sync_pt_info *info = data; + int ret; + + if (size < sizeof(struct sync_pt_info)) + return -ENOMEM; + + info->len = sizeof(struct sync_pt_info); + + if (pt->parent->ops->fill_driver_data) { + ret = pt->parent->ops->fill_driver_data(pt, info->driver_data, + size - sizeof(*info)); + if (ret < 0) + return ret; + + info->len += ret; + } + + strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name)); + strlcpy(info->driver_name, pt->parent->ops->driver_name, + sizeof(info->driver_name)); + info->status = pt->status; + info->timestamp_ns = ktime_to_ns(pt->timestamp); + + return info->len; +} + + +static long sync_fence_ioctl_fence_info(struct sync_fence *fence, + unsigned long arg) +{ + struct sync_fence_info_data *data; + struct list_head *pos; + __u32 size; + __u32 len = 0; + int ret; + + if (copy_from_user(&size, (void __user *)arg, sizeof(size))) + return -EFAULT; + + if (size < sizeof(struct sync_fence_info_data)) + return -EINVAL; + + if (size > 4096) + size = 4096; + + data = kzalloc(size, GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + strlcpy(data->name, fence->name, sizeof(data->name)); + data->status = fence->status; + len = sizeof(struct sync_fence_info_data); + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, pt_list); + + ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); + + if (ret < 0) + goto out; + + len += ret; + } + + data->len = len; + + if (copy_to_user((void __user *)arg, data, len)) + ret = -EFAULT; + else + ret = 0; + +out: + kfree(data); + + return ret; +} + +static long sync_fence_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct sync_fence *fence = file->private_data; + switch (cmd) { + case 
SYNC_IOC_WAIT: + return sync_fence_ioctl_wait(fence, arg); + + case SYNC_IOC_MERGE: + return sync_fence_ioctl_merge(fence, arg); + + case SYNC_IOC_FENCE_INFO: + return sync_fence_ioctl_fence_info(fence, arg); + + default: + return -ENOTTY; + } +} + +#ifdef CONFIG_DEBUG_FS +static const char *sync_status_str(int status) +{ + if (status > 0) + return "signaled"; + else if (status == 0) + return "active"; + else + return "error"; +} + +static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) +{ + int status = pt->status; + seq_printf(s, " %s%spt %s", + fence ? pt->parent->name : "", + fence ? "_" : "", + sync_status_str(status)); + if (pt->status) { + struct timeval tv = ktime_to_timeval(pt->timestamp); + seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); + } + + if (pt->parent->ops->print_pt) { + seq_printf(s, ": "); + pt->parent->ops->print_pt(s, pt); + } + + seq_printf(s, "\n"); +} + +static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) +{ + struct list_head *pos; + unsigned long flags; + + seq_printf(s, "%s %s", obj->name, obj->ops->driver_name); + + if (obj->ops->print_obj) { + seq_printf(s, ": "); + obj->ops->print_obj(s, obj); + } + + seq_printf(s, "\n"); + + spin_lock_irqsave(&obj->child_list_lock, flags); + list_for_each(pos, &obj->child_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, child_list); + sync_print_pt(s, pt, false); + } + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) +{ + struct list_head *pos; + unsigned long flags; + + seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status)); + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, pt_list); + sync_print_pt(s, pt, true); + } + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + list_for_each(pos, &fence->waiter_list_head) { + struct sync_fence_waiter *waiter = + container_of(pos, struct sync_fence_waiter, + waiter_list); + + seq_printf(s, "waiter %pF %p\n", waiter->callback, + waiter->callback_data); + } + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); +} + +static int sync_debugfs_show(struct seq_file *s, void *unused) +{ + unsigned long flags; + struct list_head *pos; + + seq_printf(s, "objs:\n--------------\n"); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_for_each(pos, &sync_timeline_list_head) { + struct sync_timeline *obj = + container_of(pos, struct sync_timeline, + sync_timeline_list); + + sync_print_obj(s, obj); + seq_printf(s, "\n"); + } + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + seq_printf(s, "fences:\n--------------\n"); + + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_for_each(pos, &sync_fence_list_head) { + struct sync_fence *fence = + container_of(pos, struct sync_fence, sync_fence_list); + + sync_print_fence(s, fence); + seq_printf(s, "\n"); + } + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + return 0; +} + +static int sync_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, sync_debugfs_show, inode->i_private); +} + +static const struct file_operations sync_debugfs_fops = { + .open = sync_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static __init int sync_debugfs_init(void) +{ + debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); + return 0; +} + +late_initcall(sync_debugfs_init); + +#endif |
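
The new dma-contiguous.c splits its API in two: board code reserves areas early via dma_declare_contiguous() (or relies on the global area set up by dma_contiguous_reserve()), and drivers allocate from them at runtime with dma_alloc_from_contiguous() / dma_release_from_contiguous(). A minimal sketch of both halves — foo_dev, foo_reserve and foo_probe are hypothetical names for illustration; only the dma_* calls come from this patch:

```c
/*
 * Sketch of the two halves of the CMA API added above. foo_dev and the
 * sizes are illustrative; only the dma_* calls are from this patch.
 */
#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>

static struct device foo_dev;		/* hypothetical device */

/* Board code: must run while memblock/bootmem is still active, i.e. from
 * the machine's early memory-reservation hook, before slab comes up. */
static void __init foo_reserve(void)
{
	/* 16 MiB device-private area, any base address, no upper limit. */
	if (dma_declare_contiguous(&foo_dev, 16 << 20, 0, 0))
		pr_warn("foo: CMA reservation failed\n");
}

/* Driver code: 256 contiguous pages (1 MiB with 4 KiB pages), alignment
 * order 8; __dma_alloc_from_contiguous() caps align at CONFIG_CMA_ALIGNMENT. */
static int foo_probe(void)
{
	struct page *pages = dma_alloc_from_contiguous(&foo_dev, 256, 8);

	if (!pages)
		return -ENOMEM;

	/* ... program the hardware with the physical address of pages ... */

	dma_release_from_contiguous(&foo_dev, pages, 256);
	return 0;
}
```

The default global area, used when dev_get_cma_area() finds no per-device one, is sized by the CONFIG_CMA_SIZE_* options or the cma= boot parameter (which takes precedence); arch code triggers the reservation via dma_contiguous_reserve(limit).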
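
dma_prepare_alloc_from_contiguous() — which appears to be an addition relative to the mainline CMA patches — queues the allocation on system_long_wq; a later dma_alloc_from_contiguous() with the identical (dev, count, align) triple consumes the cached result and only falls back to a synchronous allocation if the prepared one failed. Continuing the hypothetical foo_dev sketch above:

```c
/* Continuing the foo_dev sketch: asynchronous warm-up of an allocation. */
static int foo_probe_async(void)
{
	struct page *pages;

	/* Queue the expensive allocation on system_long_wq early on. */
	dma_prepare_alloc_from_contiguous(&foo_dev, 256, 8);

	/* Later: the identical (dev, count, align) triple finds the cached
	 * entry, flush_work()s it if still pending, and only falls back to
	 * a synchronous allocation if the prepared one failed. */
	pages = dma_alloc_from_contiguous(&foo_dev, 256, 8);

	return pages ? 0 : -ENOMEM;
}
```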
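
The iommu.c added here is a thin dispatcher over a single globally registered iommu_ops: register_iommu() BUG()s on a second registration, and iommu_map()/iommu_unmap() BUG() on addresses not aligned to the mapping size (the 0x1000UL << gfp_order mask). A sketch of a consumer, assuming the IOMMU_READ/IOMMU_WRITE prot flags from linux/iommu.h (not part of this diff) and a hypothetical foo_iommu_map_one():

```c
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>

/* Map one 4 KiB page (gfp_order 0) at a fixed IOVA, DMA, then tear down.
 * IOVA and paddr must be size-aligned: iommu_map() BUG()s otherwise. */
static int foo_iommu_map_one(struct device *dev, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	if (!iommu_found())
		return -ENODEV;

	domain = iommu_domain_alloc();
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	ret = iommu_map(domain, 0x1000, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... device DMAs through IOVA 0x1000 here ... */

	iommu_unmap(domain, 0x1000, 0);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}
```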
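
On the kernel side, sw_sync gives a driver a monotonically increasing 32-bit timeline: sync points created with a target value become signaled once sw_sync_timeline_inc() pushes the counter past them (sw_sync_cmp() does the comparison with wrap-safe signed arithmetic). A minimal producer/consumer sketch using only symbols defined in this patch; allocation error handling is elided, and in real use the fence would be handed to another thread or process before waiting — blocking on your own unsignaled fence is exactly the deadlock the SW_SYNC_USER Kconfig warning is about:

```c
#include <linux/errno.h>
#include <linux/sw_sync.h>

/* Error handling for pt/fence allocation elided for brevity. */
static int sw_sync_example(void)
{
	struct sw_sync_timeline *tl;
	struct sync_pt *pt;
	struct sync_fence *fence;
	int ret;

	tl = sw_sync_timeline_create("example");
	if (!tl)
		return -ENOMEM;

	/* Signals once the timeline counter reaches 1 (wrap-safe compare). */
	pt = sw_sync_pt_create(tl, 1);
	fence = sync_fence_create("example_fence", pt);	/* fence owns pt now */

	sw_sync_timeline_inc(tl, 1);		/* producer: signal the point */

	ret = sync_fence_wait(fence, 100);	/* 0 here; -ETIME on timeout */

	sync_fence_put(fence);
	sync_timeline_destroy(&tl->obj);
	return ret;
}
```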
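
With CONFIG_SW_SYNC_USER, /dev/sw_sync exposes the same machinery to userspace: opening the device creates a timeline named after the task, SW_SYNC_IOC_CREATE_FENCE installs a fence fd, SW_SYNC_IOC_INC advances the counter, and the fence fd answers SYNC_IOC_WAIT. A sketch, assuming the ioctl macros and struct layouts from the linux/sw_sync.h and linux/sync.h uapi headers (which are not part of this diff); error checking is elided:

```c
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/sw_sync.h>	/* SW_SYNC_IOC_*, struct sw_sync_create_fence_data */
#include <linux/sync.h>		/* SYNC_IOC_WAIT */

int main(void)
{
	struct sw_sync_create_fence_data data = { .value = 1 };
	uint32_t timeout_ms = 100, inc = 1;
	int timeline_fd, fence_fd;

	snprintf(data.name, sizeof(data.name), "demo_fence");

	timeline_fd = open("/dev/sw_sync", O_RDWR);	/* open == new timeline */
	ioctl(timeline_fd, SW_SYNC_IOC_CREATE_FENCE, &data);
	fence_fd = data.fence;	/* fd installed by sw_sync_ioctl_create_fence() */

	ioctl(timeline_fd, SW_SYNC_IOC_INC, &inc);	/* counter 0 -> 1 */
	ioctl(fence_fd, SYNC_IOC_WAIT, &timeout_ms);	/* 0: already signaled */

	close(fence_fd);
	close(timeline_fd);
	return 0;
}
```

Per sync_fence_ioctl() and sync_fence_poll() above, a fence fd also supports SYNC_IOC_MERGE, SYNC_IOC_FENCE_INFO, and poll() — POLLIN once signaled, POLLERR on error.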