Merge "iommu: iommu-debug: Return an error value on invalid data format" into msm-4.8
diff --git a/arch/arm64/configs/msmskunk-perf_defconfig b/arch/arm64/configs/msmskunk-perf_defconfig
index d39d456..abce231 100644
--- a/arch/arm64/configs/msmskunk-perf_defconfig
+++ b/arch/arm64/configs/msmskunk-perf_defconfig
@@ -294,6 +294,7 @@
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_TEST=y
diff --git a/arch/arm64/configs/msmskunk_defconfig b/arch/arm64/configs/msmskunk_defconfig
index bd59fb4..d5f5054 100644
--- a/arch/arm64/configs/msmskunk_defconfig
+++ b/arch/arm64/configs/msmskunk_defconfig
@@ -300,6 +300,7 @@
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_TEST=y
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index 65ad675..5cd481a 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -36,6 +36,9 @@
#include "mailbox.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/rpmh.h>
+
#define MAX_CMDS_PER_TCS 16
#define MAX_TCS_PER_TYPE 3
#define MAX_TCS_SLOTS (MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
@@ -383,6 +386,8 @@
mbox_chan_received_data(resp->chan, resp->msg);
}
+ trace_rpmh_notify_irq(m, resp->msg->payload[0].addr, resp->err);
+
/* Notify the client that this request is completed. */
send_tcs_response(resp);
irq_clear |= BIT(m);
@@ -397,6 +402,7 @@
static inline void mbox_notify_tx_done(struct mbox_chan *chan,
struct tcs_mbox_msg *msg, int m, int err)
{
+ trace_rpmh_notify(m, msg->payload[0].addr, err);
mbox_chan_txdone(chan, err);
}
@@ -487,6 +493,8 @@
write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, cmd_msgid);
write_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n + i, cmd->addr);
write_tcs_reg(base, TCS_DRV_CMD_DATA, m, n + i, cmd->data);
+ trace_rpmh_send_msg(base, m, n + i,
+ cmd_msgid, cmd->addr, cmd->data, cmd->complete);
}
/* Write the send-after-prev completion bits for the batch */
@@ -830,6 +838,7 @@
/* Only data is write capable */
writel_relaxed(cpu_to_le32(msg->payload[i].data),
addr + offset);
+ trace_rpmh_control_msg(addr + offset, msg->payload[i].data);
addr += TCS_HIDDEN_CMD_SHIFT;
}
}
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index c8fb413..3ae8285 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -1,6 +1,6 @@
menuconfig ION
bool "Ion Memory Manager"
- depends on HAVE_MEMBLOCK && HAS_DMA && MMU
+ depends on HAVE_MEMBLOCK && HAS_DMA && MMU && ION_MSM
select GENERIC_ALLOCATOR
select DMA_SHARED_BUFFER
---help---
@@ -36,19 +36,32 @@
config ION_HISI
tristate "Ion for Hisilicon"
depends on ARCH_HISI && ION
- select ION_OF
help
Choose this option if you wish to use ion on Hisilicon Platform.
source "drivers/staging/android/ion/hisilicon/Kconfig"
-config ION_OF
- bool "Devicetree support for Ion"
- depends on ION && OF_ADDRESS
+config ION_POOL_CACHE_POLICY
+ bool "Ion set page pool cache policy"
+ depends on ION && X86
+ default y if X86
help
- Provides base support for defining Ion heaps in devicetree
- and setting them up. Also includes functions for platforms
- to parse the devicetree and expand for their own custom
- extensions
+ Choose this option if you need to explicitly set the cache policy of the
+ pages in the page pool.
- If using Ion and devicetree, you should say Y here
+config ION_MSM
+ tristate "Ion for MSM"
+ depends on ARCH_QCOM && CMA
+ select MSM_SECURE_BUFFER
+ help
+ Choose this option if you wish to use ion on an MSM target.
+ Features include allocating heaps from device tree, buffer
+ cache maintenance, and a custom ioctl/compat_ioctl. Enable
+ utility functions used by ion_system_heap.
+
+config ALLOC_BUFFERS_IN_4K_CHUNKS
+ bool "Turns off allocation optimization and allocates only 4K pages"
+ depends on ARCH_QCOM && ION
+ help
+ Choose this option if you want ION to allocate buffers in
+ only 4KB chunks.
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 5d630a0..309b9cc 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,13 +1,12 @@
-obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o \
- ion_page_pool.o ion_system_heap.o \
- ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+ ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o \
+ ion_system_secure_heap.o
obj-$(CONFIG_ION_TEST) += ion_test.o
ifdef CONFIG_COMPAT
obj-$(CONFIG_ION) += compat_ion.o
endif
-
obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
obj-$(CONFIG_ION_TEGRA) += tegra/
obj-$(CONFIG_ION_HISI) += hisilicon/
-obj-$(CONFIG_ION_OF) += ion_of.o
+obj-$(CONFIG_ION_MSM) += msm/
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
index 9da8f91..b3b3430 100644
--- a/drivers/staging/android/ion/compat_ion.h
+++ b/drivers/staging/android/ion/compat_ion.h
@@ -2,6 +2,7 @@
* drivers/staging/android/ion/compat_ion.h
*
* Copyright (C) 2013 Google, Inc.
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -21,6 +22,8 @@
long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#define compat_ion_user_handle_t compat_int_t
+
#else
#define compat_ion_ioctl NULL
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 396ded5..7f39b99 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -3,6 +3,7 @@
* drivers/staging/android/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -15,7 +16,6 @@
*
*/
-#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
@@ -23,6 +23,7 @@
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
+#include <linux/list_sort.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
@@ -36,11 +37,90 @@
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
+#include <linux/msm_ion.h>
+#include <trace/events/kmem.h>
+
#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev: the actual misc device
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
+ * @heaps: list of all the heaps in the system
+ * @user_clients: list of all the clients created from userspace
+ */
+struct ion_device {
+ struct miscdevice dev;
+ struct rb_root buffers;
+ /* Protects rb_tree */
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct plist_head heaps;
+ long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
+ unsigned long arg);
+ struct rb_root clients;
+ struct dentry *debug_root;
+ struct dentry *heaps_debug_root;
+ struct dentry *clients_debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node: node in the tree of all clients
+ * @dev: backpointer to ion device
+ * @handles: an rb tree of all the handles in this client
+ * @idr: an idr space for allocating handle ids
+ * @lock: lock protecting the tree of handles
+ * @name: used for debugging
+ * @display_name: used for debugging (unique version of @name)
+ * @display_serial: used for debugging (to make display_name unique)
+ * @task: used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both handles tree
+ * as well as the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+ struct rb_node node;
+ struct ion_device *dev;
+ struct rb_root handles;
+ struct idr idr;
+ /* Protects idr */
+ struct mutex lock;
+ char *name;
+ char *display_name;
+ int display_serial;
+ struct task_struct *task;
+ pid_t pid;
+ struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref: reference count
+ * @client: back pointer to the client the buffer resides in
+ * @buffer: pointer to the buffer
+ * @node: node in the client's handle rbtree
+ * @kmap_cnt: count of times this client has mapped to kernel
+ * @id: client-unique id allocated by client->idr
+ *
+ * Modifications to node, map_cnt or mapping should be protected by the
+ * lock in the client. Other fields are never changed after initialization.
+ */
+struct ion_handle {
+ struct kref ref;
+ struct ion_client *client;
+ struct ion_buffer *buffer;
+ struct rb_node node;
+ unsigned int kmap_cnt;
+ int id;
+};
+
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
return (buffer->flags & ION_FLAG_CACHED) &&
@@ -100,10 +180,10 @@
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
- struct ion_device *dev,
- unsigned long len,
- unsigned long align,
- unsigned long flags)
+ struct ion_device *dev,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
{
struct ion_buffer *buffer;
struct sg_table *table;
@@ -131,16 +211,21 @@
goto err2;
}
- if (buffer->sg_table == NULL) {
- WARN_ONCE(1, "This heap needs to set the sgtable");
+ buffer->dev = dev;
+ buffer->size = len;
+ buffer->flags = flags;
+ INIT_LIST_HEAD(&buffer->vmas);
+
+ table = heap->ops->map_dma(heap, buffer);
+ if (WARN_ONCE(!table,
+ "heap->ops->map_dma should return ERR_PTR on error"))
+ table = ERR_PTR(-EINVAL);
+ if (IS_ERR(table)) {
ret = -EINVAL;
goto err1;
}
- table = buffer->sg_table;
- buffer->dev = dev;
- buffer->size = len;
-
+ buffer->sg_table = table;
if (ion_buffer_fault_user_mappings(buffer)) {
int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
struct scatterlist *sg;
@@ -149,7 +234,7 @@
buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
if (!buffer->pages) {
ret = -ENOMEM;
- goto err1;
+ goto err;
}
for_each_sg(table->sgl, sg, table->nents, i) {
@@ -160,9 +245,6 @@
}
}
- buffer->dev = dev;
- buffer->size = len;
- INIT_LIST_HEAD(&buffer->vmas);
mutex_init(&buffer->lock);
/*
* this will set up dma addresses for the sglist -- it is not
@@ -178,11 +260,14 @@
sg_dma_address(sg) = sg_phys(sg);
sg_dma_len(sg) = sg->length;
}
+
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
return buffer;
+err:
+ heap->ops->unmap_dma(heap, buffer);
err1:
heap->ops->free(buffer);
err2:
@@ -194,6 +279,8 @@
{
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+
buffer->heap->ops->free(buffer);
vfree(buffer->pages);
kfree(buffer);
@@ -257,7 +344,7 @@
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
- struct ion_buffer *buffer)
+ struct ion_buffer *buffer)
{
struct ion_handle *handle;
@@ -297,14 +384,23 @@
kfree(handle);
}
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+ return handle->buffer;
+}
+
static void ion_handle_get(struct ion_handle *handle)
{
kref_get(&handle->ref);
}
-int ion_handle_put_nolock(struct ion_handle *handle)
+static int ion_handle_put_nolock(struct ion_handle *handle)
{
- return kref_put(&handle->ref, ion_handle_destroy);
+ int ret;
+
+ ret = kref_put(&handle->ref, ion_handle_destroy);
+
+ return ret;
}
int ion_handle_put(struct ion_handle *handle)
@@ -337,8 +433,8 @@
return ERR_PTR(-EINVAL);
}
-struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id)
+static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+ int id)
{
struct ion_handle *handle;
@@ -350,7 +446,7 @@
}
struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
+ int id)
{
struct ion_handle *handle;
@@ -408,6 +504,19 @@
struct ion_buffer *buffer = NULL;
struct ion_heap *heap;
int ret;
+ const unsigned int MAX_DBG_STR_LEN = 64;
+ char dbg_str[MAX_DBG_STR_LEN];
+ unsigned int dbg_str_idx = 0;
+
+ dbg_str[0] = '\0';
+
+ /*
+ * For now, we don't want to fault in pages individually since
+ * clients are already doing manual cache maintenance. In
+ * other words, the implicit caching infrastructure is in
+ * place (in code) but should not be used.
+ */
+ flags |= ION_FLAG_CACHED_NEEDS_SYNC;
pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
len, align, heap_id_mask, flags);
@@ -427,17 +536,53 @@
/* if the caller didn't specify this heap id */
if (!((1 << heap->id) & heap_id_mask))
continue;
+ trace_ion_alloc_buffer_start(client->name, heap->name, len,
+ heap_id_mask, flags);
buffer = ion_buffer_create(heap, dev, len, align, flags);
+ trace_ion_alloc_buffer_end(client->name, heap->name, len,
+ heap_id_mask, flags);
if (!IS_ERR(buffer))
break;
+
+ trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
+ heap_id_mask, flags,
+ PTR_ERR(buffer));
+ if (dbg_str_idx < MAX_DBG_STR_LEN) {
+ unsigned int len_left;
+ int ret_value;
+
+ len_left = MAX_DBG_STR_LEN - dbg_str_idx - 1;
+ ret_value = snprintf(&dbg_str[dbg_str_idx],
+ len_left, "%s ", heap->name);
+
+ if (ret_value >= len_left) {
+ /* overflow */
+ dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
+ dbg_str_idx = MAX_DBG_STR_LEN;
+ } else if (ret_value >= 0) {
+ dbg_str_idx += ret_value;
+ } else {
+ /* error */
+ dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
+ }
+ }
}
up_read(&dev->lock);
- if (buffer == NULL)
+ if (!buffer) {
+ trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
+ heap_id_mask, flags, -ENODEV);
return ERR_PTR(-ENODEV);
+ }
- if (IS_ERR(buffer))
+ if (IS_ERR(buffer)) {
+ trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
+ heap_id_mask, flags,
+ PTR_ERR(buffer));
+ pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s\n",
+ len, align, dbg_str, client->name);
return ERR_CAST(buffer);
+ }
handle = ion_handle_create(client, buffer);
@@ -462,10 +607,15 @@
}
EXPORT_SYMBOL(ion_alloc);
-void ion_free_nolock(struct ion_client *client,
- struct ion_handle *handle)
+static void ion_free_nolock(struct ion_client *client,
+ struct ion_handle *handle)
{
- if (!ion_handle_validate(client, handle)) {
+ bool valid_handle;
+
+ WARN_ON(client != handle->client);
+
+ valid_handle = ion_handle_validate(client, handle);
+ if (!valid_handle) {
WARN(1, "%s: invalid handle passed to free.\n", __func__);
return;
}
@@ -482,6 +632,32 @@
}
EXPORT_SYMBOL(ion_free);
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_buffer *buffer;
+ int ret;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+
+ if (!buffer->heap->ops->phys) {
+ pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
+ __func__, buffer->heap->name, buffer->heap->type);
+ mutex_unlock(&client->lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&client->lock);
+ ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+ return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
void *vaddr;
@@ -492,7 +668,7 @@
}
vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
if (WARN_ONCE(vaddr == NULL,
- "heap->ops->map_kernel should return ERR_PTR on error"))
+ "heap->ops->map_kernel should return ERR_PTR on error"))
return ERR_PTR(-EINVAL);
if (IS_ERR(vaddr))
return vaddr;
@@ -614,9 +790,10 @@
{
struct ion_client *client = s->private;
struct rb_node *n;
- size_t sizes[ION_NUM_HEAP_IDS] = {0};
- const char *names[ION_NUM_HEAP_IDS] = {NULL};
- int i;
+
+ seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
+ "heap_name", "size_in_bytes", "handle refcount",
+ "buffer");
mutex_lock(&debugfs_mutex);
if (!is_client_alive(client)) {
@@ -630,21 +807,17 @@
for (n = rb_first(&client->handles); n; n = rb_next(n)) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
- unsigned int id = handle->buffer->heap->id;
- if (!names[id])
- names[id] = handle->buffer->heap->name;
- sizes[id] += handle->buffer->size;
+ seq_printf(s, "%16.16s: %16zx : %16d : %12p",
+ handle->buffer->heap->name,
+ handle->buffer->size,
+ atomic_read(&handle->ref.refcount),
+ handle->buffer);
+
+ seq_puts(s, "\n");
}
mutex_unlock(&client->lock);
mutex_unlock(&debugfs_mutex);
-
- seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
- for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
- if (!names[i])
- continue;
- seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
- }
return 0;
}
@@ -661,14 +834,14 @@
};
static int ion_get_client_serial(const struct rb_root *root,
- const unsigned char *name)
+ const unsigned char *name)
{
int serial = -1;
struct rb_node *node;
for (node = rb_first(root); node; node = rb_next(node)) {
struct ion_client *client = rb_entry(node, struct ion_client,
- node);
+ node);
if (strcmp(client->name, name))
continue;
@@ -715,6 +888,7 @@
client->handles = RB_ROOT;
idr_init(&client->idr);
mutex_init(&client->lock);
+
client->task = task;
client->pid = pid;
client->name = kstrdup(name, GFP_KERNEL);
@@ -743,14 +917,14 @@
rb_insert_color(&client->node, &dev->clients);
client->debug_root = debugfs_create_file(client->display_name, 0664,
- dev->clients_debug_root,
- client, &debug_client_fops);
+ dev->clients_debug_root,
+ client, &debug_client_fops);
if (!client->debug_root) {
char buf[256], *path;
path = dentry_path(dev->clients_debug_root, buf, 256);
pr_err("Failed to create client debugfs at %s/%s\n",
- path, client->display_name);
+ path, client->display_name);
}
up_write(&dev->lock);
@@ -788,6 +962,7 @@
put_task_struct(client->task);
rb_erase(&client->node, &dev->clients);
debugfs_remove_recursive(client->debug_root);
+
up_write(&dev->lock);
kfree(client->display_name);
@@ -797,6 +972,102 @@
}
EXPORT_SYMBOL(ion_client_destroy);
+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+ unsigned long *flags)
+{
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to %s.\n",
+ __func__, __func__);
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+ *flags = buffer->flags;
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ion_handle_get_flags);
+
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+ size_t *size)
+{
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to %s.\n",
+ __func__, __func__);
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+ *size = buffer->size;
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ion_handle_get_size);
+
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_dma.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ table = buffer->sg_table;
+ mutex_unlock(&client->lock);
+ return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
+ size_t chunk_size,
+ size_t total_size)
+{
+ struct sg_table *table;
+ int i, n_chunks, ret;
+ struct scatterlist *sg;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ n_chunks = DIV_ROUND_UP(total_size, chunk_size);
+ pr_debug("creating sg_table with %d chunks\n", n_chunks);
+
+ ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
+ if (ret)
+ goto err0;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ dma_addr_t addr = buffer_base + i * chunk_size;
+
+ sg_dma_address(sg) = addr;
+ sg->length = chunk_size;
+ }
+
+ return table;
+err0:
+ kfree(table);
+ return ERR_PTR(ret);
+}
+
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
struct device *dev,
enum dma_data_direction direction);
@@ -818,7 +1089,7 @@
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
- size_t size, enum dma_data_direction dir)
+ size_t size, enum dma_data_direction dir)
{
struct scatterlist sg;
@@ -858,7 +1129,7 @@
if (ion_buffer_page_is_dirty(page))
ion_pages_sync_for_device(dev, ion_buffer_page(page),
- PAGE_SIZE, dir);
+ PAGE_SIZE, dir);
ion_buffer_page_clean(buffer->pages + i);
}
@@ -921,6 +1192,9 @@
break;
}
mutex_unlock(&buffer->lock);
+
+ if (buffer->heap->ops->unmap_user)
+ buffer->heap->ops->unmap_user(buffer->heap, buffer);
}
static const struct vm_operations_struct ion_vma_ops = {
@@ -936,7 +1210,7 @@
if (!buffer->heap->ops->map_user) {
pr_err("%s: this heap does not define a method for mapping to userspace\n",
- __func__);
+ __func__);
return -EINVAL;
}
@@ -945,6 +1219,7 @@
VM_DONTDUMP;
vma->vm_private_data = buffer;
vma->vm_ops = &ion_vma_ops;
+ vma->vm_flags |= VM_MIXEDMAP;
ion_vm_open(vma);
return 0;
}
@@ -1027,7 +1302,7 @@
};
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle)
+ struct ion_handle *handle)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct ion_buffer *buffer;
@@ -1072,7 +1347,6 @@
fd = dma_buf_fd(dmabuf, O_CLOEXEC);
if (fd < 0)
dma_buf_put(dmabuf);
-
return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
@@ -1135,7 +1409,7 @@
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
-int ion_sync_for_device(struct ion_client *client, int fd)
+static int ion_sync_for_device(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
struct ion_buffer *buffer;
@@ -1159,45 +1433,134 @@
return 0;
}
-int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
{
+ switch (cmd) {
+ case ION_IOC_SYNC:
+ case ION_IOC_FREE:
+ case ION_IOC_CUSTOM:
+ return _IOC_WRITE;
+ default:
+ return _IOC_DIR(cmd);
+ }
+}
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct ion_client *client = filp->private_data;
struct ion_device *dev = client->dev;
- struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
- int ret = -EINVAL, cnt = 0, max_cnt;
- struct ion_heap *heap;
- struct ion_heap_data hdata;
+ struct ion_handle *cleanup_handle = NULL;
+ int ret = 0;
+ unsigned int dir;
- memset(&hdata, 0, sizeof(hdata));
+ union {
+ struct ion_fd_data fd;
+ struct ion_allocation_data allocation;
+ struct ion_handle_data handle;
+ struct ion_custom_data custom;
+ } data;
- down_read(&dev->lock);
- if (!buffer) {
- query->cnt = dev->heap_cnt;
- ret = 0;
- goto out;
+ dir = ion_ioctl_dir(cmd);
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (dir & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ION_IOC_ALLOC:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_alloc(client, data.allocation.len,
+ data.allocation.align,
+ data.allocation.heap_id_mask,
+ data.allocation.flags);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ data.allocation.handle = handle->id;
+
+ cleanup_handle = handle;
+ break;
+ }
+ case ION_IOC_FREE:
+ {
+ struct ion_handle *handle;
+
+ mutex_lock(&client->lock);
+ handle = ion_handle_get_by_id_nolock(client,
+ data.handle.handle);
+ if (IS_ERR(handle)) {
+ mutex_unlock(&client->lock);
+ return PTR_ERR(handle);
+ }
+ ion_free_nolock(client, handle);
+ ion_handle_put_nolock(handle);
+ mutex_unlock(&client->lock);
+ break;
+ }
+ case ION_IOC_SHARE:
+ case ION_IOC_MAP:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ data.fd.fd = ion_share_dma_buf_fd(client, handle);
+ ion_handle_put(handle);
+ if (data.fd.fd < 0)
+ ret = data.fd.fd;
+ break;
+ }
+ case ION_IOC_IMPORT:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_import_dma_buf_fd(client, data.fd.fd);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ else
+ data.handle.handle = handle->id;
+ break;
+ }
+ case ION_IOC_SYNC:
+ {
+ ret = ion_sync_for_device(client, data.fd.fd);
+ break;
+ }
+ case ION_IOC_CUSTOM:
+ {
+ if (!dev->custom_ioctl)
+ return -ENOTTY;
+ ret = dev->custom_ioctl(client, data.custom.cmd,
+ data.custom.arg);
+ break;
+ }
+ case ION_IOC_CLEAN_CACHES:
+ return client->dev->custom_ioctl(client,
+ ION_IOC_CLEAN_CACHES, arg);
+ case ION_IOC_INV_CACHES:
+ return client->dev->custom_ioctl(client,
+ ION_IOC_INV_CACHES, arg);
+ case ION_IOC_CLEAN_INV_CACHES:
+ return client->dev->custom_ioctl(client,
+ ION_IOC_CLEAN_INV_CACHES, arg);
+ default:
+ return -ENOTTY;
}
- if (query->cnt <= 0)
- goto out;
-
- max_cnt = query->cnt;
-
- plist_for_each_entry(heap, &dev->heaps, node) {
- strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
- hdata.name[sizeof(hdata.name) - 1] = '\0';
- hdata.type = heap->type;
- hdata.heap_id = heap->id;
-
- ret = copy_to_user(&buffer[cnt],
- &hdata, sizeof(hdata));
-
- cnt++;
- if (cnt >= max_cnt)
- break;
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+ if (cleanup_handle)
+ ion_free(client, cleanup_handle);
+ return -EFAULT;
+ }
}
-
- query->cnt = cnt;
-out:
- up_read(&dev->lock);
return ret;
}
@@ -1253,6 +1616,110 @@
return size;
}
+/**
+ * Create a mem_map of the heap.
+ * @param s seq_file to log error message to.
+ * @param heap The heap to create mem_map for.
+ * @param mem_map The mem map to be created.
+ */
+void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
+ struct list_head *mem_map)
+{
+ struct ion_device *dev = heap->dev;
+ struct rb_node *cnode;
+ size_t size;
+ struct ion_client *client;
+
+ if (!heap->ops->phys)
+ return;
+
+ down_read(&dev->lock);
+ for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) {
+ struct rb_node *hnode;
+
+ client = rb_entry(cnode, struct ion_client, node);
+
+ mutex_lock(&client->lock);
+ for (hnode = rb_first(&client->handles);
+ hnode;
+ hnode = rb_next(hnode)) {
+ struct ion_handle *handle = rb_entry(
+ hnode, struct ion_handle, node);
+ if (handle->buffer->heap == heap) {
+ struct mem_map_data *data =
+ kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto inner_error;
+ heap->ops->phys(heap, handle->buffer,
+ &data->addr, &size);
+ data->size = (unsigned long)size;
+ data->addr_end = data->addr + data->size - 1;
+ data->client_name = kstrdup(client->name,
+ GFP_KERNEL);
+ if (!data->client_name) {
+ kfree(data);
+ goto inner_error;
+ }
+ list_add(&data->node, mem_map);
+ }
+ }
+ mutex_unlock(&client->lock);
+ }
+ up_read(&dev->lock);
+ return;
+
+inner_error:
+ seq_puts(s,
+ "ERROR: out of memory. Part of memory map will not be logged\n");
+ mutex_unlock(&client->lock);
+ up_read(&dev->lock);
+}
+
+/**
+ * Free the memory allocated by ion_debug_mem_map_create
+ * @param mem_map The mem map to free.
+ */
+static void ion_debug_mem_map_destroy(struct list_head *mem_map)
+{
+ if (mem_map) {
+ struct mem_map_data *data, *tmp;
+
+ list_for_each_entry_safe(data, tmp, mem_map, node) {
+ list_del(&data->node);
+ kfree(data->client_name);
+ kfree(data);
+ }
+ }
+}
+
+static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct mem_map_data *d1, *d2;
+
+ d1 = list_entry(a, struct mem_map_data, node);
+ d2 = list_entry(b, struct mem_map_data, node);
+ if (d1->addr == d2->addr)
+ return d1->size - d2->size;
+ return d1->addr - d2->addr;
+}
+
+/**
+ * Print heap debug information.
+ * @param s seq_file to log message to.
+ * @param heap pointer to heap that we will print debug information for.
+ */
+static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
+{
+ if (heap->ops->print_debug) {
+ struct list_head mem_map = LIST_HEAD_INIT(mem_map);
+
+ ion_debug_mem_map_create(s, heap, &mem_map);
+ list_sort(NULL, &mem_map, mem_map_cmp);
+ heap->ops->print_debug(heap, s, &mem_map);
+ ion_debug_mem_map_destroy(&mem_map);
+ }
+}
+
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
struct ion_heap *heap = s->private;
@@ -1309,12 +1776,13 @@
seq_printf(s, "%16s %16zu\n", "total ", total_size);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
seq_printf(s, "%16s %16zu\n", "deferred free",
- heap->free_list_size);
+ heap->free_list_size);
seq_puts(s, "----------------------------------------------------\n");
if (heap->debug_show)
heap->debug_show(heap, s, unused);
+ ion_heap_print_debug(s, heap);
return 0;
}
@@ -1369,7 +1837,8 @@
{
struct dentry *debug_file;
- if (!heap->ops->allocate || !heap->ops->free)
+ if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+ !heap->ops->unmap_dma)
pr_err("%s: can not add heap with invalid ops struct.\n",
__func__);
@@ -1391,15 +1860,15 @@
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
debug_file = debugfs_create_file(heap->name, 0664,
- dev->heaps_debug_root, heap,
- &debug_heap_fops);
+ dev->heaps_debug_root, heap,
+ &debug_heap_fops);
if (!debug_file) {
char buf[256], *path;
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap debugfs at %s/%s\n",
- path, heap->name);
+ path, heap->name);
}
if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
@@ -1414,15 +1883,36 @@
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
- path, debug_name);
+ path, debug_name);
}
}
- dev->heap_cnt++;
up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
+int ion_walk_heaps(struct ion_client *client, int heap_id, void *data,
+ int (*f)(struct ion_heap *heap, void *data))
+{
+ int ret_val = -EINVAL;
+ struct ion_heap *heap;
+ struct ion_device *dev = client->dev;
+ /*
+ * traverse the list of heaps available in this system
+ * and find the heap that is specified.
+ */
+ down_write(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
+ if (ION_HEAP(heap->id) != heap_id)
+ continue;
+ ret_val = f(heap, data);
+ break;
+ }
+ up_write(&dev->lock);
+ return ret_val;
+}
+EXPORT_SYMBOL(ion_walk_heaps);
+
struct ion_device *ion_device_create(long (*custom_ioctl)
(struct ion_client *client,
unsigned int cmd,
@@ -1483,3 +1973,38 @@
kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->nr; i++) {
+ if (data->heaps[i].size == 0)
+ continue;
+
+ if (data->heaps[i].base == 0) {
+ phys_addr_t paddr;
+
+ paddr = memblock_alloc_base(data->heaps[i].size,
+ data->heaps[i].align,
+ MEMBLOCK_ALLOC_ANYWHERE);
+ if (!paddr) {
+ pr_err("%s: error allocating memblock for heap %d\n",
+ __func__, i);
+ continue;
+ }
+ data->heaps[i].base = paddr;
+ } else {
+ int ret = memblock_reserve(data->heaps[i].base,
+ data->heaps[i].size);
+ if (ret)
+ pr_err("memblock reserve of %zx@%pa failed\n",
+ data->heaps[i].size,
+ &data->heaps[i].base);
+ }
+ pr_info("%s: %s reserved base %pa size %zu\n", __func__,
+ data->heaps[i].name,
+ &data->heaps[i].base,
+ data->heaps[i].size);
+ }
+}
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 93dafb4..da7c083 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -2,6 +2,7 @@
* drivers/staging/android/ion/ion.h
*
* Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -17,8 +18,7 @@
#ifndef _LINUX_ION_H
#define _LINUX_ION_H
-#include <linux/types.h>
-
+#include <linux/err.h>
#include "../uapi/ion.h"
struct ion_handle;
@@ -34,7 +34,7 @@
* be converted to phys_addr_t. For the time being many kernel interfaces
* do not accept phys_addr_t's that would have to
*/
-#define ion_phys_addr_t unsigned long
+#define ion_phys_addr_t dma_addr_t
/**
* struct ion_platform_heap - defines a heap in the given platform
@@ -45,6 +45,9 @@
* @name: used for debug purposes
* @base: base address of heap in physical memory if applicable
* @size: size of the heap in bytes if applicable
+ * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
+ * @extra_data: Extra data specific to each heap type
+ * @priv: heap private data
* @align: required alignment in physical memory if applicable
* @priv: private info passed from the board file
*
@@ -56,22 +59,39 @@
const char *name;
ion_phys_addr_t base;
size_t size;
+ unsigned int has_outer_cache;
+ void *extra_data;
ion_phys_addr_t align;
void *priv;
};
/**
* struct ion_platform_data - array of platform heaps passed from board file
- * @nr: number of structures in the array
- * @heaps: array of platform_heap structions
+ * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
+ * @nr: number of structures in the array
+ * @heaps:		array of platform_heap structures
*
* Provided by the board file in the form of platform data to a platform device.
*/
struct ion_platform_data {
+ unsigned int has_outer_cache;
int nr;
struct ion_platform_heap *heaps;
};
+#ifdef CONFIG_ION
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data: platform data specifying starting physical address and
+ * size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel
+ */
+void ion_reserve(struct ion_platform_data *data);
+
/**
* ion_client_create() - allocate a client and returns it
* @dev: the global ion device
@@ -119,6 +139,36 @@
void ion_free(struct ion_client *client, struct ion_handle *handle);
/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client: the client
+ * @handle: the handle
+ * @addr: a pointer to put the address in
+ * @len: a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address. It't output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead. Returns -EINVAL if the handle is invalid. This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_map_dma - return an sg_table describing a handle
+ * @client: the client
+ * @handle: the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
* ion_map_kernel - create mapping for the given handle
* @client: the client
* @handle: handle to map
@@ -173,4 +223,67 @@
*/
struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd);
+#else
+static inline void ion_reserve(struct ion_platform_data *data) {}
+
+static inline struct ion_client *ion_client_create(
+ struct ion_device *dev, unsigned int heap_id_mask, const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_client_destroy(struct ion_client *client) {}
+
+static inline struct ion_handle *ion_alloc(struct ion_client *client,
+ size_t len, size_t align,
+ unsigned int heap_id_mask,
+ unsigned int flags)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_free(struct ion_client *client,
+ struct ion_handle *handle) {}
+
+static inline int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ return -ENODEV;
+}
+
+static inline struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void *ion_map_kernel(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_unmap_kernel(struct ion_client *client,
+ struct ion_handle *handle) {}
+
+static inline int ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ return -ENODEV;
+}
+
+static inline struct ion_handle *ion_import_dma_buf(struct ion_client *client,
+ int fd)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int ion_handle_get_flags(struct ion_client *client,
+ struct ion_handle *handle,
+ unsigned long *flags)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_ION */
#endif /* _LINUX_ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index a8ea973..1fb0d81 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -25,17 +25,15 @@
#include "ion.h"
#include "ion_priv.h"
-#define ION_CARVEOUT_ALLOCATE_FAIL -1
-
struct ion_carveout_heap {
struct ion_heap heap;
struct gen_pool *pool;
ion_phys_addr_t base;
};
-static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align)
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+ unsigned long size,
+ unsigned long align)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -47,8 +45,8 @@
return offset;
}
-static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
- unsigned long size)
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -58,6 +56,19 @@
gen_pool_free(carveout_heap->pool, addr, size);
}
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ *addr = paddr;
+ *len = buffer->size;
+ return 0;
+}
+
static int ion_carveout_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long size, unsigned long align,
@@ -84,7 +95,7 @@
}
sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
- buffer->sg_table = table;
+ buffer->priv_virt = table;
return 0;
@@ -98,7 +109,7 @@
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
- struct sg_table *table = buffer->sg_table;
+ struct sg_table *table = buffer->priv_virt;
struct page *page = sg_page(table->sgl);
ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
@@ -113,9 +124,23 @@
kfree(table);
}
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
static struct ion_heap_ops carveout_heap_ops = {
.allocate = ion_carveout_heap_allocate,
.free = ion_carveout_heap_free,
+ .phys = ion_carveout_heap_phys,
+ .map_dma = ion_carveout_heap_map_dma,
+ .unmap_dma = ion_carveout_heap_unmap_dma,
.map_user = ion_heap_map_user,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 70495dc..e29f4e2 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -2,6 +2,7 @@
* drivers/staging/android/ion/ion_chunk_heap.c
*
* Copyright (C) 2012 Google, Inc.
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -34,9 +35,9 @@
};
static int ion_chunk_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
{
struct ion_chunk_heap *chunk_heap =
container_of(heap, struct ion_chunk_heap, heap);
@@ -71,11 +72,11 @@
if (!paddr)
goto err;
sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
- chunk_heap->chunk_size, 0);
+ chunk_heap->chunk_size, 0);
sg = sg_next(sg);
}
- buffer->sg_table = table;
+ buffer->priv_virt = table;
chunk_heap->allocated += allocated_size;
return 0;
err:
@@ -95,7 +96,7 @@
struct ion_heap *heap = buffer->heap;
struct ion_chunk_heap *chunk_heap =
container_of(heap, struct ion_chunk_heap, heap);
- struct sg_table *table = buffer->sg_table;
+ struct sg_table *table = buffer->priv_virt;
struct scatterlist *sg;
int i;
unsigned long allocated_size;
@@ -106,7 +107,7 @@
if (ion_buffer_cached(buffer))
dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
for_each_sg(table->sgl, sg, table->nents, i) {
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
@@ -117,9 +118,22 @@
kfree(table);
}
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
static struct ion_heap_ops chunk_heap_ops = {
.allocate = ion_chunk_heap_allocate,
.free = ion_chunk_heap_free,
+ .map_dma = ion_chunk_heap_map_dma,
+ .unmap_dma = ion_chunk_heap_unmap_dma,
.map_user = ion_heap_map_user,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
@@ -160,8 +174,8 @@
chunk_heap->heap.ops = &chunk_heap_ops;
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- pr_debug("%s: base %lu size %zu align %ld\n", __func__,
- chunk_heap->base, heap_data->size, heap_data->align);
+ pr_debug("%s: base %pad size %zu align %pad\n", __func__,
+ &chunk_heap->base, heap_data->size, &heap_data->align);
return &chunk_heap->heap;
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 6c7de74..af412f0 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -4,6 +4,8 @@
* Copyright (C) Linaro 2012
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -20,49 +22,64 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
+#include <linux/msm_ion.h>
+
+#include <asm/cacheflush.h>
#include "ion.h"
#include "ion_priv.h"
#define ION_CMA_ALLOCATE_FAILED -1
-struct ion_cma_heap {
- struct ion_heap heap;
- struct device *dev;
-};
-
-#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
-
struct ion_cma_buffer_info {
void *cpu_addr;
dma_addr_t handle;
struct sg_table *table;
+ bool is_cached;
};
+static int cma_heap_has_outer_cache;
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it becomes available.
+ */
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size)
+{
+ struct page *page = pfn_to_page(PFN_DOWN(handle));
+ int ret;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (unlikely(ret))
+ return ret;
+
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return 0;
+}
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
unsigned long len, unsigned long align,
unsigned long flags)
{
- struct ion_cma_heap *cma_heap = to_cma_heap(heap);
- struct device *dev = cma_heap->dev;
+ struct device *dev = heap->priv;
struct ion_cma_buffer_info *info;
dev_dbg(dev, "Request buffer allocation len %ld\n", len);
- if (buffer->flags & ION_FLAG_CACHED)
- return -EINVAL;
-
- if (align > PAGE_SIZE)
- return -EINVAL;
-
info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
if (!info)
return ION_CMA_ALLOCATE_FAILED;
- info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
- GFP_HIGHUSER | __GFP_ZERO);
+ if (!ION_IS_CACHED(flags))
+ info->cpu_addr = dma_alloc_writecombine(dev, len,
+ &info->handle,
+ GFP_KERNEL);
+ else
+ info->cpu_addr = dma_alloc_nonconsistent(dev, len,
+ &info->handle,
+ GFP_KERNEL);
if (!info->cpu_addr) {
dev_err(dev, "Fail to allocate buffer\n");
@@ -71,21 +88,18 @@
info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!info->table)
- goto free_mem;
+ goto err;
- if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle,
- len))
- goto free_table;
+ info->is_cached = ION_IS_CACHED(flags);
+
+ ion_cma_get_sgtable(dev,
+ info->table, info->cpu_addr, info->handle, len);
+
/* keep this for memory release */
buffer->priv_virt = info;
- buffer->sg_table = info->table;
dev_dbg(dev, "Allocate buffer %p\n", buffer);
return 0;
-free_table:
- kfree(info->table);
-free_mem:
- dma_free_coherent(dev, len, info->cpu_addr, info->handle);
err:
kfree(info);
return ION_CMA_ALLOCATE_FAILED;
@@ -93,35 +107,66 @@
static void ion_cma_free(struct ion_buffer *buffer)
{
- struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
- struct device *dev = cma_heap->dev;
+ struct device *dev = buffer->heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
dev_dbg(dev, "Release buffer %p\n", buffer);
/* release memory */
dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
- /* release sg table */
sg_free_table(info->table);
+ /* release sg table */
kfree(info->table);
kfree(info);
}
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct device *dev = heap->priv;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
+ &info->handle);
+
+ *addr = info->handle;
+ *len = buffer->size;
+
+ return 0;
+}
+
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ return info->table;
+}
+
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
- struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
- struct device *dev = cma_heap->dev;
+ struct device *dev = buffer->heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
- return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
- buffer->size);
+ if (info->is_cached)
+ return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
+ info->handle, buffer->size);
+ else
+ return dma_mmap_writecombine(dev, vma, info->cpu_addr,
+ info->handle, buffer->size);
}
static void *ion_cma_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
- /* kernel memory mapping has been done at allocation time */
+
return info->cpu_addr;
}
@@ -130,36 +175,65 @@
{
}
+static int ion_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
+ const struct list_head *mem_map)
+{
+ if (mem_map) {
+ struct mem_map_data *data;
+
+ seq_puts(s, "\nMemory Map\n");
+ seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+ "client", "start address", "end address",
+ "size");
+
+ list_for_each_entry(data, mem_map, node) {
+ const char *client_name = "(null)";
+
+ if (data->client_name)
+ client_name = data->client_name;
+
+ seq_printf(s, "%16.s 0x%14pa 0x%14pa %14lu (0x%lx)\n",
+ client_name, &data->addr,
+ &data->addr_end,
+ data->size, data->size);
+ }
+ }
+ return 0;
+}
+
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
+ .map_dma = ion_cma_heap_map_dma,
+ .unmap_dma = ion_cma_heap_unmap_dma,
+ .phys = ion_cma_phys,
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
+ .print_debug = ion_cma_print_debug,
};
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
{
- struct ion_cma_heap *cma_heap;
+ struct ion_heap *heap;
- cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
- if (!cma_heap)
+ if (!heap)
return ERR_PTR(-ENOMEM);
- cma_heap->heap.ops = &ion_cma_ops;
+ heap->ops = &ion_cma_ops;
/*
- * get device from private heaps data, later it will be
+ * set device as private heaps data, later it will be
* used to make the link with reserved CMA memory
*/
- cma_heap->dev = data->priv;
- cma_heap->heap.type = ION_HEAP_TYPE_DMA;
- return &cma_heap->heap;
+ heap->priv = data->priv;
+ heap->type = ION_HEAP_TYPE_DMA;
+ cma_heap_has_outer_cache = data->has_outer_cache;
+ return heap;
}
void ion_cma_heap_destroy(struct ion_heap *heap)
{
- struct ion_cma_heap *cma_heap = to_cma_heap(heap);
-
- kfree(cma_heap);
+ kfree(heap);
}
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 4e5c0f1..e75166a 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -2,6 +2,7 @@
* drivers/staging/android/ion/ion_heap.c
*
* Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -22,6 +23,9 @@
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
#include "ion.h"
#include "ion_priv.h"
@@ -38,7 +42,7 @@
struct page **tmp = pages;
if (!pages)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (buffer->flags & ION_FLAG_CACHED)
pgprot = PAGE_KERNEL;
@@ -93,7 +97,7 @@
}
len = min(len, remainder);
ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
- vma->vm_page_prot);
+ vma->vm_page_prot);
if (ret)
return ret;
addr += len;
@@ -116,7 +120,7 @@
}
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
- pgprot_t pgprot)
+ pgprot_t pgprot)
{
int p = 0;
int ret = 0;
@@ -181,7 +185,7 @@
}
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
- bool skip_pools)
+ bool skip_pools)
{
struct ion_buffer *buffer;
size_t total_drained = 0;
@@ -266,7 +270,7 @@
}
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
- struct shrink_control *sc)
+ struct shrink_control *sc)
{
struct ion_heap *heap = container_of(shrinker, struct ion_heap,
shrinker);
@@ -279,7 +283,7 @@
}
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
- struct shrink_control *sc)
+ struct shrink_control *sc)
{
struct ion_heap *heap = container_of(shrinker, struct ion_heap,
shrinker);
@@ -342,14 +346,15 @@
}
if (IS_ERR_OR_NULL(heap)) {
- pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
+ pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
__func__, heap_data->name, heap_data->type,
- heap_data->base, heap_data->size);
+ &heap_data->base, heap_data->size);
return ERR_PTR(-EINVAL);
}
heap->name = heap_data->name;
heap->id = heap_data->id;
+ heap->priv = heap_data->priv;
return heap;
}
EXPORT_SYMBOL(ion_heap_create);
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 88b2249..454f81c 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -1,7 +1,8 @@
/*
- * drivers/staging/android/ion/ion_mem_pool.c
+ * drivers/staging/android/ion/ion_page_pool.c
*
* Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -22,18 +23,33 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/swap.h>
+#include <linux/vmalloc.h>
#include "ion_priv.h"
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
- struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+ struct page *page;
+
+ page = alloc_pages(pool->gfp_mask & ~__GFP_ZERO, pool->order);
if (!page)
return NULL;
- if (!pool->cached)
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
- DMA_BIDIRECTIONAL);
+
+ ion_page_pool_alloc_set_cache_policy(pool, page);
+
+/* TODO QCOM - Identify if this sync is needed */
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
+
+ if (pool->gfp_mask & __GFP_ZERO) {
+ if (msm_ion_heap_high_order_page_zero(page, pool->order))
+ goto error_free_pages;
+ }
+
return page;
+error_free_pages:
+ __free_pages(page, pool->order);
+ return NULL;
}
static void ion_page_pool_free_pages(struct ion_page_pool *pool,
@@ -75,22 +91,25 @@
return page;
}
-struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
+void *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
{
struct page *page = NULL;
BUG_ON(!pool);
- mutex_lock(&pool->mutex);
- if (pool->high_count)
- page = ion_page_pool_remove(pool, true);
- else if (pool->low_count)
- page = ion_page_pool_remove(pool, false);
- mutex_unlock(&pool->mutex);
+ *from_pool = true;
- if (!page)
+ if (mutex_trylock(&pool->mutex)) {
+ if (pool->high_count)
+ page = ion_page_pool_remove(pool, true);
+ else if (pool->low_count)
+ page = ion_page_pool_remove(pool, false);
+ mutex_unlock(&pool->mutex);
+ }
+ if (!page) {
page = ion_page_pool_alloc_pages(pool);
-
+ *from_pool = false;
+ }
return page;
}
@@ -121,7 +140,7 @@
}
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
- int nr_to_scan)
+ int nr_to_scan)
{
int freed = 0;
bool high;
@@ -154,8 +173,7 @@
return freed;
}
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
- bool cached)
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
@@ -169,8 +187,6 @@
pool->order = order;
mutex_init(&pool->mutex);
plist_node_init(&pool->list, order);
- if (cached)
- pool->cached = true;
return pool;
}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index 31935fd..222907f 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -2,6 +2,7 @@
* drivers/staging/android/ion/ion_priv.h
*
* Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -23,13 +24,37 @@
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
-#include <linux/miscdevice.h>
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+#include <asm/cacheflush.h>
+#endif
+#include <linux/device.h>
#include "ion.h"
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+/**
+ * struct mem_map_data - represents information about the memory map for a heap
+ * @node: list node used to store in the list of mem_map_data
+ * @addr: start address of memory region.
+ * @addr_end: end address of memory region.
+ * @size: size of memory region
+ * @client_name: name of the client who owns this buffer.
+ *
+ */
+struct mem_map_data {
+ struct list_head node;
+ ion_phys_addr_t addr;
+ ion_phys_addr_t addr_end;
+ unsigned long size;
+ const char *client_name;
+};
+
/**
* struct ion_buffer - metadata for a particular buffer
* @ref: reference count
@@ -41,6 +66,8 @@
* @size: size of the buffer
* @priv_virt: private data to the buffer representable as
* a void *
+ * @priv_phys: private data to the buffer representable as
+ * an ion_phys_addr_t (and someday a phys_addr_t)
* @lock: protects the buffers cnt fields
* @kmap_cnt: number of times the buffer is mapped to the kernel
* @vaddr: the kernel mapping if kmap_cnt is not zero
@@ -66,7 +93,10 @@
unsigned long flags;
unsigned long private_flags;
size_t size;
- void *priv_virt;
+ union {
+ void *priv_virt;
+ ion_phys_addr_t priv_phys;
+ };
struct mutex lock;
int kmap_cnt;
void *vaddr;
@@ -82,87 +112,21 @@
void ion_buffer_destroy(struct ion_buffer *buffer);
/**
- * struct ion_device - the metadata of the ion device node
- * @dev: the actual misc device
- * @buffers: an rb tree of all the existing buffers
- * @buffer_lock: lock protecting the tree of buffers
- * @lock: rwsem protecting the tree of heaps and clients
- * @heaps: list of all the heaps in the system
- * @user_clients: list of all the clients created from userspace
- */
-struct ion_device {
- struct miscdevice dev;
- struct rb_root buffers;
- struct mutex buffer_lock;
- struct rw_semaphore lock;
- struct plist_head heaps;
- long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
- unsigned long arg);
- struct rb_root clients;
- struct dentry *debug_root;
- struct dentry *heaps_debug_root;
- struct dentry *clients_debug_root;
- int heap_cnt;
-};
-
-/**
- * struct ion_client - a process/hw block local address space
- * @node: node in the tree of all clients
- * @dev: backpointer to ion device
- * @handles: an rb tree of all the handles in this client
- * @idr: an idr space for allocating handle ids
- * @lock: lock protecting the tree of handles
- * @name: used for debugging
- * @display_name: used for debugging (unique version of @name)
- * @display_serial: used for debugging (to make display_name unique)
- * @task: used for debugging
- *
- * A client represents a list of buffers this client may access.
- * The mutex stored here is used to protect both handles tree
- * as well as the handles themselves, and should be held while modifying either.
- */
-struct ion_client {
- struct rb_node node;
- struct ion_device *dev;
- struct rb_root handles;
- struct idr idr;
- struct mutex lock;
- const char *name;
- char *display_name;
- int display_serial;
- struct task_struct *task;
- pid_t pid;
- struct dentry *debug_root;
-};
-
-/**
- * ion_handle - a client local reference to a buffer
- * @ref: reference count
- * @client: back pointer to the client the buffer resides in
- * @buffer: pointer to the buffer
- * @node: node in the client's handle rbtree
- * @kmap_cnt: count of times this client has mapped to kernel
- * @id: client-unique id allocated by client->idr
- *
- * Modifications to node, map_cnt or mapping should be protected by the
- * lock in the client. Other fields are never changed after initialization.
- */
-struct ion_handle {
- struct kref ref;
- struct ion_client *client;
- struct ion_buffer *buffer;
- struct rb_node node;
- unsigned int kmap_cnt;
- int id;
-};
-
-/**
* struct ion_heap_ops - ops to operate on a given heap
* @allocate: allocate memory
- * @free: free memory
+ * @free: free memory. Will be called with
+ * ION_PRIV_FLAG_SHRINKER_FREE set in buffer flags when
+ * called from a shrinker. In that case, the pages being
+ * free'd must be truly free'd back to the system, not put
+ * in a page pool or otherwise cached.
+ * @phys get physical address of a buffer (only define on
+ * physically contiguous heaps)
+ * @map_dma map the memory for dma to a scatterlist
+ * @unmap_dma unmap the memory for dma
* @map_kernel map memory to the kernel
* @unmap_kernel unmap memory to the kernel
* @map_user map memory to userspace
+ * @unmap_user unmap memory to userspace
*
* allocate, phys, and map_user return 0 on success, -errno on error.
* map_dma and map_kernel return pointer on success, ERR_PTR on
@@ -176,11 +140,19 @@
struct ion_buffer *buffer, unsigned long len,
unsigned long align, unsigned long flags);
void (*free)(struct ion_buffer *buffer);
+ int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len);
+ struct sg_table * (*map_dma)(struct ion_heap *heap,
+ struct ion_buffer *buffer);
+ void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma);
int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
+ void (*unmap_user)(struct ion_heap *mapper, struct ion_buffer *buffer);
+ int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
+ const struct list_head *mem_map);
};
/**
@@ -211,6 +183,7 @@
* MUST be unique
* @name: used for debugging
* @shrinker: a shrinker for the heap
+ * @priv: private heap data
* @free_list: free list head if deferred free is used
* @free_list_size size of the deferred free list in bytes
* @lock: protects the free list
@@ -233,6 +206,7 @@
unsigned int id;
const char *name;
struct shrinker shrinker;
+ void *priv;
struct list_head free_list;
size_t free_list_size;
spinlock_t free_lock;
@@ -283,6 +257,12 @@
*/
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+struct pages_mem {
+ struct page **pages;
+ u32 size;
+ void (*free_fn)(const void *);
+};
+
/**
* some helpers for common operations on buffers using the sg_table
* and vaddr fields
@@ -294,6 +274,29 @@
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+int msm_ion_heap_high_order_page_zero(struct page *page, int order);
+struct ion_heap *get_ion_heap(int heap_id);
+int msm_ion_heap_buffer_zero(struct ion_buffer *buffer);
+int msm_ion_heap_pages_zero(struct page **pages, int num_pages);
+int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem);
+void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem);
+
+long msm_ion_custom_ioctl(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg);
+
+int ion_heap_is_system_secure_heap_type(enum ion_heap_type type);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
/**
* ion_heap_init_shrinker
* @heap: the heap
@@ -336,7 +339,7 @@
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
/**
- * ion_heap_freelist_shrink - drain the deferred free
+ * ion_heap_freelist_drain_from_shrinker - drain the deferred free
* list, skipping any heap-specific
* pooling or caching mechanisms
*
@@ -352,11 +355,11 @@
* page pools or otherwise cache the pages. Everything must be
* genuinely free'd back to the system. If you're free'ing from a
* shrinker you probably want to use this. Note that this relies on
- * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
- * flag.
+ * the heap.ops.free callback honoring the
+ * ION_PRIV_FLAG_SHRINKER_FREE flag.
*/
-size_t ion_heap_freelist_shrink(struct ion_heap *heap,
- size_t size);
+size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap,
+ size_t size);
/**
* ion_heap_freelist_size - returns the size of the freelist in bytes
@@ -387,6 +390,23 @@
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
void ion_cma_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *heap);
+void ion_system_secure_heap_destroy(struct ion_heap *heap);
+
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+ unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size);
+/**
+ * The carveout heap returns physical addresses, since 0 may be a valid
+ * physical address, this is used to indicate allocation failed
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
/**
* functions for creating and destroying a heap pool -- allows you
* to keep a pool of pre allocated memory to use from your heap. Keeping
@@ -406,7 +426,6 @@
* @gfp_mask: gfp_mask to use from alloc
* @order: order of pages in the pool
* @list: plist node for list of pools
- * @cached: it's cached pool or not
*
* Allows you to keep a pool of pre allocated pages to use from your heap.
* Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -416,7 +435,6 @@
struct ion_page_pool {
int high_count;
int low_count;
- bool cached;
struct list_head high_items;
struct list_head low_items;
struct mutex mutex;
@@ -425,10 +443,9 @@
struct plist_node list;
};
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
- bool cached);
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *);
-struct page *ion_page_pool_alloc(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *a, bool *from_pool);
void ion_page_pool_free(struct ion_page_pool *, struct page *);
void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);
@@ -483,22 +500,12 @@
void ion_pages_sync_for_device(struct device *dev, struct page *page,
size_t size, enum dma_data_direction dir);
-long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-int ion_sync_for_device(struct ion_client *client, int fd);
-
-struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id);
-
-void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
-
-int ion_handle_put_nolock(struct ion_handle *handle);
+int ion_walk_heaps(struct ion_client *client, int heap_id, void *data,
+ int (*f)(struct ion_heap *heap, void *data));
struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id);
+ int id);
int ion_handle_put(struct ion_handle *handle);
-int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query);
-
#endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7e023d5..02dded6 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -2,6 +2,7 @@
* drivers/staging/android/ion/ion_system_heap.c
*
* Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -25,123 +26,173 @@
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
+#include <linux/dma-mapping.h>
+#include <trace/events/kmem.h>
-#define NUM_ORDERS ARRAY_SIZE(orders)
-
-static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_NOWARN |
__GFP_NORETRY) & ~__GFP_RECLAIM;
-static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO);
-static const unsigned int orders[] = {8, 4, 0};
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_NOWARN);
+#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
+static const unsigned int orders[] = {9, 8, 4, 0};
+#else
+static const unsigned int orders[] = {0};
+#endif
+
+static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
{
int i;
-
- for (i = 0; i < NUM_ORDERS; i++)
+ for (i = 0; i < num_orders; i++)
if (order == orders[i])
return i;
BUG();
return -1;
}
-static inline unsigned int order_to_size(int order)
+static unsigned int order_to_size(int order)
{
return PAGE_SIZE << order;
}
struct ion_system_heap {
struct ion_heap heap;
- struct ion_page_pool *uncached_pools[NUM_ORDERS];
- struct ion_page_pool *cached_pools[NUM_ORDERS];
+ struct ion_page_pool **uncached_pools;
+ struct ion_page_pool **cached_pools;
};
-/**
- * The page from page-pool are all zeroed before. We need do cache
- * clean for cached buffer. The uncached buffer are always non-cached
- * since it's allocated. So no need for non-cached pages.
- */
+struct page_info {
+ struct page *page;
+ bool from_pool;
+ unsigned int order;
+ struct list_head list;
+};
+
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer,
- unsigned long order)
+ unsigned long order,
+ bool *from_pool)
{
bool cached = ion_buffer_cached(buffer);
- struct ion_page_pool *pool;
struct page *page;
+ struct ion_page_pool *pool;
if (!cached)
pool = heap->uncached_pools[order_to_index(order)];
else
pool = heap->cached_pools[order_to_index(order)];
+ page = ion_page_pool_alloc(pool, from_pool);
+ if (!page)
+		return NULL;
- page = ion_page_pool_alloc(pool);
-
- if (cached)
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
return page;
}
static void free_buffer_page(struct ion_system_heap *heap,
- struct ion_buffer *buffer, struct page *page)
+ struct ion_buffer *buffer, struct page *page,
+ unsigned int order)
{
- struct ion_page_pool *pool;
- unsigned int order = compound_order(page);
bool cached = ion_buffer_cached(buffer);
- /* go to system */
- if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
+ if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
+ struct ion_page_pool *pool;
+
+ if (cached)
+ pool = heap->cached_pools[order_to_index(order)];
+ else
+ pool = heap->uncached_pools[order_to_index(order)];
+ ion_page_pool_free(pool, page);
+ } else {
__free_pages(page, order);
- return;
}
-
- if (!cached)
- pool = heap->uncached_pools[order_to_index(order)];
- else
- pool = heap->cached_pools[order_to_index(order)];
-
- ion_page_pool_free(pool, page);
}
-static struct page *alloc_largest_available(struct ion_system_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size,
- unsigned int max_order)
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size,
+ unsigned int max_order)
{
struct page *page;
+ struct page_info *info;
int i;
+ bool from_pool;
- for (i = 0; i < NUM_ORDERS; i++) {
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ for (i = 0; i < num_orders; i++) {
if (size < order_to_size(orders[i]))
continue;
if (max_order < orders[i])
continue;
- page = alloc_buffer_page(heap, buffer, orders[i]);
+ page = alloc_buffer_page(heap, buffer, orders[i], &from_pool);
if (!page)
continue;
- return page;
+ info->page = page;
+ info->order = orders[i];
+ info->from_pool = from_pool;
+ INIT_LIST_HEAD(&info->list);
+ return info;
}
+ kfree(info);
return NULL;
}
+static unsigned int process_info(struct page_info *info,
+ struct scatterlist *sg,
+ struct scatterlist *sg_sync,
+ struct pages_mem *data, unsigned int i)
+{
+ struct page *page = info->page;
+ unsigned int j;
+
+ if (sg_sync) {
+ sg_set_page(sg_sync, page, (1 << info->order) * PAGE_SIZE, 0);
+ sg_dma_address(sg_sync) = page_to_phys(page);
+ }
+ sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+ /*
+ * This is not correct - sg_dma_address needs a dma_addr_t
+ * that is valid for the the targeted device, but this works
+ * on the currently targeted hardware.
+ */
+ sg_dma_address(sg) = page_to_phys(page);
+ if (data) {
+ for (j = 0; j < (1 << info->order); ++j)
+ data->pages[i++] = nth_page(page, j);
+ }
+ list_del(&info->list);
+ kfree(info);
+ return i;
+}
+
static int ion_system_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
{
struct ion_system_heap *sys_heap = container_of(heap,
struct ion_system_heap,
heap);
struct sg_table *table;
+ struct sg_table table_sync = {0};
struct scatterlist *sg;
+ struct scatterlist *sg_sync;
+ int ret;
struct list_head pages;
- struct page *page, *tmp_page;
+ struct list_head pages_from_pool;
+ struct page_info *info, *tmp_info;
int i = 0;
+ unsigned int nents_sync = 0;
unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
+ struct pages_mem data;
+ unsigned int sz;
if (align > PAGE_SIZE)
return -EINVAL;
@@ -149,69 +200,159 @@
if (size / PAGE_SIZE > totalram_pages / 2)
return -ENOMEM;
+ data.size = 0;
INIT_LIST_HEAD(&pages);
+ INIT_LIST_HEAD(&pages_from_pool);
while (size_remaining > 0) {
- page = alloc_largest_available(sys_heap, buffer, size_remaining,
+ info = alloc_largest_available(sys_heap, buffer, size_remaining,
max_order);
- if (!page)
- goto free_pages;
- list_add_tail(&page->lru, &pages);
- size_remaining -= PAGE_SIZE << compound_order(page);
- max_order = compound_order(page);
+ if (!info)
+ goto err;
+
+ sz = (1 << info->order) * PAGE_SIZE;
+
+ if (info->from_pool) {
+ list_add_tail(&info->list, &pages_from_pool);
+ } else {
+ list_add_tail(&info->list, &pages);
+ data.size += sz;
+ ++nents_sync;
+ }
+ size_remaining -= sz;
+ max_order = info->order;
i++;
}
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+
+ ret = msm_ion_heap_alloc_pages_mem(&data);
+
+ if (ret)
+ goto err;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
- goto free_pages;
+ goto err_free_data_pages;
- if (sg_alloc_table(table, i, GFP_KERNEL))
- goto free_table;
+ ret = sg_alloc_table(table, i, GFP_KERNEL);
+ if (ret)
+ goto err1;
- sg = table->sgl;
- list_for_each_entry_safe(page, tmp_page, &pages, lru) {
- sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
- sg = sg_next(sg);
- list_del(&page->lru);
+ if (nents_sync) {
+ ret = sg_alloc_table(&table_sync, nents_sync, GFP_KERNEL);
+ if (ret)
+ goto err_free_sg;
}
- buffer->sg_table = table;
- return 0;
+ i = 0;
+ sg = table->sgl;
+ sg_sync = table_sync.sgl;
-free_table:
+ /*
+ * We now have two separate lists. One list contains pages from the
+ * pool and the other pages from buddy. We want to merge these
+ * together while preserving the ordering of the pages (higher order
+ * first).
+ */
+ do {
+ info = list_first_entry_or_null(&pages, struct page_info, list);
+ tmp_info = list_first_entry_or_null(&pages_from_pool,
+ struct page_info, list);
+ if (info && tmp_info) {
+ if (info->order >= tmp_info->order) {
+ i = process_info(info, sg, sg_sync, &data, i);
+ sg_sync = sg_next(sg_sync);
+ } else {
+ i = process_info(tmp_info, sg, 0, 0, i);
+ }
+ } else if (info) {
+ i = process_info(info, sg, sg_sync, &data, i);
+ sg_sync = sg_next(sg_sync);
+ } else if (tmp_info) {
+ i = process_info(tmp_info, sg, 0, 0, i);
+ }
+ sg = sg_next(sg);
+
+ } while (sg);
+
+ ret = msm_ion_heap_pages_zero(data.pages, data.size >> PAGE_SHIFT);
+ if (ret) {
+ pr_err("Unable to zero pages\n");
+ goto err_free_sg2;
+ }
+
+ if (nents_sync)
+ dma_sync_sg_for_device(NULL, table_sync.sgl, table_sync.nents,
+ DMA_BIDIRECTIONAL);
+
+ buffer->priv_virt = table;
+ if (nents_sync)
+ sg_free_table(&table_sync);
+ msm_ion_heap_free_pages_mem(&data);
+ return 0;
+err_free_sg2:
+ /* We failed to zero buffers. Bypass pool */
+ buffer->flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ free_buffer_page(sys_heap, buffer, sg_page(sg),
+ get_order(sg->length));
+ if (nents_sync)
+ sg_free_table(&table_sync);
+err_free_sg:
+ sg_free_table(table);
+err1:
kfree(table);
-free_pages:
- list_for_each_entry_safe(page, tmp_page, &pages, lru)
- free_buffer_page(sys_heap, buffer, page);
+err_free_data_pages:
+ msm_ion_heap_free_pages_mem(&data);
+err:
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ free_buffer_page(sys_heap, buffer, info->page, info->order);
+ kfree(info);
+ }
+ list_for_each_entry_safe(info, tmp_info, &pages_from_pool, list) {
+ free_buffer_page(sys_heap, buffer, info->page, info->order);
+ kfree(info);
+ }
return -ENOMEM;
}
-static void ion_system_heap_free(struct ion_buffer *buffer)
+void ion_system_heap_free(struct ion_buffer *buffer)
{
- struct ion_system_heap *sys_heap = container_of(buffer->heap,
+ struct ion_heap *heap = buffer->heap;
+ struct ion_system_heap *sys_heap = container_of(heap,
struct ion_system_heap,
heap);
struct sg_table *table = buffer->sg_table;
struct scatterlist *sg;
+ LIST_HEAD(pages);
int i;
- /* zero the buffer before goto page pool */
if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
- ion_heap_buffer_zero(buffer);
+ msm_ion_heap_buffer_zero(buffer);
for_each_sg(table->sgl, sg, table->nents, i)
- free_buffer_page(sys_heap, buffer, sg_page(sg));
+ free_buffer_page(sys_heap, buffer, sg_page(sg),
+ get_order(sg->length));
sg_free_table(table);
kfree(table);
}
-static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
- int nr_to_scan)
+struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
- struct ion_page_pool *uncached_pool;
- struct ion_page_pool *cached_pool;
+ return buffer->priv_virt;
+}
+
+void ion_system_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+ int nr_to_scan)
+{
struct ion_system_heap *sys_heap;
int nr_total = 0;
- int i, nr_freed;
+ int i;
int only_scan = 0;
sys_heap = container_of(heap, struct ion_system_heap, heap);
@@ -219,41 +360,29 @@
if (!nr_to_scan)
only_scan = 1;
- for (i = 0; i < NUM_ORDERS; i++) {
- uncached_pool = sys_heap->uncached_pools[i];
- cached_pool = sys_heap->cached_pools[i];
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->uncached_pools[i];
- if (only_scan) {
- nr_total += ion_page_pool_shrink(uncached_pool,
- gfp_mask,
- nr_to_scan);
+ nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+ pool = sys_heap->cached_pools[i];
+ nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
- nr_total += ion_page_pool_shrink(cached_pool,
- gfp_mask,
- nr_to_scan);
- } else {
- nr_freed = ion_page_pool_shrink(uncached_pool,
- gfp_mask,
- nr_to_scan);
- nr_to_scan -= nr_freed;
- nr_total += nr_freed;
- if (nr_to_scan <= 0)
- break;
- nr_freed = ion_page_pool_shrink(cached_pool,
- gfp_mask,
- nr_to_scan);
- nr_to_scan -= nr_freed;
- nr_total += nr_freed;
+ if (!only_scan) {
+ nr_to_scan -= nr_total;
+ /* shrink completed */
if (nr_to_scan <= 0)
break;
}
}
+
return nr_total;
}
static struct ion_heap_ops system_heap_ops = {
.allocate = ion_system_heap_allocate,
.free = ion_system_heap_free,
+ .map_dma = ion_system_heap_map_dma,
+ .unmap_dma = ion_system_heap_unmap_dma,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
.map_user = ion_heap_map_user,
@@ -264,72 +393,78 @@
void *unused)
{
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
+ struct ion_system_heap *sys_heap = container_of(
+ heap, struct ion_system_heap, heap);
int i;
- struct ion_page_pool *pool;
- for (i = 0; i < NUM_ORDERS; i++) {
- pool = sys_heap->uncached_pools[i];
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->uncached_pools[i];
- seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
+ seq_printf(s,
+ "%d order %u highmem pages in uncached pool = %lu total\n",
pool->high_count, pool->order,
- (PAGE_SIZE << pool->order) * pool->high_count);
- seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
+ (1 << pool->order) * PAGE_SIZE * pool->high_count);
+ seq_printf(s,
+ "%d order %u lowmem pages in uncached pool = %lu total\n",
pool->low_count, pool->order,
- (PAGE_SIZE << pool->order) * pool->low_count);
+ (1 << pool->order) * PAGE_SIZE * pool->low_count);
}
- for (i = 0; i < NUM_ORDERS; i++) {
- pool = sys_heap->cached_pools[i];
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->cached_pools[i];
- seq_printf(s, "%d order %u highmem pages cached %lu total\n",
+ seq_printf(s,
+ "%d order %u highmem pages in cached pool = %lu total\n",
pool->high_count, pool->order,
- (PAGE_SIZE << pool->order) * pool->high_count);
- seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
+ (1 << pool->order) * PAGE_SIZE * pool->high_count);
+ seq_printf(s,
+ "%d order %u lowmem pages in cached pool = %lu total\n",
pool->low_count, pool->order,
- (PAGE_SIZE << pool->order) * pool->low_count);
+ (1 << pool->order) * PAGE_SIZE * pool->low_count);
}
+
return 0;
}
static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
{
int i;
-
- for (i = 0; i < NUM_ORDERS; i++)
+ for (i = 0; i < num_orders; i++)
if (pools[i])
ion_page_pool_destroy(pools[i]);
}
-static int ion_system_heap_create_pools(struct ion_page_pool **pools,
- bool cached)
+/**
+ * ion_system_heap_create_pools - Creates pools for all orders
+ *
+ * If this fails you don't need to destroy any pools. It's all or
+ * nothing. If it succeeds you'll eventually need to use
+ * ion_system_heap_destroy_pools to destroy the pools.
+ */
+static int ion_system_heap_create_pools(struct ion_page_pool **pools)
{
int i;
- gfp_t gfp_flags = low_order_gfp_flags;
-
- for (i = 0; i < NUM_ORDERS; i++) {
+ for (i = 0; i < num_orders; i++) {
struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
- if (orders[i] > 4)
+ if (orders[i])
gfp_flags = high_order_gfp_flags;
-
- pool = ion_page_pool_create(gfp_flags, orders[i], cached);
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
if (!pool)
goto err_create_pool;
pools[i] = pool;
}
return 0;
-
err_create_pool:
ion_system_heap_destroy_pools(pools);
- return -ENOMEM;
+ return 1;
}
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
struct ion_system_heap *heap;
+ int pools_size = sizeof(struct ion_page_pool *) * num_orders;
heap = kzalloc(sizeof(*heap), GFP_KERNEL);
if (!heap)
@@ -338,19 +473,30 @@
heap->heap.type = ION_HEAP_TYPE_SYSTEM;
heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- if (ion_system_heap_create_pools(heap->uncached_pools, false))
- goto free_heap;
+ heap->uncached_pools = kzalloc(pools_size, GFP_KERNEL);
+ if (!heap->uncached_pools)
+ goto err_alloc_uncached_pools;
- if (ion_system_heap_create_pools(heap->cached_pools, true))
- goto destroy_uncached_pools;
+ heap->cached_pools = kzalloc(pools_size, GFP_KERNEL);
+ if (!heap->cached_pools)
+ goto err_alloc_cached_pools;
+
+ if (ion_system_heap_create_pools(heap->uncached_pools))
+ goto err_create_uncached_pools;
+
+ if (ion_system_heap_create_pools(heap->cached_pools))
+ goto err_create_cached_pools;
heap->heap.debug_show = ion_system_heap_debug_show;
return &heap->heap;
-destroy_uncached_pools:
+err_create_cached_pools:
ion_system_heap_destroy_pools(heap->uncached_pools);
-
-free_heap:
+err_create_uncached_pools:
+ kfree(heap->cached_pools);
+err_alloc_cached_pools:
+ kfree(heap->uncached_pools);
+err_alloc_uncached_pools:
kfree(heap);
return ERR_PTR(-ENOMEM);
}
@@ -360,12 +506,11 @@
struct ion_system_heap *sys_heap = container_of(heap,
struct ion_system_heap,
heap);
- int i;
- for (i = 0; i < NUM_ORDERS; i++) {
- ion_page_pool_destroy(sys_heap->uncached_pools[i]);
- ion_page_pool_destroy(sys_heap->cached_pools[i]);
- }
+ ion_system_heap_destroy_pools(sys_heap->uncached_pools);
+ ion_system_heap_destroy_pools(sys_heap->cached_pools);
+ kfree(sys_heap->uncached_pools);
+ kfree(sys_heap->cached_pools);
kfree(sys_heap);
}
@@ -384,7 +529,7 @@
if (align > (PAGE_SIZE << order))
return -EINVAL;
- page = alloc_pages(low_order_gfp_flags, order);
+ page = alloc_pages(low_order_gfp_flags | __GFP_ZERO, order);
if (!page)
return -ENOMEM;
@@ -394,36 +539,34 @@
for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
__free_page(page + i);
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table) {
ret = -ENOMEM;
- goto free_pages;
+ goto out;
}
ret = sg_alloc_table(table, 1, GFP_KERNEL);
if (ret)
- goto free_table;
+ goto out;
sg_set_page(table->sgl, page, len, 0);
- buffer->sg_table = table;
+ buffer->priv_virt = table;
ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
return 0;
-free_table:
- kfree(table);
-free_pages:
+out:
for (i = 0; i < len >> PAGE_SHIFT; i++)
__free_page(page + i);
-
+ kfree(table);
return ret;
}
-static void ion_system_contig_heap_free(struct ion_buffer *buffer)
+void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
- struct sg_table *table = buffer->sg_table;
+ struct sg_table *table = buffer->priv_virt;
struct page *page = sg_page(table->sgl);
unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
unsigned long i;
@@ -434,9 +577,34 @@
kfree(table);
}
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ *addr = page_to_phys(page);
+ *len = buffer->size;
+ return 0;
+}
+
+struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
static struct ion_heap_ops kmalloc_ops = {
.allocate = ion_system_contig_heap_allocate,
.free = ion_system_contig_heap_free,
+ .phys = ion_system_contig_heap_phys,
+ .map_dma = ion_system_contig_heap_map_dma,
+ .unmap_dma = ion_system_contig_heap_unmap_dma,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
.map_user = ion_heap_map_user,
diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c
new file mode 100644
index 0000000..4646646
--- /dev/null
+++ b/drivers/staging/android/ion/ion_system_secure_heap.c
@@ -0,0 +1,216 @@
+/*
+ *
+ * Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/msm_ion.h>
+#include <soc/qcom/secure_buffer.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_system_secure_heap {
+ struct ion_heap *sys_heap;
+ struct ion_heap heap;
+};
+
+static bool is_cp_flag_present(unsigned long flags)
+{
+	return flags & (ION_FLAG_CP_TOUCH |
+			ION_FLAG_CP_BITSTREAM |
+			ION_FLAG_CP_PIXEL |
+			ION_FLAG_CP_NON_PIXEL |
+			ION_FLAG_CP_CAMERA);
+}
+
+static int get_secure_vmid(unsigned long flags)
+{
+ if (flags & ION_FLAG_CP_TOUCH)
+ return VMID_CP_TOUCH;
+ if (flags & ION_FLAG_CP_BITSTREAM)
+ return VMID_CP_BITSTREAM;
+ if (flags & ION_FLAG_CP_PIXEL)
+ return VMID_CP_PIXEL;
+ if (flags & ION_FLAG_CP_NON_PIXEL)
+ return VMID_CP_NON_PIXEL;
+ if (flags & ION_FLAG_CP_CAMERA)
+ return VMID_CP_CAMERA;
+
+ return -EINVAL;
+}
+
+static void ion_system_secure_heap_free(struct ion_buffer *buffer)
+{
+ int ret = 0;
+ u32 source_vm;
+ int dest_vmid;
+ int dest_perms;
+ struct ion_heap *heap = buffer->heap;
+ struct ion_system_secure_heap *secure_heap = container_of(heap,
+ struct ion_system_secure_heap,
+ heap);
+
+	source_vm = get_secure_vmid(buffer->flags);
+	if ((int)source_vm < 0) {
+ pr_info("%s: Unable to get secure VMID\n", __func__);
+ return;
+ }
+ dest_vmid = VMID_HLOS;
+ dest_perms = PERM_READ | PERM_WRITE;
+
+ ret = hyp_assign_table(buffer->priv_virt, &source_vm, 1,
+ &dest_vmid, &dest_perms, 1);
+ if (ret) {
+ pr_err("%s: Not freeing memory since assign call failed\n",
+ __func__);
+ return;
+ }
+
+ buffer->heap = secure_heap->sys_heap;
+ secure_heap->sys_heap->ops->free(buffer);
+}
+
+static int ion_system_secure_heap_allocate(
+ struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ int ret = 0;
+ u32 source_vm;
+ int dest_vmid;
+ int dest_perms;
+ struct ion_system_secure_heap *secure_heap = container_of(heap,
+ struct ion_system_secure_heap,
+ heap);
+
+ if (!ion_heap_is_system_secure_heap_type(secure_heap->heap.type) ||
+ !is_cp_flag_present(flags)) {
+ pr_info("%s: Incorrect heap type or incorrect flags\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = secure_heap->sys_heap->ops->allocate(secure_heap->sys_heap,
+ buffer, size, align, flags);
+ if (ret) {
+ pr_info("%s: Failed to get allocation for %s, ret = %d\n",
+ __func__, heap->name, ret);
+ return ret;
+ }
+
+ source_vm = VMID_HLOS;
+ dest_vmid = get_secure_vmid(flags);
+ if (dest_vmid < 0) {
+ pr_info("%s: Unable to get secure VMID\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+ dest_perms = PERM_READ | PERM_WRITE;
+
+ ret = hyp_assign_table(buffer->priv_virt, &source_vm, 1,
+ &dest_vmid, &dest_perms, 1);
+ if (ret) {
+ pr_err("%s: Assign call failed\n", __func__);
+ goto err;
+ }
+ return ret;
+
+err:
+ ion_system_secure_heap_free(buffer);
+ return ret;
+}
+
+static struct sg_table *ion_system_secure_heap_map_dma(
+ struct ion_heap *heap, struct ion_buffer *buffer)
+{
+ struct ion_system_secure_heap *secure_heap = container_of(heap,
+ struct ion_system_secure_heap,
+ heap);
+
+ return secure_heap->sys_heap->ops->map_dma(secure_heap->sys_heap,
+ buffer);
+}
+
+static void ion_system_secure_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_system_secure_heap *secure_heap = container_of(heap,
+ struct ion_system_secure_heap,
+ heap);
+
+ secure_heap->sys_heap->ops->unmap_dma(secure_heap->sys_heap,
+ buffer);
+}
+
+static void *ion_system_secure_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ pr_info("%s: Kernel mapping from secure heap %s disallowed\n",
+ __func__, heap->name);
+ return ERR_PTR(-EINVAL);
+}
+
+static void ion_system_secure_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static int ion_system_secure_heap_map_user(struct ion_heap *mapper,
+ struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ pr_info("%s: Mapping from secure heap %s disallowed\n",
+ __func__, mapper->name);
+ return -EINVAL;
+}
+
+static int ion_system_secure_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+ int nr_to_scan)
+{
+ struct ion_system_secure_heap *secure_heap = container_of(heap,
+ struct ion_system_secure_heap,
+ heap);
+
+ return secure_heap->sys_heap->ops->shrink(secure_heap->sys_heap,
+ gfp_mask, nr_to_scan);
+}
+
+static struct ion_heap_ops system_secure_heap_ops = {
+ .allocate = ion_system_secure_heap_allocate,
+ .free = ion_system_secure_heap_free,
+ .map_dma = ion_system_secure_heap_map_dma,
+ .unmap_dma = ion_system_secure_heap_unmap_dma,
+ .map_kernel = ion_system_secure_heap_map_kernel,
+ .unmap_kernel = ion_system_secure_heap_unmap_kernel,
+ .map_user = ion_system_secure_heap_map_user,
+ .shrink = ion_system_secure_heap_shrink,
+};
+
+struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_system_secure_heap *heap;
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->heap.ops = &system_secure_heap_ops;
+ heap->heap.type = ION_HEAP_TYPE_SYSTEM_SECURE;
+ heap->sys_heap = get_ion_heap(ION_SYSTEM_HEAP_ID);
+ return &heap->heap;
+}
+
+void ion_system_secure_heap_destroy(struct ion_heap *heap)
+{
+ kfree(heap);
+}
diff --git a/drivers/staging/android/ion/msm/Makefile b/drivers/staging/android/ion/msm/Makefile
new file mode 100644
index 0000000..c4c01c4
--- /dev/null
+++ b/drivers/staging/android/ion/msm/Makefile
@@ -0,0 +1,4 @@
+obj-y += msm_ion.o
+ifdef CONFIG_COMPAT
+obj-y += compat_msm_ion.o
+endif
diff --git a/drivers/staging/android/ion/msm/compat_msm_ion.c b/drivers/staging/android/ion/msm/compat_msm_ion.c
new file mode 100644
index 0000000..06b8277
--- /dev/null
+++ b/drivers/staging/android/ion/msm/compat_msm_ion.c
@@ -0,0 +1,154 @@
+/* Copyright (c) 2014,2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/ion.h>
+#include <linux/msm_ion.h>
+#include <linux/uaccess.h>
+#include "../ion_priv.h"
+#include "../compat_ion.h"
+
+struct compat_ion_flush_data {
+ compat_ion_user_handle_t handle;
+ compat_int_t fd;
+ compat_uptr_t vaddr;
+ compat_uint_t offset;
+ compat_uint_t length;
+};
+
+struct compat_ion_prefetch_data {
+ compat_int_t heap_id;
+ compat_ulong_t len;
+};
+
+#define COMPAT_ION_IOC_CLEAN_CACHES _IOWR(ION_IOC_MSM_MAGIC, 0, \
+ struct compat_ion_flush_data)
+#define COMPAT_ION_IOC_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 1, \
+ struct compat_ion_flush_data)
+#define COMPAT_ION_IOC_CLEAN_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 2, \
+ struct compat_ion_flush_data)
+#define COMPAT_ION_IOC_PREFETCH _IOWR(ION_IOC_MSM_MAGIC, 3, \
+ struct compat_ion_prefetch_data)
+#define COMPAT_ION_IOC_DRAIN _IOWR(ION_IOC_MSM_MAGIC, 4, \
+ struct compat_ion_prefetch_data)
+
+static int compat_get_ion_flush_data(
+ struct compat_ion_flush_data __user *data32,
+ struct ion_flush_data __user *data)
+{
+ compat_ion_user_handle_t h;
+ compat_int_t i;
+ compat_uptr_t u;
+ compat_ulong_t l;
+ int err;
+
+ err = get_user(h, &data32->handle);
+ err |= put_user(h, &data->handle);
+ err |= get_user(i, &data32->fd);
+ err |= put_user(i, &data->fd);
+ err |= get_user(u, &data32->vaddr);
+ /* upper bits won't get set, zero them */
+ data->vaddr = NULL;
+ err |= put_user(u, (compat_uptr_t *)&data->vaddr);
+ err |= get_user(l, &data32->offset);
+ err |= put_user(l, &data->offset);
+ err |= get_user(l, &data32->length);
+ err |= put_user(l, &data->length);
+
+ return err;
+}
+
+static int compat_get_ion_prefetch_data(
+ struct compat_ion_prefetch_data __user *data32,
+ struct ion_prefetch_data __user *data)
+{
+ compat_int_t i;
+ compat_ulong_t l;
+ int err;
+
+ err = get_user(i, &data32->heap_id);
+ err |= put_user(i, &data->heap_id);
+ err |= get_user(l, &data32->len);
+ err |= put_user(l, &data->len);
+
+ return err;
+}
+
+static unsigned int convert_cmd(unsigned int cmd)
+{
+ switch (cmd) {
+ case COMPAT_ION_IOC_CLEAN_CACHES:
+ return ION_IOC_CLEAN_CACHES;
+ case COMPAT_ION_IOC_INV_CACHES:
+ return ION_IOC_INV_CACHES;
+ case COMPAT_ION_IOC_CLEAN_INV_CACHES:
+ return ION_IOC_CLEAN_INV_CACHES;
+ case COMPAT_ION_IOC_PREFETCH:
+ return ION_IOC_PREFETCH;
+ case COMPAT_ION_IOC_DRAIN:
+ return ION_IOC_DRAIN;
+ default:
+ return cmd;
+ }
+}
+
+long compat_msm_ion_ioctl(struct ion_client *client, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case COMPAT_ION_IOC_CLEAN_CACHES:
+ case COMPAT_ION_IOC_INV_CACHES:
+ case COMPAT_ION_IOC_CLEAN_INV_CACHES:
+ {
+ struct compat_ion_flush_data __user *data32;
+ struct ion_flush_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (!data)
+ return -EFAULT;
+
+ err = compat_get_ion_flush_data(data32, data);
+ if (err)
+ return err;
+
+ return msm_ion_custom_ioctl(client, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ case COMPAT_ION_IOC_PREFETCH:
+ case COMPAT_ION_IOC_DRAIN:
+ {
+ struct compat_ion_prefetch_data __user *data32;
+ struct ion_prefetch_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (!data)
+ return -EFAULT;
+
+ err = compat_get_ion_prefetch_data(data32, data);
+ if (err)
+ return err;
+
+ return msm_ion_custom_ioctl(client, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ default:
+ if (is_compat_task())
+ return -ENOIOCTLCMD;
+ else
+ return msm_ion_custom_ioctl(client, cmd, arg);
+ }
+}
diff --git a/drivers/staging/android/ion/msm/compat_msm_ion.h b/drivers/staging/android/ion/msm/compat_msm_ion.h
new file mode 100644
index 0000000..64b5903
--- /dev/null
+++ b/drivers/staging/android/ion/msm/compat_msm_ion.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2014,2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Guard renamed from _LINUX_COMPAT_ION_H: that macro is already used by
+ * the generic ion compat header (compat_ion.h), so reusing it would
+ * silently skip whichever of the two headers is included second.
+ */
+#ifndef _COMPAT_MSM_ION_H
+#define _COMPAT_MSM_ION_H
+
+#include <linux/ion.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_msm_ion_ioctl(struct ion_client *client, unsigned int cmd,
+		      unsigned long arg);
+
+#define compat_ion_user_handle_t compat_int_t
+
+#else
+
+/* Without CONFIG_COMPAT the native handler is used directly. */
+#define compat_msm_ion_ioctl msm_ion_custom_ioctl
+
+#endif
+#endif
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
new file mode 100644
index 0000000..b430008
--- /dev/null
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -0,0 +1,1061 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/msm_ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
+#include <linux/rwsem.h>
+#include <linux/uaccess.h>
+#include <linux/memblock.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/cma.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+#include "../ion_priv.h"
+#include "compat_msm_ion.h"
+
+#define ION_COMPAT_STR	"qcom,msm-ion"
+
+/* Global ion device: NULL until probe, ERR_PTR(...) if probe failed. */
+static struct ion_device *idev;
+/* Heap array created at probe time, indexed 0..num_heaps-1. */
+static int num_heaps;
+static struct ion_heap **heaps;
+
+/*
+ * struct ion_heap_desc - static description of a supported heap.
+ * @id:			heap id, matched against the DT "reg" value
+ * @type:		ion heap type
+ * @name:		canonical heap name
+ * @permission_type:	memory ID used to identify the memory to TZ
+ */
+struct ion_heap_desc {
+	unsigned int id;
+	enum ion_heap_type type;
+	const char *name;
+	unsigned int permission_type;
+};
+
+#ifdef CONFIG_OF
+/* Lookup table mapping known heap ids to their canonical names. */
+static struct ion_heap_desc ion_heap_meta[] = {
+	{
+		.id	= ION_SYSTEM_HEAP_ID,
+		.name	= ION_SYSTEM_HEAP_NAME,
+	},
+	{
+		.id	= ION_SYSTEM_CONTIG_HEAP_ID,
+		.name	= ION_KMALLOC_HEAP_NAME,
+	},
+	{
+		.id	= ION_SECURE_HEAP_ID,
+		.name	= ION_SECURE_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_MM_HEAP_ID,
+		.name	= ION_MM_HEAP_NAME,
+		.permission_type = IPT_TYPE_MM_CARVEOUT,
+	},
+	{
+		.id	= ION_MM_FIRMWARE_HEAP_ID,
+		.name	= ION_MM_FIRMWARE_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_MFC_HEAP_ID,
+		.name	= ION_MFC_HEAP_NAME,
+		.permission_type = IPT_TYPE_MFC_SHAREDMEM,
+	},
+	{
+		.id	= ION_SF_HEAP_ID,
+		.name	= ION_SF_HEAP_NAME,
+	},
+	{
+		.id	= ION_QSECOM_HEAP_ID,
+		.name	= ION_QSECOM_HEAP_NAME,
+	},
+	{
+		.id	= ION_AUDIO_HEAP_ID,
+		.name	= ION_AUDIO_HEAP_NAME,
+	},
+	{
+		.id	= ION_PIL1_HEAP_ID,
+		.name	= ION_PIL1_HEAP_NAME,
+	},
+	{
+		.id	= ION_PIL2_HEAP_ID,
+		.name	= ION_PIL2_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_WB_HEAP_ID,
+		.name	= ION_WB_HEAP_NAME,
+	},
+	{
+		.id	= ION_CAMERA_HEAP_ID,
+		.name	= ION_CAMERA_HEAP_NAME,
+	},
+	{
+		.id	= ION_ADSP_HEAP_ID,
+		.name	= ION_ADSP_HEAP_NAME,
+	}
+};
+#endif
+
+/*
+ * msm_ion_client_create() - create an ion client on the global MSM ion
+ * device.  Returns ERR_PTR(-EPROBE_DEFER) until the driver has probed,
+ * or the probe-time ERR_PTR if probing failed.
+ */
+struct ion_client *msm_ion_client_create(const char *name)
+{
+	/*
+	 * The assumption is that if there is a NULL device, the ion
+	 * driver has not yet probed.
+	 */
+	if (!idev)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	if (IS_ERR(idev))
+		return (struct ion_client *)idev;
+
+	return ion_client_create(idev, name);
+}
+EXPORT_SYMBOL(msm_ion_client_create);
+
+/*
+ * ion_no_pages_cache_ops() - cache maintenance for buffers without a
+ * struct page backing (e.g. carveout memory).  When no kernel vaddr is
+ * supplied the physical range is ioremapped piecewise and each chunk is
+ * cleaned/invalidated/flushed per @cmd.  Returns 0 or -EINVAL.
+ */
+static int ion_no_pages_cache_ops(
+			struct ion_client *client,
+			struct ion_handle *handle,
+			void *vaddr,
+			unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	unsigned long size_to_vmap, total_size;
+	int i, j, ret;
+	void *ptr = NULL;
+	ion_phys_addr_t buff_phys = 0;
+	ion_phys_addr_t buff_phys_start = 0;
+	size_t buf_length = 0;
+
+	ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
+	if (ret)
+		return -EINVAL;
+
+	buff_phys = buff_phys_start;
+
+	if (!vaddr) {
+		/*
+		 * Split the vmalloc space into smaller regions in
+		 * order to clean and/or invalidate the cache.
+		 */
+		size_to_vmap = ((VMALLOC_END - VMALLOC_START) / 8);
+		total_size = buf_length;
+
+		for (i = 0; i < total_size; i += size_to_vmap) {
+			size_to_vmap = min(size_to_vmap, total_size - i);
+			for (j = 0; !ptr && j < 10 && size_to_vmap; ++j) {
+				ptr = ioremap(buff_phys, size_to_vmap);
+				if (ptr) {
+					switch (cmd) {
+					case ION_IOC_CLEAN_CACHES:
+						__dma_clean_area(
+							ptr,
+							size_to_vmap);
+						break;
+					case ION_IOC_INV_CACHES:
+						__dma_inv_area(
+							ptr,
+							size_to_vmap);
+						break;
+					case ION_IOC_CLEAN_INV_CACHES:
+						__dma_flush_area(
+							ptr,
+							size_to_vmap);
+						break;
+					default:
+						/* don't leak the mapping */
+						iounmap(ptr);
+						return -EINVAL;
+					}
+					buff_phys += size_to_vmap;
+				} else {
+					size_to_vmap >>= 1;
+				}
+			}
+			if (!ptr) {
+				pr_err("Couldn't io-remap the memory\n");
+				return -EINVAL;
+			}
+			iounmap(ptr);
+			/*
+			 * Reset so the next chunk gets a fresh mapping;
+			 * otherwise the loop above is skipped and the
+			 * stale pointer is iounmap()ed a second time.
+			 */
+			ptr = NULL;
+		}
+	} else {
+		switch (cmd) {
+		case ION_IOC_CLEAN_CACHES:
+			__dma_clean_area(vaddr, length);
+			break;
+		case ION_IOC_INV_CACHES:
+			__dma_inv_area(vaddr, length);
+			break;
+		case ION_IOC_CLEAN_INV_CACHES:
+			__dma_flush_area(vaddr, length);
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ion_pages_cache_ops() - cache maintenance for page-backed buffers via
+ * the buffer's sg_table.  @vaddr, when non-NULL, is a kernel mapping of
+ * the range and is maintained directly where possible.
+ */
+static int ion_pages_cache_ops(
+			struct ion_client *client,
+			struct ion_handle *handle,
+			void *vaddr, unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	struct sg_table *table = NULL;
+
+	table = ion_sg_table(client, handle);
+	if (IS_ERR_OR_NULL(table))
+		/* PTR_ERR(NULL) is 0, which would read as success. */
+		return table ? PTR_ERR(table) : -EINVAL;
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		if (!vaddr)
+			dma_sync_sg_for_device(NULL, table->sgl,
+					       table->nents, DMA_TO_DEVICE);
+		else
+			__dma_clean_area(vaddr, length);
+		break;
+	case ION_IOC_INV_CACHES:
+		dma_sync_sg_for_cpu(NULL, table->sgl,
+				    table->nents, DMA_FROM_DEVICE);
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		if (!vaddr) {
+			dma_sync_sg_for_device(NULL, table->sgl,
+					       table->nents, DMA_TO_DEVICE);
+			dma_sync_sg_for_cpu(NULL, table->sgl,
+					    table->nents, DMA_FROM_DEVICE);
+		} else {
+			__dma_flush_area(vaddr, length);
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * ion_do_cache_op() - perform cache operation @cmd on a buffer.
+ * No-op for uncached and for secure buffers.  Dispatches to the
+ * page-backed or physical-range implementation depending on whether
+ * the buffer's scatterlist carries struct pages.
+ */
+static int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *uaddr, unsigned long offset, unsigned long len,
+			unsigned int cmd)
+{
+	int ret = -EINVAL;
+	unsigned long flags;
+	struct sg_table *table;
+	struct page *page;
+
+	ret = ion_handle_get_flags(client, handle, &flags);
+	if (ret)
+		return -EINVAL;
+
+	if (!ION_IS_CACHED(flags))
+		return 0;
+
+	if (flags & ION_FLAG_SECURE)
+		return 0;
+
+	table = ion_sg_table(client, handle);
+
+	if (IS_ERR_OR_NULL(table))
+		return PTR_ERR(table);
+
+	page = sg_page(table->sgl);
+
+	if (page)
+		ret = ion_pages_cache_ops(client, handle, uaddr,
+					offset, len, cmd);
+	else
+		ret = ion_no_pages_cache_ops(client, handle, uaddr,
+					offset, len, cmd);
+
+	return ret;
+}
+
+/* Public wrapper: cache operation over the whole range (offset 0). */
+int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *vaddr, unsigned long len, unsigned int cmd)
+{
+	return ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
+}
+EXPORT_SYMBOL(msm_ion_do_cache_op);
+
+/*
+ * msm_ion_allocate() - warn about carveout heaps declared without a
+ * base address; ion no longer reserves memory for those itself.
+ */
+static void msm_ion_allocate(struct ion_platform_heap *heap)
+{
+	if (!heap->base && heap->extra_data) {
+		WARN(1, "Specifying carveout heaps without a base is deprecated. Convert to the DMA heap type instead");
+		return;
+	}
+}
+
+/* Returns 1 if the two heaps' [base, base+size) ranges intersect. */
+static int is_heap_overlapping(const struct ion_platform_heap *heap1,
+				const struct ion_platform_heap *heap2)
+{
+	ion_phys_addr_t heap1_base = heap1->base;
+	ion_phys_addr_t heap2_base = heap2->base;
+	ion_phys_addr_t heap1_end = heap1->base + heap1->size - 1;
+	ion_phys_addr_t heap2_end = heap2->base + heap2->size - 1;
+
+	if (heap1_base == heap2_base)
+		return 1;
+	if (heap1_base < heap2_base && heap1_end >= heap2_base)
+		return 1;
+	if (heap2_base < heap1_base && heap2_end >= heap1_base)
+		return 1;
+	return 0;
+}
+
+/*
+ * check_for_heap_overlap() - panic if any two fixed-address heaps
+ * overlap; heaps without a base address are ignored.
+ */
+static void check_for_heap_overlap(const struct ion_platform_heap heap_list[],
+				   unsigned long nheaps)
+{
+	unsigned long i;
+	unsigned long j;
+
+	for (i = 0; i < nheaps; ++i) {
+		const struct ion_platform_heap *heap1 = &heap_list[i];
+
+		if (!heap1->base)
+			continue;
+		for (j = i + 1; j < nheaps; ++j) {
+			const struct ion_platform_heap *heap2 = &heap_list[j];
+
+			if (!heap2->base)
+				continue;
+			if (is_heap_overlapping(heap1, heap2)) {
+				panic("Memory in heap %s overlaps with heap %s\n",
+				      heap1->name, heap2->name);
+			}
+		}
+	}
+}
+
+#ifdef CONFIG_OF
+/*
+ * msm_init_extra_data() - allocate the per-heap-type extra_data blob.
+ * Only carveout heaps carry extra data (a struct ion_co_heap_pdata,
+ * later dereferenced by msm_ion_get_heap_align()/get_heap_adjacent()).
+ */
+static int msm_init_extra_data(struct device_node *node,
+			       struct ion_platform_heap *heap,
+			       const struct ion_heap_desc *heap_desc)
+{
+	int ret = 0;
+
+	switch ((int)heap->type) {
+	case ION_HEAP_TYPE_CARVEOUT:
+	{
+		/*
+		 * extra_data is a void *, so sizeof(*heap->extra_data)
+		 * is 1 (GCC extension); allocate the structure that is
+		 * actually written through this pointer.
+		 */
+		heap->extra_data = kzalloc(sizeof(struct ion_co_heap_pdata),
+					   GFP_KERNEL);
+		if (!heap->extra_data)
+			ret = -ENOMEM;
+		break;
+	}
+	default:
+		heap->extra_data = 0;
+		break;
+	}
+	return ret;
+}
+
+/* Expands to a { "NAME", ION_HEAP_TYPE_NAME } table entry. */
+#define MAKE_HEAP_TYPE_MAPPING(h) { .name = #h, \
+			.heap_type = ION_HEAP_TYPE_##h, }
+
+/* DT "qcom,ion-heap-type" string -> ion heap type enum. */
+static struct heap_types_info {
+	const char *name;
+	int heap_type;
+} heap_types_info[] = {
+	MAKE_HEAP_TYPE_MAPPING(SYSTEM),
+	MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG),
+	MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
+	MAKE_HEAP_TYPE_MAPPING(CHUNK),
+	MAKE_HEAP_TYPE_MAPPING(DMA),
+	MAKE_HEAP_TYPE_MAPPING(SYSTEM_SECURE),
+};
+
+/* Parse "qcom,ion-heap-type" and map the string to ION_HEAP_TYPE_*. */
+static int msm_ion_get_heap_type_from_dt_node(struct device_node *node,
+					      int *heap_type)
+{
+	const char *name;
+	int i, ret = -EINVAL;
+
+	ret = of_property_read_string(node, "qcom,ion-heap-type", &name);
+	if (ret)
+		goto out;
+	for (i = 0; i < ARRAY_SIZE(heap_types_info); ++i) {
+		if (!strcmp(heap_types_info[i].name, name)) {
+			*heap_type = heap_types_info[i].heap_type;
+			ret = 0;
+			goto out;
+		}
+	}
+	WARN(1, "Unknown heap type: %s. You might need to update heap_types_info in %s",
+	     name, __FILE__);
+out:
+	return ret;
+}
+
+/*
+ * msm_ion_populate_heap() - fill in @heap's name, type and extra_data
+ * from the static heap table and the DT node.  Fails (-EINVAL) for
+ * heap ids not present in ion_heap_meta.
+ */
+static int msm_ion_populate_heap(struct device_node *node,
+				 struct ion_platform_heap *heap)
+{
+	unsigned int i;
+	int ret = -EINVAL, heap_type = -1;
+	unsigned int len = ARRAY_SIZE(ion_heap_meta);
+
+	for (i = 0; i < len; ++i) {
+		if (ion_heap_meta[i].id == heap->id) {
+			heap->name = ion_heap_meta[i].name;
+			ret = msm_ion_get_heap_type_from_dt_node(
+							node, &heap_type);
+			if (ret)
+				break;
+			heap->type = heap_type;
+			ret = msm_init_extra_data(node, heap,
+						  &ion_heap_meta[i]);
+			break;
+		}
+	}
+	if (ret)
+		pr_err("%s: Unable to populate heap, error: %d", __func__, ret);
+	return ret;
+}
+
+/* Free a pdata built by msm_ion_parse_dt(), including extra_data. */
+static void free_pdata(const struct ion_platform_data *pdata)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->nr; ++i)
+		kfree(pdata->heaps[i].extra_data);
+	kfree(pdata->heaps);
+	kfree(pdata);
+}
+
+/* Apply optional "qcom,heap-align"; only carveout heaps support it. */
+static void msm_ion_get_heap_align(struct device_node *node,
+				   struct ion_platform_heap *heap)
+{
+	unsigned int val;
+	int ret = of_property_read_u32(node, "qcom,heap-align", &val);
+
+	if (!ret) {
+		switch ((int)heap->type) {
+		case ION_HEAP_TYPE_CARVEOUT:
+		{
+			struct ion_co_heap_pdata *extra =
+						heap->extra_data;
+			extra->align = val;
+			break;
+		}
+		default:
+			pr_err("ION-heap %s: Cannot specify alignment for this type of heap\n",
+			       heap->name);
+			break;
+		}
+	}
+}
+
+/*
+ * msm_ion_get_heap_size() - determine the heap size from DT.
+ * "qcom,memory-fixed" wins if present; otherwise the size of the
+ * linked "linux,contiguous-region", falling back to
+ * "qcom,memory-reservation-size".
+ */
+static int msm_ion_get_heap_size(struct device_node *node,
+				 struct ion_platform_heap *heap)
+{
+	unsigned int val;
+	int ret = 0;
+	u32 out_values[2];
+	struct device_node *pnode;
+
+	ret = of_property_read_u32(node, "qcom,memory-reservation-size", &val);
+	if (!ret)
+		heap->size = val;
+
+	ret = of_property_read_u32_array(node, "qcom,memory-fixed",
+					 out_values, 2);
+	if (!ret) {
+		heap->size = out_values[1];
+		goto out;
+	}
+
+	pnode = of_parse_phandle(node, "linux,contiguous-region", 0);
+	if (pnode) {
+		const u32 *addr;
+		u64 size;
+
+		addr = of_get_address(pnode, 0, &size, NULL);
+		if (!addr) {
+			of_node_put(pnode);
+			ret = -EINVAL;
+			goto out;
+		}
+		/* NOTE(review): truncates to 32 bits - confirm regions < 4G */
+		heap->size = (u32)size;
+		ret = 0;
+		of_node_put(pnode);
+	}
+
+	ret = 0;
+out:
+	return ret;
+}
+
+/*
+ * msm_ion_get_heap_base() - determine the heap base address:
+ * "qcom,memory-fixed" if present, else the CMA base of the child
+ * device created in msm_ion_parse_dt() (heap->priv).
+ */
+static void msm_ion_get_heap_base(struct device_node *node,
+				  struct ion_platform_heap *heap)
+{
+	u32 out_values[2];
+	int ret = 0;
+	struct device_node *pnode;
+
+	ret = of_property_read_u32_array(node, "qcom,memory-fixed",
+					 out_values, 2);
+	if (!ret)
+		heap->base = out_values[0];
+
+	pnode = of_parse_phandle(node, "linux,contiguous-region", 0);
+	if (pnode) {
+		heap->base = cma_get_base(heap->priv);
+		of_node_put(pnode);
+	}
+}
+
+/*
+ * msm_ion_get_heap_adjacent() - read optional "qcom,heap-adjacent".
+ * Carveout heaps record the value (or INVALID_HEAP_ID when absent);
+ * other heap types cannot use it.
+ */
+static void msm_ion_get_heap_adjacent(struct device_node *node,
+				      struct ion_platform_heap *heap)
+{
+	unsigned int val;
+	int ret = of_property_read_u32(node, "qcom,heap-adjacent", &val);
+
+	if (!ret) {
+		switch (heap->type) {
+		case ION_HEAP_TYPE_CARVEOUT:
+		{
+			struct ion_co_heap_pdata *extra = heap->extra_data;
+
+			extra->adjacent_mem_id = val;
+			break;
+		}
+		default:
+			pr_err("ION-heap %s: Cannot specify adjcent mem id for this type of heap\n",
+			       heap->name);
+			break;
+		}
+	} else {
+		switch (heap->type) {
+		case ION_HEAP_TYPE_CARVEOUT:
+		{
+			struct ion_co_heap_pdata *extra = heap->extra_data;
+
+			extra->adjacent_mem_id = INVALID_HEAP_ID;
+			break;
+		}
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * msm_ion_parse_dt() - build an ion_platform_data from the msm-ion DT
+ * node: one child platform device and one ion_platform_heap per child.
+ * Returns ERR_PTR on failure, freeing the pdata built so far.
+ */
+static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
+{
+	struct ion_platform_data *pdata = 0;
+	struct ion_platform_heap *heaps = NULL;
+	struct device_node *node;
+	struct platform_device *new_dev = NULL;
+	const struct device_node *dt_node = pdev->dev.of_node;
+	const __be32 *val;
+	int ret = -EINVAL;
+	u32 num_heaps = 0;
+	int idx = 0;
+
+	for_each_available_child_of_node(dt_node, node)
+		num_heaps++;
+
+	if (!num_heaps)
+		return ERR_PTR(-EINVAL);
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	heaps = kcalloc(num_heaps, sizeof(struct ion_platform_heap),
+			GFP_KERNEL);
+	if (!heaps) {
+		kfree(pdata);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pdata->heaps = heaps;
+	pdata->nr = num_heaps;
+
+	for_each_available_child_of_node(dt_node, node) {
+		new_dev = of_platform_device_create(node, NULL, &pdev->dev);
+		if (!new_dev) {
+			pr_err("Failed to create device %s\n", node->name);
+			goto free_heaps;
+		}
+
+		/* The child device is what CMA/DMA APIs operate on. */
+		pdata->heaps[idx].priv = &new_dev->dev;
+		val = of_get_address(node, 0, NULL, NULL);
+		if (!val) {
+			pr_err("%s: Unable to find reg key", __func__);
+			goto free_heaps;
+		}
+		pdata->heaps[idx].id = (u32)of_read_number(val, 1);
+
+		ret = msm_ion_populate_heap(node, &pdata->heaps[idx]);
+		if (ret)
+			goto free_heaps;
+
+		msm_ion_get_heap_base(node, &pdata->heaps[idx]);
+		msm_ion_get_heap_align(node, &pdata->heaps[idx]);
+
+		ret = msm_ion_get_heap_size(node, &pdata->heaps[idx]);
+		if (ret)
+			goto free_heaps;
+
+		msm_ion_get_heap_adjacent(node, &pdata->heaps[idx]);
+
+		++idx;
+	}
+	return pdata;
+
+free_heaps:
+	free_pdata(pdata);
+	return ERR_PTR(ret);
+}
+#else
+/* Non-DT builds supply pdata via platform data; nothing to parse. */
+static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
+{
+	return NULL;
+}
+
+static void free_pdata(const struct ion_platform_data *pdata)
+{
+}
+#endif
+
+/*
+ * check_vaddr_bounds() - return 0 if [start, end) lies entirely within
+ * a single VMA of the current address space, nonzero otherwise.
+ * Caller must hold mm->mmap_sem (find_vma requirement).
+ */
+static int check_vaddr_bounds(unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = current->active_mm;
+	struct vm_area_struct *vma;
+	int ret = 1;
+
+	if (end < start)
+		goto out;
+
+	vma = find_vma(mm, start);
+	if (vma && vma->vm_start < end) {
+		if (start < vma->vm_start)
+			goto out;
+		if (end > vma->vm_end)
+			goto out;
+		ret = 0;
+	}
+
+out:
+	return ret;
+}
+
+/* True for the MSM system-secure heap type. */
+int ion_heap_is_system_secure_heap_type(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE);
+}
+
+/* Heap-level securing is not supported on this target. */
+int ion_heap_allow_heap_secure(enum ion_heap_type type)
+{
+	return false;
+}
+
+/*
+ * fix up the cases where the ioctl direction bits are incorrect:
+ * these commands copy a struct in from userspace but were not defined
+ * with _IOC_WRITE, so force the write direction for them.
+ */
+static unsigned int msm_ion_ioctl_dir(unsigned int cmd)
+{
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+	case ION_IOC_INV_CACHES:
+	case ION_IOC_CLEAN_INV_CACHES:
+	case ION_IOC_PREFETCH:
+	case ION_IOC_DRAIN:
+		return _IOC_WRITE;
+	default:
+		return _IOC_DIR(cmd);
+	}
+}
+
+/*
+ * msm_ion_custom_ioctl() - native handler for the MSM custom ioctls.
+ * The cache commands accept either an ion handle id or a dma-buf fd,
+ * validate the user range against the caller's VMAs, then run the
+ * requested cache operation.
+ */
+long msm_ion_custom_ioctl(struct ion_client *client,
+			  unsigned int cmd,
+			  unsigned long arg)
+{
+	unsigned int dir;
+	union {
+		struct ion_flush_data flush_data;
+		struct ion_prefetch_data prefetch_data;
+	} data;
+
+	dir = msm_ion_ioctl_dir(cmd);
+
+	if (_IOC_SIZE(cmd) > sizeof(data))
+		return -EINVAL;
+
+	if (dir & _IOC_WRITE)
+		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+	case ION_IOC_INV_CACHES:
+	case ION_IOC_CLEAN_INV_CACHES:
+	{
+		unsigned long start, end;
+		struct ion_handle *handle = NULL;
+		int ret;
+		struct mm_struct *mm = current->active_mm;
+
+		/* Positive handle id wins; otherwise import the fd. */
+		if (data.flush_data.handle > 0) {
+			handle = ion_handle_get_by_id(
+					client, (int)data.flush_data.handle);
+			if (IS_ERR(handle)) {
+				pr_info("%s: Could not find handle: %d\n",
+					__func__, (int)data.flush_data.handle);
+				return PTR_ERR(handle);
+			}
+		} else {
+			handle = ion_import_dma_buf_fd(client,
+						       data.flush_data.fd);
+			if (IS_ERR(handle)) {
+				pr_info("%s: Could not import handle: %p\n",
+					__func__, handle);
+				return -EINVAL;
+			}
+		}
+
+		/* mmap_sem protects the VMA walk in check_vaddr_bounds. */
+		down_read(&mm->mmap_sem);
+
+		start = (unsigned long)data.flush_data.vaddr;
+		end = (unsigned long)data.flush_data.vaddr
+			+ data.flush_data.length;
+
+		if (check_vaddr_bounds(start, end)) {
+			pr_err("%s: virtual address %p is out of bounds\n",
+			       __func__, data.flush_data.vaddr);
+			ret = -EINVAL;
+		} else {
+			ret = ion_do_cache_op(
+				client, handle, data.flush_data.vaddr,
+				data.flush_data.offset,
+				data.flush_data.length, cmd);
+		}
+		up_read(&mm->mmap_sem);
+
+		/* Drop the reference taken by the lookup/import above. */
+		ion_free(client, handle);
+
+		if (ret < 0)
+			return ret;
+		break;
+	}
+
+	default:
+		return -ENOTTY;
+	}
+	return 0;
+}
+
+#define MAX_VMAP_RETRIES 10
+
+/**
+ * An optimized page-zero'ing function. vmaps arrays of pages in large
+ * chunks to minimize the number of memsets and vmaps/vunmaps.
+ *
+ * Note that the `pages' array should be composed of all 4K pages.
+ *
+ * Returns 0 on success, -ENOMEM if a region cannot be vmapped even
+ * after halving the chunk size MAX_VMAP_RETRIES times.
+ *
+ * NOTE: This function does not guarantee synchronization of the caches
+ * and thus caller is responsible for handling any cache maintenance
+ * operations needed.
+ */
+int msm_ion_heap_pages_zero(struct page **pages, int num_pages)
+{
+	int i, j, npages_to_vmap;
+	void *ptr = NULL;
+
+	/*
+	 * As an optimization, we manually zero out all of the pages
+	 * in one fell swoop here. To safeguard against insufficient
+	 * vmalloc space, we only vmap `npages_to_vmap' at a time,
+	 * starting with a conservative estimate of 1/8 of the total
+	 * number of vmalloc pages available.
+	 */
+	npages_to_vmap = ((VMALLOC_END - VMALLOC_START) / 8)
+			>> PAGE_SHIFT;
+	for (i = 0; i < num_pages; i += npages_to_vmap) {
+		npages_to_vmap = min(npages_to_vmap, num_pages - i);
+		for (j = 0; !ptr && j < MAX_VMAP_RETRIES && npages_to_vmap;
+		     ++j) {
+			ptr = vmap(&pages[i], npages_to_vmap,
+				   VM_IOREMAP, PAGE_KERNEL);
+			if (!ptr)
+				npages_to_vmap >>= 1;
+		}
+		if (!ptr)
+			return -ENOMEM;
+
+		memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
+		vunmap(ptr);
+	}
+
+	return 0;
+}
+
+/*
+ * msm_ion_heap_alloc_pages_mem() - allocate the page-pointer table for
+ * a buffer of pages_mem->size bytes.  Large tables try kmalloc first
+ * and fall back to vmalloc; free_fn records the matching release path.
+ */
+int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem)
+{
+	struct page **pages;
+	unsigned int page_tbl_size;
+
+	pages_mem->free_fn = kfree;
+	page_tbl_size = sizeof(struct page *) * (pages_mem->size >> PAGE_SHIFT);
+	if (page_tbl_size > SZ_8K) {
+		/*
+		 * Do fallback to ensure we have a balance between
+		 * performance and availability.
+		 */
+		pages = kmalloc(page_tbl_size,
+				__GFP_COMP | __GFP_NORETRY |
+				__GFP_NOWARN);
+		if (!pages) {
+			pages = vmalloc(page_tbl_size);
+			pages_mem->free_fn = vfree;
+		}
+	} else {
+		pages = kmalloc(page_tbl_size, GFP_KERNEL);
+	}
+
+	if (!pages)
+		return -ENOMEM;
+
+	pages_mem->pages = pages;
+	return 0;
+}
+
+/* Release a table allocated by msm_ion_heap_alloc_pages_mem(). */
+void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem)
+{
+	pages_mem->free_fn(pages_mem->pages);
+}
+
+/*
+ * msm_ion_heap_high_order_page_zero() - zero a high-order page via the
+ * batched vmap path, then perform a bidirectional DMA sync over the
+ * range so the zeroed contents reach memory.
+ */
+int msm_ion_heap_high_order_page_zero(struct page *page, int order)
+{
+	int i, ret;
+	struct pages_mem pages_mem;
+	int npages = 1 << order;
+
+	pages_mem.size = npages * PAGE_SIZE;
+
+	if (msm_ion_heap_alloc_pages_mem(&pages_mem))
+		return -ENOMEM;
+
+	for (i = 0; i < (1 << order); ++i)
+		pages_mem.pages[i] = page + i;
+
+	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
+	dma_sync_single_for_device(NULL, page_to_phys(page), pages_mem.size,
+				   DMA_BIDIRECTIONAL);
+	msm_ion_heap_free_pages_mem(&pages_mem);
+	return ret;
+}
+
+/*
+ * msm_ion_heap_buffer_zero() - zero an entire ion buffer by collecting
+ * its pages from the sg_table, batch-zeroing them, and syncing the
+ * scatterlist bidirectionally afterwards.
+ */
+int msm_ion_heap_buffer_zero(struct ion_buffer *buffer)
+{
+	struct sg_table *table = buffer->sg_table;
+	struct scatterlist *sg;
+	int i, j, ret = 0, npages = 0;
+	struct pages_mem pages_mem;
+
+	pages_mem.size = PAGE_ALIGN(buffer->size);
+
+	if (msm_ion_heap_alloc_pages_mem(&pages_mem))
+		return -ENOMEM;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page = sg_page(sg);
+		unsigned long len = sg->length;
+
+		for (j = 0; j < len / PAGE_SIZE; j++)
+			pages_mem.pages[npages++] = page + j;
+	}
+
+	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
+	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+			       DMA_BIDIRECTIONAL);
+	msm_ion_heap_free_pages_mem(&pages_mem);
+	return ret;
+}
+
+/*
+ * msm_ion_heap_create() - create a heap from its platform description.
+ * MSM-specific heap types are handled here; everything else falls
+ * through to the generic ion_heap_create().  The (int) cast avoids
+ * enum-range warnings for vendor heap type values.
+ */
+static struct ion_heap *msm_ion_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_heap *heap = NULL;
+
+	switch ((int)heap_data->type) {
+	case ION_HEAP_TYPE_SYSTEM_SECURE:
+		heap = ion_system_secure_heap_create(heap_data);
+		break;
+	default:
+		heap = ion_heap_create(heap_data);
+	}
+
+	if (IS_ERR_OR_NULL(heap)) {
+		pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
+		       __func__, heap_data->name, heap_data->type,
+		       &heap_data->base, heap_data->size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	heap->name = heap_data->name;
+	heap->id = heap_data->id;
+	heap->priv = heap_data->priv;
+	return heap;
+}
+
+/* Counterpart of msm_ion_heap_create(); NULL-safe. */
+static void msm_ion_heap_destroy(struct ion_heap *heap)
+{
+	if (!heap)
+		return;
+
+	switch ((int)heap->type) {
+	case ION_HEAP_TYPE_SYSTEM_SECURE:
+		ion_system_secure_heap_destroy(heap);
+		break;
+	default:
+		ion_heap_destroy(heap);
+	}
+}
+
+/* Look up a created heap by id; returns NULL (and logs) if not found. */
+struct ion_heap *get_ion_heap(int heap_id)
+{
+	int i;
+	struct ion_heap *heap;
+
+	for (i = 0; i < num_heaps; i++) {
+		heap = heaps[i];
+		if (heap->id == heap_id)
+			return heap;
+	}
+
+	pr_err("%s: heap_id %d not found\n", __func__, heap_id);
+	return NULL;
+}
+
+/*
+ * msm_ion_probe() - create the ion device and all heaps described by DT
+ * (or platform data).  The global 'idev' is published only at the very
+ * end so that client probes are deferred until ion is fully set up.
+ */
+static int msm_ion_probe(struct platform_device *pdev)
+{
+	static struct ion_device *new_dev;
+	struct ion_platform_data *pdata;
+	unsigned int pdata_needs_to_be_freed;
+	int err = -1;
+	int i;
+
+	if (pdev->dev.of_node) {
+		pdata = msm_ion_parse_dt(pdev);
+		if (IS_ERR(pdata))
+			return PTR_ERR(pdata);
+		pdata_needs_to_be_freed = 1;
+	} else {
+		pdata = pdev->dev.platform_data;
+		pdata_needs_to_be_freed = 0;
+		/* No DT node and no platform data: nothing to create. */
+		if (!pdata)
+			return -EINVAL;
+	}
+
+	num_heaps = pdata->nr;
+
+	heaps = kcalloc(pdata->nr, sizeof(struct ion_heap *), GFP_KERNEL);
+
+	if (!heaps) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	new_dev = ion_device_create(compat_msm_ion_ioctl);
+	if (IS_ERR_OR_NULL(new_dev)) {
+		/*
+		 * set this to the ERR to indicate to the clients
+		 * that Ion failed to probe.
+		 */
+		idev = new_dev;
+		/* PTR_ERR(NULL) would be 0; report a real error code. */
+		err = new_dev ? PTR_ERR(new_dev) : -ENOMEM;
+		goto out;
+	}
+
+	/* create the heaps as specified in the board file */
+	for (i = 0; i < num_heaps; i++) {
+		struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+		msm_ion_allocate(heap_data);
+
+		heap_data->has_outer_cache = pdata->has_outer_cache;
+		heaps[i] = msm_ion_heap_create(heap_data);
+		if (IS_ERR_OR_NULL(heaps[i])) {
+			/* A failed heap is skipped, not fatal. */
+			heaps[i] = 0;
+			continue;
+		} else {
+			if (heap_data->size)
+				pr_info("ION heap %s created at %pa with size %zx\n",
+					heap_data->name,
+					&heap_data->base,
+					heap_data->size);
+			else
+				pr_info("ION heap %s created\n",
+					heap_data->name);
+		}
+
+		ion_device_add_heap(new_dev, heaps[i]);
+	}
+	check_for_heap_overlap(pdata->heaps, num_heaps);
+	if (pdata_needs_to_be_freed)
+		free_pdata(pdata);
+
+	platform_set_drvdata(pdev, new_dev);
+	/*
+	 * intentionally set this at the very end to allow probes to be deferred
+	 * completely until Ion is setup
+	 */
+	idev = new_dev;
+	return 0;
+
+out:
+	kfree(heaps);
+	if (pdata_needs_to_be_freed)
+		free_pdata(pdata);
+	return err;
+}
+
+/* Tear down every heap and the ion device created at probe time. */
+static int msm_ion_remove(struct platform_device *pdev)
+{
+	struct ion_device *idev = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < num_heaps; i++)
+		msm_ion_heap_destroy(heaps[i]);
+
+	ion_device_destroy(idev);
+	kfree(heaps);
+	return 0;
+}
+
+/* Bound via the "qcom,msm-ion" compatible string. */
+static const struct of_device_id msm_ion_match_table[] = {
+	{.compatible = ION_COMPAT_STR},
+	{},
+};
+
+static struct platform_driver msm_ion_driver = {
+	.probe = msm_ion_probe,
+	.remove = msm_ion_remove,
+	.driver = {
+		.name = "ion-msm",
+		.of_match_table = msm_ion_match_table,
+	},
+};
+
+static int __init msm_ion_init(void)
+{
+	return platform_driver_register(&msm_ion_driver);
+}
+
+static void __exit msm_ion_exit(void)
+{
+	platform_driver_unregister(&msm_ion_driver);
+}
+
+/* subsys_initcall: ion must be up before most client drivers probe. */
+subsys_initcall(msm_ion_init);
+module_exit(msm_ion_exit);
diff --git a/drivers/staging/android/ion/msm/msm_ion.h b/drivers/staging/android/ion/msm/msm_ion.h
new file mode 100644
index 0000000..b1964ae
--- /dev/null
+++ b/drivers/staging/android/ion/msm/msm_ion.h
@@ -0,0 +1,185 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_MSM_ION_H
+#define _MSM_MSM_ION_H
+
+#include "../ion.h"
+#include "../../uapi/msm_ion.h"
+
+/* Memory IDs used to identify protected regions to TZ. */
+enum ion_permission_type {
+	IPT_TYPE_MM_CARVEOUT = 0,
+	IPT_TYPE_MFC_SHAREDMEM = 1,
+	IPT_TYPE_MDP_WRITEBACK = 2,
+};
+
+/*
+ * This flag allows clients when mapping into the IOMMU to specify to
+ * defer un-mapping from the IOMMU until the buffer memory is freed.
+ */
+#define ION_IOMMU_UNMAP_DELAYED 1
+
+/*
+ * This flag allows clients to defer unsecuring a buffer until the buffer
+ * is actually freed.
+ */
+#define ION_UNSECURE_DELAYED 1
+
+/**
+ * struct ion_cp_heap_pdata - defines a content protection heap in the given
+ * platform
+ * @permission_type:	Memory ID used to identify the memory to TZ
+ * @align:		Alignment requirement for the memory
+ * @secure_base:	Base address for securing the heap.
+ *			Note: This might be different from actual base address
+ *			of this heap in the case of a shared heap.
+ * @secure_size:	Memory size for securing the heap.
+ *			Note: This might be different from actual size
+ *			of this heap in the case of a shared heap.
+ * @is_cma:		nonzero if the heap memory is CMA-backed (assumed
+ *			from the field name - TODO confirm)
+ * @fixed_position	If nonzero, position in the fixed area.
+ * @iommu_map_all:	Indicates whether we should map whole heap into IOMMU.
+ * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
+ * @request_ion_region:	function to be called when the number of allocations
+ *			goes from 0 -> 1
+ * @release_ion_region:	function to be called when the number of allocations
+ *			goes from 1 -> 0
+ * @setup_ion_region:	function to be called upon ion registration
+ * @allow_nonsecure_alloc: allow non-secure allocations from this heap. For
+ *			secure heaps, this flag must be set so allow non-secure
+ *			allocations. For non-secure heaps, this flag is ignored.
+ *
+ */
+struct ion_cp_heap_pdata {
+	enum ion_permission_type permission_type;
+	unsigned int align;
+	ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
+	size_t secure_size; /* Size used for securing heap when heap is shared*/
+	int is_cma;
+	enum ion_fixed_position fixed_position;
+	int iommu_map_all;
+	int iommu_2x_map_domain;
+	int (*request_ion_region)(void *);
+	int (*release_ion_region)(void *);
+	void *(*setup_ion_region)(void);
+	int allow_nonsecure_alloc;
+};
+
+/**
+ * struct ion_co_heap_pdata - defines a carveout heap in the given platform
+ * @adjacent_mem_id:	Id of heap that this heap must be adjacent to.
+ * @align:		Alignment requirement for the memory
+ * @fixed_position	If nonzero, position in the fixed area.
+ * @request_ion_region:	function to be called when the number of allocations
+ *			goes from 0 -> 1
+ * @release_ion_region:	function to be called when the number of allocations
+ *			goes from 1 -> 0
+ * @setup_ion_region:	function to be called upon ion registration
+ *
+ */
+struct ion_co_heap_pdata {
+	int adjacent_mem_id;
+	unsigned int align;
+	enum ion_fixed_position fixed_position;
+	int (*request_ion_region)(void *);
+	int (*release_ion_region)(void *);
+	void *(*setup_ion_region)(void);
+};
+
+/**
+ * struct ion_cma_pdata - extra data for CMA regions
+ * @default_prefetch_size:	default size to use for prefetching
+ */
+struct ion_cma_pdata {
+	unsigned long default_prefetch_size;
+};
+
+#ifdef CONFIG_ION
+/**
+ * msm_ion_client_create - allocate a client using the ion_device specified in
+ * drivers/staging/android/ion/msm/msm_ion.c
+ *
+ * name is the same as ion_client_create, return values
+ * are the same as ion_client_create.
+ */
+
+struct ion_client *msm_ion_client_create(const char *name);
+
+/**
+ * ion_handle_get_flags - get the flags for a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the flags
+ * @flags - pointer to store the flags
+ *
+ * Gets the current flags for a handle. These flags indicate various options
+ * of the buffer (caching, security, etc.)
+ */
+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+ unsigned long *flags);
+
+/**
+ * ion_handle_get_size - get the allocated size of a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the size
+ * @size - pointer to store the size
+ *
+ * gives the allocated size of a handle. returns 0 on success, negative
+ * value on error
+ *
+ * NOTE: This is intended to be used only to get a size to pass to map_iommu.
+ * You should *NOT* rely on this for any other usage.
+ */
+
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+ size_t *size);
+/**
+ * msm_ion_do_cache_op - do cache operations.
+ *
+ * @client - pointer to ION client.
+ * @handle - pointer to buffer handle.
+ * @vaddr - virtual address to operate on.
+ * @len - Length of data to do cache operation on.
+ * @cmd - Cache operation to perform:
+ * ION_IOC_CLEAN_CACHES
+ * ION_IOC_INV_CACHES
+ * ION_IOC_CLEAN_INV_CACHES
+ *
+ * Returns 0 on success
+ */
+int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+ void *vaddr, unsigned long len, unsigned int cmd);
+
+#else
+/*
+ * CONFIG_ION disabled: stub out the API so callers compile.
+ * NOTE(review): there is no stub for ion_handle_get_flags() here -
+ * confirm no !CONFIG_ION caller needs one.
+ */
+static inline struct ion_client *msm_ion_client_create(const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int ion_handle_get_size(struct ion_client *client,
+				      struct ion_handle *handle, size_t *size)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ion_do_cache_op(
+			struct ion_client *client,
+			struct ion_handle *handle, void *vaddr,
+			unsigned long len, unsigned int cmd)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_ION */
+
+#endif
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index 14cd873..ccc2a7c 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -44,26 +44,32 @@
* must be last so device specific heaps always
* are at the end of this enum
*/
+ ION_NUM_HEAPS = 16,
};
+#define ION_HEAP_SYSTEM_MASK ((1 << ION_HEAP_TYPE_SYSTEM))
+#define ION_HEAP_SYSTEM_CONTIG_MASK ((1 << ION_HEAP_TYPE_SYSTEM_CONTIG))
+#define ION_HEAP_CARVEOUT_MASK ((1 << ION_HEAP_TYPE_CARVEOUT))
+#define ION_HEAP_TYPE_DMA_MASK ((1 << ION_HEAP_TYPE_DMA))
+
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
/**
* allocation flags - the lower 16 bits are used by core ion, the upper 16
* bits are reserved for use by the heaps themselves.
*/
-
-/*
- * mappings of this buffer should be cached, ion will do cache maintenance
- * when the buffer is mapped for dma
- */
-#define ION_FLAG_CACHED 1
-
-/*
- * mappings of this buffer will created at mmap time, if this is set
- * caches must be managed manually
- */
-#define ION_FLAG_CACHED_NEEDS_SYNC 2
+#define ION_FLAG_CACHED 1 /*
+ * mappings of this buffer should be
+ * cached, ion will do cache
+ * maintenance when the buffer is
+ * mapped for dma
+ */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2 /*
+					 * mappings of this buffer will be
+ * at mmap time, if this is set
+ * caches must be managed
+ * manually
+ */
/**
* DOC: Ion Userspace API
@@ -128,36 +134,6 @@
unsigned long arg;
};
-#define MAX_HEAP_NAME 32
-
-/**
- * struct ion_heap_data - data about a heap
- * @name - first 32 characters of the heap name
- * @type - heap type
- * @heap_id - heap id for the heap
- */
-struct ion_heap_data {
- char name[MAX_HEAP_NAME];
- __u32 type;
- __u32 heap_id;
- __u32 reserved0;
- __u32 reserved1;
- __u32 reserved2;
-};
-
-/**
- * struct ion_heap_query - collection of data about all heaps
- * @cnt - total number of heaps to be copied
- * @heaps - buffer to copy heap data
- */
-struct ion_heap_query {
- __u32 cnt; /* Total number of heaps to be copied */
- __u32 reserved0; /* align to 64bits */
- __u64 heaps; /* buffer to be populated */
- __u32 reserved1;
- __u32 reserved2;
-};
-
#define ION_IOC_MAGIC 'I'
/**
@@ -224,13 +200,4 @@
*/
#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
-/**
- * DOC: ION_IOC_HEAP_QUERY - information about available heaps
- *
- * Takes an ion_heap_query structure and populates information about
- * available Ion heaps.
- */
-#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, \
- struct ion_heap_query)
-
#endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
new file mode 100644
index 0000000..918c072
--- /dev/null
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -0,0 +1,189 @@
+#ifndef _UAPI_MSM_ION_H
+#define _UAPI_MSM_ION_H
+
+#include "ion.h"
+
+enum msm_ion_heap_types {
+ ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
+ ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START,
+ ION_HEAP_TYPE_SYSTEM_SECURE,
+ /*
+ * if you add a heap type here you should also add it to
+ * heap_types_info[] in msm_ion.c
+ */
+};
+
+/**
+ * These are the only ids that should be used for Ion heap ids.
+ * The ids listed are the order in which allocation will be attempted
+ * if specified. Don't swap the order of heap ids unless you know what
+ * you are doing!
+ * Ids are spaced apart on purpose to allow new ids to be inserted in-between (for
+ * possible fallbacks)
+ */
+
+enum ion_heap_ids {
+ INVALID_HEAP_ID = -1,
+ ION_CP_MM_HEAP_ID = 8,
+ ION_SECURE_HEAP_ID = 9,
+ ION_CP_MFC_HEAP_ID = 12,
+ ION_CP_WB_HEAP_ID = 16, /* 8660 only */
+ ION_CAMERA_HEAP_ID = 20, /* 8660 only */
+ ION_SYSTEM_CONTIG_HEAP_ID = 21,
+ ION_ADSP_HEAP_ID = 22,
+ ION_PIL1_HEAP_ID = 23, /* Currently used for other PIL images */
+ ION_SF_HEAP_ID = 24,
+ ION_SYSTEM_HEAP_ID = 25,
+ ION_PIL2_HEAP_ID = 26, /* Currently used for modem firmware images */
+ ION_QSECOM_HEAP_ID = 27,
+ ION_AUDIO_HEAP_ID = 28,
+
+ ION_MM_FIRMWARE_HEAP_ID = 29,
+
+ ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_FLAG_SECURE flag */
+};
+
+/*
+ * The IOMMU heap is deprecated! Here are some aliases for backwards
+ * compatibility:
+ */
+#define ION_IOMMU_HEAP_ID ION_SYSTEM_HEAP_ID
+#define ION_HEAP_TYPE_IOMMU ION_HEAP_TYPE_SYSTEM
+
+enum ion_fixed_position {
+ NOT_FIXED,
+ FIXED_LOW,
+ FIXED_MIDDLE,
+ FIXED_HIGH,
+};
+
+enum cp_mem_usage {
+ VIDEO_BITSTREAM = 0x1,
+ VIDEO_PIXEL = 0x2,
+ VIDEO_NONPIXEL = 0x3,
+ DISPLAY_SECURE_CP_USAGE = 0x4,
+ CAMERA_SECURE_CP_USAGE = 0x5,
+ MAX_USAGE = 0x6,
+ UNKNOWN = 0x7FFFFFFF,
+};
+
+/**
+ * Flags to be used when allocating from the secure heap for
+ * content protection
+ */
+#define ION_FLAG_CP_TOUCH ((1 << 17))
+#define ION_FLAG_CP_BITSTREAM ((1 << 18))
+#define ION_FLAG_CP_PIXEL ((1 << 19))
+#define ION_FLAG_CP_NON_PIXEL ((1 << 20))
+#define ION_FLAG_CP_CAMERA ((1 << 21))
+#define ION_FLAG_CP_HLOS ((1 << 22))
+#define ION_FLAG_CP_HLOS_FREE ((1 << 23))
+
+/**
+ * Flag to use when allocating to indicate that a heap is secure.
+ * Do NOT use BIT macro since it is defined in #ifdef __KERNEL__
+ */
+#define ION_FLAG_SECURE (1 << (ION_HEAP_ID_RESERVED))
+
+/**
+ * Flag for clients to force contiguous memory allocation
+ *
+ * Use of this flag is carefully monitored!
+ */
+#define ION_FLAG_FORCE_CONTIGUOUS ((1 << 30))
+
+/*
+ * Used in conjunction with heap which pool memory to force an allocation
+ * to come from the page allocator directly instead of from the pool allocation
+ */
+#define ION_FLAG_POOL_FORCE_ALLOC ((1 << 16))
+
+/**
+ * Deprecated! Please use the corresponding ION_FLAG_*
+ */
+#define ION_SECURE ION_FLAG_SECURE
+#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS
+
+/**
+ * Macro should be used with ion_heap_ids defined above.
+ */
+#define ION_HEAP(bit) (1 << (bit))
+
+#define ION_ADSP_HEAP_NAME "adsp"
+#define ION_SYSTEM_HEAP_NAME "system"
+#define ION_VMALLOC_HEAP_NAME ION_SYSTEM_HEAP_NAME
+#define ION_KMALLOC_HEAP_NAME "kmalloc"
+#define ION_AUDIO_HEAP_NAME "audio"
+#define ION_SF_HEAP_NAME "sf"
+#define ION_MM_HEAP_NAME "mm"
+#define ION_CAMERA_HEAP_NAME "camera_preview"
+#define ION_IOMMU_HEAP_NAME "iommu"
+#define ION_MFC_HEAP_NAME "mfc"
+#define ION_WB_HEAP_NAME "wb"
+#define ION_MM_FIRMWARE_HEAP_NAME "mm_fw"
+#define ION_PIL1_HEAP_NAME "pil_1"
+#define ION_PIL2_HEAP_NAME "pil_2"
+#define ION_QSECOM_HEAP_NAME "qsecom"
+#define ION_SECURE_HEAP_NAME "secure_heap"
+
+#define ION_SET_CACHED(__cache) ((__cache) | ION_FLAG_CACHED)
+#define ION_SET_UNCACHED(__cache) ((__cache) & ~ION_FLAG_CACHED)
+
+#define ION_IS_CACHED(__flags) ((__flags) & ION_FLAG_CACHED)
+
+/* struct ion_flush_data - data passed to ion for flushing caches
+ *
+ * @handle: handle with data to flush
+ * @fd: fd to flush
+ * @vaddr: userspace virtual address mapped with mmap
+ * @offset: offset into the handle to flush
+ * @length: length of handle to flush
+ *
+ * Performs cache operations on the handle. If p is the start address
+ * of the handle, p + offset through p + offset + length will have
+ * the cache operations performed
+ */
+struct ion_flush_data {
+ ion_user_handle_t handle;
+ int fd;
+ void *vaddr;
+ unsigned int offset;
+ unsigned int length;
+};
+
+struct ion_prefetch_data {
+ int heap_id;
+ unsigned long len;
+};
+
+#define ION_IOC_MSM_MAGIC 'M'
+
+/**
+ * DOC: ION_IOC_CLEAN_CACHES - clean the caches
+ *
+ * Clean the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_CACHES _IOWR(ION_IOC_MSM_MAGIC, 0, \
+ struct ion_flush_data)
+/**
+ * DOC: ION_IOC_INV_CACHES - invalidate the caches
+ *
+ * Invalidate the caches of the handle specified.
+ */
+#define ION_IOC_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 1, \
+ struct ion_flush_data)
+/**
+ * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches
+ *
+ * Clean and invalidate the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 2, \
+ struct ion_flush_data)
+
+#define ION_IOC_PREFETCH _IOWR(ION_IOC_MSM_MAGIC, 3, \
+ struct ion_prefetch_data)
+
+#define ION_IOC_DRAIN _IOWR(ION_IOC_MSM_MAGIC, 4, \
+ struct ion_prefetch_data)
+
+#endif
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 54ad100..8bf9a66 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2848,10 +2848,8 @@
struct ffs_data *ffs = func->ffs;
const int full = !!func->ffs->fs_descs_count;
- const int high = gadget_is_dualspeed(func->gadget) &&
- func->ffs->hs_descs_count;
- const int super = gadget_is_superspeed(func->gadget) &&
- func->ffs->ss_descs_count;
+ const int high = func->ffs->hs_descs_count;
+ const int super = func->ffs->ss_descs_count;
int fs_len, hs_len, ss_len, ret, i;
struct ffs_ep *eps_ptr;
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 6b8a446..0f478e7 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -818,6 +818,29 @@
#define dma_mmap_writecombine dma_mmap_wc
#endif
+static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ unsigned long attrs = DMA_ATTR_NON_CONSISTENT;
+
+ return dma_alloc_attrs(dev, size, dma_handle, flag, attrs);
+}
+
+static inline void dma_free_nonconsistent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ unsigned long attrs = DMA_ATTR_NON_CONSISTENT;
+
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline int dma_mmap_nonconsistent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ return -ENODEV;
+}
+
#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
diff --git a/include/linux/ion.h b/include/linux/ion.h
new file mode 100644
index 0000000..9d72374
--- /dev/null
+++ b/include/linux/ion.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_ION_H__
+#define __LINUX_ION_H__
+
+#include "../../drivers/staging/android/ion/ion.h"
+
+#endif /* __LINUX_ION_H__ */
diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h
new file mode 100644
index 0000000..08b35d7
--- /dev/null
+++ b/include/linux/msm_ion.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MSM_ION_H__
+#define __LINUX_MSM_ION_H__
+
+#include "../../drivers/staging/android/ion/msm/msm_ion.h"
+
+#endif /* __LINUX_MSM_ION_H__ */
diff --git a/include/linux/show_mem_notifier.h b/include/linux/show_mem_notifier.h
new file mode 100644
index 0000000..b1265f8
--- /dev/null
+++ b/include/linux/show_mem_notifier.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/notifier.h>
+
+int show_mem_notifier_register(struct notifier_block *nb);
+
+int show_mem_notifier_unregister(struct notifier_block *nb);
+
+void show_mem_call_notifiers(void);
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 6b2e154..5b8c6f8 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -1,3 +1,15 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem
@@ -317,6 +329,550 @@
__entry->change_ownership)
);
+
+DECLARE_EVENT_CLASS(ion_alloc,
+
+ TP_PROTO(const char *client_name,
+ const char *heap_name,
+ size_t len,
+ unsigned int mask,
+ unsigned int flags),
+
+ TP_ARGS(client_name, heap_name, len, mask, flags),
+
+ TP_STRUCT__entry(
+ __array(char, client_name, 64)
+ __field(const char *, heap_name)
+ __field(size_t, len)
+ __field(unsigned int, mask)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ strlcpy(__entry->client_name, client_name, 64);
+ __entry->heap_name = heap_name;
+ __entry->len = len;
+ __entry->mask = mask;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x",
+ __entry->client_name,
+ __entry->heap_name,
+ __entry->len,
+ __entry->mask,
+ __entry->flags)
+);
+
+DEFINE_EVENT(ion_alloc, ion_alloc_buffer_start,
+
+ TP_PROTO(const char *client_name,
+ const char *heap_name,
+ size_t len,
+ unsigned int mask,
+ unsigned int flags),
+
+ TP_ARGS(client_name, heap_name, len, mask, flags)
+);
+
+DEFINE_EVENT(ion_alloc, ion_alloc_buffer_end,
+
+ TP_PROTO(const char *client_name,
+ const char *heap_name,
+ size_t len,
+ unsigned int mask,
+ unsigned int flags),
+
+ TP_ARGS(client_name, heap_name, len, mask, flags)
+);
+
+DECLARE_EVENT_CLASS(ion_alloc_error,
+
+ TP_PROTO(const char *client_name,
+ const char *heap_name,
+ size_t len,
+ unsigned int mask,
+ unsigned int flags,
+ long error),
+
+ TP_ARGS(client_name, heap_name, len, mask, flags, error),
+
+ TP_STRUCT__entry(
+ __field(const char *, client_name)
+ __field(const char *, heap_name)
+ __field(size_t, len)
+ __field(unsigned int, mask)
+ __field(unsigned int, flags)
+ __field(long, error)
+ ),
+
+ TP_fast_assign(
+ __entry->client_name = client_name;
+ __entry->heap_name = heap_name;
+ __entry->len = len;
+ __entry->mask = mask;
+ __entry->flags = flags;
+ __entry->error = error;
+ ),
+
+ TP_printk(
+ "client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x error=%ld",
+ __entry->client_name,
+ __entry->heap_name,
+ __entry->len,
+ __entry->mask,
+ __entry->flags,
+ __entry->error)
+);
+
+
+DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fallback,
+
+ TP_PROTO(const char *client_name,
+ const char *heap_name,
+ size_t len,
+ unsigned int mask,
+ unsigned int flags,
+ long error),
+
+ TP_ARGS(client_name, heap_name, len, mask, flags, error)
+);
+
+DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fail,
+
+ TP_PROTO(const char *client_name,
+ const char *heap_name,
+ size_t len,
+ unsigned int mask,
+ unsigned int flags,
+ long error),
+
+ TP_ARGS(client_name, heap_name, len, mask, flags, error)
+);
+
+
+DECLARE_EVENT_CLASS(alloc_retry,
+
+ TP_PROTO(int tries),
+
+ TP_ARGS(tries),
+
+ TP_STRUCT__entry(
+ __field(int, tries)
+ ),
+
+ TP_fast_assign(
+ __entry->tries = tries;
+ ),
+
+ TP_printk("tries=%d",
+ __entry->tries)
+);
+
+DEFINE_EVENT(alloc_retry, ion_cp_alloc_retry,
+
+ TP_PROTO(int tries),
+
+ TP_ARGS(tries)
+);
+
+DEFINE_EVENT(alloc_retry, migrate_retry,
+
+ TP_PROTO(int tries),
+
+ TP_ARGS(tries)
+);
+
+DEFINE_EVENT(alloc_retry, dma_alloc_contiguous_retry,
+
+ TP_PROTO(int tries),
+
+ TP_ARGS(tries)
+);
+
+DECLARE_EVENT_CLASS(migrate_pages,
+
+ TP_PROTO(int mode),
+
+ TP_ARGS(mode),
+
+ TP_STRUCT__entry(
+ __field(int, mode)
+ ),
+
+ TP_fast_assign(
+ __entry->mode = mode;
+ ),
+
+ TP_printk("mode=%d",
+ __entry->mode)
+);
+
+DEFINE_EVENT(migrate_pages, migrate_pages_start,
+
+ TP_PROTO(int mode),
+
+ TP_ARGS(mode)
+);
+
+DEFINE_EVENT(migrate_pages, migrate_pages_end,
+
+ TP_PROTO(int mode),
+
+ TP_ARGS(mode)
+);
+
+DECLARE_EVENT_CLASS(ion_alloc_pages,
+
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order),
+
+ TP_STRUCT__entry(
+ __field(gfp_t, gfp_flags)
+ __field(unsigned int, order)
+ ),
+
+ TP_fast_assign(
+ __entry->gfp_flags = gfp_flags;
+ __entry->order = order;
+ ),
+
+ TP_printk("gfp_flags=%s order=%d",
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->order)
+ );
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_start,
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order)
+ );
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_end,
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order)
+ );
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_fail,
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order)
+ );
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_start,
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order)
+ );
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_end,
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order)
+ );
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_fail,
+ TP_PROTO(gfp_t gfp_flags,
+ unsigned int order),
+
+ TP_ARGS(gfp_flags, order)
+
+ );
+
+DECLARE_EVENT_CLASS(smmu_map,
+
+ TP_PROTO(unsigned long va,
+ phys_addr_t pa,
+ unsigned long chunk_size,
+ size_t len),
+
+ TP_ARGS(va, pa, chunk_size, len),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, va)
+ __field(phys_addr_t, pa)
+ __field(unsigned long, chunk_size)
+ __field(size_t, len)
+ ),
+
+ TP_fast_assign(
+ __entry->va = va;
+ __entry->pa = pa;
+ __entry->chunk_size = chunk_size;
+ __entry->len = len;
+ ),
+
+ TP_printk("v_addr=%p p_addr=%pa chunk_size=0x%lx len=%zu",
+ (void *)__entry->va,
+ &__entry->pa,
+ __entry->chunk_size,
+ __entry->len)
+ );
+
+DEFINE_EVENT(smmu_map, iommu_map_range,
+ TP_PROTO(unsigned long va,
+ phys_addr_t pa,
+ unsigned long chunk_size,
+ size_t len),
+
+ TP_ARGS(va, pa, chunk_size, len)
+ );
+
+DECLARE_EVENT_CLASS(ion_secure_cma_add_to_pool,
+
+ TP_PROTO(unsigned long len,
+ int pool_total,
+ bool is_prefetch),
+
+ TP_ARGS(len, pool_total, is_prefetch),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, len)
+ __field(int, pool_total)
+ __field(bool, is_prefetch)
+ ),
+
+ TP_fast_assign(
+ __entry->len = len;
+ __entry->pool_total = pool_total;
+ __entry->is_prefetch = is_prefetch;
+ ),
+
+ TP_printk("len %lx, pool total %x is_prefetch %d",
+ __entry->len,
+ __entry->pool_total,
+ __entry->is_prefetch)
+ );
+
+DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_start,
+ TP_PROTO(unsigned long len,
+ int pool_total,
+ bool is_prefetch),
+
+ TP_ARGS(len, pool_total, is_prefetch)
+ );
+
+DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_end,
+ TP_PROTO(unsigned long len,
+ int pool_total,
+ bool is_prefetch),
+
+ TP_ARGS(len, pool_total, is_prefetch)
+ );
+
+DECLARE_EVENT_CLASS(ion_secure_cma_shrink_pool,
+
+ TP_PROTO(unsigned long drained_size,
+ unsigned long skipped_size),
+
+ TP_ARGS(drained_size, skipped_size),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, drained_size)
+ __field(unsigned long, skipped_size)
+ ),
+
+ TP_fast_assign(
+ __entry->drained_size = drained_size;
+ __entry->skipped_size = skipped_size;
+ ),
+
+ TP_printk("drained size %lx, skipped size %lx",
+ __entry->drained_size,
+ __entry->skipped_size)
+ );
+
+DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_start,
+ TP_PROTO(unsigned long drained_size,
+ unsigned long skipped_size),
+
+ TP_ARGS(drained_size, skipped_size)
+ );
+
+DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_end,
+ TP_PROTO(unsigned long drained_size,
+ unsigned long skipped_size),
+
+ TP_ARGS(drained_size, skipped_size)
+ );
+
+TRACE_EVENT(ion_prefetching,
+
+ TP_PROTO(unsigned long len),
+
+ TP_ARGS(len),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, len)
+ ),
+
+ TP_fast_assign(
+ __entry->len = len;
+ ),
+
+ TP_printk("prefetch size %lx",
+ __entry->len)
+ );
+
+DECLARE_EVENT_CLASS(ion_secure_cma_allocate,
+
+ TP_PROTO(const char *heap_name,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags),
+
+ TP_ARGS(heap_name, len, align, flags),
+
+ TP_STRUCT__entry(
+ __field(const char *, heap_name)
+ __field(unsigned long, len)
+ __field(unsigned long, align)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->heap_name = heap_name;
+ __entry->len = len;
+ __entry->align = align;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("heap_name=%s len=%lx align=%lx flags=%lx",
+ __entry->heap_name,
+ __entry->len,
+ __entry->align,
+ __entry->flags)
+ );
+
+DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_start,
+ TP_PROTO(const char *heap_name,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags),
+
+ TP_ARGS(heap_name, len, align, flags)
+ );
+
+DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_end,
+ TP_PROTO(const char *heap_name,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags),
+
+ TP_ARGS(heap_name, len, align, flags)
+ );
+
+DECLARE_EVENT_CLASS(ion_cp_secure_buffer,
+
+ TP_PROTO(const char *heap_name,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags),
+
+ TP_ARGS(heap_name, len, align, flags),
+
+ TP_STRUCT__entry(
+ __field(const char *, heap_name)
+ __field(unsigned long, len)
+ __field(unsigned long, align)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->heap_name = heap_name;
+ __entry->len = len;
+ __entry->align = align;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("heap_name=%s len=%lx align=%lx flags=%lx",
+ __entry->heap_name,
+ __entry->len,
+ __entry->align,
+ __entry->flags)
+ );
+
+DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_start,
+ TP_PROTO(const char *heap_name,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags),
+
+ TP_ARGS(heap_name, len, align, flags)
+ );
+
+DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_end,
+ TP_PROTO(const char *heap_name,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags),
+
+ TP_ARGS(heap_name, len, align, flags)
+ );
+
+DECLARE_EVENT_CLASS(iommu_sec_ptbl_map_range,
+
+ TP_PROTO(int sec_id,
+ int num,
+ unsigned long va,
+ unsigned int pa,
+ size_t len),
+
+ TP_ARGS(sec_id, num, va, pa, len),
+
+ TP_STRUCT__entry(
+ __field(int, sec_id)
+ __field(int, num)
+ __field(unsigned long, va)
+ __field(unsigned int, pa)
+ __field(size_t, len)
+ ),
+
+ TP_fast_assign(
+ __entry->sec_id = sec_id;
+ __entry->num = num;
+ __entry->va = va;
+ __entry->pa = pa;
+ __entry->len = len;
+ ),
+
+ TP_printk("sec_id=%d num=%d va=%lx pa=%u len=%zu",
+ __entry->sec_id,
+ __entry->num,
+ __entry->va,
+ __entry->pa,
+ __entry->len)
+ );
+
+DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_start,
+
+ TP_PROTO(int sec_id,
+ int num,
+ unsigned long va,
+ unsigned int pa,
+ size_t len),
+
+ TP_ARGS(sec_id, num, va, pa, len)
+ );
+
+DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_end,
+
+ TP_PROTO(int sec_id,
+ int num,
+ unsigned long va,
+ unsigned int pa,
+ size_t len),
+
+ TP_ARGS(sec_id, num, va, pa, len)
+ );
#endif /* _TRACE_KMEM_H */
/* This part must be outside protection */
diff --git a/include/trace/events/rpmh.h b/include/trace/events/rpmh.h
new file mode 100644
index 0000000..62e7216
--- /dev/null
+++ b/include/trace/events/rpmh.h
@@ -0,0 +1,108 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpmh
+
+#if !defined(_TRACE_RPMH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPMH_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(rpmh_ack_recvd,
+
+ TP_PROTO(int m, u32 addr, int errno),
+
+ TP_ARGS(m, addr, errno),
+
+ TP_STRUCT__entry(
+ __field(int, m)
+ __field(u32, addr)
+ __field(int, errno)
+ ),
+
+ TP_fast_assign(
+ __entry->m = m;
+ __entry->addr = addr;
+ __entry->errno = errno;
+ ),
+
+ TP_printk("ack: tcs-m:%d addr: 0x%08x errno: %d",
+ __entry->m, __entry->addr, __entry->errno)
+);
+
+DEFINE_EVENT(rpmh_ack_recvd, rpmh_notify_irq,
+ TP_PROTO(int m, u32 addr, int err),
+ TP_ARGS(m, addr, err)
+);
+
+DEFINE_EVENT(rpmh_ack_recvd, rpmh_notify,
+ TP_PROTO(int m, u32 addr, int err),
+ TP_ARGS(m, addr, err)
+);
+
+TRACE_EVENT(rpmh_send_msg,
+
+ TP_PROTO(void *b, int m, int n, u32 h, u32 a, u32 v, bool c),
+
+ TP_ARGS(b, m, n, h, a, v, c),
+
+ TP_STRUCT__entry(
+ __field(void *, base)
+ __field(int, m)
+ __field(int, n)
+ __field(u32, hdr)
+ __field(u32, addr)
+ __field(u32, data)
+ __field(bool, complete)
+ ),
+
+ TP_fast_assign(
+ __entry->base = b;
+ __entry->m = m;
+ __entry->n = n;
+ __entry->hdr = h;
+ __entry->addr = a;
+ __entry->data = v;
+ __entry->complete = c;
+ ),
+
+ TP_printk("msg: base: 0x%p tcs(m): %d cmd(n): %d msgid: 0x%08x addr: 0x%08x data: 0x%08x complete: %d",
+ __entry->base + (672 * __entry->m) + (20 * __entry->n),
+ __entry->m, __entry->n, __entry->hdr,
+ __entry->addr, __entry->data, __entry->complete)
+);
+
+TRACE_EVENT(rpmh_control_msg,
+
+ TP_PROTO(void *r, u32 v),
+
+ TP_ARGS(r, v),
+
+ TP_STRUCT__entry(
+ __field(void *, reg)
+ __field(u32, data)
+ ),
+
+ TP_fast_assign(
+ __entry->reg = r;
+ __entry->data = v;
+ ),
+
+ TP_printk("ctrl-msg: reg: 0x%p data: 0x%08x",
+ __entry->reg, __entry->data)
+);
+
+#endif /* _TRACE_RPMH_H */
+
+#define TRACE_INCLUDE_FILE rpmh
+#include <trace/define_trace.h>
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index ceb72cb..a12e93e 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -22,7 +22,7 @@
header-y += netfilter_ipv6/
header-y += usb/
header-y += wimax/
-
+header-y += msm_ipa.h
genhdr-y += version.h
ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/a.out.h \
@@ -202,6 +202,7 @@
header-y += input-event-codes.h
header-y += in_route.h
header-y += ioctl.h
+header-y += ion.h
header-y += ip6_tunnel.h
header-y += ipc.h
header-y += ip.h
@@ -279,6 +280,7 @@
header-y += mroute.h
header-y += msdos_fs.h
header-y += msg.h
+header-y += msm_ion.h
header-y += msm_rmnet.h
header-y += mtio.h
header-y += nbd.h
diff --git a/include/uapi/linux/ion.h b/include/uapi/linux/ion.h
new file mode 120000
index 0000000..17e8dbb
--- /dev/null
+++ b/include/uapi/linux/ion.h
@@ -0,0 +1 @@
+../../../drivers/staging/android/uapi/ion.h
\ No newline at end of file
diff --git a/include/uapi/linux/msm_ion.h b/include/uapi/linux/msm_ion.h
new file mode 120000
index 0000000..94349d2
--- /dev/null
+++ b/include/uapi/linux/msm_ion.h
@@ -0,0 +1 @@
+../../../drivers/staging/android/uapi/msm_ion.h
\ No newline at end of file
diff --git a/mm/Makefile b/mm/Makefile
index 295bd7a..a7e9b6a 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -37,7 +37,7 @@
mm_init.o mmu_context.o percpu.o slab_common.o \
compaction.o vmacache.o \
interval_tree.o list_lru.o workingset.o \
- debug.o $(mmu-y)
+ debug.o $(mmu-y) showmem.o
obj-y += init-mm.o
diff --git a/mm/showmem.c b/mm/showmem.c
new file mode 100644
index 0000000..57ed07b8
--- /dev/null
+++ b/mm/showmem.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+
+ATOMIC_NOTIFIER_HEAD(show_mem_notifier);
+
+int show_mem_notifier_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&show_mem_notifier, nb);
+}
+
+int show_mem_notifier_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&show_mem_notifier, nb);
+}
+
+void show_mem_call_notifiers(void)
+{
+ atomic_notifier_call_chain(&show_mem_notifier, 0, NULL);
+}
+
+static int show_mem_notifier_get(void *dat, u64 *val)
+{
+ show_mem_call_notifiers();
+ *val = 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(show_mem_notifier_debug_ops, show_mem_notifier_get,
+ NULL, "%llu\n");
+
+int show_mem_notifier_debugfs_register(void)
+{
+ debugfs_create_file("show_mem_notifier", 0664, NULL, NULL,
+ &show_mem_notifier_debug_ops);
+
+ return 0;
+}
+late_initcall(show_mem_notifier_debugfs_register);