mirror of
https://github.com/openwrt/openwrt.git
synced 2024-12-23 07:22:33 +00:00
ef944dcb85
Also refresh 3.10 patches.

Signed-off-by: Gabor Juhos <juhosg@openwrt.org>

SVN-Revision: 37502
13522 lines
408 KiB
--- /dev/null
+++ b/drivers/char/broadcom/Kconfig
@@ -0,0 +1,17 @@
+#
+# Broadcom char driver config
+#
+
+menuconfig BRCM_CHAR_DRIVERS
+	tristate "Broadcom Char Drivers"
+	depends on PROC_FS
+	help
+	  Broadcom's char drivers
+
+config BCM_VC_CMA
+	bool "Videocore CMA"
+	depends on CMA
+	default n
+	help
+	  Helper for videocore CMA access.
+
--- /dev/null
+++ b/drivers/char/broadcom/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCM_VC_CMA) += vc_cma/
+
--- /dev/null
+++ b/drivers/char/broadcom/vc_cma/Makefile
@@ -0,0 +1,15 @@
+EXTRA_CFLAGS += -Wall -Wstrict-prototypes -Wno-trigraphs
+EXTRA_CFLAGS += -Werror
+EXTRA_CFLAGS += -I"include/linux/broadcom"
+EXTRA_CFLAGS += -I"drivers/misc/vc04_services"
+EXTRA_CFLAGS += -I"drivers/misc/vc04_services/interface/vchi"
+EXTRA_CFLAGS += -I"drivers/misc/vc04_services/interface/vchiq_arm"
+
+EXTRA_CFLAGS += -D__KERNEL__
+EXTRA_CFLAGS += -D__linux__
+EXTRA_CFLAGS += -Werror
+
+obj-$(CONFIG_BCM_VC_CMA) += vc-cma.o
+
+vc-cma-objs := vc_cma.o
+
--- /dev/null
+++ b/drivers/char/broadcom/vc_cma/vc_cma.c
@@ -0,0 +1,1145 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#include "vc_cma.h"
+
+#include "vchiq_util.h"
+#include "vchiq_connected.h"
+//#include "debug_sym.h"
+//#include "vc_mem.h"
+
+#define DRIVER_NAME "vc-cma"
+
+#define LOG_DBG(fmt, ...) \
+	if (vc_cma_debug) \
+		printk(KERN_INFO fmt "\n", ##__VA_ARGS__)
+#define LOG_ERR(fmt, ...) \
+	printk(KERN_ERR fmt "\n", ##__VA_ARGS__)
+
+#define VC_CMA_FOURCC VCHIQ_MAKE_FOURCC('C', 'M', 'A', ' ')
+#define VC_CMA_VERSION 2
+
+#define VC_CMA_CHUNK_ORDER 6	/* 256K */
+#define VC_CMA_CHUNK_SIZE (4096 << VC_CMA_CHUNK_ORDER)
+#define VC_CMA_MAX_PARAMS_PER_MSG \
+	((VCHIQ_MAX_MSG_SIZE - sizeof(unsigned short))/sizeof(unsigned short))
+#define VC_CMA_RESERVE_COUNT_MAX 16
+
+#define PAGES_PER_CHUNK (VC_CMA_CHUNK_SIZE / PAGE_SIZE)
+
+#define VCADDR_TO_PHYSADDR(vcaddr) (mm_vc_mem_phys_addr + vcaddr)
+
+#define loud_error(...) \
+	LOG_ERR("===== " __VA_ARGS__)
+
+enum {
+	VC_CMA_MSG_QUIT,
+	VC_CMA_MSG_OPEN,
+	VC_CMA_MSG_TICK,
+	VC_CMA_MSG_ALLOC,	/* chunk count */
+	VC_CMA_MSG_FREE,	/* chunk, chunk, ... */
+	VC_CMA_MSG_ALLOCATED,	/* chunk, chunk, ... */
+	VC_CMA_MSG_REQUEST_ALLOC,	/* chunk count */
+	VC_CMA_MSG_REQUEST_FREE,	/* chunk count */
+	VC_CMA_MSG_RESERVE,	/* bytes lo, bytes hi */
+	VC_CMA_MSG_UPDATE_RESERVE,
+	VC_CMA_MSG_MAX
+};
+
+struct cma_msg {
+	unsigned short type;
+	unsigned short params[VC_CMA_MAX_PARAMS_PER_MSG];
+};
+
+struct vc_cma_reserve_user {
+	unsigned int pid;
+	unsigned int reserve;
+};
+
+/* Device (/dev) related variables */
+static dev_t vc_cma_devnum;
+static struct class *vc_cma_class;
+static struct cdev vc_cma_cdev;
+static int vc_cma_inited;
+static int vc_cma_debug;
+
+/* Proc entry */
+static struct proc_dir_entry *vc_cma_proc_entry;
+
+phys_addr_t vc_cma_base;
+struct page *vc_cma_base_page;
+unsigned int vc_cma_size;
+EXPORT_SYMBOL(vc_cma_size);
+unsigned int vc_cma_initial;
+unsigned int vc_cma_chunks;
+unsigned int vc_cma_chunks_used;
+unsigned int vc_cma_chunks_reserved;
+
+static int in_loud_error;
+
+unsigned int vc_cma_reserve_total;
+unsigned int vc_cma_reserve_count;
+struct vc_cma_reserve_user vc_cma_reserve_users[VC_CMA_RESERVE_COUNT_MAX];
+static DEFINE_SEMAPHORE(vc_cma_reserve_mutex);
+static DEFINE_SEMAPHORE(vc_cma_worker_queue_push_mutex);
+
+static u64 vc_cma_dma_mask = DMA_BIT_MASK(32);
+static struct platform_device vc_cma_device = {
+	.name = "vc-cma",
+	.id = 0,
+	.dev = {
+		.dma_mask = &vc_cma_dma_mask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+};
+
+static VCHIQ_INSTANCE_T cma_instance;
+static VCHIQ_SERVICE_HANDLE_T cma_service;
+static VCHIU_QUEUE_T cma_msg_queue;
+static struct task_struct *cma_worker;
+
+static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid);
+static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply);
+static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
+					   VCHIQ_HEADER_T * header,
+					   VCHIQ_SERVICE_HANDLE_T service,
+					   void *bulk_userdata);
+static void send_vc_msg(unsigned short type,
+			unsigned short param1, unsigned short param2);
+static bool send_worker_msg(VCHIQ_HEADER_T * msg);
+
+static int early_vc_cma_mem(char *p)
+{
+	unsigned int new_size;
+	printk(KERN_NOTICE "early_vc_cma_mem(%s)", p);
+	vc_cma_size = memparse(p, &p);
+	vc_cma_initial = vc_cma_size;
+	if (*p == '/')
+		vc_cma_size = memparse(p + 1, &p);
+	if (*p == '@')
+		vc_cma_base = memparse(p + 1, &p);
+
+	new_size = (vc_cma_size - ((-vc_cma_base) & (VC_CMA_CHUNK_SIZE - 1)))
+	    & ~(VC_CMA_CHUNK_SIZE - 1);
+	if (new_size > vc_cma_size)
+		vc_cma_size = 0;
+	vc_cma_initial = (vc_cma_initial + VC_CMA_CHUNK_SIZE - 1)
+	    & ~(VC_CMA_CHUNK_SIZE - 1);
+	if (vc_cma_initial > vc_cma_size)
+		vc_cma_initial = vc_cma_size;
+	vc_cma_base = (vc_cma_base + VC_CMA_CHUNK_SIZE - 1)
+	    & ~(VC_CMA_CHUNK_SIZE - 1);
+
+	printk(KERN_NOTICE " -> initial %x, size %x, base %x", vc_cma_initial,
+	       vc_cma_size, (unsigned int)vc_cma_base);
+
+	return 0;
+}
+
+early_param("vc-cma-mem", early_vc_cma_mem);
+
+void vc_cma_early_init(void)
+{
+	LOG_DBG("vc_cma_early_init - vc_cma_chunks = %d", vc_cma_chunks);
+	if (vc_cma_size) {
+		int rc = platform_device_register(&vc_cma_device);
+		LOG_DBG("platform_device_register -> %d", rc);
+	}
+}
+
+void vc_cma_reserve(void)
+{
+	/* if vc_cma_size is set, then declare vc CMA area of the same
+	 * size from the end of memory
+	 */
+	if (vc_cma_size) {
+		if (dma_declare_contiguous(NULL /*&vc_cma_device.dev*/, vc_cma_size,
+					   vc_cma_base, 0) == 0) {
+		} else {
+			LOG_ERR("vc_cma: dma_declare_contiguous(%x,%x) failed",
+				vc_cma_size, (unsigned int)vc_cma_base);
+			vc_cma_size = 0;
+		}
+	}
+	vc_cma_chunks = vc_cma_size / VC_CMA_CHUNK_SIZE;
+}
+
+/****************************************************************************
+*
+*   vc_cma_open
+*
+***************************************************************************/
+
+static int vc_cma_open(struct inode *inode, struct file *file)
+{
+	(void)inode;
+	(void)file;
+
+	return 0;
+}
+
+/****************************************************************************
+*
+*   vc_cma_release
+*
+***************************************************************************/
+
+static int vc_cma_release(struct inode *inode, struct file *file)
+{
+	(void)inode;
+	(void)file;
+
+	vc_cma_set_reserve(0, current->tgid);
+
+	return 0;
+}
+
+/****************************************************************************
+*
+*   vc_cma_ioctl
+*
+***************************************************************************/
+
+static long vc_cma_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int rc = 0;
+
+	(void)cmd;
+	(void)arg;
+
+	switch (cmd) {
+	case VC_CMA_IOC_RESERVE:
+		rc = vc_cma_set_reserve((unsigned int)arg, current->tgid);
+		if (rc >= 0)
+			rc = 0;
+		break;
+	default:
+		LOG_ERR("vc-cma: Unknown ioctl %x", cmd);
+		return -ENOTTY;
+	}
+
+	return rc;
+}
+
+/****************************************************************************
+*
+*   File Operations for the driver.
+*
+***************************************************************************/
+
+static const struct file_operations vc_cma_fops = {
+	.owner = THIS_MODULE,
+	.open = vc_cma_open,
+	.release = vc_cma_release,
+	.unlocked_ioctl = vc_cma_ioctl,
+};
+
+/****************************************************************************
+*
+*   vc_cma_proc_open
+*
+***************************************************************************/
+
+static int vc_cma_show_info(struct seq_file *m, void *v)
+{
+	int i;
+
+	seq_printf(m, "Videocore CMA:\n");
+	seq_printf(m, " Base : %08x\n", (unsigned int)vc_cma_base);
+	seq_printf(m, " Length : %08x\n", vc_cma_size);
+	seq_printf(m, " Initial : %08x\n", vc_cma_initial);
+	seq_printf(m, " Chunk size : %08x\n", VC_CMA_CHUNK_SIZE);
+	seq_printf(m, " Chunks : %4d (%d bytes)\n",
+		   (int)vc_cma_chunks,
+		   (int)(vc_cma_chunks * VC_CMA_CHUNK_SIZE));
+	seq_printf(m, " Used : %4d (%d bytes)\n",
+		   (int)vc_cma_chunks_used,
+		   (int)(vc_cma_chunks_used * VC_CMA_CHUNK_SIZE));
+	seq_printf(m, " Reserved : %4d (%d bytes)\n",
+		   (unsigned int)vc_cma_chunks_reserved,
+		   (int)(vc_cma_chunks_reserved * VC_CMA_CHUNK_SIZE));
+
+	for (i = 0; i < vc_cma_reserve_count; i++) {
+		struct vc_cma_reserve_user *user = &vc_cma_reserve_users[i];
+		seq_printf(m, " PID %5d: %d bytes\n", user->pid,
+			   user->reserve);
+	}
+
+	seq_printf(m, "\n");
+
+	return 0;
+}
+
+static int vc_cma_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, vc_cma_show_info, NULL);
+}
+
+/****************************************************************************
+*
+*   vc_cma_proc_write
+*
+***************************************************************************/
+
+static int vc_cma_proc_write(struct file *file,
+			     const char __user *buffer,
+			     size_t size, loff_t *ppos)
+{
+	int rc = -EFAULT;
+	char input_str[20];
+
+	memset(input_str, 0, sizeof(input_str));
+
+	if (size > sizeof(input_str)) {
+		LOG_ERR("%s: input string length too long", __func__);
+		goto out;
+	}
+
+	if (copy_from_user(input_str, buffer, size - 1)) {
+		LOG_ERR("%s: failed to get input string", __func__);
+		goto out;
+	}
+#define ALLOC_STR "alloc"
+#define FREE_STR "free"
+#define DEBUG_STR "debug"
+#define RESERVE_STR "reserve"
+	if (strncmp(input_str, ALLOC_STR, strlen(ALLOC_STR)) == 0) {
+		int size;
+		char *p = input_str + strlen(ALLOC_STR);
+
+		while (*p == ' ')
+			p++;
+		size = memparse(p, NULL);
+		LOG_ERR("/proc/vc-cma: alloc %d", size);
+		if (size)
+			send_vc_msg(VC_CMA_MSG_REQUEST_FREE,
+				    size / VC_CMA_CHUNK_SIZE, 0);
+		else
+			LOG_ERR("invalid size '%s'", p);
+		rc = size;
+	} else if (strncmp(input_str, FREE_STR, strlen(FREE_STR)) == 0) {
+		int size;
+		char *p = input_str + strlen(FREE_STR);
+
+		while (*p == ' ')
+			p++;
+		size = memparse(p, NULL);
+		LOG_ERR("/proc/vc-cma: free %d", size);
+		if (size)
+			send_vc_msg(VC_CMA_MSG_REQUEST_ALLOC,
+				    size / VC_CMA_CHUNK_SIZE, 0);
+		else
+			LOG_ERR("invalid size '%s'", p);
+		rc = size;
+	} else if (strncmp(input_str, DEBUG_STR, strlen(DEBUG_STR)) == 0) {
+		char *p = input_str + strlen(DEBUG_STR);
+		while (*p == ' ')
+			p++;
+		if ((strcmp(p, "on") == 0) || (strcmp(p, "1") == 0))
+			vc_cma_debug = 1;
+		else if ((strcmp(p, "off") == 0) || (strcmp(p, "0") == 0))
+			vc_cma_debug = 0;
+		LOG_ERR("/proc/vc-cma: debug %s", vc_cma_debug ? "on" : "off");
+		rc = size;
+	} else if (strncmp(input_str, RESERVE_STR, strlen(RESERVE_STR)) == 0) {
+		int size;
+		int reserved;
+		char *p = input_str + strlen(RESERVE_STR);
+		while (*p == ' ')
+			p++;
+		size = memparse(p, NULL);
+
+		reserved = vc_cma_set_reserve(size, current->tgid);
+		rc = (reserved >= 0) ? size : reserved;
+	}
+
+out:
+	return rc;
+}
+
+/****************************************************************************
+*
+*   File Operations for /proc interface.
+*
+***************************************************************************/
+
+static const struct file_operations vc_cma_proc_fops = {
+	.open = vc_cma_proc_open,
+	.read = seq_read,
+	.write = vc_cma_proc_write,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid)
+{
+	struct vc_cma_reserve_user *user = NULL;
+	int delta = 0;
+	int i;
+
+	if (down_interruptible(&vc_cma_reserve_mutex))
+		return -ERESTARTSYS;
+
+	for (i = 0; i < vc_cma_reserve_count; i++) {
+		if (pid == vc_cma_reserve_users[i].pid) {
+			user = &vc_cma_reserve_users[i];
+			delta = reserve - user->reserve;
+			if (reserve)
+				user->reserve = reserve;
+			else {
+				/* Remove this entry by copying downwards */
+				while ((i + 1) < vc_cma_reserve_count) {
+					user[0].pid = user[1].pid;
+					user[0].reserve = user[1].reserve;
+					user++;
+					i++;
+				}
+				vc_cma_reserve_count--;
+				user = NULL;
+			}
+			break;
+		}
+	}
+
+	if (reserve && !user) {
+		if (vc_cma_reserve_count == VC_CMA_RESERVE_COUNT_MAX) {
+			LOG_ERR("vc-cma: Too many reservations - "
+				"increase CMA_RESERVE_COUNT_MAX");
+			up(&vc_cma_reserve_mutex);
+			return -EBUSY;
+		}
+		user = &vc_cma_reserve_users[vc_cma_reserve_count];
+		user->pid = pid;
+		user->reserve = reserve;
+		delta = reserve;
+		vc_cma_reserve_count++;
+	}
+
+	vc_cma_reserve_total += delta;
+
+	send_vc_msg(VC_CMA_MSG_RESERVE,
+		    vc_cma_reserve_total & 0xffff, vc_cma_reserve_total >> 16);
+
+	send_worker_msg((VCHIQ_HEADER_T *) VC_CMA_MSG_UPDATE_RESERVE);
+
+	LOG_DBG("/proc/vc-cma: reserve %d (PID %d) - total %u",
+		reserve, pid, vc_cma_reserve_total);
+
+	up(&vc_cma_reserve_mutex);
+
+	return vc_cma_reserve_total;
+}
+
+static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
+					   VCHIQ_HEADER_T * header,
+					   VCHIQ_SERVICE_HANDLE_T service,
+					   void *bulk_userdata)
+{
+	switch (reason) {
+	case VCHIQ_MESSAGE_AVAILABLE:
+		if (!send_worker_msg(header))
+			return VCHIQ_RETRY;
+		break;
+	case VCHIQ_SERVICE_CLOSED:
+		LOG_DBG("CMA service closed");
+		break;
+	default:
+		LOG_ERR("Unexpected CMA callback reason %d", reason);
+		break;
+	}
+	return VCHIQ_SUCCESS;
+}
+
+static void send_vc_msg(unsigned short type,
+			unsigned short param1, unsigned short param2)
+{
+	unsigned short msg[] = { type, param1, param2 };
+	VCHIQ_ELEMENT_T elem = { &msg, sizeof(msg) };
+	VCHIQ_STATUS_T ret;
+	vchiq_use_service(cma_service);
+	ret = vchiq_queue_message(cma_service, &elem, 1);
+	vchiq_release_service(cma_service);
+	if (ret != VCHIQ_SUCCESS)
+		LOG_ERR("vchiq_queue_message returned %x", ret);
+}
+
+static bool send_worker_msg(VCHIQ_HEADER_T * msg)
+{
+	if (down_interruptible(&vc_cma_worker_queue_push_mutex))
+		return false;
+	vchiu_queue_push(&cma_msg_queue, msg);
+	up(&vc_cma_worker_queue_push_mutex);
+	return true;
+}
+
+static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply)
+{
+	int i;
+	for (i = 0; i < num_chunks; i++) {
+		struct page *chunk;
+		unsigned int chunk_num;
+		uint8_t *chunk_addr;
+		size_t chunk_size = PAGES_PER_CHUNK << PAGE_SHIFT;
+
+		chunk = dma_alloc_from_contiguous(NULL /*&vc_cma_device.dev*/,
+						  PAGES_PER_CHUNK,
+						  VC_CMA_CHUNK_ORDER);
+		if (!chunk)
+			break;
+
+		chunk_addr = page_address(chunk);
+		dmac_flush_range(chunk_addr, chunk_addr + chunk_size);
+		outer_inv_range(__pa(chunk_addr), __pa(chunk_addr) +
+				chunk_size);
+
+		chunk_num =
+		    (page_to_phys(chunk) - vc_cma_base) / VC_CMA_CHUNK_SIZE;
+		BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
+			VC_CMA_CHUNK_SIZE) != 0);
+		if (chunk_num >= vc_cma_chunks) {
+			LOG_ERR("%s: ===============================",
+				__func__);
+			LOG_ERR("%s: chunk phys %x, vc_cma %x-%x - "
+				"bad SPARSEMEM configuration?",
+				__func__, (unsigned int)page_to_phys(chunk),
+				vc_cma_base, vc_cma_base + vc_cma_size - 1);
+			LOG_ERR("%s: dev->cma_area = %p\n", __func__,
+				vc_cma_device.dev.cma_area);
+			LOG_ERR("%s: ===============================",
+				__func__);
+			break;
+		}
+		reply->params[i] = chunk_num;
+		vc_cma_chunks_used++;
+	}
+
+	if (i < num_chunks) {
+		LOG_ERR("%s: dma_alloc_from_contiguous failed "
+			"for %x bytes (alloc %d of %d, %d free)",
+			__func__, VC_CMA_CHUNK_SIZE, i,
+			num_chunks, vc_cma_chunks - vc_cma_chunks_used);
+		num_chunks = i;
+	}
+
+	LOG_DBG("CMA allocated %d chunks -> %d used",
+		num_chunks, vc_cma_chunks_used);
+	reply->type = VC_CMA_MSG_ALLOCATED;
+
+	{
+		VCHIQ_ELEMENT_T elem = {
+			reply,
+			offsetof(struct cma_msg, params[0]) +
+			    num_chunks * sizeof(reply->params[0])
+		};
+		VCHIQ_STATUS_T ret;
+		vchiq_use_service(cma_service);
+		ret = vchiq_queue_message(cma_service, &elem, 1);
+		vchiq_release_service(cma_service);
+		if (ret != VCHIQ_SUCCESS)
+			LOG_ERR("vchiq_queue_message return " "%x", ret);
+	}
+
+	return num_chunks;
+}
+
+static int cma_worker_proc(void *param)
+{
+	static struct cma_msg reply;
+	(void)param;
+
+	while (1) {
+		VCHIQ_HEADER_T *msg;
+		static struct cma_msg msg_copy;
+		struct cma_msg *cma_msg = &msg_copy;
+		int type, msg_size;
+
+		msg = vchiu_queue_pop(&cma_msg_queue);
+		if ((unsigned int)msg >= VC_CMA_MSG_MAX) {
+			msg_size = msg->size;
+			memcpy(&msg_copy, msg->data, msg_size);
+			type = cma_msg->type;
+			vchiq_release_message(cma_service, msg);
+		} else {
+			msg_size = 0;
+			type = (int)msg;
+			if (type == VC_CMA_MSG_QUIT)
+				break;
+			else if (type == VC_CMA_MSG_UPDATE_RESERVE) {
+				msg = NULL;
+				cma_msg = NULL;
+			} else {
+				BUG();
+				continue;
+			}
+		}
+
+		switch (type) {
+		case VC_CMA_MSG_ALLOC:{
+				int num_chunks, free_chunks;
+				num_chunks = cma_msg->params[0];
+				free_chunks =
+				    vc_cma_chunks - vc_cma_chunks_used;
+				LOG_DBG("CMA_MSG_ALLOC(%d chunks)", num_chunks);
+				if (num_chunks > VC_CMA_MAX_PARAMS_PER_MSG) {
+					LOG_ERR
+					    ("CMA_MSG_ALLOC - chunk count (%d) "
+					     "exceeds VC_CMA_MAX_PARAMS_PER_MSG (%d)",
+					     num_chunks,
+					     VC_CMA_MAX_PARAMS_PER_MSG);
+					num_chunks = VC_CMA_MAX_PARAMS_PER_MSG;
+				}
+
+				if (num_chunks > free_chunks) {
+					LOG_ERR
+					    ("CMA_MSG_ALLOC - chunk count (%d) "
+					     "exceeds free chunks (%d)",
+					     num_chunks, free_chunks);
+					num_chunks = free_chunks;
+				}
+
+				vc_cma_alloc_chunks(num_chunks, &reply);
+			}
+			break;
+
+		case VC_CMA_MSG_FREE:{
+				int chunk_count =
+				    (msg_size -
+				     offsetof(struct cma_msg,
+					      params)) /
+				    sizeof(cma_msg->params[0]);
+				int i;
+				BUG_ON(chunk_count <= 0);
+
+				LOG_DBG("CMA_MSG_FREE(%d chunks - %x, ...)",
+					chunk_count, cma_msg->params[0]);
+				for (i = 0; i < chunk_count; i++) {
+					int chunk_num = cma_msg->params[i];
+					struct page *page = vc_cma_base_page +
+					    chunk_num * PAGES_PER_CHUNK;
+					if (chunk_num >= vc_cma_chunks) {
+						LOG_ERR
+						    ("CMA_MSG_FREE - chunk %d of %d"
+						     " (value %x) exceeds maximum "
+						     "(%x)", i, chunk_count,
+						     chunk_num,
+						     vc_cma_chunks - 1);
+						break;
+					}
+
+					if (!dma_release_from_contiguous
+					    (NULL /*&vc_cma_device.dev*/, page,
+					     PAGES_PER_CHUNK)) {
+						LOG_ERR
+						    ("CMA_MSG_FREE - failed to "
+						     "release chunk %d (phys %x, "
+						     "page %x)", chunk_num,
+						     page_to_phys(page),
+						     (unsigned int)page);
+					}
+					vc_cma_chunks_used--;
+				}
+				LOG_DBG("CMA released %d chunks -> %d used",
+					i, vc_cma_chunks_used);
+			}
+			break;
+
+		case VC_CMA_MSG_UPDATE_RESERVE:{
+				int chunks_needed =
+				    ((vc_cma_reserve_total + VC_CMA_CHUNK_SIZE -
+				      1)
+				     / VC_CMA_CHUNK_SIZE) -
+				    vc_cma_chunks_reserved;
+
+				LOG_DBG
+				    ("CMA_MSG_UPDATE_RESERVE(%d chunks needed)",
+				     chunks_needed);
+
+				/* Cap the reservations to what is available */
+				if (chunks_needed > 0) {
+					if (chunks_needed >
+					    (vc_cma_chunks -
+					     vc_cma_chunks_used))
+						chunks_needed =
+						    (vc_cma_chunks -
+						     vc_cma_chunks_used);
+
+					chunks_needed =
+					    vc_cma_alloc_chunks(chunks_needed,
+								&reply);
+				}
+
+				LOG_DBG
+				    ("CMA_MSG_UPDATE_RESERVE(%d chunks allocated)",
+				     chunks_needed);
+				vc_cma_chunks_reserved += chunks_needed;
+			}
+			break;
+
+		default:
+			LOG_ERR("unexpected msg type %d", type);
+			break;
+		}
+	}
+
+	LOG_DBG("quitting...");
+	return 0;
+}
+
+/****************************************************************************
+*
+*   vc_cma_connected_init
+*
+* This function is called once the videocore has been connected.
+*
+***************************************************************************/
+
+static void vc_cma_connected_init(void)
+{
+	VCHIQ_SERVICE_PARAMS_T service_params;
+
+	LOG_DBG("vc_cma_connected_init");
+
+	if (!vchiu_queue_init(&cma_msg_queue, 16)) {
+		LOG_ERR("could not create CMA msg queue");
+		goto fail_queue;
+	}
+
+	if (vchiq_initialise(&cma_instance) != VCHIQ_SUCCESS)
+		goto fail_vchiq_init;
+
+	vchiq_connect(cma_instance);
+
+	service_params.fourcc = VC_CMA_FOURCC;
+	service_params.callback = cma_service_callback;
+	service_params.userdata = NULL;
+	service_params.version = VC_CMA_VERSION;
+	service_params.version_min = VC_CMA_VERSION;
+
+	if (vchiq_open_service(cma_instance, &service_params,
+			       &cma_service) != VCHIQ_SUCCESS) {
+		LOG_ERR("failed to open service - already in use?");
+		goto fail_vchiq_open;
+	}
+
+	vchiq_release_service(cma_service);
+
+	cma_worker = kthread_create(cma_worker_proc, NULL, "cma_worker");
+	if (!cma_worker) {
+		LOG_ERR("could not create CMA worker thread");
+		goto fail_worker;
+	}
+	set_user_nice(cma_worker, -20);
+	wake_up_process(cma_worker);
+
+	return;
+
+fail_worker:
+	vchiq_close_service(cma_service);
+fail_vchiq_open:
+	vchiq_shutdown(cma_instance);
+fail_vchiq_init:
+	vchiu_queue_delete(&cma_msg_queue);
+fail_queue:
+	return;
+}
+
+void
+loud_error_header(void)
+{
+	if (in_loud_error)
+		return;
+
+	LOG_ERR("============================================================"
+		"================");
+	LOG_ERR("============================================================"
+		"================");
+	LOG_ERR("=====");
+
+	in_loud_error = 1;
+}
+
+void
+loud_error_footer(void)
+{
+	if (!in_loud_error)
+		return;
+
+	LOG_ERR("=====");
+	LOG_ERR("============================================================"
+		"================");
+	LOG_ERR("============================================================"
+		"================");
+
+	in_loud_error = 0;
+}
+
+#if 1
+static int check_cma_config(void) { return 1; }
+#else
+static int
+read_vc_debug_var(VC_MEM_ACCESS_HANDLE_T handle,
+		  const char *symbol,
+		  void *buf, size_t bufsize)
+{
+	VC_MEM_ADDR_T vcMemAddr;
+	size_t vcMemSize;
+	uint8_t *mapAddr;
+	off_t vcMapAddr;
+
+	if (!LookupVideoCoreSymbol(handle, symbol,
+				   &vcMemAddr,
+				   &vcMemSize)) {
+		loud_error_header();
+		loud_error(
+			"failed to find VC symbol \"%s\".",
+			symbol);
+		loud_error_footer();
+		return 0;
+	}
+
+	if (vcMemSize != bufsize) {
+		loud_error_header();
+		loud_error(
+			"VC symbol \"%s\" is the wrong size.",
+			symbol);
+		loud_error_footer();
+		return 0;
+	}
+
+	vcMapAddr = (off_t)vcMemAddr & VC_MEM_TO_ARM_ADDR_MASK;
+	vcMapAddr += mm_vc_mem_phys_addr;
+	mapAddr = ioremap_nocache(vcMapAddr, vcMemSize);
+	if (mapAddr == 0) {
+		loud_error_header();
+		loud_error(
+			"failed to ioremap \"%s\" @ 0x%x "
+			"(phys: 0x%x, size: %u).",
+			symbol,
+			(unsigned int)vcMapAddr,
+			(unsigned int)vcMemAddr,
+			(unsigned int)vcMemSize);
+		loud_error_footer();
+		return 0;
+	}
+
+	memcpy(buf, mapAddr, bufsize);
+	iounmap(mapAddr);
+
+	return 1;
+}
+
+
+static int
+check_cma_config(void)
+{
+	VC_MEM_ACCESS_HANDLE_T mem_hndl;
+	VC_MEM_ADDR_T mempool_start;
+	VC_MEM_ADDR_T mempool_end;
+	VC_MEM_ADDR_T mempool_offline_start;
+	VC_MEM_ADDR_T mempool_offline_end;
+	VC_MEM_ADDR_T cam_alloc_base;
+	VC_MEM_ADDR_T cam_alloc_size;
+	VC_MEM_ADDR_T cam_alloc_end;
+	int success = 0;
+
+	if (OpenVideoCoreMemory(&mem_hndl) != 0)
+		goto out;
+
+	/* Read the relevant VideoCore variables */
+	if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_START",
+			       &mempool_start,
+			       sizeof(mempool_start)))
+		goto close;
+
+	if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_END",
+			       &mempool_end,
+			       sizeof(mempool_end)))
+		goto close;
+
+	if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_START",
+			       &mempool_offline_start,
+			       sizeof(mempool_offline_start)))
+		goto close;
+
+	if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_END",
+			       &mempool_offline_end,
+			       sizeof(mempool_offline_end)))
+		goto close;
+
+	if (!read_vc_debug_var(mem_hndl, "cam_alloc_base",
+			       &cam_alloc_base,
+			       sizeof(cam_alloc_base)))
+		goto close;
+
+	if (!read_vc_debug_var(mem_hndl, "cam_alloc_size",
+			       &cam_alloc_size,
+			       sizeof(cam_alloc_size)))
+		goto close;
+
+	cam_alloc_end = cam_alloc_base + cam_alloc_size;
+
+	success = 1;
+
+	/* Now the sanity checks */
+	if (!mempool_offline_start)
+		mempool_offline_start = mempool_start;
+	if (!mempool_offline_end)
+		mempool_offline_end = mempool_end;
+
+	if (VCADDR_TO_PHYSADDR(mempool_offline_start) != vc_cma_base) {
+		loud_error_header();
+		loud_error(
+			"__MEMPOOL_OFFLINE_START(%x -> %lx) doesn't match "
+			"vc_cma_base(%x)",
+			mempool_offline_start,
+			VCADDR_TO_PHYSADDR(mempool_offline_start),
+			vc_cma_base);
+		success = 0;
+	}
+
+	if (VCADDR_TO_PHYSADDR(mempool_offline_end) !=
+		(vc_cma_base + vc_cma_size)) {
+		loud_error_header();
+		loud_error(
+			"__MEMPOOL_OFFLINE_END(%x -> %lx) doesn't match "
+			"vc_cma_base(%x) + vc_cma_size(%x) = %x",
+			mempool_offline_start,
+			VCADDR_TO_PHYSADDR(mempool_offline_end),
+			vc_cma_base, vc_cma_size, vc_cma_base + vc_cma_size);
+		success = 0;
+	}
+
+	if (mempool_end < mempool_start) {
+		loud_error_header();
+		loud_error(
+			"__MEMPOOL_END(%x) must not be before "
+			"__MEMPOOL_START(%x)",
+			mempool_end,
+			mempool_start);
+		success = 0;
+	}
+
+	if (mempool_offline_end < mempool_offline_start) {
+		loud_error_header();
+		loud_error(
+			"__MEMPOOL_OFFLINE_END(%x) must not be before "
+			"__MEMPOOL_OFFLINE_START(%x)",
+			mempool_offline_end,
+			mempool_offline_start);
+		success = 0;
+	}
+
+	if (mempool_offline_start < mempool_start) {
+		loud_error_header();
+		loud_error(
+			"__MEMPOOL_OFFLINE_START(%x) must not be before "
+			"__MEMPOOL_START(%x)",
+			mempool_offline_start,
+			mempool_start);
+		success = 0;
+	}
+
+	if (mempool_offline_end > mempool_end) {
+		loud_error_header();
+		loud_error(
+			"__MEMPOOL_OFFLINE_END(%x) must not be after "
+			"__MEMPOOL_END(%x)",
+			mempool_offline_end,
+			mempool_end);
+		success = 0;
+	}
+
+	if ((cam_alloc_base < mempool_end) &&
+		(cam_alloc_end > mempool_start)) {
+		loud_error_header();
+		loud_error(
+			"cam_alloc pool(%x-%x) overlaps "
+			"mempool(%x-%x)",
+			cam_alloc_base, cam_alloc_end,
+			mempool_start, mempool_end);
+		success = 0;
+	}
+
+	loud_error_footer();
+
+close:
+	CloseVideoCoreMemory(mem_hndl);
+
+out:
+	return success;
+}
+#endif
+
+static int vc_cma_init(void)
+{
+	int rc = -EFAULT;
+	struct device *dev;
+
+	if (!check_cma_config())
+		goto out_release;
+
+	printk(KERN_INFO "vc-cma: Videocore CMA driver\n");
+	printk(KERN_INFO "vc-cma: vc_cma_base = 0x%08x\n", vc_cma_base);
+	printk(KERN_INFO "vc-cma: vc_cma_size = 0x%08x (%u MiB)\n",
+	       vc_cma_size, vc_cma_size / (1024 * 1024));
+	printk(KERN_INFO "vc-cma: vc_cma_initial = 0x%08x (%u MiB)\n",
+	       vc_cma_initial, vc_cma_initial / (1024 * 1024));
+
+	vc_cma_base_page = phys_to_page(vc_cma_base);
+
+	if (vc_cma_chunks) {
+		int chunks_needed = vc_cma_initial / VC_CMA_CHUNK_SIZE;
+
+		for (vc_cma_chunks_used = 0;
+		     vc_cma_chunks_used < chunks_needed; vc_cma_chunks_used++) {
+			struct page *chunk;
+			chunk = dma_alloc_from_contiguous(NULL /*&vc_cma_device.dev*/,
+							  PAGES_PER_CHUNK,
+							  VC_CMA_CHUNK_ORDER);
+			if (!chunk)
+				break;
+			BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
+				VC_CMA_CHUNK_SIZE) != 0);
+		}
+		if (vc_cma_chunks_used != chunks_needed) {
+			LOG_ERR("%s: dma_alloc_from_contiguous failed (%d "
+				"bytes, allocation %d of %d)",
+				__func__, VC_CMA_CHUNK_SIZE,
+				vc_cma_chunks_used, chunks_needed);
+			goto out_release;
+		}
+
+		vchiq_add_connected_callback(vc_cma_connected_init);
+	}
+
+	rc = alloc_chrdev_region(&vc_cma_devnum, 0, 1, DRIVER_NAME);
+	if (rc < 0) {
+		LOG_ERR("%s: alloc_chrdev_region failed (rc=%d)", __func__, rc);
+		goto out_release;
+	}
+
+	cdev_init(&vc_cma_cdev, &vc_cma_fops);
+	rc = cdev_add(&vc_cma_cdev, vc_cma_devnum, 1);
+	if (rc != 0) {
+		LOG_ERR("%s: cdev_add failed (rc=%d)", __func__, rc);
+		goto out_unregister;
+	}
+
+	vc_cma_class = class_create(THIS_MODULE, DRIVER_NAME);
+	if (IS_ERR(vc_cma_class)) {
+		rc = PTR_ERR(vc_cma_class);
+		LOG_ERR("%s: class_create failed (rc=%d)", __func__, rc);
+		goto out_cdev_del;
+	}
+
+	dev = device_create(vc_cma_class, NULL, vc_cma_devnum, NULL,
+			    DRIVER_NAME);
+	if (IS_ERR(dev)) {
+		rc = PTR_ERR(dev);
+		LOG_ERR("%s: device_create failed (rc=%d)", __func__, rc);
+		goto out_class_destroy;
+	}
+
+	vc_cma_proc_entry = create_proc_entry(DRIVER_NAME, 0444, NULL);
+	if (vc_cma_proc_entry == NULL) {
+		rc = -EFAULT;
+		LOG_ERR("%s: create_proc_entry failed", __func__);
+		goto out_device_destroy;
+	}
+
+	vc_cma_proc_entry->proc_fops = &vc_cma_proc_fops;
+
+	vc_cma_inited = 1;
+	return 0;
+
+out_device_destroy:
+	device_destroy(vc_cma_class, vc_cma_devnum);
+
+out_class_destroy:
+	class_destroy(vc_cma_class);
+	vc_cma_class = NULL;
+
+out_cdev_del:
+	cdev_del(&vc_cma_cdev);
+
+out_unregister:
+	unregister_chrdev_region(vc_cma_devnum, 1);
+
+out_release:
+	/* It is tempting to try to clean up by calling
+	   dma_release_from_contiguous for all allocated chunks, but it isn't
+	   a very safe thing to do. If vc_cma_initial is non-zero it is because
+	   VideoCore is already using that memory, so giving it back to Linux
+	   is likely to be fatal.
+	 */
+	return -1;
+}
+
+/****************************************************************************
+*
+*   vc_cma_exit
+*
+***************************************************************************/
+
+static void __exit vc_cma_exit(void)
+{
+	LOG_DBG("%s: called", __func__);
+
+	if (vc_cma_inited) {
+		remove_proc_entry(vc_cma_proc_entry->name, NULL);
+		device_destroy(vc_cma_class, vc_cma_devnum);
+		class_destroy(vc_cma_class);
+		cdev_del(&vc_cma_cdev);
+		unregister_chrdev_region(vc_cma_devnum, 1);
+	}
+}
+
+module_init(vc_cma_init);
+module_exit(vc_cma_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom Corporation");
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -62,3 +62,6 @@ obj-$(CONFIG_JS_RTC) += js-rtc.o
 js-rtc-y = rtc.o
 
 obj-$(CONFIG_TILE_SROM) += tile-srom.o
+
+obj-$(CONFIG_BRCM_CHAR_DRIVERS) += broadcom/
+
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -586,6 +586,8 @@ config DEVPORT
 
 source "drivers/s390/char/Kconfig"
 
+source "drivers/char/broadcom/Kconfig"
+
 config MSM_SMD_PKT
 	bool "Enable device interface for some SMD packet ports"
 	default n
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -536,4 +536,6 @@ source "drivers/misc/carma/Kconfig"
 source "drivers/misc/altera-stapl/Kconfig"
 source "drivers/misc/mei/Kconfig"
 source "drivers/misc/vmw_vmci/Kconfig"
+source "drivers/misc/vc04_services/Kconfig"
 endmenu
+
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -53,3 +53,4 @@ obj-$(CONFIG_INTEL_MEI) += mei/
 obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
 obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
 obj-$(CONFIG_SRAM) += sram.o
+obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
|
|
@@ -0,0 +1,328 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef CONNECTION_H_
|
|
+#define CONNECTION_H_
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/semaphore.h>
|
|
+
|
|
+#include "interface/vchi/vchi_cfg_internal.h"
|
|
+#include "interface/vchi/vchi_common.h"
|
|
+#include "interface/vchi/message_drivers/message.h"
|
|
+
|
|
+/******************************************************************************
|
|
+ Global defs
|
|
+ *****************************************************************************/
|
|
+
|
|
+// Opaque handle for a connection / service pair
|
|
+typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
|
|
+
|
|
+// opaque handle to the connection state information
|
|
+typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
|
|
+
|
|
+typedef struct vchi_connection_t VCHI_CONNECTION_T;
|
|
+
|
|
+
|
|
+/******************************************************************************
|
|
+ API
|
|
+ *****************************************************************************/
|
|
+
|
|
+// Routine to init a connection with a particular low level driver
|
|
+typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
|
|
+ const VCHI_MESSAGE_DRIVER_T * driver );
|
|
+
|
|
+// Routine to control CRC enabling at a connection level
|
|
+typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
|
|
+ VCHI_CRC_CONTROL_T control );
|
|
+
|
|
+// Routine to create a service
|
|
+typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
|
|
+ int32_t service_id,
|
|
+ uint32_t rx_fifo_size,
|
|
+ uint32_t tx_fifo_size,
|
|
+ int server,
|
|
+ VCHI_CALLBACK_T callback,
|
|
+ void *callback_param,
|
|
+ int32_t want_crc,
|
|
+ int32_t want_unaligned_bulk_rx,
|
|
+ int32_t want_unaligned_bulk_tx,
|
|
+ VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
|
|
+
|
|
+// Routine to close a service
|
|
+typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
|
|
+
|
|
+// Routine to queue a message
|
|
+typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
|
|
+ const void *data,
|
|
+ uint32_t data_size,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *msg_handle );
|
|
+
|
|
+// scatter-gather (vector) message queueing
|
|
+typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
|
|
+ VCHI_MSG_VECTOR_T *vector,
|
|
+ uint32_t count,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *msg_handle );
|
|
+
|
|
+// Routine to dequeue a message
|
|
+typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
|
|
+ void *data,
|
|
+ uint32_t max_data_size_to_read,
|
|
+ uint32_t *actual_msg_size,
|
|
+ VCHI_FLAGS_T flags );
|
|
+
|
|
+// Routine to peek at a message
|
|
+typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
|
|
+ void **data,
|
|
+ uint32_t *msg_size,
|
|
+ VCHI_FLAGS_T flags );
|
|
+
|
|
+// Routine to hold a message
|
|
+typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
|
|
+ void **data,
|
|
+ uint32_t *msg_size,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void **message_handle );
|
|
+
|
|
+// Routine to initialise a received message iterator
|
|
+typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
|
|
+ VCHI_MSG_ITER_T *iter,
|
|
+ VCHI_FLAGS_T flags );
|
|
+
|
|
+// Routine to release a held message
|
|
+typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
|
|
+ void *message_handle );
|
|
+
|
|
+// Routine to get info on a held message
|
|
+typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
|
|
+ void *message_handle,
|
|
+ void **data,
|
|
+ int32_t *msg_size,
|
|
+ uint32_t *tx_timestamp,
|
|
+ uint32_t *rx_timestamp );
|
|
+
|
|
+// Routine to check whether the iterator has a next message
|
|
+typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
|
|
+ const VCHI_MSG_ITER_T *iter );
|
|
+
|
|
+// Routine to advance the iterator
|
|
+typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
|
|
+ VCHI_MSG_ITER_T *iter,
|
|
+ void **data,
|
|
+ uint32_t *msg_size );
|
|
+
|
|
+// Routine to remove the last message returned by the iterator
|
|
+typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
|
|
+ VCHI_MSG_ITER_T *iter );
|
|
+
|
|
+// Routine to hold the last message returned by the iterator
|
|
+typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
|
|
+ VCHI_MSG_ITER_T *iter,
|
|
+ void **msg_handle );
|
|
+
|
|
+// Routine to transmit bulk data
|
|
+typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
|
|
+ const void *data_src,
|
|
+ uint32_t data_size,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *bulk_handle );
|
|
+
|
|
+// Routine to receive data
|
|
+typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
|
|
+ void *data_dst,
|
|
+ uint32_t data_size,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *bulk_handle );
|
|
+
|
|
+// Routine to report if a server is available
|
|
+typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
|
|
+
|
|
+// Routine to report the number of RX slots available
|
|
+typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
|
|
+
|
|
+// Routine to report the RX slot size
|
|
+typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
|
|
+
|
|
+// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
|
|
+typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
|
|
+ int32_t service,
|
|
+ uint32_t length,
|
|
+ MESSAGE_TX_CHANNEL_T channel,
|
|
+ uint32_t channel_params,
|
|
+ uint32_t data_length,
|
|
+ uint32_t data_offset);
|
|
+
|
|
+// Callback to inform a service that a Xon or Xoff message has been received
|
|
+typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
|
|
+
|
|
+// Callback to inform a service that a server available reply message has been received
|
|
+typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
|
|
+
|
|
+// Callback to indicate that bulk auxiliary messages have arrived
|
|
+typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
|
|
+
|
|
+// Callback to indicate that bulk auxiliary messages have arrived
|
|
+typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
|
|
+
|
|
+// Callback with all the connection info you require
|
|
+typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
|
|
+
|
|
+// Callback to inform of a disconnect
|
|
+typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
|
|
+
|
|
+// Callback to inform of a power control request
|
|
+typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
|
|
+
|
|
+// allocate memory suitably aligned for this connection
|
|
+typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
|
|
+
|
|
+// free memory allocated by buffer_allocate
|
|
+typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
|
|
+
|
|
+
|
|
+/******************************************************************************
|
|
+ System driver struct
|
|
+ *****************************************************************************/
|
|
+
|
|
+struct opaque_vchi_connection_api_t
|
|
+{
|
|
+ // Routine to init the connection
|
|
+ VCHI_CONNECTION_INIT_T init;
|
|
+
|
|
+ // Connection-level CRC control
|
|
+ VCHI_CONNECTION_CRC_CONTROL_T crc_control;
|
|
+
|
|
+ // Routine to connect to or create service
|
|
+ VCHI_CONNECTION_SERVICE_CONNECT_T service_connect;
|
|
+
|
|
+ // Routine to disconnect from a service
|
|
+ VCHI_CONNECTION_SERVICE_DISCONNECT_T service_disconnect;
|
|
+
|
|
+ // Routine to queue a message
|
|
+ VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T service_queue_msg;
|
|
+
|
|
+ // scatter-gather (vector) message queue
|
|
+ VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T service_queue_msgv;
|
|
+
|
|
+ // Routine to dequeue a message
|
|
+ VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T service_dequeue_msg;
|
|
+
|
|
+ // Routine to peek at a message
|
|
+ VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T service_peek_msg;
|
|
+
|
|
+ // Routine to hold a message
|
|
+ VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T service_hold_msg;
|
|
+
|
|
+ // Routine to initialise a received message iterator
|
|
+ VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
|
|
+
|
|
+ // Routine to release a message
|
|
+ VCHI_CONNECTION_HELD_MSG_RELEASE_T held_msg_release;
|
|
+
|
|
+ // Routine to get information on a held message
|
|
+ VCHI_CONNECTION_HELD_MSG_INFO_T held_msg_info;
|
|
+
|
|
+ // Routine to check for next message on iterator
|
|
+ VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T msg_iter_has_next;
|
|
+
|
|
+ // Routine to get next message on iterator
|
|
+ VCHI_CONNECTION_MSG_ITER_NEXT_T msg_iter_next;
|
|
+
|
|
+ // Routine to remove the last message returned by iterator
|
|
+ VCHI_CONNECTION_MSG_ITER_REMOVE_T msg_iter_remove;
|
|
+
|
|
+ // Routine to hold the last message returned by iterator
|
|
+ VCHI_CONNECTION_MSG_ITER_HOLD_T msg_iter_hold;
|
|
+
|
|
+ // Routine to transmit bulk data
|
|
+ VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T bulk_queue_transmit;
|
|
+
|
|
+ // Routine to receive data
|
|
+ VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T bulk_queue_receive;
|
|
+
|
|
+ // Routine to report the available servers
|
|
+ VCHI_CONNECTION_SERVER_PRESENT server_present;
|
|
+
|
|
+ // Routine to report the number of RX slots available
|
|
+ VCHI_CONNECTION_RX_SLOTS_AVAILABLE connection_rx_slots_available;
|
|
+
|
|
+ // Routine to report the RX slot size
|
|
+ VCHI_CONNECTION_RX_SLOT_SIZE connection_rx_slot_size;
|
|
+
|
|
+ // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
|
|
+ VCHI_CONNECTION_RX_BULK_BUFFER_ADDED rx_bulk_buffer_added;
|
|
+
|
|
+ // Callback to inform a service that a Xon or Xoff message has been received
|
|
+ VCHI_CONNECTION_FLOW_CONTROL flow_control;
|
|
+
|
|
+ // Callback to inform a service that a server available reply message has been received
|
|
+ VCHI_CONNECTION_SERVER_AVAILABLE_REPLY server_available_reply;
|
|
+
|
|
+ // Callback to indicate that bulk auxiliary messages have arrived
|
|
+ VCHI_CONNECTION_BULK_AUX_RECEIVED bulk_aux_received;
|
|
+
|
|
+ // Callback to indicate that a bulk auxiliary message has been transmitted
|
|
+ VCHI_CONNECTION_BULK_AUX_TRANSMITTED bulk_aux_transmitted;
|
|
+
|
|
+ // Callback to provide information about the connection
|
|
+ VCHI_CONNECTION_INFO connection_info;
|
|
+
|
|
+ // Callback to notify that peer has requested disconnect
|
|
+ VCHI_CONNECTION_DISCONNECT disconnect;
|
|
+
|
|
+ // Callback to notify that peer has requested power change
|
|
+ VCHI_CONNECTION_POWER_CONTROL power_control;
|
|
+
|
|
+ // allocate memory suitably aligned for this connection
|
|
+ VCHI_BUFFER_ALLOCATE buffer_allocate;
|
|
+
|
|
+ // free memory allocated by buffer_allocate
|
|
+ VCHI_BUFFER_FREE buffer_free;
|
|
+
|
|
+};
|
|
+
|
|
+struct vchi_connection_t {
|
|
+ const VCHI_CONNECTION_API_T *api;
|
|
+ VCHI_CONNECTION_STATE_T *state;
|
|
+#ifdef VCHI_COARSE_LOCKING
|
|
+ struct semaphore sem;
|
|
+#endif
|
|
+};
|
|
+
|
|
+
|
|
+#endif /* CONNECTION_H_ */
|
|
+
|
|
+/****************************** End of file **********************************/
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
|
|
@@ -0,0 +1,204 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef _VCHI_MESSAGE_H_
|
|
+#define _VCHI_MESSAGE_H_
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/semaphore.h>
|
|
+
|
|
+#include "interface/vchi/vchi_cfg_internal.h"
|
|
+#include "interface/vchi/vchi_common.h"
|
|
+
|
|
+
|
|
+typedef enum message_event_type {
|
|
+ MESSAGE_EVENT_NONE,
|
|
+ MESSAGE_EVENT_NOP,
|
|
+ MESSAGE_EVENT_MESSAGE,
|
|
+ MESSAGE_EVENT_SLOT_COMPLETE,
|
|
+ MESSAGE_EVENT_RX_BULK_PAUSED,
|
|
+ MESSAGE_EVENT_RX_BULK_COMPLETE,
|
|
+ MESSAGE_EVENT_TX_COMPLETE,
|
|
+ MESSAGE_EVENT_MSG_DISCARDED
|
|
+} MESSAGE_EVENT_TYPE_T;
|
|
+
|
|
+typedef enum vchi_msg_flags
|
|
+{
|
|
+ VCHI_MSG_FLAGS_NONE = 0x0,
|
|
+ VCHI_MSG_FLAGS_TERMINATE_DMA = 0x1
|
|
+} VCHI_MSG_FLAGS_T;
|
|
+
|
|
+typedef enum message_tx_channel
|
|
+{
|
|
+ MESSAGE_TX_CHANNEL_MESSAGE = 0,
|
|
+ MESSAGE_TX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
|
|
+} MESSAGE_TX_CHANNEL_T;
|
|
+
|
|
+// Macros used for cycling through bulk channels
|
|
+#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
|
|
+#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
|
|
+
|
|
+typedef enum message_rx_channel
|
|
+{
|
|
+ MESSAGE_RX_CHANNEL_MESSAGE = 0,
|
|
+ MESSAGE_RX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
|
|
+} MESSAGE_RX_CHANNEL_T;
|
|
+
|
|
+// Message receive slot information
|
|
+typedef struct rx_msg_slot_info {
|
|
+
|
|
+ struct rx_msg_slot_info *next;
|
|
+ //struct slot_info *prev;
|
|
+#if !defined VCHI_COARSE_LOCKING
|
|
+ struct semaphore sem;
|
|
+#endif
|
|
+
|
|
+ uint8_t *addr; // base address of slot
|
|
+ uint32_t len; // length of slot in bytes
|
|
+
|
|
+ uint32_t write_ptr; // hardware causes this to advance
|
|
+ uint32_t read_ptr; // this module does the reading
|
|
+ int active; // is this slot in the hardware dma fifo?
|
|
+ uint32_t msgs_parsed; // count how many messages are in this slot
|
|
+ uint32_t msgs_released; // how many messages have been released
|
|
+ void *state; // connection state information
|
|
+ uint8_t ref_count[VCHI_MAX_SERVICES_PER_CONNECTION]; // reference count for slots held by services
|
|
+} RX_MSG_SLOTINFO_T;
|
|
+
|
|
+// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
|
|
+// In particular, it mustn't use addr and len - they're the client buffer, but the message
|
|
+// driver will be tasked with sending the aligned core section.
|
|
+typedef struct rx_bulk_slotinfo_t {
|
|
+ struct rx_bulk_slotinfo_t *next;
|
|
+
|
|
+ struct semaphore *blocking;
|
|
+
|
|
+ // needed by DMA
|
|
+ void *addr;
|
|
+ uint32_t len;
|
|
+
|
|
+ // needed for the callback
|
|
+ void *service;
|
|
+ void *handle;
|
|
+ VCHI_FLAGS_T flags;
|
|
+} RX_BULK_SLOTINFO_T;
|
|
+
|
|
+
|
|
+/* ----------------------------------------------------------------------
|
|
+ * each connection driver will have a pool of the following struct.
|
|
+ *
|
|
+ * the pool will be managed by vchi_qman_*
|
|
+ * this means there will be multiple queues (single linked lists)
|
|
+ * a given struct message_info will be on exactly one of these queues
|
|
+ * at any one time
|
|
+ * -------------------------------------------------------------------- */
|
|
+typedef struct rx_message_info {
|
|
+
|
|
+ struct message_info *next;
|
|
+ //struct message_info *prev;
|
|
+
|
|
+ uint8_t *addr;
|
|
+ uint32_t len;
|
|
+ RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
|
|
+ uint32_t tx_timestamp;
|
|
+ uint32_t rx_timestamp;
|
|
+
|
|
+} RX_MESSAGE_INFO_T;
|
|
+
|
|
+typedef struct {
|
|
+ MESSAGE_EVENT_TYPE_T type;
|
|
+
|
|
+ struct {
|
|
+ // for messages
|
|
+ void *addr; // address of message
|
|
+ uint16_t slot_delta; // whether this message indicated slot delta
|
|
+ uint32_t len; // length of message
|
|
+ RX_MSG_SLOTINFO_T *slot; // slot this message is in
|
|
+ int32_t service; // service id this message is destined for
|
|
+ uint32_t tx_timestamp; // timestamp from the header
|
|
+ uint32_t rx_timestamp; // timestamp when we parsed it
|
|
+ } message;
|
|
+
|
|
+ // FIXME: cleanup slot reporting...
|
|
+ RX_MSG_SLOTINFO_T *rx_msg;
|
|
+ RX_BULK_SLOTINFO_T *rx_bulk;
|
|
+ void *tx_handle;
|
|
+ MESSAGE_TX_CHANNEL_T tx_channel;
|
|
+
|
|
+} MESSAGE_EVENT_T;
|
|
+
|
|
+
|
|
+// callbacks
|
|
+typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
|
|
+
|
|
+typedef struct {
|
|
+ VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
|
|
+} VCHI_MESSAGE_DRIVER_OPEN_T;
|
|
+
|
|
+
|
|
+// handle to this instance of message driver (as returned by ->open)
|
|
+typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
|
|
+
|
|
+struct opaque_vchi_message_driver_t {
|
|
+ VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
|
|
+ int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
|
|
+ int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
|
|
+ int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
|
|
+ int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot ); // rx message
|
|
+ int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot ); // rx data (bulk)
|
|
+ int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle ); // tx (message & bulk)
|
|
+ void (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event ); // get the next event from message_driver
|
|
+ int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
|
|
+ int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count, void
|
|
+ *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
|
|
+
|
|
+ int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
|
|
+ int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
|
|
+ void * (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
|
|
+ void (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
|
|
+ int (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
|
|
+ int (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
|
|
+
|
|
+ int32_t (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
|
|
+ uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
|
|
+ int (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
|
|
+ int (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
|
|
+ void (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
|
|
+ void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
|
|
+};
|
|
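struct opaque_vchi_message_driver_t is a pure function table: the connection layer drives the message driver only through these pointers. A hedged sketch of how a slot handler might drain the driver's event queue (the real connection code lives elsewhere in vc04_services; drain_events is a made-up name):

/* Sketch only: poll ->next_event until the driver reports no more events.
 * 'drv' and 'handle' would come from vchi_create_connection()/->open(). */
static void drain_events(const VCHI_MESSAGE_DRIVER_T *drv,
                         VCHI_MDRIVER_HANDLE_T *handle)
{
   MESSAGE_EVENT_T event;

   do {
      drv->next_event(handle, &event);
      switch (event.type) {
      case MESSAGE_EVENT_MESSAGE:
         /* event.message.addr/len/service describe the parsed message */
         break;
      case MESSAGE_EVENT_TX_COMPLETE:
         /* event.tx_handle identifies the completed transmission */
         break;
      default:
         break;
      }
   } while (event.type != MESSAGE_EVENT_NONE);
}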
+
|
|
+
|
|
+#endif // _VCHI_MESSAGE_H_
|
|
+
|
|
+/****************************** End of file ***********************************/
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
|
|
@@ -0,0 +1,224 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHI_CFG_H_
|
|
+#define VCHI_CFG_H_
|
|
+
|
|
+/****************************************************************************************
|
|
+ * Defines in this first section are part of the VCHI API and may be examined by VCHI
|
|
+ * services.
|
|
+ ***************************************************************************************/
|
|
+
|
|
+/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
|
|
+/* Really determined by the message driver, and should be available from a run-time call. */
|
|
+#ifndef VCHI_BULK_ALIGN
|
|
+# if __VCCOREVER__ >= 0x04000000
|
|
+# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
|
|
+# else
|
|
+# define VCHI_BULK_ALIGN 16
|
|
+# endif
|
|
+#endif
|
|
+
|
|
+/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
|
|
+/* May be less than or greater than VCHI_BULK_ALIGN */
|
|
+/* Really determined by the message driver, and should be available from a run-time call. */
|
|
+#ifndef VCHI_BULK_GRANULARITY
|
|
+# if __VCCOREVER__ >= 0x04000000
|
|
+# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
|
|
+# else
|
|
+# define VCHI_BULK_GRANULARITY 16
|
|
+# endif
|
|
+#endif
|
|
+
|
|
+/* The largest possible message to be queued with vchi_msg_queue. */
|
|
+#ifndef VCHI_MAX_MSG_SIZE
|
|
+# if defined VCHI_LOCAL_HOST_PORT
|
|
+# define VCHI_MAX_MSG_SIZE 16384 // makes file transfers fast, but should they be using bulk?
|
|
+# else
|
|
+# define VCHI_MAX_MSG_SIZE 4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
|
|
+# endif
|
|
+#endif
|
|
+
|
|
+/******************************************************************************************
|
|
+ * Defines below are system configuration options, and should not be used by VCHI services.
|
|
+ *****************************************************************************************/
|
|
+
|
|
+/* How many connections can we support? A localhost implementation uses 2 connections,
|
|
+ * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
|
|
+ * driver. */
|
|
+#ifndef VCHI_MAX_NUM_CONNECTIONS
|
|
+# define VCHI_MAX_NUM_CONNECTIONS 3
|
|
+#endif
|
|
+
|
|
+/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
|
|
+ * amount of static memory. */
|
|
+#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
|
|
+# define VCHI_MAX_SERVICES_PER_CONNECTION 36
|
|
+#endif
|
|
+
|
|
+/* Adjust if using a message driver that supports more logical TX channels */
|
|
+#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
|
|
+# define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
|
|
+#endif
|
|
+
|
|
+/* Adjust if using a message driver that supports more logical RX channels */
|
|
+#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
|
|
+# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
|
|
+#endif
|
|
+
|
|
+/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
|
|
+ * receive queue space, less message headers. */
|
|
+#ifndef VCHI_NUM_READ_SLOTS
|
|
+# if defined(VCHI_LOCAL_HOST_PORT)
|
|
+# define VCHI_NUM_READ_SLOTS 4
|
|
+# else
|
|
+# define VCHI_NUM_READ_SLOTS 48
|
|
+# endif
|
|
+#endif
|
|
+
|
|
+/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
|
|
+ * performance. Only define on VideoCore end, talking to host.
|
|
+ */
|
|
+//#define VCHI_MSG_RX_OVERRUN
|
|
+
|
|
+/* How many transmit slots do we use. Generally don't need many, as the hardware driver
|
|
+ * underneath VCHI will usually have its own buffering. */
|
|
+#ifndef VCHI_NUM_WRITE_SLOTS
|
|
+# define VCHI_NUM_WRITE_SLOTS 4
|
|
+#endif
|
|
+
|
|
+/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
|
|
+ * then it's taking up too much buffer space, and the peer service will be told to stop
|
|
+ * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
|
|
+ * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
|
|
+ * is too high. */
|
|
+#ifndef VCHI_XOFF_THRESHOLD
|
|
+# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
|
|
+#endif
|
|
+
|
|
+/* After we've sent an XOFF, the peer will be told to resume transmission once the local
|
|
+ * service has dequeued/released enough messages that it's now occupying
|
|
+ * VCHI_XON_THRESHOLD slots or fewer. */
|
|
+#ifndef VCHI_XON_THRESHOLD
|
|
+# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
|
|
+#endif
|
|
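A worked example of the flow-control thresholds with the default non-localhost values (not part of the header):

/* Worked example with VCHI_NUM_READ_SLOTS = 48:
 *   VCHI_XOFF_THRESHOLD = 48 / 2 = 24  -> XOFF is sent once a service holds 24+ slots
 *   VCHI_XON_THRESHOLD  = 48 / 4 = 12  -> XON is sent once it drops to 12 or fewer
 * The 12-slot gap between the two thresholds provides hysteresis, so the link
 * does not oscillate between XOFF and XON on every message.
 */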
+
|
|
+/* A size below which a bulk transfer omits the handshake completely and always goes
|
|
+ * via the message channel, if bulk auxiliary is being sent on that service. (The user
|
|
+ * can guarantee this by enabling unaligned transmits).
|
|
+ * Not API. */
|
|
+#ifndef VCHI_MIN_BULK_SIZE
|
|
+# define VCHI_MIN_BULK_SIZE ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
|
|
+#endif
|
|
+
|
|
+/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
|
|
+ * speed and latency; the smaller the chunk size, the better the chance of messages and other
|
|
+ * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
|
|
+ * break transmissions into chunks.
|
|
+ */
|
|
+#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
|
|
+# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
|
|
+#endif
|
|
+
|
|
+/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
|
|
+ * with multiple-line frames. Only use if the receiver can cope. */
|
|
+#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
|
|
+# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
|
|
+#endif
|
|
+
|
|
+/* How many TX messages can we have pending in our transmit slots. Once exhausted,
|
|
+ * vchi_msg_queue will be blocked. */
|
|
+#ifndef VCHI_TX_MSG_QUEUE_SIZE
|
|
+# define VCHI_TX_MSG_QUEUE_SIZE 256
|
|
+#endif
|
|
+
|
|
+/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
|
|
+ * will be suspended until older messages are dequeued/released. */
|
|
+#ifndef VCHI_RX_MSG_QUEUE_SIZE
|
|
+# define VCHI_RX_MSG_QUEUE_SIZE 256
|
|
+#endif
|
|
+
|
|
+/* Really should be able to cope if we run out of received message descriptors, by
|
|
+ * suspending parsing as the comment above says, but we don't. This sweeps the issue
|
|
+ * under the carpet. */
|
|
+#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
|
|
+# undef VCHI_RX_MSG_QUEUE_SIZE
|
|
+# define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
|
|
+#endif
|
|
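A worked example of why this #if fires with the default values (not part of the header):

/* Worked example with VCHI_MAX_MSG_SIZE = 4096 and VCHI_NUM_READ_SLOTS = 48:
 *   (4096/16 + 1) * 48 = 257 * 48 = 12336
 * which is far larger than the initial VCHI_RX_MSG_QUEUE_SIZE of 256, so the
 * queue size is redefined to 12336 - enough descriptors even if every slot
 * were packed with 16-byte messages (the apparent minimum implied by the /16).
 */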
+
|
|
+/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
|
|
+ * will be blocked. */
|
|
+#ifndef VCHI_TX_BULK_QUEUE_SIZE
|
|
+# define VCHI_TX_BULK_QUEUE_SIZE 64
|
|
+#endif
|
|
+
|
|
+/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
|
|
+ * will be blocked. */
|
|
+#ifndef VCHI_RX_BULK_QUEUE_SIZE
|
|
+# define VCHI_RX_BULK_QUEUE_SIZE 64
|
|
+#endif
|
|
+
|
|
+/* A limit on how many outstanding bulk requests we expect the peer to give us. If
|
|
+ * the peer asks for more than this, VCHI will fail and assert. The number is determined
|
|
+ * by the peer's hardware - it's the number of outstanding requests that can be queued
|
|
+ * on all bulk channels. VC3's MPHI peripheral allows 16. */
|
|
+#ifndef VCHI_MAX_PEER_BULK_REQUESTS
|
|
+# define VCHI_MAX_PEER_BULK_REQUESTS 32
|
|
+#endif
|
|
+
|
|
+/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
|
|
+ * transmitter on and off.
|
|
+ */
|
|
+/*#define VCHI_CCP2TX_MANUAL_POWER*/
|
|
+
|
|
+#ifndef VCHI_CCP2TX_MANUAL_POWER
|
|
+
|
|
+/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
|
|
+ * negative for no IDLE.
|
|
+ */
|
|
+# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
|
|
+# define VCHI_CCP2TX_IDLE_TIMEOUT 5
|
|
+# endif
|
|
+
|
|
+/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
|
|
+ * negative for no OFF.
|
|
+ */
|
|
+# ifndef VCHI_CCP2TX_OFF_TIMEOUT
|
|
+# define VCHI_CCP2TX_OFF_TIMEOUT 1000
|
|
+# endif
|
|
+
|
|
+#endif /* VCHI_CCP2TX_MANUAL_POWER */
|
|
+
|
|
+#endif /* VCHI_CFG_H_ */
|
|
+
|
|
+/****************************** End of file **********************************/
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
|
|
@@ -0,0 +1,71 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHI_CFG_INTERNAL_H_
|
|
+#define VCHI_CFG_INTERNAL_H_
|
|
+
|
|
+/****************************************************************************************
|
|
+ * Control optimisation attempts.
|
|
+ ***************************************************************************************/
|
|
+
|
|
+// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
|
|
+#define VCHI_COARSE_LOCKING
|
|
+
|
|
+// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
|
|
+// (only relevant if VCHI_COARSE_LOCKING)
|
|
+#define VCHI_ELIDE_BLOCK_EXIT_LOCK
|
|
+
|
|
+// Avoid lock on non-blocking peek
|
|
+// (only relevant if VCHI_COARSE_LOCKING)
|
|
+#define VCHI_AVOID_PEEK_LOCK
|
|
+
|
|
+// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
|
|
+#define VCHI_MULTIPLE_HANDLER_THREADS
|
|
+
|
|
+// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
|
|
+// our way through the pool of descriptors.
|
|
+#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
|
|
+
|
|
+// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
|
|
+#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
|
|
+
|
|
+// Don't use message descriptors for TX messages that don't need them
|
|
+#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
|
|
+
|
|
+// Nano-locks for multiqueue
|
|
+//#define VCHI_MQUEUE_NANOLOCKS
|
|
+
|
|
+// Lock-free(er) dequeuing
|
|
+//#define VCHI_RX_NANOLOCKS
|
|
+
|
|
+#endif /*VCHI_CFG_INTERNAL_H_*/
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
|
|
@@ -0,0 +1,163 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHI_COMMON_H_
|
|
+#define VCHI_COMMON_H_
|
|
+
|
|
+
|
|
+//flags used when sending messages (must be bitmapped)
|
|
+typedef enum
|
|
+{
|
|
+ VCHI_FLAGS_NONE = 0x0,
|
|
+ VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
|
|
+ VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
|
|
+ VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
|
|
+ VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
|
|
+ VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
|
|
+ VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
|
|
+
|
|
+ VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
|
|
+ VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
|
|
+ VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
|
|
+ VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
|
|
+ VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
|
|
+ VCHI_FLAGS_INTERNAL = 0xFF0000
|
|
+} VCHI_FLAGS_T;
|
|
+
|
|
+// constants for vchi_crc_control()
|
|
+typedef enum {
|
|
+ VCHI_CRC_NOTHING = -1,
|
|
+ VCHI_CRC_PER_SERVICE = 0,
|
|
+ VCHI_CRC_EVERYTHING = 1,
|
|
+} VCHI_CRC_CONTROL_T;
|
|
+
|
|
+//callback reasons when an event occurs on a service
|
|
+typedef enum
|
|
+{
|
|
+ VCHI_CALLBACK_REASON_MIN,
|
|
+
|
|
+ //This indicates that there is data available
|
|
+ //handle is the msg id that was transmitted with the data
|
|
+ // When a message is received and there was no FULL message available previously, send callback
|
|
+ // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
|
|
+ VCHI_CALLBACK_MSG_AVAILABLE,
|
|
+ VCHI_CALLBACK_MSG_SENT,
|
|
+ VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
|
|
+
|
|
+ // This indicates that a transfer from the other side has completed
|
|
+ VCHI_CALLBACK_BULK_RECEIVED,
|
|
+ //This indicates that data queued up to be sent has now gone
|
|
+ //handle is the msg id that was used when sending the data
|
|
+ VCHI_CALLBACK_BULK_SENT,
|
|
+ VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
|
|
+ VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
|
|
+
|
|
+ VCHI_CALLBACK_SERVICE_CLOSED,
|
|
+
|
|
+ // this side has sent XOFF to peer due to lack of data consumption by service
|
|
+ // (suggests the service may need to take some recovery action if it has
|
|
+ // been deliberately holding off consuming data)
|
|
+ VCHI_CALLBACK_SENT_XOFF,
|
|
+ VCHI_CALLBACK_SENT_XON,
|
|
+
|
|
+ // indicates that a bulk transfer has finished reading the source buffer
|
|
+ VCHI_CALLBACK_BULK_DATA_READ,
|
|
+
|
|
+ // power notification events (currently host side only)
|
|
+ VCHI_CALLBACK_PEER_OFF,
|
|
+ VCHI_CALLBACK_PEER_SUSPENDED,
|
|
+ VCHI_CALLBACK_PEER_ON,
|
|
+ VCHI_CALLBACK_PEER_RESUMED,
|
|
+ VCHI_CALLBACK_FORCED_POWER_OFF,
|
|
+
|
|
+#ifdef USE_VCHIQ_ARM
|
|
+ // some extra notifications provided by vchiq_arm
|
|
+ VCHI_CALLBACK_SERVICE_OPENED,
|
|
+ VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
|
|
+ VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
|
|
+#endif
|
|
+
|
|
+ VCHI_CALLBACK_REASON_MAX
|
|
+} VCHI_CALLBACK_REASON_T;
|
|
+
|
|
+//Callback used by all services / bulk transfers
|
|
+typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
|
|
+ VCHI_CALLBACK_REASON_T reason,
|
|
+ void *handle ); //for transmitting msg's only
|
|
+
|
|
+
|
|
+
|
|
+/*
|
|
+ * Define vector struct for scatter-gather (vector) operations
|
|
+ * Vectors can be nested - if a vector element has negative length, then
|
|
+ * the data pointer is treated as pointing to another vector array, with
|
|
+ * '-vec_len' elements. Thus to append a header onto an existing vector,
|
|
+ * you can do this:
|
|
+ *
|
|
+ * void foo(const VCHI_MSG_VECTOR_T *v, int n)
|
|
+ * {
|
|
+ * VCHI_MSG_VECTOR_T nv[2];
|
|
+ * nv[0].vec_base = my_header;
|
|
+ * nv[0].vec_len = sizeof my_header;
|
|
+ * nv[1].vec_base = v;
|
|
+ * nv[1].vec_len = -n;
|
|
+ * ...
|
|
+ *
|
|
+ */
|
|
+typedef struct vchi_msg_vector {
|
|
+ const void *vec_base;
|
|
+ int32_t vec_len;
|
|
+} VCHI_MSG_VECTOR_T;
|
|
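Completing the pattern sketched in the comment above, a hedged, self-contained example (send_with_header is a made-up helper; VCHI_SERVICE_HANDLE_T and vchi_msg_queuev are declared in vchi.h):

/* Sketch only: prepend 'hdr' to an existing vector array 'v' of 'n'
 * elements and queue the whole thing as one message. */
static int32_t send_with_header(VCHI_SERVICE_HANDLE_T service,
                                const void *hdr, int32_t hdr_len,
                                VCHI_MSG_VECTOR_T *v, int n)
{
   VCHI_MSG_VECTOR_T nv[2];

   nv[0].vec_base = hdr;
   nv[0].vec_len  = hdr_len;
   nv[1].vec_base = v;     /* nested vector array ...            */
   nv[1].vec_len  = -n;    /* ... flagged by the negative length */

   return vchi_msg_queuev(service, nv, 2,
                          VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
}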
+
|
|
+// Opaque type for a connection API
|
|
+typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
|
|
+
|
|
+// Opaque type for a message driver
|
|
+typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
|
|
+
|
|
+
|
|
+// Iterator structure for reading ahead through received message queue. Allocated by client,
|
|
+// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
|
|
+// Iterates over messages in queue at the instant of the call to vchi_msg_look_ahead -
|
|
+// will not proceed to messages received since. Behaviour is undefined if an iterator
|
|
+// is used again after messages for that service are removed/dequeued by any
|
|
+// means other than vchi_msg_iter_... calls on the iterator itself.
|
|
+typedef struct {
|
|
+ struct opaque_vchi_service_t *service;
|
|
+ void *last;
|
|
+ void *next;
|
|
+ void *remove;
|
|
+} VCHI_MSG_ITER_T;
|
|
+
|
|
+
|
|
+#endif // VCHI_COMMON_H_
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchi/vchi.h
|
|
@@ -0,0 +1,373 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHI_H_
|
|
+#define VCHI_H_
|
|
+
|
|
+#include "interface/vchi/vchi_cfg.h"
|
|
+#include "interface/vchi/vchi_common.h"
|
|
+#include "interface/vchi/connections/connection.h"
|
|
+#include "vchi_mh.h"
|
|
+
|
|
+
|
|
+/******************************************************************************
|
|
+ Global defs
|
|
+ *****************************************************************************/
|
|
+
|
|
+#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
|
|
+#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
|
|
+#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
|
|
+
|
|
+#ifdef USE_VCHIQ_ARM
|
|
+#define VCHI_BULK_ALIGNED(x) 1
|
|
+#else
|
|
+#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
|
|
+#endif
|
|
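A worked example of the alignment helpers, assuming VCHI_BULK_ALIGN is 16 (the value vchi_cfg.h selects when __VCCOREVER__ is below 0x04000000) and the non-USE_VCHIQ_ARM definition of VCHI_BULK_ALIGNED:

/* Worked example with VCHI_BULK_ALIGN == 16 and an address of 0x1234:
 *   VCHI_BULK_ROUND_UP(0x1234)     == 0x1240
 *   VCHI_BULK_ROUND_DOWN(0x1234)   == 0x1230
 *   VCHI_BULK_ALIGN_NBYTES(0x1234) == 0x1240 - 0x1234 == 12
 *   VCHI_BULK_ALIGNED(0x1234)      == 0   (0x1230 and 0x1240 would be aligned)
 * Under USE_VCHIQ_ARM the aligned test is hard-wired to 1 because the
 * vchiq_arm pagelist path (see vchiq_2835_arm.c below) copes with unaligned
 * buffers itself.
 */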
+
|
|
+struct vchi_version {
|
|
+ uint32_t version;
|
|
+ uint32_t version_min;
|
|
+};
|
|
+#define VCHI_VERSION(v_) { v_, v_ }
|
|
+#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
|
|
+
|
|
+typedef enum
|
|
+{
|
|
+ VCHI_VEC_POINTER,
|
|
+ VCHI_VEC_HANDLE,
|
|
+ VCHI_VEC_LIST
|
|
+} VCHI_MSG_VECTOR_TYPE_T;
|
|
+
|
|
+typedef struct vchi_msg_vector_ex {
|
|
+
|
|
+ VCHI_MSG_VECTOR_TYPE_T type;
|
|
+ union
|
|
+ {
|
|
+ // a memory handle
|
|
+ struct
|
|
+ {
|
|
+ VCHI_MEM_HANDLE_T handle;
|
|
+ uint32_t offset;
|
|
+ int32_t vec_len;
|
|
+ } handle;
|
|
+
|
|
+ // an ordinary data pointer
|
|
+ struct
|
|
+ {
|
|
+ const void *vec_base;
|
|
+ int32_t vec_len;
|
|
+ } ptr;
|
|
+
|
|
+ // a nested vector list
|
|
+ struct
|
|
+ {
|
|
+ struct vchi_msg_vector_ex *vec;
|
|
+ uint32_t vec_len;
|
|
+ } list;
|
|
+ } u;
|
|
+} VCHI_MSG_VECTOR_EX_T;
|
|
+
|
|
+
|
|
+// Construct an entry in a msg vector for a pointer (p) of length (l)
|
|
+#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
|
|
+
|
|
+// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
|
|
+#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE, { { (h), (o), (l) } }
|
|
+
|
|
+// Macros to manipulate 'FOURCC' values
|
|
+#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
|
|
+#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
|
|
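A short usage sketch for the FOURCC macros in kernel code; "DEMO" is a placeholder service name, not a real service:

/* Illustrative only: build a service id from a four-character name and
 * print it back out again (printk needs <linux/kernel.h> in scope). */
static void fourcc_demo(void)
{
   int32_t service_id = MAKE_FOURCC("DEMO");  /* 'D'<<24 | 'E'<<16 | 'M'<<8 | 'O' */

   printk(KERN_INFO "service id %c%c%c%c\n", FOURCC_TO_CHAR(service_id));
}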
+
|
|
+
|
|
+// Opaque service information
|
|
+struct opaque_vchi_service_t;
|
|
+
|
|
+// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
|
|
+// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
|
|
+typedef struct
|
|
+{
|
|
+ struct opaque_vchi_service_t *service;
|
|
+ void *message;
|
|
+} VCHI_HELD_MSG_T;
|
|
+
|
|
+
|
|
+
|
|
+// structure used to provide the information needed to open a server or a client
|
|
+typedef struct {
|
|
+ struct vchi_version version;
|
|
+ int32_t service_id;
|
|
+ VCHI_CONNECTION_T *connection;
|
|
+ uint32_t rx_fifo_size;
|
|
+ uint32_t tx_fifo_size;
|
|
+ VCHI_CALLBACK_T callback;
|
|
+ void *callback_param;
|
|
+ /* client intends to receive bulk transfers of
|
|
+ odd lengths or into unaligned buffers */
|
|
+ int32_t want_unaligned_bulk_rx;
|
|
+ /* client intends to transmit bulk transfers of
|
|
+ odd lengths or out of unaligned buffers */
|
|
+ int32_t want_unaligned_bulk_tx;
|
|
+ /* client wants to check CRCs on (bulk) xfers.
|
|
+ Only needs to be set at 1 end - will do both directions. */
|
|
+ int32_t want_crc;
|
|
+} SERVICE_CREATION_T;
|
|
+
|
|
+// Opaque handle for a VCHI instance
|
|
+typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
|
|
+
|
|
+// Opaque handle for a server or client
|
|
+typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
|
|
+
|
|
+// Service registration & startup
|
|
+typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
|
|
+
|
|
+typedef struct service_info_tag {
|
|
+ const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
|
|
+ VCHI_SERVICE_INIT init; /* Service initialisation function */
|
|
+ void *vll_handle; /* VLL handle; NULL when unloaded or a "static VLL" in build */
|
|
+} SERVICE_INFO_T;
|
|
+
|
|
+/******************************************************************************
|
|
+ Global funcs - implementation is specific to which side you are on (local / remote)
|
|
+ *****************************************************************************/
|
|
+
|
|
+#ifdef __cplusplus
|
|
+extern "C" {
|
|
+#endif
|
|
+
|
|
+extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
|
|
+ const VCHI_MESSAGE_DRIVER_T * low_level);
|
|
+
|
|
+
|
|
+// Routine used to initialise the vchi on both local + remote connections
|
|
+extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
|
|
+
|
|
+extern int32_t vchi_exit( void );
|
|
+
|
|
+extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
|
|
+ const uint32_t num_connections,
|
|
+ VCHI_INSTANCE_T instance_handle );
|
|
+
|
|
+//When this is called, ensure that all services have no data pending.
|
|
+//Bulk transfers can remain 'queued'
|
|
+extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
|
|
+
|
|
+// Global control over bulk CRC checking
|
|
+extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
|
|
+ VCHI_CRC_CONTROL_T control );
|
|
+
|
|
+// helper functions
|
|
+extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
|
|
+extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
|
|
+extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
|
|
+
|
|
+
|
|
+/******************************************************************************
|
|
+ Global service API
|
|
+ *****************************************************************************/
|
|
+// Routine to create a named service
|
|
+extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
|
|
+ SERVICE_CREATION_T *setup,
|
|
+ VCHI_SERVICE_HANDLE_T *handle );
|
|
+
|
|
+// Routine to destroy a service
|
|
+extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
|
|
+
|
|
+// Routine to open a named service
|
|
+extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
|
|
+ SERVICE_CREATION_T *setup,
|
|
+ VCHI_SERVICE_HANDLE_T *handle);
|
|
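Putting SERVICE_CREATION_T and vchi_service_open() together, a client typically opens a service along these lines. This is a hedged sketch only: the "DEMO" name, the version and the callback are placeholders, and error handling is trimmed.

/* Sketch only: open a hypothetical "DEMO" service on one connection. */
static VCHI_SERVICE_HANDLE_T open_demo_service(VCHI_INSTANCE_T instance,
                                               VCHI_CONNECTION_T *connection,
                                               VCHI_CALLBACK_T callback,
                                               void *callback_param)
{
   VCHI_SERVICE_HANDLE_T handle = NULL;
   SERVICE_CREATION_T setup = {
      VCHI_VERSION(1),          /* version and version_min             */
      MAKE_FOURCC("DEMO"),      /* service_id (placeholder)            */
      connection,
      0, 0,                     /* rx/tx fifo sizes (unused here)      */
      callback, callback_param,
      0, 0, 0,                  /* no unaligned bulk, no CRC           */
   };

   if (vchi_service_open(instance, &setup, &handle) != 0)
      handle = NULL;
   return handle;
}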
+
|
|
+extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
|
|
+ short *peer_version );
|
|
+
|
|
+// Routine to close a named service
|
|
+extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
|
|
+
|
|
+// Routine to increment ref count on a named service
|
|
+extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
|
|
+
|
|
+// Routine to decrement ref count on a named service
|
|
+extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
|
|
+
|
|
+// Routine to send a message across a service
|
|
+extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
|
|
+ const void *data,
|
|
+ uint32_t data_size,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *msg_handle );
|
|
+
|
|
+// scatter-gather (vector) and send message
|
|
+int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
|
|
+ VCHI_MSG_VECTOR_EX_T *vector,
|
|
+ uint32_t count,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *msg_handle );
|
|
+
|
|
+// legacy scatter-gather (vector) and send message, only handles pointers
|
|
+int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
|
|
+ VCHI_MSG_VECTOR_T *vector,
|
|
+ uint32_t count,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *msg_handle );
|
|
+
|
|
+// Routine to receive a msg from a service
|
|
+// Dequeue is equivalent to hold, copy into client buffer, release
|
|
+extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
|
|
+ void *data,
|
|
+ uint32_t max_data_size_to_read,
|
|
+ uint32_t *actual_msg_size,
|
|
+ VCHI_FLAGS_T flags );
|
|
+
|
|
+// Routine to look at a message in place.
|
|
+// The message is not dequeued, so a subsequent call to peek or dequeue
|
|
+// will return the same message.
|
|
+extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
|
|
+ void **data,
|
|
+ uint32_t *msg_size,
|
|
+ VCHI_FLAGS_T flags );
|
|
+
|
|
+// Routine to remove a message after it has been read in place with peek
|
|
+// The first message on the queue is dequeued.
|
|
+extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
|
|
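The peek/remove pair gives a zero-copy read; a minimal sketch (peek_and_discard is a made-up helper):

/* Sketch only: inspect the next message in place, then discard it. */
static void peek_and_discard(VCHI_SERVICE_HANDLE_T handle)
{
   void *data;
   uint32_t size;

   if (vchi_msg_peek(handle, &data, &size, VCHI_FLAGS_NONE) == 0) {
      /* parse 'data'/'size' directly in the receive slot here */
      vchi_msg_remove(handle);  /* now the slot space can be recycled */
   }
}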
+
|
|
+// Routine to look at a message in place.
|
|
+// The message is dequeued, so the caller is left holding it; the descriptor is
|
|
+// filled in and must be released when the user has finished with the message.
|
|
+extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
|
|
+ void **data, // } may be NULL, as info can be
|
|
+ uint32_t *msg_size, // } obtained from HELD_MSG_T
|
|
+ VCHI_FLAGS_T flags,
|
|
+ VCHI_HELD_MSG_T *message_descriptor );
|
|
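Unlike peek, a held message stays valid after the call and must be handed back with vchi_held_msg_release() (declared below); a minimal sketch (hold_and_release is a made-up helper):

/* Sketch only: take ownership of the next message, use it, release it. */
static void hold_and_release(VCHI_SERVICE_HANDLE_T handle)
{
   VCHI_HELD_MSG_T held;
   void *data;
   uint32_t size;

   if (vchi_msg_hold(handle, &data, &size, VCHI_FLAGS_NONE, &held) == 0) {
      /* 'data'/'size' remain valid, even as new messages arrive,
       * until the descriptor is released. */
      vchi_held_msg_release(&held);
   }
}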
+
|
|
+// Initialise an iterator to look through messages in place
|
|
+extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
|
|
+ VCHI_MSG_ITER_T *iter,
|
|
+ VCHI_FLAGS_T flags );
|
|
+
|
|
+/******************************************************************************
|
|
+ Global service support API - operations on held messages and message iterators
|
|
+ *****************************************************************************/
|
|
+
|
|
+// Routine to get the address of a held message
|
|
+extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
|
|
+
|
|
+// Routine to get the size of a held message
|
|
+extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
|
|
+
|
|
+// Routine to get the transmit timestamp as written into the header by the peer
|
|
+extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
|
|
+
|
|
+// Routine to get the reception timestamp, written as we parsed the header
|
|
+extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
|
|
+
|
|
+// Routine to release a held message after it has been processed
|
|
+extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
|
|
+
|
|
+// Indicates whether the iterator has a next message.
|
|
+extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
|
|
+
|
|
+// Return the pointer and length for the next message and advance the iterator.
|
|
+extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
|
|
+ void **data,
|
|
+ uint32_t *msg_size );
|
|
+
|
|
+// Remove the last message returned by vchi_msg_iter_next.
|
|
+// Can only be called once after each call to vchi_msg_iter_next.
|
|
+extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
|
|
+
|
|
+// Hold the last message returned by vchi_msg_iter_next.
|
|
+// Can only be called once after each call to vchi_msg_iter_next.
|
|
+extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
|
|
+ VCHI_HELD_MSG_T *message );
|
|
+
|
|
+// Return information for the next message, and hold it, advancing the iterator.
|
|
+extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
|
|
+ void **data, // } may be NULL
|
|
+ uint32_t *msg_size, // }
|
|
+ VCHI_HELD_MSG_T *message );
|
|
+
|
|
+
|
|
+/******************************************************************************
|
|
+ Global bulk API
|
|
+ *****************************************************************************/
|
|
+
|
|
+// Routine to prepare interface for a transfer from the other side
|
|
+extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
|
|
+ void *data_dst,
|
|
+ uint32_t data_size,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *transfer_handle );
|
|
+
|
|
+
|
|
+// Prepare interface for a transfer from the other side into relocatable memory.
|
|
+int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
|
|
+ VCHI_MEM_HANDLE_T h_dst,
|
|
+ uint32_t offset,
|
|
+ uint32_t data_size,
|
|
+ const VCHI_FLAGS_T flags,
|
|
+ void * const bulk_handle );
|
|
+
|
|
+// Routine to queue up data ready for transfer to the other side (once it has signalled it is ready)
|
|
+extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
|
|
+ const void *data_src,
|
|
+ uint32_t data_size,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *transfer_handle );
|
|
+
|
|
+
|
|
+/******************************************************************************
|
|
+ Configuration plumbing
|
|
+ *****************************************************************************/
|
|
+
|
|
+// function prototypes for the different mid layers (the state info gives the different physical connections)
|
|
+extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
|
|
+//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
|
|
+//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
|
|
+
|
|
+// declare all message drivers here
|
|
+const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
|
|
+
|
|
+#ifdef __cplusplus
|
|
+}
|
|
+#endif
|
|
+
|
|
+extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
|
|
+ VCHI_MEM_HANDLE_T h_src,
|
|
+ uint32_t offset,
|
|
+ uint32_t data_size,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *transfer_handle );
|
|
+#endif /* VCHI_H_ */
|
|
+
|
|
+/****************************** End of file **********************************/
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
|
|
@@ -0,0 +1,42 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHI_MH_H_
|
|
+#define VCHI_MH_H_
|
|
+
|
|
+#include <linux/types.h>
|
|
+
|
|
+typedef int32_t VCHI_MEM_HANDLE_T;
|
|
+#define VCHI_MEM_HANDLE_INVALID 0
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
|
|
@@ -0,0 +1,538 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/irq.h>
|
|
+#include <linux/pagemap.h>
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/uaccess.h>
|
|
+#include <asm/pgtable.h>
|
|
+
|
|
+#include <mach/irqs.h>
|
|
+
|
|
+#include <mach/platform.h>
|
|
+#include <mach/vcio.h>
|
|
+
|
|
+#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
|
|
+
|
|
+#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
|
|
+#define VCHIQ_ARM_ADDRESS(x) ((void *)__virt_to_bus((unsigned)x))
|
|
+
|
|
+#include "vchiq_arm.h"
|
|
+#include "vchiq_2835.h"
|
|
+#include "vchiq_connected.h"
|
|
+
|
|
+#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
|
|
+
|
|
+typedef struct vchiq_2835_state_struct {
|
|
+ int inited;
|
|
+ VCHIQ_ARM_STATE_T arm_state;
|
|
+} VCHIQ_2835_ARM_STATE_T;
|
|
+
|
|
+static char *g_slot_mem;
|
|
+static int g_slot_mem_size;
|
|
+dma_addr_t g_slot_phys;
|
|
+static FRAGMENTS_T *g_fragments_base;
|
|
+static FRAGMENTS_T *g_free_fragments;
|
|
+struct semaphore g_free_fragments_sema;
|
|
+
|
|
+extern int vchiq_arm_log_level;
|
|
+
|
|
+static DEFINE_SEMAPHORE(g_free_fragments_mutex);
|
|
+
|
|
+static irqreturn_t
|
|
+vchiq_doorbell_irq(int irq, void *dev_id);
|
|
+
|
|
+static int
|
|
+create_pagelist(char __user *buf, size_t count, unsigned short type,
|
|
+ struct task_struct *task, PAGELIST_T ** ppagelist);
|
|
+
|
|
+static void
|
|
+free_pagelist(PAGELIST_T *pagelist, int actual);
|
|
+
|
|
+int __init
|
|
+vchiq_platform_init(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
|
|
+ int frag_mem_size;
|
|
+ int err;
|
|
+ int i;
|
|
+
|
|
+ /* Allocate space for the channels in coherent memory */
|
|
+ g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
|
|
+ frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
|
|
+
|
|
+ g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
|
|
+ &g_slot_phys, GFP_ATOMIC);
|
|
+
|
|
+ if (!g_slot_mem) {
|
|
+ vchiq_log_error(vchiq_arm_log_level,
|
|
+ "Unable to allocate channel memory");
|
|
+ err = -ENOMEM;
|
|
+ goto failed_alloc;
|
|
+ }
|
|
+
|
|
+ WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);
|
|
+
|
|
+ vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
|
|
+ if (!vchiq_slot_zero) {
|
|
+ err = -EINVAL;
|
|
+ goto failed_init_slots;
|
|
+ }
|
|
+
|
|
+ vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
|
|
+ (int)g_slot_phys + g_slot_mem_size;
|
|
+ vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
|
|
+ MAX_FRAGMENTS;
|
|
+
|
|
+ g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
|
|
+ g_slot_mem_size += frag_mem_size;
|
|
+
|
|
+ g_free_fragments = g_fragments_base;
|
|
+ for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
|
|
+ *(FRAGMENTS_T **)&g_fragments_base[i] =
|
|
+ &g_fragments_base[i + 1];
|
|
+ }
|
|
+ *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
|
|
+ sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
|
|
+
|
|
+ if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
|
|
+ VCHIQ_SUCCESS) {
|
|
+ err = -EINVAL;
|
|
+ goto failed_vchiq_init;
|
|
+ }
|
|
+
|
|
+ err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
|
|
+ IRQF_IRQPOLL, "VCHIQ doorbell",
|
|
+ state);
|
|
+ if (err < 0) {
|
|
+ vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
|
|
+ "irq=%d err=%d", __func__,
|
|
+ VCHIQ_DOORBELL_IRQ, err);
|
|
+ goto failed_request_irq;
|
|
+ }
|
|
+
|
|
+ /* Send the base address of the slots to VideoCore */
|
|
+
|
|
+ dsb(); /* Ensure all writes have completed */
|
|
+
|
|
+ bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);
|
|
+
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "vchiq_init - done (slots %x, phys %x)",
|
|
+ (unsigned int)vchiq_slot_zero, g_slot_phys);
|
|
+
|
|
+ vchiq_call_connected_callbacks();
|
|
+
|
|
+ return 0;
|
|
+
|
|
+failed_request_irq:
|
|
+failed_vchiq_init:
|
|
+failed_init_slots:
|
|
+ dma_free_coherent(NULL, g_slot_mem_size, g_slot_mem, g_slot_phys);
|
|
+
|
|
+failed_alloc:
|
|
+ return err;
|
|
+}
|
|
+
|
|
+void __exit
|
|
+vchiq_platform_exit(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ free_irq(VCHIQ_DOORBELL_IRQ, state);
|
|
+ dma_free_coherent(NULL, g_slot_mem_size,
|
|
+ g_slot_mem, g_slot_phys);
|
|
+}
|
|
+
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_platform_init_state(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
|
|
+ state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
|
|
+ ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
|
|
+ status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
|
|
+ if(status != VCHIQ_SUCCESS)
|
|
+ {
|
|
+ ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
|
|
+ }
|
|
+ return status;
|
|
+}
|
|
+
|
|
+VCHIQ_ARM_STATE_T*
|
|
+vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
|
|
+ {
|
|
+ BUG();
|
|
+ }
|
|
+ return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
|
|
+}
|
|
+
|
|
+void
|
|
+remote_event_signal(REMOTE_EVENT_T *event)
|
|
+{
|
|
+ wmb();
|
|
+
|
|
+ event->fired = 1;
|
|
+
|
|
+ dsb(); /* data barrier operation */
|
|
+
|
|
+ if (event->armed) {
|
|
+ /* trigger vc interrupt */
|
|
+
|
|
+ writel(0, __io_address(ARM_0_BELL2));
|
|
+ }
|
|
+}
|
|
+
|
|
+int
|
|
+vchiq_copy_from_user(void *dst, const void *src, int size)
|
|
+{
|
|
+ if ((uint32_t)src < TASK_SIZE) {
|
|
+ return copy_from_user(dst, src, size);
|
|
+ } else {
|
|
+ memcpy(dst, src, size);
|
|
+ return 0;
|
|
+ }
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
|
|
+ void *offset, int size, int dir)
|
|
+{
|
|
+ PAGELIST_T *pagelist;
|
|
+ int ret;
|
|
+
|
|
+ WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
|
|
+
|
|
+ ret = create_pagelist((char __user *)offset, size,
|
|
+ (dir == VCHIQ_BULK_RECEIVE)
|
|
+ ? PAGELIST_READ
|
|
+ : PAGELIST_WRITE,
|
|
+ current,
|
|
+ &pagelist);
|
|
+ if (ret != 0)
|
|
+ return VCHIQ_ERROR;
|
|
+
|
|
+ bulk->handle = memhandle;
|
|
+ bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
|
|
+
|
|
+ /* Store the pagelist address in remote_data, which isn't used by the
|
|
+ slave. */
|
|
+ bulk->remote_data = pagelist;
|
|
+
|
|
+ return VCHIQ_SUCCESS;
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
|
|
+{
|
|
+ if (bulk && bulk->remote_data && bulk->actual)
|
|
+ free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
|
|
+{
|
|
+ /*
|
|
+ * This should only be called on the master (VideoCore) side, but
|
|
+ * provide an implementation to avoid the need for ifdefery.
|
|
+ */
|
|
+ BUG();
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_dump_platform_state(void *dump_context)
|
|
+{
|
|
+ char buf[80];
|
|
+ int len;
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ " Platform: 2835 (VC master)");
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_platform_suspend(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ return VCHIQ_ERROR;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_platform_resume(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ return VCHIQ_SUCCESS;
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_platform_paused(VCHIQ_STATE_T *state)
|
|
+{
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_platform_resumed(VCHIQ_STATE_T *state)
|
|
+{
|
|
+}
|
|
+
|
|
+int
|
|
+vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
|
|
+{
|
|
+ return 1; // autosuspend not supported - videocore always wanted
|
|
+}
|
|
+
|
|
+int
|
|
+vchiq_platform_use_suspend_timer(void)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+void
|
|
+vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ vchiq_log_info((vchiq_arm_log_level>=VCHIQ_LOG_INFO),"Suspend timer not in use");
|
|
+}
|
|
+void
|
|
+vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ (void)state;
|
|
+}
|
|
+/*
|
|
+ * Local functions
|
|
+ */
|
|
+
|
|
+static irqreturn_t
|
|
+vchiq_doorbell_irq(int irq, void *dev_id)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = dev_id;
|
|
+ irqreturn_t ret = IRQ_NONE;
|
|
+ unsigned int status;
|
|
+
|
|
+ /* Read (and clear) the doorbell */
|
|
+ status = readl(__io_address(ARM_0_BELL0));
|
|
+
|
|
+ if (status & 0x4) { /* Was the doorbell rung? */
|
|
+ remote_event_pollall(state);
|
|
+ ret = IRQ_HANDLED;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/* There is a potential problem with partial cache lines (pages?)
|
|
+** at the ends of the block when reading. If the CPU accessed anything in
|
|
+** the same line (page?) then it may have pulled old data into the cache,
|
|
+** obscuring the new data underneath. We can solve this by transferring the
|
|
+** partial cache lines separately, and allowing the ARM to copy into the
|
|
+** cached area.
|
|
+
|
|
+** N.B. This implementation plays slightly fast and loose with the Linux
|
|
+** driver programming rules, e.g. its use of __virt_to_bus instead of
|
|
+** dma_map_single, but it isn't a multi-platform driver and it benefits
|
|
+** from increased speed as a result.
|
|
+*/
|
|
+
|
|
+static int
|
|
+create_pagelist(char __user *buf, size_t count, unsigned short type,
|
|
+ struct task_struct *task, PAGELIST_T ** ppagelist)
|
|
+{
|
|
+ PAGELIST_T *pagelist;
|
|
+ struct page **pages;
|
|
+ struct page *page;
|
|
+ unsigned long *addrs;
|
|
+ unsigned int num_pages, offset, i;
|
|
+ char *addr, *base_addr, *next_addr;
|
|
+ int run, addridx, actual_pages;
|
|
+
|
|
+ offset = (unsigned int)buf & (PAGE_SIZE - 1);
|
|
+ num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
|
|
+
|
|
+ *ppagelist = NULL;
|
|
+
|
|
+ /* Allocate enough storage to hold the page pointers and the page
|
|
+ ** list
|
|
+ */
|
|
+ pagelist = kmalloc(sizeof(PAGELIST_T) +
|
|
+ (num_pages * sizeof(unsigned long)) +
|
|
+ (num_pages * sizeof(pages[0])),
|
|
+ GFP_KERNEL);
|
|
+
|
|
+ vchiq_log_trace(vchiq_arm_log_level,
|
|
+ "create_pagelist - %x", (unsigned int)pagelist);
|
|
+ if (!pagelist)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ addrs = pagelist->addrs;
|
|
+ pages = (struct page **)(addrs + num_pages);
|
|
+
|
|
+ down_read(&task->mm->mmap_sem);
|
|
+ actual_pages = get_user_pages(task, task->mm,
|
|
+ (unsigned long)buf & ~(PAGE_SIZE - 1), num_pages,
|
|
+ (type == PAGELIST_READ) /*Write */ , 0 /*Force */ ,
|
|
+ pages, NULL /*vmas */);
|
|
+ up_read(&task->mm->mmap_sem);
|
|
+
|
|
+ if (actual_pages != num_pages)
|
|
+ {
|
|
+ /* This is probably due to the process being killed */
|
|
+ while (actual_pages > 0)
|
|
+ {
|
|
+ actual_pages--;
|
|
+ page_cache_release(pages[actual_pages]);
|
|
+ }
|
|
+ kfree(pagelist);
|
|
+ if (actual_pages == 0)
|
|
+ actual_pages = -ENOMEM;
|
|
+ return actual_pages;
|
|
+ }
|
|
+
|
|
+ pagelist->length = count;
|
|
+ pagelist->type = type;
|
|
+ pagelist->offset = offset;
|
|
+
|
|
+ /* Group the pages into runs of contiguous pages */
|
|
+
|
|
+ base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
|
|
+ next_addr = base_addr + PAGE_SIZE;
|
|
+ addridx = 0;
|
|
+ run = 0;
|
|
+
|
|
+ for (i = 1; i < num_pages; i++) {
|
|
+ addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
|
|
+ if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
|
|
+ next_addr += PAGE_SIZE;
|
|
+ run++;
|
|
+ } else {
|
|
+ addrs[addridx] = (unsigned long)base_addr + run;
|
|
+ addridx++;
|
|
+ base_addr = addr;
|
|
+ next_addr = addr + PAGE_SIZE;
|
|
+ run = 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ addrs[addridx] = (unsigned long)base_addr + run;
|
|
+ addridx++;
|
|
+
|
|
+ /* Partial cache lines (fragments) require special measures */
|
|
+ if ((type == PAGELIST_READ) &&
|
|
+ ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
|
|
+ ((pagelist->offset + pagelist->length) &
|
|
+ (CACHE_LINE_SIZE - 1)))) {
|
|
+ FRAGMENTS_T *fragments;
|
|
+
|
|
+ if (down_interruptible(&g_free_fragments_sema) != 0) {
|
|
+ kfree(pagelist);
|
|
+ return -EINTR;
|
|
+ }
|
|
+
|
|
+ WARN_ON(g_free_fragments == NULL);
|
|
+
|
|
+ down(&g_free_fragments_mutex);
|
|
+ fragments = (FRAGMENTS_T *) g_free_fragments;
|
|
+ WARN_ON(fragments == NULL);
|
|
+ g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
|
|
+ up(&g_free_fragments_mutex);
|
|
+ pagelist->type =
|
|
+ PAGELIST_READ_WITH_FRAGMENTS + (fragments -
|
|
+ g_fragments_base);
|
|
+ }
|
|
+
|
|
+ for (page = virt_to_page(pagelist);
|
|
+ page <= virt_to_page(addrs + num_pages - 1); page++) {
|
|
+ flush_dcache_page(page);
|
|
+ }
|
|
+
|
|
+ *ppagelist = pagelist;
|
|
+
|
|
+ return 0;
|
|
+}
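/* Editorial note: a minimal sketch, not part of the patch, showing how the
 * addrs[] entries built by create_pagelist() above could be decoded.  Each
 * entry is a page-aligned address with the run length (number of extra
 * contiguous pages) packed into its low bits; the helper name is
 * illustrative only.
 */
static inline void decode_pagelist_entry(unsigned long entry,
	unsigned long *base, unsigned int *num_pages)
{
	*base = entry & ~(PAGE_SIZE - 1);		/* page-aligned start of run */
	*num_pages = (entry & (PAGE_SIZE - 1)) + 1;	/* pages in this run */
}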
|
|
+
|
|
+static void
|
|
+free_pagelist(PAGELIST_T *pagelist, int actual)
|
|
+{
|
|
+ struct page **pages;
|
|
+ unsigned int num_pages, i;
|
|
+
|
|
+ vchiq_log_trace(vchiq_arm_log_level,
|
|
+ "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
|
|
+
|
|
+ num_pages =
|
|
+ (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
|
|
+ PAGE_SIZE;
|
|
+
|
|
+ pages = (struct page **)(pagelist->addrs + num_pages);
|
|
+
|
|
+ /* Deal with any partial cache lines (fragments) */
|
|
+ if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
|
|
+ FRAGMENTS_T *fragments = g_fragments_base +
|
|
+ (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
|
|
+ int head_bytes, tail_bytes;
|
|
+ head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
|
|
+ (CACHE_LINE_SIZE - 1);
|
|
+ tail_bytes = (pagelist->offset + actual) &
|
|
+ (CACHE_LINE_SIZE - 1);
|
|
+
|
|
+ if ((actual >= 0) && (head_bytes != 0)) {
|
|
+ if (head_bytes > actual)
|
|
+ head_bytes = actual;
|
|
+
|
|
+ memcpy((char *)page_address(pages[0]) +
|
|
+ pagelist->offset,
|
|
+ fragments->headbuf,
|
|
+ head_bytes);
|
|
+ }
|
|
+ if ((actual >= 0) && (head_bytes < actual) &&
|
|
+ (tail_bytes != 0)) {
|
|
+ memcpy((char *)page_address(pages[num_pages - 1]) +
|
|
+ ((pagelist->offset + actual) &
|
|
+ (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
|
|
+ fragments->tailbuf, tail_bytes);
|
|
+ }
|
|
+
|
|
+ down(&g_free_fragments_mutex);
|
|
+ *(FRAGMENTS_T **) fragments = g_free_fragments;
|
|
+ g_free_fragments = fragments;
|
|
+ up(&g_free_fragments_mutex);
|
|
+ up(&g_free_fragments_sema);
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < num_pages; i++) {
|
|
+ if (pagelist->type != PAGELIST_WRITE)
|
|
+ set_page_dirty(pages[i]);
|
|
+ page_cache_release(pages[i]);
|
|
+ }
|
|
+
|
|
+ kfree(pagelist);
|
|
+}
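/* Editorial note: a worked example, not part of the patch, of the fragment
 * arithmetic in free_pagelist() above, assuming CACHE_LINE_SIZE == 32.
 * For a PAGELIST_READ with offset 10 and actual length 100:
 *
 *	head_bytes = (32 - 10) & 31 = 22  (copied from fragments->headbuf)
 *	tail_bytes = (10 + 100) & 31 = 14 (copied from fragments->tailbuf)
 *
 * The remaining 64 cache-line-aligned bytes in the middle were written
 * directly by the VideoCore DMA; only the partial cache lines at either end
 * are patched up by the ARM.
 */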
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
|
|
@@ -0,0 +1,42 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHIQ_2835_H
|
|
+#define VCHIQ_2835_H
|
|
+
|
|
+#include "vchiq_pagelist.h"
|
|
+
|
|
+#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
|
|
+#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
|
|
+
|
|
+#endif /* VCHIQ_2835_H */
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
|
|
@@ -0,0 +1,2806 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/cdev.h>
|
|
+#include <linux/fs.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/mm.h>
|
|
+#include <linux/highmem.h>
|
|
+#include <linux/pagemap.h>
|
|
+#include <linux/bug.h>
|
|
+#include <linux/semaphore.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/proc_fs.h>
|
|
+
|
|
+#include "vchiq_core.h"
|
|
+#include "vchiq_ioctl.h"
|
|
+#include "vchiq_arm.h"
|
|
+
|
|
+#define DEVICE_NAME "vchiq"
|
|
+
|
|
+/* Override the default prefix, which would be vchiq_arm (from the filename) */
|
|
+#undef MODULE_PARAM_PREFIX
|
|
+#define MODULE_PARAM_PREFIX DEVICE_NAME "."
|
|
+
|
|
+#define VCHIQ_MINOR 0
|
|
+
|
|
+/* Some per-instance constants */
|
|
+#define MAX_COMPLETIONS 16
|
|
+#define MAX_SERVICES 64
|
|
+#define MAX_ELEMENTS 8
|
|
+#define MSG_QUEUE_SIZE 64
|
|
+
|
|
+#define KEEPALIVE_VER 1
|
|
+#define KEEPALIVE_VER_MIN KEEPALIVE_VER
|
|
+
|
|
+/* Run time control of log level, based on KERN_XXX level. */
|
|
+int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
|
|
+int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
|
|
+
|
|
+#define SUSPEND_TIMER_TIMEOUT_MS 100
|
|
+#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
|
|
+
|
|
+#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
|
|
+static const char *const suspend_state_names[] = {
|
|
+ "VC_SUSPEND_FORCE_CANCELED",
|
|
+ "VC_SUSPEND_REJECTED",
|
|
+ "VC_SUSPEND_FAILED",
|
|
+ "VC_SUSPEND_IDLE",
|
|
+ "VC_SUSPEND_REQUESTED",
|
|
+ "VC_SUSPEND_IN_PROGRESS",
|
|
+ "VC_SUSPEND_SUSPENDED"
|
|
+};
|
|
+#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
|
|
+static const char *const resume_state_names[] = {
|
|
+ "VC_RESUME_FAILED",
|
|
+ "VC_RESUME_IDLE",
|
|
+ "VC_RESUME_REQUESTED",
|
|
+ "VC_RESUME_IN_PROGRESS",
|
|
+ "VC_RESUME_RESUMED"
|
|
+};
|
|
+/* The number of times we allow force suspend to timeout before actually
|
|
+** _forcing_ suspend. This is to cater for SW which fails to release vchiq
|
|
+** correctly - we don't want to prevent ARM suspend indefinitely in this case.
|
|
+*/
|
|
+#define FORCE_SUSPEND_FAIL_MAX 8
|
|
+
|
|
+/* The time in ms allowed for videocore to go idle when force suspend has been
|
|
+ * requested */
|
|
+#define FORCE_SUSPEND_TIMEOUT_MS 200
|
|
+
|
|
+
|
|
+static void suspend_timer_callback(unsigned long context);
|
|
+static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
|
|
+static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
|
|
+
|
|
+
|
|
+typedef struct user_service_struct {
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+ void *userdata;
|
|
+ VCHIQ_INSTANCE_T instance;
|
|
+ int is_vchi;
|
|
+ int dequeue_pending;
|
|
+ int message_available_pos;
|
|
+ int msg_insert;
|
|
+ int msg_remove;
|
|
+ struct semaphore insert_event;
|
|
+ struct semaphore remove_event;
|
|
+ VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
|
|
+} USER_SERVICE_T;
|
|
+
|
|
+struct bulk_waiter_node {
|
|
+ struct bulk_waiter bulk_waiter;
|
|
+ int pid;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct vchiq_instance_struct {
|
|
+ VCHIQ_STATE_T *state;
|
|
+ VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
|
|
+ int completion_insert;
|
|
+ int completion_remove;
|
|
+ struct semaphore insert_event;
|
|
+ struct semaphore remove_event;
|
|
+ struct mutex completion_mutex;
|
|
+
|
|
+ int connected;
|
|
+ int closing;
|
|
+ int pid;
|
|
+ int mark;
|
|
+
|
|
+ struct list_head bulk_waiter_list;
|
|
+ struct mutex bulk_waiter_list_mutex;
|
|
+
|
|
+ struct proc_dir_entry *proc_entry;
|
|
+};
|
|
+
|
|
+typedef struct dump_context_struct {
|
|
+ char __user *buf;
|
|
+ size_t actual;
|
|
+ size_t space;
|
|
+ loff_t offset;
|
|
+} DUMP_CONTEXT_T;
|
|
+
|
|
+static struct cdev vchiq_cdev;
|
|
+static dev_t vchiq_devid;
|
|
+static VCHIQ_STATE_T g_state;
|
|
+static struct class *vchiq_class;
|
|
+static struct device *vchiq_dev;
|
|
+static DEFINE_SPINLOCK(msg_queue_spinlock);
|
|
+
|
|
+static const char *const ioctl_names[] = {
|
|
+ "CONNECT",
|
|
+ "SHUTDOWN",
|
|
+ "CREATE_SERVICE",
|
|
+ "REMOVE_SERVICE",
|
|
+ "QUEUE_MESSAGE",
|
|
+ "QUEUE_BULK_TRANSMIT",
|
|
+ "QUEUE_BULK_RECEIVE",
|
|
+ "AWAIT_COMPLETION",
|
|
+ "DEQUEUE_MESSAGE",
|
|
+ "GET_CLIENT_ID",
|
|
+ "GET_CONFIG",
|
|
+ "CLOSE_SERVICE",
|
|
+ "USE_SERVICE",
|
|
+ "RELEASE_SERVICE",
|
|
+ "SET_SERVICE_OPTION",
|
|
+ "DUMP_PHYS_MEM"
|
|
+};
|
|
+
|
|
+vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
|
|
+ (VCHIQ_IOC_MAX + 1));
|
|
+
|
|
+static void
|
|
+dump_phys_mem(void *virt_addr, uint32_t num_bytes);
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* add_completion
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+static VCHIQ_STATUS_T
|
|
+add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
|
|
+ VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
|
|
+ void *bulk_userdata)
|
|
+{
|
|
+ VCHIQ_COMPLETION_DATA_T *completion;
|
|
+ DEBUG_INITIALISE(g_state.local)
|
|
+
|
|
+ while (instance->completion_insert ==
|
|
+ (instance->completion_remove + MAX_COMPLETIONS)) {
|
|
+ /* Out of space - wait for the client */
|
|
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
|
+ vchiq_log_trace(vchiq_arm_log_level,
|
|
+ "add_completion - completion queue full");
|
|
+ DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
|
|
+ if (down_interruptible(&instance->remove_event) != 0) {
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "service_callback interrupted");
|
|
+ return VCHIQ_RETRY;
|
|
+ } else if (instance->closing) {
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "service_callback closing");
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
|
+ }
|
|
+
|
|
+ completion =
|
|
+ &instance->completions[instance->completion_insert &
|
|
+ (MAX_COMPLETIONS - 1)];
|
|
+
|
|
+ completion->header = header;
|
|
+ completion->reason = reason;
|
|
+ /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
|
|
+ completion->service_userdata = user_service->service;
|
|
+ completion->bulk_userdata = bulk_userdata;
|
|
+
|
|
+ if (reason == VCHIQ_SERVICE_CLOSED)
|
|
+ /* Take an extra reference, to be held until
|
|
+ this CLOSED notification is delivered. */
|
|
+ lock_service(user_service->service);
|
|
+
|
|
+ /* A write barrier is needed here to ensure that the entire completion
|
|
+ record is written out before the insert point. */
|
|
+ wmb();
|
|
+
|
|
+ if (reason == VCHIQ_MESSAGE_AVAILABLE)
|
|
+ user_service->message_available_pos =
|
|
+ instance->completion_insert;
|
|
+ instance->completion_insert++;
|
|
+
|
|
+ up(&instance->insert_event);
|
|
+
|
|
+ return VCHIQ_SUCCESS;
|
|
+}
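/* Editorial note: a minimal sketch, not part of the patch, restating the
 * free-running index convention used by the completion queue above.  Both
 * indices only ever increase; masking selects the slot, so MAX_COMPLETIONS
 * (16 here) must be a power of two.
 */
static inline int completion_queue_full(VCHIQ_INSTANCE_T instance)
{
	return instance->completion_insert ==
		(instance->completion_remove + MAX_COMPLETIONS);
}

static inline VCHIQ_COMPLETION_DATA_T *
completion_slot(VCHIQ_INSTANCE_T instance, int index)
{
	return &instance->completions[index & (MAX_COMPLETIONS - 1)];
}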
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* service_callback
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+static VCHIQ_STATUS_T
|
|
+service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
|
|
+ VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
|
|
+{
|
|
+ /* How do we ensure the callback goes to the right client?
|
|
+ ** The service_user data points to a USER_SERVICE_T record containing
|
|
+ ** the original callback and the user state structure, which contains a
|
|
+ ** circular buffer for completion records.
|
|
+ */
|
|
+ USER_SERVICE_T *user_service;
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+ VCHIQ_INSTANCE_T instance;
|
|
+ DEBUG_INITIALISE(g_state.local)
|
|
+
|
|
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
|
+
|
|
+ service = handle_to_service(handle);
|
|
+ BUG_ON(!service);
|
|
+ user_service = (USER_SERVICE_T *)service->base.userdata;
|
|
+ instance = user_service->instance;
|
|
+
|
|
+ if (!instance || instance->closing)
|
|
+ return VCHIQ_SUCCESS;
|
|
+
|
|
+ vchiq_log_trace(vchiq_arm_log_level,
|
|
+ "service_callback - service %lx(%d), reason %d, header %lx, "
|
|
+ "instance %lx, bulk_userdata %lx",
|
|
+ (unsigned long)user_service,
|
|
+ service->localport,
|
|
+ reason, (unsigned long)header,
|
|
+ (unsigned long)instance, (unsigned long)bulk_userdata);
|
|
+
|
|
+ if (header && user_service->is_vchi) {
|
|
+ spin_lock(&msg_queue_spinlock);
|
|
+ while (user_service->msg_insert ==
|
|
+ (user_service->msg_remove + MSG_QUEUE_SIZE)) {
|
|
+ spin_unlock(&msg_queue_spinlock);
|
|
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
|
+ DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
|
|
+ vchiq_log_trace(vchiq_arm_log_level,
|
|
+ "service_callback - msg queue full");
|
|
+ /* If there is no MESSAGE_AVAILABLE in the completion
|
|
+ ** queue, add one
|
|
+ */
|
|
+ if ((user_service->message_available_pos -
|
|
+ instance->completion_remove) < 0) {
|
|
+ VCHIQ_STATUS_T status;
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "Inserting extra MESSAGE_AVAILABLE");
|
|
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
|
+ status = add_completion(instance, reason,
|
|
+ NULL, user_service, bulk_userdata);
|
|
+ if (status != VCHIQ_SUCCESS) {
|
|
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
|
+ return status;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
|
+ if (down_interruptible(&user_service->remove_event)
|
|
+ != 0) {
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "service_callback interrupted");
|
|
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
|
+ return VCHIQ_RETRY;
|
|
+ } else if (instance->closing) {
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "service_callback closing");
|
|
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
|
+ spin_lock(&msg_queue_spinlock);
|
|
+ }
|
|
+
|
|
+ user_service->msg_queue[user_service->msg_insert &
|
|
+ (MSG_QUEUE_SIZE - 1)] = header;
|
|
+ user_service->msg_insert++;
|
|
+ spin_unlock(&msg_queue_spinlock);
|
|
+
|
|
+ up(&user_service->insert_event);
|
|
+
|
|
+ /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
|
|
+ ** there is a MESSAGE_AVAILABLE in the completion queue then
|
|
+ ** bypass the completion queue.
|
|
+ */
|
|
+ if (((user_service->message_available_pos -
|
|
+ instance->completion_remove) >= 0) ||
|
|
+ user_service->dequeue_pending) {
|
|
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
|
+ user_service->dequeue_pending = 0;
|
|
+ return VCHIQ_SUCCESS;
|
|
+ }
|
|
+
|
|
+ header = NULL;
|
|
+ }
|
|
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
|
|
+
|
|
+ return add_completion(instance, reason, header, user_service,
|
|
+ bulk_userdata);
|
|
+}
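/* Editorial note: a minimal sketch, not part of the patch, of the bypass
 * test used in service_callback() above for VCHI-style services.  Once the
 * message has been queued on user_service->msg_queue, a completion record is
 * only added when nothing else will wake the client.
 */
static inline int completion_needed(USER_SERVICE_T *user_service,
	VCHIQ_INSTANCE_T instance)
{
	return ((user_service->message_available_pos -
		 instance->completion_remove) < 0) &&
		!user_service->dequeue_pending;
}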
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_ioctl
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+static long
|
|
+vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ VCHIQ_INSTANCE_T instance = file->private_data;
|
|
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
|
|
+ VCHIQ_SERVICE_T *service = NULL;
|
|
+ long ret = 0;
|
|
+ int i, rc;
|
|
+ DEBUG_INITIALISE(g_state.local)
|
|
+
|
|
+ vchiq_log_trace(vchiq_arm_log_level,
|
|
+ "vchiq_ioctl - instance %x, cmd %s, arg %lx",
|
|
+ (unsigned int)instance,
|
|
+ ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
|
|
+ (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
|
|
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
|
|
+
|
|
+ switch (cmd) {
|
|
+ case VCHIQ_IOC_SHUTDOWN:
|
|
+ if (!instance->connected)
|
|
+ break;
|
|
+
|
|
+ /* Remove all services */
|
|
+ i = 0;
|
|
+ while ((service = next_service_by_instance(instance->state,
|
|
+ instance, &i)) != NULL) {
|
|
+ status = vchiq_remove_service(service->handle);
|
|
+ unlock_service(service);
|
|
+ if (status != VCHIQ_SUCCESS)
|
|
+ break;
|
|
+ }
|
|
+ service = NULL;
|
|
+
|
|
+ if (status == VCHIQ_SUCCESS) {
|
|
+ /* Wake the completion thread and ask it to exit */
|
|
+ instance->closing = 1;
|
|
+ up(&instance->insert_event);
|
|
+ }
|
|
+
|
|
+ break;
|
|
+
|
|
+ case VCHIQ_IOC_CONNECT:
|
|
+ if (instance->connected) {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+ rc = mutex_lock_interruptible(&instance->state->mutex);
|
|
+ if (rc != 0) {
|
|
+ vchiq_log_error(vchiq_arm_log_level,
|
|
+ "vchiq: connect: could not lock mutex for "
|
|
+ "state %d: %d",
|
|
+ instance->state->id, rc);
|
|
+ ret = -EINTR;
|
|
+ break;
|
|
+ }
|
|
+ status = vchiq_connect_internal(instance->state, instance);
|
|
+ mutex_unlock(&instance->state->mutex);
|
|
+
|
|
+ if (status == VCHIQ_SUCCESS)
|
|
+ instance->connected = 1;
|
|
+ else
|
|
+ vchiq_log_error(vchiq_arm_log_level,
|
|
+ "vchiq: could not connect: %d", status);
|
|
+ break;
|
|
+
|
|
+ case VCHIQ_IOC_CREATE_SERVICE: {
|
|
+ VCHIQ_CREATE_SERVICE_T args;
|
|
+ USER_SERVICE_T *user_service = NULL;
|
|
+ void *userdata;
|
|
+ int srvstate;
|
|
+
|
|
+ if (copy_from_user
|
|
+ (&args, (const void __user *)arg,
|
|
+ sizeof(args)) != 0) {
|
|
+ ret = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
|
|
+ if (!user_service) {
|
|
+ ret = -ENOMEM;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (args.is_open) {
|
|
+ if (!instance->connected) {
|
|
+ ret = -ENOTCONN;
|
|
+ kfree(user_service);
|
|
+ break;
|
|
+ }
|
|
+ srvstate = VCHIQ_SRVSTATE_OPENING;
|
|
+ } else {
|
|
+ srvstate =
|
|
+ instance->connected ?
|
|
+ VCHIQ_SRVSTATE_LISTENING :
|
|
+ VCHIQ_SRVSTATE_HIDDEN;
|
|
+ }
|
|
+
|
|
+ userdata = args.params.userdata;
|
|
+ args.params.callback = service_callback;
|
|
+ args.params.userdata = user_service;
|
|
+ service = vchiq_add_service_internal(
|
|
+ instance->state,
|
|
+ &args.params, srvstate,
|
|
+ instance);
|
|
+
|
|
+ if (service != NULL) {
|
|
+ user_service->service = service;
|
|
+ user_service->userdata = userdata;
|
|
+ user_service->instance = instance;
|
|
+ user_service->is_vchi = args.is_vchi;
|
|
+ user_service->dequeue_pending = 0;
|
|
+ user_service->message_available_pos =
|
|
+ instance->completion_remove - 1;
|
|
+ user_service->msg_insert = 0;
|
|
+ user_service->msg_remove = 0;
|
|
+ sema_init(&user_service->insert_event, 0);
|
|
+ sema_init(&user_service->remove_event, 0);
|
|
+
|
|
+ if (args.is_open) {
|
|
+ status = vchiq_open_service_internal
|
|
+ (service, instance->pid);
|
|
+ if (status != VCHIQ_SUCCESS) {
|
|
+ vchiq_remove_service(service->handle);
|
|
+ service = NULL;
|
|
+ ret = (status == VCHIQ_RETRY) ?
|
|
+ -EINTR : -EIO;
|
|
+ user_service->service = NULL;
|
|
+ user_service->instance = NULL;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (copy_to_user((void __user *)
|
|
+ &(((VCHIQ_CREATE_SERVICE_T __user *)
|
|
+ arg)->handle),
|
|
+ (const void *)&service->handle,
|
|
+ sizeof(service->handle)) != 0) {
|
|
+ ret = -EFAULT;
|
|
+ vchiq_remove_service(service->handle);
|
|
+ kfree(user_service);
|
|
+ }
|
|
+
|
|
+ service = NULL;
|
|
+ } else {
|
|
+ ret = -EEXIST;
|
|
+ kfree(user_service);
|
|
+ }
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_IOC_CLOSE_SERVICE: {
|
|
+ VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
|
|
+
|
|
+ service = find_service_for_instance(instance, handle);
|
|
+ if (service != NULL)
|
|
+ status = vchiq_close_service(service->handle);
|
|
+ else
|
|
+ ret = -EINVAL;
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_IOC_REMOVE_SERVICE: {
|
|
+ VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
|
|
+
|
|
+ service = find_service_for_instance(instance, handle);
|
|
+ if (service != NULL)
|
|
+ status = vchiq_remove_service(service->handle);
|
|
+ else
|
|
+ ret = -EINVAL;
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_IOC_USE_SERVICE:
|
|
+ case VCHIQ_IOC_RELEASE_SERVICE: {
|
|
+ VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
|
|
+
|
|
+ service = find_service_for_instance(instance, handle);
|
|
+ if (service != NULL) {
|
|
+ status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
|
|
+ vchiq_use_service_internal(service) :
|
|
+ vchiq_release_service_internal(service);
|
|
+ if (status != VCHIQ_SUCCESS) {
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "%s: cmd %s returned error %d for "
|
|
+ "service %c%c%c%c:%03d",
|
|
+ __func__,
|
|
+ (cmd == VCHIQ_IOC_USE_SERVICE) ?
|
|
+ "VCHIQ_IOC_USE_SERVICE" :
|
|
+ "VCHIQ_IOC_RELEASE_SERVICE",
|
|
+ status,
|
|
+ VCHIQ_FOURCC_AS_4CHARS(
|
|
+ service->base.fourcc),
|
|
+ service->client_id);
|
|
+ ret = -EINVAL;
|
|
+ }
|
|
+ } else
|
|
+ ret = -EINVAL;
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_IOC_QUEUE_MESSAGE: {
|
|
+ VCHIQ_QUEUE_MESSAGE_T args;
|
|
+ if (copy_from_user
|
|
+ (&args, (const void __user *)arg,
|
|
+ sizeof(args)) != 0) {
|
|
+ ret = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ service = find_service_for_instance(instance, args.handle);
|
|
+
|
|
+ if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
|
|
+ /* Copy elements into kernel space */
|
|
+ VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
|
|
+ if (copy_from_user(elements, args.elements,
|
|
+ args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
|
|
+ status = vchiq_queue_message
|
|
+ (args.handle,
|
|
+ elements, args.count);
|
|
+ else
|
|
+ ret = -EFAULT;
|
|
+ } else {
|
|
+ ret = -EINVAL;
|
|
+ }
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
|
|
+ case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
|
|
+ VCHIQ_QUEUE_BULK_TRANSFER_T args;
|
|
+ struct bulk_waiter_node *waiter = NULL;
|
|
+ VCHIQ_BULK_DIR_T dir =
|
|
+ (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
|
|
+ VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
|
|
+
|
|
+ if (copy_from_user
|
|
+ (&args, (const void __user *)arg,
|
|
+ sizeof(args)) != 0) {
|
|
+ ret = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ service = find_service_for_instance(instance, args.handle);
|
|
+ if (!service) {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
|
|
+ waiter = kzalloc(sizeof(struct bulk_waiter_node),
|
|
+ GFP_KERNEL);
|
|
+ if (!waiter) {
|
|
+ ret = -ENOMEM;
|
|
+ break;
|
|
+ }
|
|
+ args.userdata = &waiter->bulk_waiter;
|
|
+ } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
|
|
+ struct list_head *pos;
|
|
+ mutex_lock(&instance->bulk_waiter_list_mutex);
|
|
+ list_for_each(pos, &instance->bulk_waiter_list) {
|
|
+ if (list_entry(pos, struct bulk_waiter_node,
|
|
+ list)->pid == current->pid) {
|
|
+ waiter = list_entry(pos,
|
|
+ struct bulk_waiter_node,
|
|
+ list);
|
|
+ list_del(pos);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ }
|
|
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
|
|
+ if (!waiter) {
|
|
+ vchiq_log_error(vchiq_arm_log_level,
|
|
+ "no bulk_waiter found for pid %d",
|
|
+ current->pid);
|
|
+ ret = -ESRCH;
|
|
+ break;
|
|
+ }
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "found bulk_waiter %x for pid %d",
|
|
+ (unsigned int)waiter, current->pid);
|
|
+ args.userdata = &waiter->bulk_waiter;
|
|
+ }
|
|
+ status = vchiq_bulk_transfer
|
|
+ (args.handle,
|
|
+ VCHI_MEM_HANDLE_INVALID,
|
|
+ args.data, args.size,
|
|
+ args.userdata, args.mode,
|
|
+ dir);
|
|
+ if (!waiter)
|
|
+ break;
|
|
+ if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
|
|
+ !waiter->bulk_waiter.bulk) {
|
|
+ if (waiter->bulk_waiter.bulk) {
|
|
+ /* Cancel the signal when the transfer
|
|
+ ** completes. */
|
|
+ spin_lock(&bulk_waiter_spinlock);
|
|
+ waiter->bulk_waiter.bulk->userdata = NULL;
|
|
+ spin_unlock(&bulk_waiter_spinlock);
|
|
+ }
|
|
+ kfree(waiter);
|
|
+ } else {
|
|
+ const VCHIQ_BULK_MODE_T mode_waiting =
|
|
+ VCHIQ_BULK_MODE_WAITING;
|
|
+ waiter->pid = current->pid;
|
|
+ mutex_lock(&instance->bulk_waiter_list_mutex);
|
|
+ list_add(&waiter->list, &instance->bulk_waiter_list);
|
|
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "saved bulk_waiter %x for pid %d",
|
|
+ (unsigned int)waiter, current->pid);
|
|
+
|
|
+ if (copy_to_user((void __user *)
|
|
+ &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
|
|
+ arg)->mode),
|
|
+ (const void *)&mode_waiting,
|
|
+ sizeof(mode_waiting)) != 0)
|
|
+ ret = -EFAULT;
|
|
+ }
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_IOC_AWAIT_COMPLETION: {
|
|
+ VCHIQ_AWAIT_COMPLETION_T args;
|
|
+
|
|
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
|
|
+ if (!instance->connected) {
|
|
+ ret = -ENOTCONN;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (copy_from_user(&args, (const void __user *)arg,
|
|
+ sizeof(args)) != 0) {
|
|
+ ret = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ mutex_lock(&instance->completion_mutex);
|
|
+
|
|
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
|
|
+ while ((instance->completion_remove ==
|
|
+ instance->completion_insert)
|
|
+ && !instance->closing) {
|
|
+ int rc;
|
|
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
|
|
+ mutex_unlock(&instance->completion_mutex);
|
|
+ rc = down_interruptible(&instance->insert_event);
|
|
+ mutex_lock(&instance->completion_mutex);
|
|
+ if (rc != 0) {
|
|
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "AWAIT_COMPLETION interrupted");
|
|
+ ret = -EINTR;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
|
|
+
|
|
+ /* A read memory barrier is needed to stop prefetch of a stale
|
|
+ ** completion record
|
|
+ */
|
|
+ rmb();
|
|
+
|
|
+ if (ret == 0) {
|
|
+ int msgbufcount = args.msgbufcount;
|
|
+ for (ret = 0; ret < args.count; ret++) {
|
|
+ VCHIQ_COMPLETION_DATA_T *completion;
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+ USER_SERVICE_T *user_service;
|
|
+ VCHIQ_HEADER_T *header;
|
|
+ if (instance->completion_remove ==
|
|
+ instance->completion_insert)
|
|
+ break;
|
|
+ completion = &instance->completions[
|
|
+ instance->completion_remove &
|
|
+ (MAX_COMPLETIONS - 1)];
|
|
+
|
|
+ service = completion->service_userdata;
|
|
+ user_service = service->base.userdata;
|
|
+ completion->service_userdata =
|
|
+ user_service->userdata;
|
|
+
|
|
+ header = completion->header;
|
|
+ if (header) {
|
|
+ void __user *msgbuf;
|
|
+ int msglen;
|
|
+
|
|
+ msglen = header->size +
|
|
+ sizeof(VCHIQ_HEADER_T);
|
|
+ /* This must be a VCHIQ-style service */
|
|
+ if (args.msgbufsize < msglen) {
|
|
+ vchiq_log_error(
|
|
+ vchiq_arm_log_level,
|
|
+ "header %x: msgbufsize"
|
|
+ " %x < msglen %x",
|
|
+ (unsigned int)header,
|
|
+ args.msgbufsize,
|
|
+ msglen);
|
|
+ WARN(1, "invalid message "
|
|
+ "size\n");
|
|
+ if (ret == 0)
|
|
+ ret = -EMSGSIZE;
|
|
+ break;
|
|
+ }
|
|
+ if (msgbufcount <= 0)
|
|
+ /* Stall here for lack of a
|
|
+ ** buffer for the message. */
|
|
+ break;
|
|
+ /* Get the pointer from user space */
|
|
+ msgbufcount--;
|
|
+ if (copy_from_user(&msgbuf,
|
|
+ (const void __user *)
|
|
+ &args.msgbufs[msgbufcount],
|
|
+ sizeof(msgbuf)) != 0) {
|
|
+ if (ret == 0)
|
|
+ ret = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* Copy the message to user space */
|
|
+ if (copy_to_user(msgbuf, header,
|
|
+ msglen) != 0) {
|
|
+ if (ret == 0)
|
|
+ ret = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* Now it has been copied, the message
|
|
+ ** can be released. */
|
|
+ vchiq_release_message(service->handle,
|
|
+ header);
|
|
+
|
|
+ /* The completion must point to the
|
|
+ ** msgbuf. */
|
|
+ completion->header = msgbuf;
|
|
+ }
|
|
+
|
|
+ if (completion->reason ==
|
|
+ VCHIQ_SERVICE_CLOSED) {
|
|
+ unlock_service(service);
|
|
+ kfree(user_service);
|
|
+ }
|
|
+
|
|
+ if (copy_to_user((void __user *)(
|
|
+ (size_t)args.buf +
|
|
+ ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
|
|
+ completion,
|
|
+ sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
|
|
+ if (ret == 0)
|
|
+ ret = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ instance->completion_remove++;
|
|
+ }
|
|
+
|
|
+ if (msgbufcount != args.msgbufcount) {
|
|
+ if (copy_to_user((void __user *)
|
|
+ &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
|
|
+ msgbufcount,
|
|
+ &msgbufcount,
|
|
+ sizeof(msgbufcount)) != 0) {
|
|
+ ret = -EFAULT;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ret != 0)
|
|
+ up(&instance->remove_event);
|
|
+ mutex_unlock(&instance->completion_mutex);
|
|
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_IOC_DEQUEUE_MESSAGE: {
|
|
+ VCHIQ_DEQUEUE_MESSAGE_T args;
|
|
+ USER_SERVICE_T *user_service;
|
|
+ VCHIQ_HEADER_T *header;
|
|
+
|
|
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
|
|
+ if (copy_from_user
|
|
+ (&args, (const void __user *)arg,
|
|
+ sizeof(args)) != 0) {
|
|
+ ret = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+ service = find_service_for_instance(instance, args.handle);
|
|
+ if (!service) {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+ user_service = (USER_SERVICE_T *)service->base.userdata;
|
|
+ if (user_service->is_vchi == 0) {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ spin_lock(&msg_queue_spinlock);
|
|
+ if (user_service->msg_remove == user_service->msg_insert) {
|
|
+ if (!args.blocking) {
|
|
+ spin_unlock(&msg_queue_spinlock);
|
|
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
|
|
+ ret = -EWOULDBLOCK;
|
|
+ break;
|
|
+ }
|
|
+ user_service->dequeue_pending = 1;
|
|
+ do {
|
|
+ spin_unlock(&msg_queue_spinlock);
|
|
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
|
|
+ if (down_interruptible(
|
|
+ &user_service->insert_event) != 0) {
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "DEQUEUE_MESSAGE interrupted");
|
|
+ ret = -EINTR;
|
|
+ break;
|
|
+ }
|
|
+ spin_lock(&msg_queue_spinlock);
|
|
+ } while (user_service->msg_remove ==
|
|
+ user_service->msg_insert);
|
|
+
|
|
+ if (ret)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ BUG_ON((int)(user_service->msg_insert -
|
|
+ user_service->msg_remove) < 0);
|
|
+
|
|
+ header = user_service->msg_queue[user_service->msg_remove &
|
|
+ (MSG_QUEUE_SIZE - 1)];
|
|
+ user_service->msg_remove++;
|
|
+ spin_unlock(&msg_queue_spinlock);
|
|
+
|
|
+ up(&user_service->remove_event);
|
|
+ if (header == NULL)
|
|
+ ret = -ENOTCONN;
|
|
+ else if (header->size <= args.bufsize) {
|
|
+ /* Copy to user space if msgbuf is not NULL */
|
|
+ if ((args.buf == NULL) ||
|
|
+ (copy_to_user((void __user *)args.buf,
|
|
+ header->data,
|
|
+ header->size) == 0)) {
|
|
+ ret = header->size;
|
|
+ vchiq_release_message(
|
|
+ service->handle,
|
|
+ header);
|
|
+ } else
|
|
+ ret = -EFAULT;
|
|
+ } else {
|
|
+ vchiq_log_error(vchiq_arm_log_level,
|
|
+ "header %x: bufsize %x < size %x",
|
|
+ (unsigned int)header, args.bufsize,
|
|
+ header->size);
|
|
+ WARN(1, "invalid size\n");
|
|
+ ret = -EMSGSIZE;
|
|
+ }
|
|
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_IOC_GET_CLIENT_ID: {
|
|
+ VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
|
|
+
|
|
+ ret = vchiq_get_client_id(handle);
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_IOC_GET_CONFIG: {
|
|
+ VCHIQ_GET_CONFIG_T args;
|
|
+ VCHIQ_CONFIG_T config;
|
|
+
|
|
+ if (copy_from_user(&args, (const void __user *)arg,
|
|
+ sizeof(args)) != 0) {
|
|
+ ret = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+ if (args.config_size > sizeof(config)) {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+ status = vchiq_get_config(instance, args.config_size, &config);
|
|
+ if (status == VCHIQ_SUCCESS) {
|
|
+ if (copy_to_user((void __user *)args.pconfig,
|
|
+ &config, args.config_size) != 0) {
|
|
+ ret = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_IOC_SET_SERVICE_OPTION: {
|
|
+ VCHIQ_SET_SERVICE_OPTION_T args;
|
|
+
|
|
+ if (copy_from_user(
|
|
+ &args, (const void __user *)arg,
|
|
+ sizeof(args)) != 0) {
|
|
+ ret = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ service = find_service_for_instance(instance, args.handle);
|
|
+ if (!service) {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ status = vchiq_set_service_option(
|
|
+ args.handle, args.option, args.value);
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_IOC_DUMP_PHYS_MEM: {
|
|
+ VCHIQ_DUMP_MEM_T args;
|
|
+
|
|
+ if (copy_from_user
|
|
+ (&args, (const void __user *)arg,
|
|
+ sizeof(args)) != 0) {
|
|
+ ret = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+ dump_phys_mem(args.virt_addr, args.num_bytes);
|
|
+ } break;
|
|
+
|
|
+ default:
|
|
+ ret = -ENOTTY;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (service)
|
|
+ unlock_service(service);
|
|
+
|
|
+ if (ret == 0) {
|
|
+ if (status == VCHIQ_ERROR)
|
|
+ ret = -EIO;
|
|
+ else if (status == VCHIQ_RETRY)
|
|
+ ret = -EINTR;
|
|
+ }
|
|
+
|
|
+ if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
|
|
+ (ret != -EWOULDBLOCK))
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ " ioctl instance %lx, cmd %s -> status %d, %ld",
|
|
+ (unsigned long)instance,
|
|
+ (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
|
|
+ ioctl_names[_IOC_NR(cmd)] :
|
|
+ "<invalid>",
|
|
+ status, ret);
|
|
+ else
|
|
+ vchiq_log_trace(vchiq_arm_log_level,
|
|
+ " ioctl instance %lx, cmd %s -> status %d, %ld",
|
|
+ (unsigned long)instance,
|
|
+ (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
|
|
+ ioctl_names[_IOC_NR(cmd)] :
|
|
+ "<invalid>",
|
|
+ status, ret);
|
|
+
|
|
+ return ret;
|
|
+}
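/* Editorial note: an illustrative user-space call sequence, not part of the
 * patch.  It assumes the usual /dev/vchiq device node and that vchiq_ioctl.h
 * (from this patch set) is available to user space for the VCHIQ_IOC_*
 * numbers.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "vchiq_ioctl.h"

static int vchiq_open_and_connect(void)
{
	int fd = open("/dev/vchiq", O_RDWR);	/* handled by vchiq_open() */

	if (fd < 0)
		return -1;
	/* Must connect before CREATE_SERVICE with is_open, QUEUE_*, etc. */
	if (ioctl(fd, VCHIQ_IOC_CONNECT, 0) != 0) {
		close(fd);
		return -1;
	}
	return fd;
}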
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_open
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+static int
|
|
+vchiq_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ int dev = iminor(inode) & 0x0f;
|
|
+ vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
|
|
+ switch (dev) {
|
|
+ case VCHIQ_MINOR: {
|
|
+ int ret;
|
|
+ VCHIQ_STATE_T *state = vchiq_get_state();
|
|
+ VCHIQ_INSTANCE_T instance;
|
|
+
|
|
+ if (!state) {
|
|
+ vchiq_log_error(vchiq_arm_log_level,
|
|
+ "vchiq has no connection to VideoCore");
|
|
+ return -ENOTCONN;
|
|
+ }
|
|
+
|
|
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
|
|
+ if (!instance)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ instance->state = state;
|
|
+ instance->pid = current->tgid;
|
|
+
|
|
+ ret = vchiq_proc_add_instance(instance);
|
|
+ if (ret != 0) {
|
|
+ kfree(instance);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ sema_init(&instance->insert_event, 0);
|
|
+ sema_init(&instance->remove_event, 0);
|
|
+ mutex_init(&instance->completion_mutex);
|
|
+ mutex_init(&instance->bulk_waiter_list_mutex);
|
|
+ INIT_LIST_HEAD(&instance->bulk_waiter_list);
|
|
+
|
|
+ file->private_data = instance;
|
|
+ } break;
|
|
+
|
|
+ default:
|
|
+ vchiq_log_error(vchiq_arm_log_level,
|
|
+ "Unknown minor device: %d", dev);
|
|
+ return -ENXIO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_release
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+static int
|
|
+vchiq_release(struct inode *inode, struct file *file)
|
|
+{
|
|
+ int dev = iminor(inode) & 0x0f;
|
|
+ int ret = 0;
|
|
+ switch (dev) {
|
|
+ case VCHIQ_MINOR: {
|
|
+ VCHIQ_INSTANCE_T instance = file->private_data;
|
|
+ VCHIQ_STATE_T *state = vchiq_get_state();
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+ int i;
|
|
+
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "vchiq_release: instance=%lx",
|
|
+ (unsigned long)instance);
|
|
+
|
|
+ if (!state) {
|
|
+ ret = -EPERM;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /* Ensure videocore is awake to allow termination. */
|
|
+ vchiq_use_internal(instance->state, NULL,
|
|
+ USE_TYPE_VCHIQ);
|
|
+
|
|
+ mutex_lock(&instance->completion_mutex);
|
|
+
|
|
+ /* Wake the completion thread and ask it to exit */
|
|
+ instance->closing = 1;
|
|
+ up(&instance->insert_event);
|
|
+
|
|
+ mutex_unlock(&instance->completion_mutex);
|
|
+
|
|
+ /* Wake the slot handler if the completion queue is full. */
|
|
+ up(&instance->remove_event);
|
|
+
|
|
+ /* Mark all services for termination... */
|
|
+ i = 0;
|
|
+ while ((service = next_service_by_instance(state, instance,
|
|
+ &i)) != NULL) {
|
|
+ USER_SERVICE_T *user_service = service->base.userdata;
|
|
+
|
|
+ /* Wake the slot handler if the msg queue is full. */
|
|
+ up(&user_service->remove_event);
|
|
+
|
|
+ vchiq_terminate_service_internal(service);
|
|
+ unlock_service(service);
|
|
+ }
|
|
+
|
|
+ /* ...and wait for them to die */
|
|
+ i = 0;
|
|
+ while ((service = next_service_by_instance(state, instance, &i))
|
|
+ != NULL) {
|
|
+ USER_SERVICE_T *user_service = service->base.userdata;
|
|
+
|
|
+ down(&service->remove_event);
|
|
+
|
|
+ BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
|
|
+
|
|
+ spin_lock(&msg_queue_spinlock);
|
|
+
|
|
+ while (user_service->msg_remove !=
|
|
+ user_service->msg_insert) {
|
|
+ VCHIQ_HEADER_T *header = user_service->
|
|
+ msg_queue[user_service->msg_remove &
|
|
+ (MSG_QUEUE_SIZE - 1)];
|
|
+ user_service->msg_remove++;
|
|
+ spin_unlock(&msg_queue_spinlock);
|
|
+
|
|
+ if (header)
|
|
+ vchiq_release_message(
|
|
+ service->handle,
|
|
+ header);
|
|
+ spin_lock(&msg_queue_spinlock);
|
|
+ }
|
|
+
|
|
+ spin_unlock(&msg_queue_spinlock);
|
|
+
|
|
+ unlock_service(service);
|
|
+ kfree(user_service);
|
|
+ }
|
|
+
|
|
+ /* Release any closed services */
|
|
+ while (instance->completion_remove !=
|
|
+ instance->completion_insert) {
|
|
+ VCHIQ_COMPLETION_DATA_T *completion;
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+ completion = &instance->completions[
|
|
+ instance->completion_remove &
|
|
+ (MAX_COMPLETIONS - 1)];
|
|
+ service = completion->service_userdata;
|
|
+ if (completion->reason == VCHIQ_SERVICE_CLOSED)
|
|
+ unlock_service(service);
|
|
+ instance->completion_remove++;
|
|
+ }
|
|
+
|
|
+ /* Release the PEER service count. */
|
|
+ vchiq_release_internal(instance->state, NULL);
|
|
+
|
|
+ {
|
|
+ struct list_head *pos, *next;
|
|
+ list_for_each_safe(pos, next,
|
|
+ &instance->bulk_waiter_list) {
|
|
+ struct bulk_waiter_node *waiter;
|
|
+ waiter = list_entry(pos,
|
|
+ struct bulk_waiter_node,
|
|
+ list);
|
|
+ list_del(pos);
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "bulk_waiter - cleaned up %x "
|
|
+ "for pid %d",
|
|
+ (unsigned int)waiter, waiter->pid);
|
|
+ kfree(waiter);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ vchiq_proc_remove_instance(instance);
|
|
+
|
|
+ kfree(instance);
|
|
+ file->private_data = NULL;
|
|
+ } break;
|
|
+
|
|
+ default:
|
|
+ vchiq_log_error(vchiq_arm_log_level,
|
|
+ "Unknown minor device: %d", dev);
|
|
+ ret = -ENXIO;
|
|
+ }
|
|
+
|
|
+out:
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_dump
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+void
|
|
+vchiq_dump(void *dump_context, const char *str, int len)
|
|
+{
|
|
+ DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
|
|
+
|
|
+ if (context->actual < context->space) {
|
|
+ int copy_bytes;
|
|
+ if (context->offset > 0) {
|
|
+ int skip_bytes = min(len, (int)context->offset);
|
|
+ str += skip_bytes;
|
|
+ len -= skip_bytes;
|
|
+ context->offset -= skip_bytes;
|
|
+ if (context->offset > 0)
|
|
+ return;
|
|
+ }
|
|
+ copy_bytes = min(len, (int)(context->space - context->actual));
|
|
+ if (copy_bytes == 0)
|
|
+ return;
|
|
+ if (copy_to_user(context->buf + context->actual, str,
|
|
+ copy_bytes))
|
|
+ context->actual = -EFAULT;
|
|
+ context->actual += copy_bytes;
|
|
+ len -= copy_bytes;
|
|
+
|
|
+ /* If the terminating NUL is included in the length, then it
|
|
+ ** marks the end of a line and should be replaced with a
|
|
+ ** carriage return. */
|
|
+ if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
|
|
+ char cr = '\n';
|
|
+ if (copy_to_user(context->buf + context->actual - 1,
|
|
+ &cr, 1))
|
|
+ context->actual = -EFAULT;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_dump_platform_instances
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+void
|
|
+vchiq_dump_platform_instances(void *dump_context)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = vchiq_get_state();
|
|
+ char buf[80];
|
|
+ int len;
|
|
+ int i;
|
|
+
|
|
+ /* There is no list of instances, so instead scan all services,
|
|
+ marking those that have been dumped. */
|
|
+
|
|
+ for (i = 0; i < state->unused_service; i++) {
|
|
+ VCHIQ_SERVICE_T *service = state->services[i];
|
|
+ VCHIQ_INSTANCE_T instance;
|
|
+
|
|
+ if (service && (service->base.callback == service_callback)) {
|
|
+ instance = service->instance;
|
|
+ if (instance)
|
|
+ instance->mark = 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < state->unused_service; i++) {
|
|
+ VCHIQ_SERVICE_T *service = state->services[i];
|
|
+ VCHIQ_INSTANCE_T instance;
|
|
+
|
|
+ if (service && (service->base.callback == service_callback)) {
|
|
+ instance = service->instance;
|
|
+ if (instance && !instance->mark) {
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ "Instance %x: pid %d,%s completions "
|
|
+ "%d/%d",
|
|
+ (unsigned int)instance, instance->pid,
|
|
+ instance->connected ? " connected, " :
|
|
+ "",
|
|
+ instance->completion_insert -
|
|
+ instance->completion_remove,
|
|
+ MAX_COMPLETIONS);
|
|
+
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+
|
|
+ instance->mark = 1;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_dump_platform_service_state
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+void
|
|
+vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
|
|
+ char buf[80];
|
|
+ int len;
|
|
+
|
|
+ len = snprintf(buf, sizeof(buf), " instance %x",
|
|
+ (unsigned int)service->instance);
|
|
+
|
|
+ if ((service->base.callback == service_callback) &&
|
|
+ user_service->is_vchi) {
|
|
+ len += snprintf(buf + len, sizeof(buf) - len,
|
|
+ ", %d/%d messages",
|
|
+ user_service->msg_insert - user_service->msg_remove,
|
|
+ MSG_QUEUE_SIZE);
|
|
+
|
|
+ if (user_service->dequeue_pending)
|
|
+ len += snprintf(buf + len, sizeof(buf) - len,
|
|
+ " (dequeue pending)");
|
|
+ }
|
|
+
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* dump_phys_mem
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+static void
|
|
+dump_phys_mem(void *virt_addr, uint32_t num_bytes)
|
|
+{
|
|
+ int rc;
|
|
+ uint8_t *end_virt_addr = virt_addr + num_bytes;
|
|
+ int num_pages;
|
|
+ int offset;
|
|
+ int end_offset;
|
|
+ int page_idx;
|
|
+ int prev_idx;
|
|
+ struct page *page;
|
|
+ struct page **pages;
|
|
+ uint8_t *kmapped_virt_ptr;
|
|
+
|
|
+ /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
|
|
+
|
|
+ virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
|
|
+ end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
|
|
+ ~0x0fuL);
|
|
+
|
|
+ offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
|
|
+ end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
|
|
+
|
|
+ num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
|
|
+
|
|
+ pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
|
|
+ if (pages == NULL) {
|
|
+ vchiq_log_error(vchiq_arm_log_level,
|
|
+ "Unable to allocation memory for %d pages\n",
|
|
+ num_pages);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ down_read(¤t->mm->mmap_sem);
|
|
+ rc = get_user_pages(current, /* task */
|
|
+ current->mm, /* mm */
|
|
+ (unsigned long)virt_addr, /* start */
|
|
+ num_pages, /* len */
|
|
+ 0, /* write */
|
|
+ 0, /* force */
|
|
+ pages, /* pages (array of page pointers) */
|
|
+ NULL); /* vmas */
|
|
+ up_read(¤t->mm->mmap_sem);
|
|
+
|
|
+ prev_idx = -1;
|
|
+ page = NULL;
|
|
+
|
|
+ while (offset < end_offset) {
|
|
+
|
|
+ int page_offset = offset % PAGE_SIZE;
|
|
+ page_idx = offset / PAGE_SIZE;
|
|
+
|
|
+ if (page_idx != prev_idx) {
|
|
+
|
|
+ if (page != NULL)
|
|
+ kunmap(page);
|
|
+ page = pages[page_idx];
|
|
+ kmapped_virt_ptr = kmap(page);
|
|
+
|
|
+ prev_idx = page_idx;
|
|
+ }
|
|
+
|
|
+ if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
|
|
+ vchiq_log_dump_mem("ph",
|
|
+ (uint32_t)(unsigned long)&kmapped_virt_ptr[
|
|
+ page_offset],
|
|
+ &kmapped_virt_ptr[page_offset], 16);
|
|
+
|
|
+ offset += 16;
|
|
+ }
|
|
+ if (page != NULL)
|
|
+ kunmap(page);
|
|
+
|
|
+ for (page_idx = 0; page_idx < num_pages; page_idx++)
|
|
+ page_cache_release(pages[page_idx]);
|
|
+
|
|
+ kfree(pages);
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_read
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+static ssize_t
|
|
+vchiq_read(struct file *file, char __user *buf,
|
|
+ size_t count, loff_t *ppos)
|
|
+{
|
|
+ DUMP_CONTEXT_T context;
|
|
+ context.buf = buf;
|
|
+ context.actual = 0;
|
|
+ context.space = count;
|
|
+ context.offset = *ppos;
|
|
+
|
|
+ vchiq_dump_state(&context, &g_state);
|
|
+
|
|
+ *ppos += context.actual;
|
|
+
|
|
+ return context.actual;
|
|
+}
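/* Editorial note: illustrative only, not part of the patch.  Because the
 * file_operations below wire .read to vchiq_read(), the full state dump can
 * be obtained from user space simply by reading the device node (the /dev
 * path is an assumption - it depends on how udev names the class device).
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static void print_vchiq_state_dump(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/vchiq", O_RDONLY);

	if (fd < 0)
		return;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);	/* text produced by vchiq_dump() */
	close(fd);
}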
|
|
+
|
|
+VCHIQ_STATE_T *
|
|
+vchiq_get_state(void)
|
|
+{
|
|
+
|
|
+ if (g_state.remote == NULL)
|
|
+ printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
|
|
+ else if (g_state.remote->initialised != 1)
|
|
+ printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
|
|
+ __func__, g_state.remote->initialised);
|
|
+
|
|
+ return ((g_state.remote != NULL) &&
|
|
+ (g_state.remote->initialised == 1)) ? &g_state : NULL;
|
|
+}
|
|
+
|
|
+static const struct file_operations
|
|
+vchiq_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .unlocked_ioctl = vchiq_ioctl,
|
|
+ .open = vchiq_open,
|
|
+ .release = vchiq_release,
|
|
+ .read = vchiq_read
|
|
+};
|
|
+
|
|
+/*
|
|
+ * Autosuspend related functionality
|
|
+ */
|
|
+
|
|
+int
|
|
+vchiq_videocore_wanted(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ if (!arm_state)
|
|
+ /* autosuspend not supported - always return wanted */
|
|
+ return 1;
|
|
+ else if (arm_state->blocked_count)
|
|
+ return 1;
|
|
+ else if (!arm_state->videocore_use_count) {
+ /* usage count zero - check for override unless we're forcing */
+ if (arm_state->resume_blocked)
+ return 0;
+ else
+ return vchiq_platform_videocore_wanted(state);
+ } else {
+ /* non-zero usage count - videocore still required */
+ return 1;
+ }
|
|
+}
|
|
+
|
|
+static VCHIQ_STATUS_T
|
|
+vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
|
|
+ VCHIQ_HEADER_T *header,
|
|
+ VCHIQ_SERVICE_HANDLE_T service_user,
|
|
+ void *bulk_user)
|
|
+{
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "%s callback reason %d", __func__, reason);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int
|
|
+vchiq_keepalive_thread_func(void *v)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+
|
|
+ VCHIQ_STATUS_T status;
|
|
+ VCHIQ_INSTANCE_T instance;
|
|
+ VCHIQ_SERVICE_HANDLE_T ka_handle;
|
|
+
|
|
+ VCHIQ_SERVICE_PARAMS_T params = {
|
|
+ .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
|
|
+ .callback = vchiq_keepalive_vchiq_callback,
|
|
+ .version = KEEPALIVE_VER,
|
|
+ .version_min = KEEPALIVE_VER_MIN
|
|
+ };
|
|
+
|
|
+ status = vchiq_initialise(&instance);
|
|
+ if (status != VCHIQ_SUCCESS) {
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "%s vchiq_initialise failed %d", __func__, status);
|
|
+ goto exit;
|
|
+ }
|
|
+
|
|
+ status = vchiq_connect(instance);
|
|
+ if (status != VCHIQ_SUCCESS) {
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "%s vchiq_connect failed %d", __func__, status);
|
|
+ goto shutdown;
|
|
+ }
|
|
+
|
|
+ status = vchiq_add_service(instance, ¶ms, &ka_handle);
|
|
+ if (status != VCHIQ_SUCCESS) {
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "%s vchiq_open_service failed %d", __func__, status);
|
|
+ goto shutdown;
|
|
+ }
|
|
+
|
|
+ while (1) {
|
|
+ long rc = 0, uc = 0;
|
|
+ if (wait_for_completion_interruptible(&arm_state->ka_evt)
|
|
+ != 0) {
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "%s interrupted", __func__);
|
|
+ flush_signals(current);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* read and clear counters. Do release_count then use_count to
|
|
+ * prevent getting more releases than uses */
|
|
+ rc = atomic_xchg(&arm_state->ka_release_count, 0);
|
|
+ uc = atomic_xchg(&arm_state->ka_use_count, 0);
|
|
+
|
|
+ /* Call use/release service the requisite number of times.
|
|
+ * Process use before release so use counts don't go negative */
|
|
+ while (uc--) {
|
|
+ atomic_inc(&arm_state->ka_use_ack_count);
|
|
+ status = vchiq_use_service(ka_handle);
|
|
+ if (status != VCHIQ_SUCCESS) {
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "%s vchiq_use_service error %d",
|
|
+ __func__, status);
|
|
+ }
|
|
+ }
|
|
+ while (rc--) {
|
|
+ status = vchiq_release_service(ka_handle);
|
|
+ if (status != VCHIQ_SUCCESS) {
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "%s vchiq_release_service error %d",
|
|
+ __func__, status);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+shutdown:
|
|
+ vchiq_shutdown(instance);
|
|
+exit:
|
|
+ return 0;
|
|
+}
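/* Editorial note: a minimal sketch, not part of the patch, of the counter
 * draining pattern used by the keep-alive thread above.  Releases are read
 * (and cleared) before uses, so a use/release pair that lands between the
 * two atomic_xchg() calls contributes its use now and defers its release to
 * the next wake-up - the thread therefore never issues more
 * vchiq_release_service() calls than vchiq_use_service() calls.
 */
static void drain_keepalive_counters(VCHIQ_ARM_STATE_T *arm_state,
	long *uses, long *releases)
{
	*releases = atomic_xchg(&arm_state->ka_release_count, 0);
	*uses = atomic_xchg(&arm_state->ka_use_count, 0);
}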
|
|
+
|
|
+
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
|
|
+{
|
|
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
|
|
+
|
|
+ if (arm_state) {
|
|
+ rwlock_init(&arm_state->susp_res_lock);
|
|
+
|
|
+ init_completion(&arm_state->ka_evt);
|
|
+ atomic_set(&arm_state->ka_use_count, 0);
|
|
+ atomic_set(&arm_state->ka_use_ack_count, 0);
|
|
+ atomic_set(&arm_state->ka_release_count, 0);
|
|
+
|
|
+ init_completion(&arm_state->vc_suspend_complete);
|
|
+
|
|
+ init_completion(&arm_state->vc_resume_complete);
|
|
+ /* Initialise to 'done' state. We only want to block on resume
|
|
+ * completion while videocore is suspended. */
|
|
+ set_resume_state(arm_state, VC_RESUME_RESUMED);
|
|
+
|
|
+ init_completion(&arm_state->resume_blocker);
|
|
+ /* Initialise to 'done' state. We only want to block on this
|
|
+ * completion while resume is blocked */
|
|
+ complete_all(&arm_state->resume_blocker);
|
|
+
|
|
+ init_completion(&arm_state->blocked_blocker);
|
|
+ /* Initialise to 'done' state. We only want to block on this
|
|
+ * completion while things are waiting on the resume blocker */
|
|
+ complete_all(&arm_state->blocked_blocker);
|
|
+
|
|
+ arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
|
|
+ arm_state->suspend_timer_running = 0;
|
|
+ init_timer(&arm_state->suspend_timer);
|
|
+ arm_state->suspend_timer.data = (unsigned long)(state);
|
|
+ arm_state->suspend_timer.function = suspend_timer_callback;
|
|
+
|
|
+ arm_state->first_connect = 0;
|
|
+
|
|
+ }
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/*
|
|
+** Functions to modify the state variables;
|
|
+** set_suspend_state
|
|
+** set_resume_state
|
|
+**
|
|
+** There are more state variables than we might like, so ensure they remain in
|
|
+** step. Suspend and resume state are maintained separately, since most of
|
|
+** these state machines can operate independently. However, there are a few
|
|
+** states where state transitions in one state machine cause a reset to the
|
|
+** other state machine. In addition, there are some completion events which
|
|
+** need to occur on state machine reset and end-state(s), so these are also
|
|
+** dealt with in these functions.
|
|
+**
|
|
+** In all states we set the state variable according to the input, but in some
|
|
+** cases we perform additional steps outlined below;
|
|
+**
|
|
+** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
|
|
+** The suspend completion is completed after any suspend
|
|
+** attempt. When we reset the state machine we also reset
|
|
+** the completion. This reset occurs when videocore is
|
|
+** resumed, and also if we initiate suspend after a suspend
|
|
+** failure.
|
|
+**
|
|
+** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
|
|
+** suspend - ie from this point on we must try to suspend
|
|
+** before resuming can occur. We therefore also reset the
|
|
+** resume state machine to VC_RESUME_IDLE in this state.
|
|
+**
|
|
+** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
|
|
+** complete_all on the suspend completion to notify
|
|
+** anything waiting for suspend to happen.
|
|
+**
|
|
+** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
|
|
+** initiate resume, so no need to alter resume state.
|
|
+** We call complete_all on the suspend completion to notify
|
|
+** of suspend rejection.
|
|
+**
|
|
+** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
|
|
+** suspend completion and reset the resume state machine.
|
|
+**
|
|
+** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
|
|
+** resume completion is in its 'done' state whenever
+** videocore is running. Therefore, the VC_RESUME_IDLE state
|
|
+** implies that videocore is suspended.
|
|
+** Hence, any thread which needs to wait until videocore is
|
|
+** running can wait on this completion - it will only block
|
|
+** if videocore is suspended.
|
|
+**
|
|
+** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
|
|
+** Call complete_all on the resume completion to unblock
|
|
+** any threads waiting for resume. Also reset the suspend
|
|
+** state machine to its idle state.
|
|
+**
|
|
+** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
|
|
+*/
|
|
+
|
|
+inline void
|
|
+set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
|
|
+ enum vc_suspend_status new_state)
|
|
+{
|
|
+ /* set the state in all cases */
|
|
+ arm_state->vc_suspend_state = new_state;
|
|
+
|
|
+ /* state specific additional actions */
|
|
+ switch (new_state) {
|
|
+ case VC_SUSPEND_FORCE_CANCELED:
|
|
+ complete_all(&arm_state->vc_suspend_complete);
|
|
+ break;
|
|
+ case VC_SUSPEND_REJECTED:
|
|
+ complete_all(&arm_state->vc_suspend_complete);
|
|
+ break;
|
|
+ case VC_SUSPEND_FAILED:
|
|
+ complete_all(&arm_state->vc_suspend_complete);
|
|
+ arm_state->vc_resume_state = VC_RESUME_RESUMED;
|
|
+ complete_all(&arm_state->vc_resume_complete);
|
|
+ break;
|
|
+ case VC_SUSPEND_IDLE:
|
|
+ INIT_COMPLETION(arm_state->vc_suspend_complete);
|
|
+ break;
|
|
+ case VC_SUSPEND_REQUESTED:
|
|
+ break;
|
|
+ case VC_SUSPEND_IN_PROGRESS:
|
|
+ set_resume_state(arm_state, VC_RESUME_IDLE);
|
|
+ break;
|
|
+ case VC_SUSPEND_SUSPENDED:
|
|
+ complete_all(&arm_state->vc_suspend_complete);
|
|
+ break;
|
|
+ default:
|
|
+ BUG();
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
+inline void
|
|
+set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
|
|
+ enum vc_resume_status new_state)
|
|
+{
|
|
+ /* set the state in all cases */
|
|
+ arm_state->vc_resume_state = new_state;
|
|
+
|
|
+ /* state specific additional actions */
|
|
+ switch (new_state) {
|
|
+ case VC_RESUME_FAILED:
|
|
+ break;
|
|
+ case VC_RESUME_IDLE:
|
|
+ INIT_COMPLETION(arm_state->vc_resume_complete);
|
|
+ break;
|
|
+ case VC_RESUME_REQUESTED:
|
|
+ break;
|
|
+ case VC_RESUME_IN_PROGRESS:
|
|
+ break;
|
|
+ case VC_RESUME_RESUMED:
|
|
+ complete_all(&arm_state->vc_resume_complete);
|
|
+ set_suspend_state(arm_state, VC_SUSPEND_IDLE);
|
|
+ break;
|
|
+ default:
|
|
+ BUG();
|
|
+ break;
|
|
+ }
|
|
+}
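
As a rough illustration of the completion semantics documented in the block comment above: vc_resume_complete is left in its 'done' state while videocore is running, so a waiter only blocks while videocore is suspended. A minimal sketch, assuming only the fields of VCHIQ_ARM_STATE_T shown in this patch; the helper name is hypothetical and the snippet is not part of the patch itself.

#include <linux/completion.h>
#include <linux/errno.h>
#include "vchiq_arm.h"

/* Sketch: block the caller until videocore is running, mirroring the wait
 * performed later in vchiq_use_internal(). */
static int example_wait_until_videocore_running(VCHIQ_ARM_STATE_T *arm_state)
{
	/* Fast path: the completion stays 'done' while videocore is running */
	if (try_wait_for_completion(&arm_state->vc_resume_complete))
		return 0;

	/* Otherwise sleep until VC_RESUME_RESUMED calls complete_all() */
	if (wait_for_completion_killable(&arm_state->vc_resume_complete) != 0)
		return -EINTR;	/* woken by a fatal signal */

	return 0;
}
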
|
|
+
|
|
+
|
|
+/* should be called with the write lock held */
|
|
+inline void
|
|
+start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
|
|
+{
|
|
+ del_timer(&arm_state->suspend_timer);
|
|
+ arm_state->suspend_timer.expires = jiffies +
|
|
+ msecs_to_jiffies(arm_state->
|
|
+ suspend_timer_timeout);
|
|
+ add_timer(&arm_state->suspend_timer);
|
|
+ arm_state->suspend_timer_running = 1;
|
|
+}
|
|
+
|
|
+/* should be called with the write lock held */
|
|
+static inline void
|
|
+stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
|
|
+{
|
|
+ if (arm_state->suspend_timer_running) {
|
|
+ del_timer(&arm_state->suspend_timer);
|
|
+ arm_state->suspend_timer_running = 0;
|
|
+ }
|
|
+}
|
|
+
|
|
+static inline int
|
|
+need_resume(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
|
|
+ (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
|
|
+ vchiq_videocore_wanted(state);
|
|
+}
|
|
+
|
|
+static int
|
|
+block_resume(VCHIQ_ARM_STATE_T *arm_state)
|
|
+{
|
|
+ int status = VCHIQ_SUCCESS;
|
|
+ const unsigned long timeout_val =
|
|
+ msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
|
|
+ int resume_count = 0;
|
|
+
|
|
+ /* Allow any threads which were blocked by the last force suspend to
|
|
+ * complete if they haven't already. Only give this one shot; if
|
|
+ * blocked_count is incremented after blocked_blocker is completed
|
|
+ * (which only happens when blocked_count hits 0) then those threads
|
|
+ * will have to wait until next time around */
|
|
+ if (arm_state->blocked_count) {
|
|
+ INIT_COMPLETION(arm_state->blocked_blocker);
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
|
|
+ "blocked clients", __func__);
|
|
+ if (wait_for_completion_interruptible_timeout(
|
|
+ &arm_state->blocked_blocker, timeout_val)
|
|
+ <= 0) {
|
|
+ vchiq_log_error(vchiq_susp_log_level, "%s wait for "
|
|
+ "previously blocked clients failed" , __func__);
|
|
+ status = VCHIQ_ERROR;
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ goto out;
|
|
+ }
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
|
|
+ "clients resumed", __func__);
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ }
|
|
+
|
|
+	/* We need to wait for resume to complete if it's in progress */
|
|
+ while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
|
|
+ arm_state->vc_resume_state > VC_RESUME_IDLE) {
|
|
+ if (resume_count > 1) {
|
|
+ status = VCHIQ_ERROR;
|
|
+ vchiq_log_error(vchiq_susp_log_level, "%s waited too "
|
|
+ "many times for resume" , __func__);
|
|
+ goto out;
|
|
+ }
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
|
|
+ __func__);
|
|
+ if (wait_for_completion_interruptible_timeout(
|
|
+ &arm_state->vc_resume_complete, timeout_val)
|
|
+ <= 0) {
|
|
+ vchiq_log_error(vchiq_susp_log_level, "%s wait for "
|
|
+ "resume failed (%s)", __func__,
|
|
+ resume_state_names[arm_state->vc_resume_state +
|
|
+ VC_RESUME_NUM_OFFSET]);
|
|
+ status = VCHIQ_ERROR;
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ goto out;
|
|
+ }
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ resume_count++;
|
|
+ }
|
|
+ INIT_COMPLETION(arm_state->resume_blocker);
|
|
+ arm_state->resume_blocked = 1;
|
|
+
|
|
+out:
|
|
+ return status;
|
|
+}
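
block_resume() and unblock_resume() build on a reusable-completion "gate": INIT_COMPLETION() re-arms the gate so new waiters block, complete_all() releases every waiter at once, and waiters use the interruptible timeout variant. A stand-alone sketch of that idiom, assuming the older (pre-3.13) INIT_COMPLETION macro this driver already relies on; all names here are made up.

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Sketch of the gate idiom behind resume_blocker and blocked_blocker. */
struct example_gate {
	struct completion gate;
};

static void example_gate_init(struct example_gate *g)
{
	init_completion(&g->gate);	/* starts closed */
}

static void example_gate_close(struct example_gate *g)
{
	INIT_COMPLETION(g->gate);	/* subsequent waiters will block */
}

static void example_gate_open(struct example_gate *g)
{
	complete_all(&g->gate);		/* wake current and future waiters */
}

static int example_gate_wait(struct example_gate *g, unsigned int timeout_ms)
{
	long rc = wait_for_completion_interruptible_timeout(&g->gate,
			msecs_to_jiffies(timeout_ms));

	if (rc > 0)
		return 0;		/* gate opened in time */
	return (rc == 0) ? -ETIMEDOUT : (int)rc;
}
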
|
|
+
|
|
+static inline void
|
|
+unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
|
|
+{
|
|
+ complete_all(&arm_state->resume_blocker);
|
|
+ arm_state->resume_blocked = 0;
|
|
+}
|
|
+
|
|
+/* Initiate suspend via slot handler. Should be called with the write lock
|
|
+ * held */
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+
|
|
+ if (!arm_state)
|
|
+ goto out;
|
|
+
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
|
|
+ status = VCHIQ_SUCCESS;
|
|
+
|
|
+
|
|
+ switch (arm_state->vc_suspend_state) {
|
|
+ case VC_SUSPEND_REQUESTED:
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
|
|
+ "requested", __func__);
|
|
+ break;
|
|
+ case VC_SUSPEND_IN_PROGRESS:
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
|
|
+ "progress", __func__);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ /* We don't expect to be in other states, so log but continue
|
|
+ * anyway */
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "%s unexpected suspend state %s", __func__,
|
|
+ suspend_state_names[arm_state->vc_suspend_state +
|
|
+ VC_SUSPEND_NUM_OFFSET]);
|
|
+ /* fall through */
|
|
+ case VC_SUSPEND_REJECTED:
|
|
+ case VC_SUSPEND_FAILED:
|
|
+ /* Ensure any idle state actions have been run */
|
|
+ set_suspend_state(arm_state, VC_SUSPEND_IDLE);
|
|
+ /* fall through */
|
|
+ case VC_SUSPEND_IDLE:
|
|
+ vchiq_log_info(vchiq_susp_log_level,
|
|
+ "%s: suspending", __func__);
|
|
+ set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
|
|
+ /* kick the slot handler thread to initiate suspend */
|
|
+ request_poll(state, NULL, 0);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+out:
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ int susp = 0;
|
|
+
|
|
+ if (!arm_state)
|
|
+ goto out;
|
|
+
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
|
|
+
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
|
|
+ arm_state->vc_resume_state == VC_RESUME_RESUMED) {
|
|
+ set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
|
|
+ susp = 1;
|
|
+ }
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+
|
|
+ if (susp)
|
|
+ vchiq_platform_suspend(state);
|
|
+
|
|
+out:
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
|
|
+ return;
|
|
+}
|
|
+
|
|
+
|
|
+static void
|
|
+output_timeout_error(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ char service_err[50] = "";
|
|
+ int vc_use_count = arm_state->videocore_use_count;
|
|
+ int active_services = state->unused_service;
|
|
+ int i;
|
|
+
|
|
+ if (!arm_state->videocore_use_count) {
|
|
+ snprintf(service_err, 50, " Videocore usecount is 0");
|
|
+ goto output_msg;
|
|
+ }
|
|
+ for (i = 0; i < active_services; i++) {
|
|
+ VCHIQ_SERVICE_T *service_ptr = state->services[i];
|
|
+ if (service_ptr && service_ptr->service_use_count &&
|
|
+ (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
|
|
+ snprintf(service_err, 50, " %c%c%c%c(%d) service has "
|
|
+ "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
|
|
+ service_ptr->base.fourcc),
|
|
+ service_ptr->client_id,
|
|
+ service_ptr->service_use_count,
|
|
+ service_ptr->service_use_count ==
|
|
+ vc_use_count ? "" : " (+ more)");
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+output_msg:
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "timed out waiting for vc suspend (%d).%s",
|
|
+ arm_state->autosuspend_override, service_err);
|
|
+
|
|
+}
|
|
+
|
|
+/* Try to get videocore into suspended state, regardless of autosuspend state.
|
|
+** We don't actually force suspend, since videocore may get into a bad state
|
|
+** if we force suspend at a bad time. Instead, we wait for autosuspend to
|
|
+** determine a good point to suspend. If this doesn't happen within 100ms we
|
|
+** report failure.
|
|
+**
|
|
+** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
|
|
+** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
|
|
+*/
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
|
|
+ long rc = 0;
|
|
+ int repeat = -1;
|
|
+
|
|
+ if (!arm_state)
|
|
+ goto out;
|
|
+
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
|
|
+
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+
|
|
+ status = block_resume(arm_state);
|
|
+ if (status != VCHIQ_SUCCESS)
|
|
+ goto unlock;
|
|
+ if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
|
|
+ /* Already suspended - just block resume and exit */
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
|
|
+ __func__);
|
|
+ status = VCHIQ_SUCCESS;
|
|
+ goto unlock;
|
|
+ } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
|
|
+ /* initiate suspend immediately in the case that we're waiting
|
|
+ * for the timeout */
|
|
+ stop_suspend_timer(arm_state);
|
|
+ if (!vchiq_videocore_wanted(state)) {
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s videocore "
|
|
+ "idle, initiating suspend", __func__);
|
|
+ status = vchiq_arm_vcsuspend(state);
|
|
+ } else if (arm_state->autosuspend_override <
|
|
+ FORCE_SUSPEND_FAIL_MAX) {
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s letting "
|
|
+ "videocore go idle", __func__);
|
|
+ status = VCHIQ_SUCCESS;
|
|
+ } else {
|
|
+ vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
|
|
+ "many times - attempting suspend", __func__);
|
|
+ status = vchiq_arm_vcsuspend(state);
|
|
+ }
|
|
+ } else {
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
|
|
+ "in progress - wait for completion", __func__);
|
|
+ status = VCHIQ_SUCCESS;
|
|
+ }
|
|
+
|
|
+ /* Wait for suspend to happen due to system idle (not forced..) */
|
|
+ if (status != VCHIQ_SUCCESS)
|
|
+ goto unblock_resume;
|
|
+
|
|
+ do {
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+
|
|
+ rc = wait_for_completion_interruptible_timeout(
|
|
+ &arm_state->vc_suspend_complete,
|
|
+ msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
|
|
+
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ if (rc < 0) {
|
|
+ vchiq_log_warning(vchiq_susp_log_level, "%s "
|
|
+ "interrupted waiting for suspend", __func__);
|
|
+ status = VCHIQ_ERROR;
|
|
+ goto unblock_resume;
|
|
+ } else if (rc == 0) {
|
|
+ if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
|
|
+ /* Repeat timeout once if in progress */
|
|
+ if (repeat < 0) {
|
|
+ repeat = 1;
|
|
+ continue;
|
|
+ }
|
|
+ }
|
|
+ arm_state->autosuspend_override++;
|
|
+ output_timeout_error(state);
|
|
+
|
|
+ status = VCHIQ_RETRY;
|
|
+ goto unblock_resume;
|
|
+ }
|
|
+ } while (0 < (repeat--));
|
|
+
|
|
+ /* Check and report state in case we need to abort ARM suspend */
|
|
+ if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
|
|
+ status = VCHIQ_RETRY;
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "%s videocore suspend failed (state %s)", __func__,
|
|
+ suspend_state_names[arm_state->vc_suspend_state +
|
|
+ VC_SUSPEND_NUM_OFFSET]);
|
|
+ /* Reset the state only if it's still in an error state.
|
|
+ * Something could have already initiated another suspend. */
|
|
+ if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
|
|
+ set_suspend_state(arm_state, VC_SUSPEND_IDLE);
|
|
+
|
|
+ goto unblock_resume;
|
|
+ }
|
|
+
|
|
+ /* successfully suspended - unlock and exit */
|
|
+ goto unlock;
|
|
+
|
|
+unblock_resume:
|
|
+ /* all error states need to unblock resume before exit */
|
|
+ unblock_resume(arm_state);
|
|
+
|
|
+unlock:
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+
|
|
+out:
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
|
|
+ return status;
|
|
+}
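
Per the comment before vchiq_arm_force_suspend(), the function returns VCHIQ_RETRY when videocore fails to suspend within the timeout and VCHIQ_ERROR if interrupted, so callers are expected to treat VCHIQ_RETRY as a soft failure. A hedged sketch of how a caller might consume it; the hook name and the status-to-errno mapping are assumptions, not part of this patch.

#include <linux/errno.h>
#include "vchiq_arm.h"

/* Sketch only: a hypothetical platform power-management hook. */
static int example_pm_suspend_hook(VCHIQ_STATE_T *state)
{
	switch (vchiq_arm_force_suspend(state)) {
	case VCHIQ_SUCCESS:
		return 0;		/* videocore reached VC_SUSPEND_SUSPENDED */
	case VCHIQ_RETRY:
		return -EAGAIN;		/* videocore stayed busy - retry later */
	default:
		return -EIO;		/* interrupted or internal error */
	}
}
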
|
|
+
|
|
+void
|
|
+vchiq_check_suspend(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+
|
|
+ if (!arm_state)
|
|
+ goto out;
|
|
+
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
|
|
+
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
|
|
+ arm_state->first_connect &&
|
|
+ !vchiq_videocore_wanted(state)) {
|
|
+ vchiq_arm_vcsuspend(state);
|
|
+ }
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+
|
|
+out:
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
|
|
+ return;
|
|
+}
|
|
+
|
|
+
|
|
+int
|
|
+vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ int resume = 0;
|
|
+ int ret = -1;
|
|
+
|
|
+ if (!arm_state)
|
|
+ goto out;
|
|
+
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
|
|
+
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ unblock_resume(arm_state);
|
|
+ resume = vchiq_check_resume(state);
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+
|
|
+ if (resume) {
|
|
+ if (wait_for_completion_interruptible(
|
|
+ &arm_state->vc_resume_complete) < 0) {
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "%s interrupted", __func__);
|
|
+ /* failed, cannot accurately derive suspend
|
|
+ * state, so exit early. */
|
|
+ goto out;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ read_lock_bh(&arm_state->susp_res_lock);
|
|
+ if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
|
|
+ vchiq_log_info(vchiq_susp_log_level,
|
|
+ "%s: Videocore remains suspended", __func__);
|
|
+ } else {
|
|
+ vchiq_log_info(vchiq_susp_log_level,
|
|
+ "%s: Videocore resumed", __func__);
|
|
+ ret = 0;
|
|
+ }
|
|
+ read_unlock_bh(&arm_state->susp_res_lock);
|
|
+out:
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/* This function should be called with the write lock held */
|
|
+int
|
|
+vchiq_check_resume(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ int resume = 0;
|
|
+
|
|
+ if (!arm_state)
|
|
+ goto out;
|
|
+
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
|
|
+
|
|
+ if (need_resume(state)) {
|
|
+ set_resume_state(arm_state, VC_RESUME_REQUESTED);
|
|
+ request_poll(state, NULL, 0);
|
|
+ resume = 1;
|
|
+ }
|
|
+
|
|
+out:
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
|
|
+ return resume;
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_platform_check_resume(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ int res = 0;
|
|
+
|
|
+ if (!arm_state)
|
|
+ goto out;
|
|
+
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
|
|
+
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ if (arm_state->wake_address == 0) {
|
|
+ vchiq_log_info(vchiq_susp_log_level,
|
|
+ "%s: already awake", __func__);
|
|
+ goto unlock;
|
|
+ }
|
|
+ if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
|
|
+ vchiq_log_info(vchiq_susp_log_level,
|
|
+ "%s: already resuming", __func__);
|
|
+ goto unlock;
|
|
+ }
|
|
+
|
|
+ if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
|
|
+ set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
|
|
+ res = 1;
|
|
+ } else
|
|
+ vchiq_log_trace(vchiq_susp_log_level,
|
|
+ "%s: not resuming (resume state %s)", __func__,
|
|
+ resume_state_names[arm_state->vc_resume_state +
|
|
+ VC_RESUME_NUM_OFFSET]);
|
|
+
|
|
+unlock:
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+
|
|
+ if (res)
|
|
+ vchiq_platform_resume(state);
|
|
+
|
|
+out:
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
|
|
+ return;
|
|
+
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
|
|
+ enum USE_TYPE_E use_type)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
|
|
+ char entity[16];
|
|
+ int *entity_uc;
|
|
+ int local_uc, local_entity_uc;
|
|
+
|
|
+ if (!arm_state)
|
|
+ goto out;
|
|
+
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
|
|
+
|
|
+ if (use_type == USE_TYPE_VCHIQ) {
|
|
+ sprintf(entity, "VCHIQ: ");
|
|
+ entity_uc = &arm_state->peer_use_count;
|
|
+ } else if (service) {
|
|
+ sprintf(entity, "%c%c%c%c:%03d",
|
|
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
|
|
+ service->client_id);
|
|
+ entity_uc = &service->service_use_count;
|
|
+ } else {
|
|
+ vchiq_log_error(vchiq_susp_log_level, "%s null service "
|
|
+ "ptr", __func__);
|
|
+ ret = VCHIQ_ERROR;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ while (arm_state->resume_blocked) {
|
|
+ /* If we call 'use' while force suspend is waiting for suspend,
|
|
+ * then we're about to block the thread which the force is
|
|
+ * waiting to complete, so we're bound to just time out. In this
|
|
+ * case, set the suspend state such that the wait will be
|
|
+ * canceled, so we can complete as quickly as possible. */
|
|
+ if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
|
|
+ VC_SUSPEND_IDLE) {
|
|
+ set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
|
|
+ break;
|
|
+ }
|
|
+ /* If suspend is already in progress then we need to block */
|
|
+ if (!try_wait_for_completion(&arm_state->resume_blocker)) {
|
|
+ /* Indicate that there are threads waiting on the resume
|
|
+ * blocker. These need to be allowed to complete before
|
|
+ * a _second_ call to force suspend can complete,
|
|
+ * otherwise low priority threads might never actually
|
|
+ * continue */
|
|
+ arm_state->blocked_count++;
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
|
|
+ "blocked - waiting...", __func__, entity);
|
|
+ if (wait_for_completion_killable(
|
|
+ &arm_state->resume_blocker) != 0) {
|
|
+ vchiq_log_error(vchiq_susp_log_level, "%s %s "
|
|
+ "wait for resume blocker interrupted",
|
|
+ __func__, entity);
|
|
+ ret = VCHIQ_ERROR;
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ arm_state->blocked_count--;
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+ goto out;
|
|
+ }
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
|
|
+ "unblocked", __func__, entity);
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ if (--arm_state->blocked_count == 0)
|
|
+ complete_all(&arm_state->blocked_blocker);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ stop_suspend_timer(arm_state);
|
|
+
|
|
+ local_uc = ++arm_state->videocore_use_count;
|
|
+ local_entity_uc = ++(*entity_uc);
|
|
+
|
|
+ /* If there's a pending request which hasn't yet been serviced then
|
|
+ * just clear it. If we're past VC_SUSPEND_REQUESTED state then
|
|
+ * vc_resume_complete will block until we either resume or fail to
|
|
+ * suspend */
|
|
+ if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
|
|
+ set_suspend_state(arm_state, VC_SUSPEND_IDLE);
|
|
+
|
|
+ if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
|
|
+ set_resume_state(arm_state, VC_RESUME_REQUESTED);
|
|
+ vchiq_log_info(vchiq_susp_log_level,
|
|
+ "%s %s count %d, state count %d",
|
|
+ __func__, entity, local_entity_uc, local_uc);
|
|
+ request_poll(state, NULL, 0);
|
|
+ } else
|
|
+ vchiq_log_trace(vchiq_susp_log_level,
|
|
+ "%s %s count %d, state count %d",
|
|
+ __func__, entity, *entity_uc, local_uc);
|
|
+
|
|
+
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+
|
|
+ /* Completion is in a done state when we're not suspended, so this won't
|
|
+ * block for the non-suspended case. */
|
|
+ if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
|
|
+ __func__, entity);
|
|
+ if (wait_for_completion_killable(
|
|
+ &arm_state->vc_resume_complete) != 0) {
|
|
+ vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
|
|
+ "resume interrupted", __func__, entity);
|
|
+ ret = VCHIQ_ERROR;
|
|
+ goto out;
|
|
+ }
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
|
|
+ entity);
|
|
+ }
|
|
+
|
|
+ if (ret == VCHIQ_SUCCESS) {
|
|
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
|
|
+ long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
|
|
+ while (ack_cnt && (status == VCHIQ_SUCCESS)) {
|
|
+ /* Send the use notify to videocore */
|
|
+ status = vchiq_send_remote_use_active(state);
|
|
+ if (status == VCHIQ_SUCCESS)
|
|
+ ack_cnt--;
|
|
+ else
|
|
+ atomic_add(ack_cnt,
|
|
+ &arm_state->ka_use_ack_count);
|
|
+ }
|
|
+ }
|
|
+
|
|
+out:
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
|
|
+ return ret;
|
|
+}
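
The keep-alive acknowledgement handling at the end of vchiq_use_internal() drains ka_use_ack_count with atomic_xchg() and puts any undelivered remainder back with atomic_add() on failure. A stand-alone sketch of that drain/put-back idiom, with made-up names; example_send_one() merely stands in for vchiq_send_remote_use_active().

#include <linux/atomic.h>

static int example_send_one(void)
{
	return 0;			/* 0 = sent OK (placeholder) */
}

/* Sketch: claim everything queued so far, retry later if delivery fails. */
static void example_drain_pending(atomic_t *pending)
{
	int n = atomic_xchg(pending, 0);

	while (n > 0) {
		if (example_send_one() == 0) {
			n--;
		} else {
			atomic_add(n, pending);	/* give the rest back */
			break;
		}
	}
}
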
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
|
|
+ char entity[16];
|
|
+ int *entity_uc;
|
|
+ int local_uc, local_entity_uc;
|
|
+
|
|
+ if (!arm_state)
|
|
+ goto out;
|
|
+
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
|
|
+
|
|
+ if (service) {
|
|
+ sprintf(entity, "%c%c%c%c:%03d",
|
|
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
|
|
+ service->client_id);
|
|
+ entity_uc = &service->service_use_count;
|
|
+ } else {
|
|
+ sprintf(entity, "PEER: ");
|
|
+ entity_uc = &arm_state->peer_use_count;
|
|
+ }
|
|
+
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ if (!arm_state->videocore_use_count || !(*entity_uc)) {
|
|
+ /* Don't use BUG_ON - don't allow user thread to crash kernel */
|
|
+ WARN_ON(!arm_state->videocore_use_count);
|
|
+ WARN_ON(!(*entity_uc));
|
|
+ ret = VCHIQ_ERROR;
|
|
+ goto unlock;
|
|
+ }
|
|
+ local_uc = --arm_state->videocore_use_count;
|
|
+ local_entity_uc = --(*entity_uc);
|
|
+
|
|
+ if (!vchiq_videocore_wanted(state)) {
|
|
+ if (vchiq_platform_use_suspend_timer() &&
|
|
+ !arm_state->resume_blocked) {
|
|
+ /* Only use the timer if we're not trying to force
|
|
+ * suspend (=> resume_blocked) */
|
|
+ start_suspend_timer(arm_state);
|
|
+ } else {
|
|
+ vchiq_log_info(vchiq_susp_log_level,
|
|
+ "%s %s count %d, state count %d - suspending",
|
|
+ __func__, entity, *entity_uc,
|
|
+ arm_state->videocore_use_count);
|
|
+ vchiq_arm_vcsuspend(state);
|
|
+ }
|
|
+ } else
|
|
+ vchiq_log_trace(vchiq_susp_log_level,
|
|
+ "%s %s count %d, state count %d",
|
|
+ __func__, entity, *entity_uc,
|
|
+ arm_state->videocore_use_count);
|
|
+
|
|
+unlock:
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+
|
|
+out:
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_on_remote_use(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
|
|
+ atomic_inc(&arm_state->ka_use_count);
|
|
+ complete(&arm_state->ka_evt);
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_on_remote_release(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
|
|
+ atomic_inc(&arm_state->ka_release_count);
|
|
+ complete(&arm_state->ka_evt);
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ return vchiq_release_internal(service->state, service);
|
|
+}
|
|
+
|
|
+static void suspend_timer_callback(unsigned long context)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ if (!arm_state)
|
|
+ goto out;
|
|
+ vchiq_log_info(vchiq_susp_log_level,
|
|
+ "%s - suspend timer expired - check suspend", __func__);
|
|
+ vchiq_check_suspend(state);
|
|
+out:
|
|
+ return;
|
|
+}
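
suspend_timer_callback() has the classic (pre-4.15) timer signature taking an unsigned long context, and start_suspend_timer() only arms an already-initialised timer. The actual timer setup is presumably done elsewhere in the driver and is not visible in this hunk; a sketch of what that setup would typically look like under that assumption, with an illustrative timeout value.

#include <linux/timer.h>
#include "vchiq_arm.h"

/* Sketch only: prepare the suspend timer with the classic timer API. */
static void example_init_suspend_timer(VCHIQ_STATE_T *state,
	VCHIQ_ARM_STATE_T *arm_state)
{
	init_timer(&arm_state->suspend_timer);
	arm_state->suspend_timer.function = suspend_timer_callback;
	arm_state->suspend_timer.data = (unsigned long)state;
	arm_state->suspend_timer_timeout = 100;	/* ms, illustrative */
	arm_state->suspend_timer_running = 0;
}
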
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ VCHIQ_STATUS_T ret = VCHIQ_ERROR;
|
|
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
|
|
+ if (service) {
|
|
+ ret = vchiq_use_internal(service->state, service,
|
|
+ USE_TYPE_SERVICE_NO_RESUME);
|
|
+ unlock_service(service);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ VCHIQ_STATUS_T ret = VCHIQ_ERROR;
|
|
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
|
|
+ if (service) {
|
|
+ ret = vchiq_use_internal(service->state, service,
|
|
+ USE_TYPE_SERVICE);
|
|
+ unlock_service(service);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ VCHIQ_STATUS_T ret = VCHIQ_ERROR;
|
|
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
|
|
+ if (service) {
|
|
+ ret = vchiq_release_internal(service->state, service);
|
|
+ unlock_service(service);
|
|
+ }
|
|
+ return ret;
|
|
+}
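
vchiq_use_service() and vchiq_release_service() are reference counted: the videocore use count is the sum of the per-service counts, and suspend is only initiated once it drops to zero, so every successful use must be balanced by a release. A minimal usage sketch; example_do_work() is a placeholder for real message traffic and is not part of this patch.

#include "vchiq_arm.h"

static VCHIQ_STATUS_T example_do_work(VCHIQ_SERVICE_HANDLE_T handle)
{
	(void)handle;
	return VCHIQ_SUCCESS;		/* placeholder workload */
}

/* Sketch: balanced use/release around a burst of service activity. */
static VCHIQ_STATUS_T example_transaction(VCHIQ_SERVICE_HANDLE_T handle)
{
	VCHIQ_STATUS_T status = vchiq_use_service(handle);

	if (status != VCHIQ_SUCCESS)
		return status;		/* videocore could not be resumed */

	status = example_do_work(handle);

	/* Always drop the reference, even on failure, so the use count can
	 * reach zero and suspend can happen again. */
	vchiq_release_service(handle);

	return status;
}
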
|
|
+
|
|
+void
|
|
+vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ int i, j = 0;
|
|
+ /* Only dump 64 services */
|
|
+ static const int local_max_services = 64;
|
|
+ /* If there's more than 64 services, only dump ones with
|
|
+ * non-zero counts */
|
|
+ int only_nonzero = 0;
|
|
+ static const char *nz = "<-- preventing suspend";
|
|
+
|
|
+ enum vc_suspend_status vc_suspend_state;
|
|
+ enum vc_resume_status vc_resume_state;
|
|
+ int peer_count;
|
|
+ int vc_use_count;
|
|
+ int active_services;
|
|
+ struct service_data_struct {
|
|
+ int fourcc;
|
|
+ int clientid;
|
|
+ int use_count;
|
|
+ } service_data[local_max_services];
|
|
+
|
|
+ if (!arm_state)
|
|
+ return;
|
|
+
|
|
+ read_lock_bh(&arm_state->susp_res_lock);
|
|
+ vc_suspend_state = arm_state->vc_suspend_state;
|
|
+ vc_resume_state = arm_state->vc_resume_state;
|
|
+ peer_count = arm_state->peer_use_count;
|
|
+ vc_use_count = arm_state->videocore_use_count;
|
|
+ active_services = state->unused_service;
|
|
+ if (active_services > local_max_services)
|
|
+ only_nonzero = 1;
|
|
+
|
|
+ for (i = 0; (i < active_services) && (j < local_max_services); i++) {
|
|
+ VCHIQ_SERVICE_T *service_ptr = state->services[i];
|
|
+ if (!service_ptr)
|
|
+ continue;
|
|
+
|
|
+ if (only_nonzero && !service_ptr->service_use_count)
|
|
+ continue;
|
|
+
|
|
+ if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
|
|
+ service_data[j].fourcc = service_ptr->base.fourcc;
|
|
+ service_data[j].clientid = service_ptr->client_id;
|
|
+ service_data[j++].use_count = service_ptr->
|
|
+ service_use_count;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ read_unlock_bh(&arm_state->susp_res_lock);
|
|
+
|
|
+	vchiq_log_warning(vchiq_susp_log_level,
+		"-- Videocore suspend state: %s --",
+		suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
+	vchiq_log_warning(vchiq_susp_log_level,
+		"-- Videocore resume state: %s --",
+		resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
|
|
+
|
|
+ if (only_nonzero)
|
|
+ vchiq_log_warning(vchiq_susp_log_level, "Too many active "
|
|
+ "services (%d). Only dumping up to first %d services "
|
|
+ "with non-zero use-count", active_services,
|
|
+ local_max_services);
|
|
+
|
|
+ for (i = 0; i < j; i++) {
|
|
+ vchiq_log_warning(vchiq_susp_log_level,
|
|
+ "----- %c%c%c%c:%d service count %d %s",
|
|
+ VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
|
|
+ service_data[i].clientid,
|
|
+ service_data[i].use_count,
|
|
+ service_data[i].use_count ? nz : "");
|
|
+ }
|
|
+	vchiq_log_warning(vchiq_susp_log_level,
+		"----- VCHIQ use count %d", peer_count);
|
|
+ vchiq_log_warning(vchiq_susp_log_level,
|
|
+ "--- Overall vchiq instance use count %d", vc_use_count);
|
|
+
|
|
+ vchiq_dump_platform_use_state(state);
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_check_service(VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state;
|
|
+ VCHIQ_STATUS_T ret = VCHIQ_ERROR;
|
|
+
|
|
+ if (!service || !service->state)
|
|
+ goto out;
|
|
+
|
|
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
|
|
+
|
|
+ arm_state = vchiq_platform_get_arm_state(service->state);
|
|
+
|
|
+ read_lock_bh(&arm_state->susp_res_lock);
|
|
+ if (service->service_use_count)
|
|
+ ret = VCHIQ_SUCCESS;
|
|
+ read_unlock_bh(&arm_state->susp_res_lock);
|
|
+
|
|
+ if (ret == VCHIQ_ERROR) {
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "%s ERROR - %c%c%c%c:%d service count %d, "
|
|
+ "state count %d, videocore suspend state %s", __func__,
|
|
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
|
|
+ service->client_id, service->service_use_count,
|
|
+ arm_state->videocore_use_count,
|
|
+ suspend_state_names[arm_state->vc_suspend_state +
|
|
+ VC_SUSPEND_NUM_OFFSET]);
|
|
+ vchiq_dump_service_use_state(service->state);
|
|
+ }
|
|
+out:
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/* stub functions */
|
|
+void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ (void)state;
|
|
+}
|
|
+
|
|
+void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
|
|
+ VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
|
|
+{
|
|
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
|
|
+ vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
|
|
+ get_conn_state_name(oldstate), get_conn_state_name(newstate));
|
|
+ if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
|
|
+ write_lock_bh(&arm_state->susp_res_lock);
|
|
+ if (!arm_state->first_connect) {
|
|
+ char threadname[10];
|
|
+ arm_state->first_connect = 1;
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+ snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
|
|
+ state->id);
|
|
+ arm_state->ka_thread = kthread_create(
|
|
+ &vchiq_keepalive_thread_func,
|
|
+ (void *)state,
|
|
+ threadname);
|
|
+ if (arm_state->ka_thread == NULL) {
|
|
+ vchiq_log_error(vchiq_susp_log_level,
|
|
+ "vchiq: FATAL: couldn't create thread %s",
|
|
+ threadname);
|
|
+ } else {
|
|
+ wake_up_process(arm_state->ka_thread);
|
|
+ }
|
|
+ } else
|
|
+ write_unlock_bh(&arm_state->susp_res_lock);
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_init - called when the module is loaded.
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+static int __init
|
|
+vchiq_init(void)
|
|
+{
|
|
+ int err;
|
|
+ void *ptr_err;
|
|
+
|
|
+ /* create proc entries */
|
|
+ err = vchiq_proc_init();
|
|
+ if (err != 0)
|
|
+ goto failed_proc_init;
|
|
+
|
|
+ err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
|
|
+ if (err != 0) {
|
|
+ vchiq_log_error(vchiq_arm_log_level,
|
|
+ "Unable to allocate device number");
|
|
+ goto failed_alloc_chrdev;
|
|
+ }
|
|
+ cdev_init(&vchiq_cdev, &vchiq_fops);
|
|
+ vchiq_cdev.owner = THIS_MODULE;
|
|
+ err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
|
|
+ if (err != 0) {
|
|
+ vchiq_log_error(vchiq_arm_log_level,
|
|
+ "Unable to register device");
|
|
+ goto failed_cdev_add;
|
|
+ }
|
|
+
|
|
+ /* create sysfs entries */
|
|
+ vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
|
|
+ ptr_err = vchiq_class;
|
|
+ if (IS_ERR(ptr_err))
|
|
+ goto failed_class_create;
|
|
+
|
|
+ vchiq_dev = device_create(vchiq_class, NULL,
|
|
+ vchiq_devid, NULL, "vchiq");
|
|
+ ptr_err = vchiq_dev;
|
|
+ if (IS_ERR(ptr_err))
|
|
+ goto failed_device_create;
|
|
+
|
|
+ err = vchiq_platform_init(&g_state);
|
|
+ if (err != 0)
|
|
+ goto failed_platform_init;
|
|
+
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "vchiq: initialised - version %d (min %d), device %d.%d",
|
|
+ VCHIQ_VERSION, VCHIQ_VERSION_MIN,
|
|
+ MAJOR(vchiq_devid), MINOR(vchiq_devid));
|
|
+
|
|
+ return 0;
|
|
+
|
|
+failed_platform_init:
|
|
+ device_destroy(vchiq_class, vchiq_devid);
|
|
+failed_device_create:
|
|
+ class_destroy(vchiq_class);
|
|
+failed_class_create:
|
|
+ cdev_del(&vchiq_cdev);
|
|
+ err = PTR_ERR(ptr_err);
|
|
+failed_cdev_add:
|
|
+ unregister_chrdev_region(vchiq_devid, 1);
|
|
+failed_alloc_chrdev:
|
|
+ vchiq_proc_deinit();
|
|
+failed_proc_init:
|
|
+ vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+ int use_count = 0, i;
|
|
+ i = 0;
|
|
+ while ((service = next_service_by_instance(instance->state,
|
|
+ instance, &i)) != NULL) {
|
|
+ use_count += service->service_use_count;
|
|
+ unlock_service(service);
|
|
+ }
|
|
+ return use_count;
|
|
+}
|
|
+
|
|
+/* read the per-process use-count */
|
|
+static int proc_read_use_count(char *page, char **start,
|
|
+ off_t off, int count,
|
|
+ int *eof, void *data)
|
|
+{
|
|
+ VCHIQ_INSTANCE_T instance = data;
|
|
+ int len, use_count;
|
|
+
|
|
+ use_count = vchiq_instance_get_use_count(instance);
|
|
+ len = snprintf(page+off, count, "%d\n", use_count);
|
|
+
|
|
+ return len;
|
|
+}
|
|
+
|
|
+/* add an instance (process) to the proc entries */
|
|
+static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
|
|
+{
|
|
+ char pidstr[32];
|
|
+ struct proc_dir_entry *top, *use_count;
|
|
+ struct proc_dir_entry *clients = vchiq_clients_top();
|
|
+ int pid = instance->pid;
|
|
+
|
|
+ snprintf(pidstr, sizeof(pidstr), "%d", pid);
|
|
+ top = proc_mkdir(pidstr, clients);
|
|
+ if (!top)
|
|
+ goto fail_top;
|
|
+#if 0
|
|
+ use_count = create_proc_read_entry("use_count",
|
|
+ 0444, top,
|
|
+ proc_read_use_count,
|
|
+ instance);
|
|
+ if (!use_count)
|
|
+ goto fail_use_count;
|
|
+
|
|
+ instance->proc_entry = top;
|
|
+#endif
|
|
+ return 0;
|
|
+
|
|
+fail_use_count:
|
|
+#if 0
|
|
+ remove_proc_entry(top->name, clients);
|
|
+#endif
|
|
+fail_top:
|
|
+ return -ENOMEM;
|
|
+}
|
|
+
|
|
+static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
|
|
+{
|
|
+#if 0
|
|
+ struct proc_dir_entry *clients = vchiq_clients_top();
|
|
+ remove_proc_entry("use_count", instance->proc_entry);
|
|
+ remove_proc_entry(instance->proc_entry->name, clients);
|
|
+#endif
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_exit - called when the module is unloaded.
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+static void __exit
|
|
+vchiq_exit(void)
|
|
+{
|
|
+ vchiq_platform_exit(&g_state);
|
|
+ device_destroy(vchiq_class, vchiq_devid);
|
|
+ class_destroy(vchiq_class);
|
|
+ cdev_del(&vchiq_cdev);
|
|
+ unregister_chrdev_region(vchiq_devid, 1);
|
|
+}
|
|
+
|
|
+module_init(vchiq_init);
|
|
+module_exit(vchiq_exit);
|
|
+MODULE_LICENSE("GPL");
|
|
+MODULE_AUTHOR("Broadcom Corporation");
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
|
|
@@ -0,0 +1,212 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHIQ_ARM_H
|
|
+#define VCHIQ_ARM_H
|
|
+
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/semaphore.h>
|
|
+#include <linux/atomic.h>
|
|
+#include "vchiq_core.h"
|
|
+
|
|
+
|
|
+enum vc_suspend_status {
|
|
+ VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
|
|
+ VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
|
|
+ VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */
|
|
+ VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */
|
|
+ VC_SUSPEND_REQUESTED, /* User has requested suspend */
|
|
+ VC_SUSPEND_IN_PROGRESS, /* Slot handler has recvd suspend request */
|
|
+ VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */
|
|
+};
|
|
+
|
|
+enum vc_resume_status {
|
|
+ VC_RESUME_FAILED = -1, /* Videocore resume failed */
|
|
+ VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */
|
|
+ VC_RESUME_REQUESTED, /* User has requested resume */
|
|
+ VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
|
|
+ VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
|
|
+};
|
|
+
|
|
+
|
|
+enum USE_TYPE_E {
|
|
+ USE_TYPE_SERVICE,
|
|
+ USE_TYPE_SERVICE_NO_RESUME,
|
|
+ USE_TYPE_VCHIQ
|
|
+};
|
|
+
|
|
+
|
|
+
|
|
+typedef struct vchiq_arm_state_struct {
|
|
+ /* Keepalive-related data */
|
|
+ struct task_struct *ka_thread;
|
|
+ struct completion ka_evt;
|
|
+ atomic_t ka_use_count;
|
|
+ atomic_t ka_use_ack_count;
|
|
+ atomic_t ka_release_count;
|
|
+
|
|
+ struct completion vc_suspend_complete;
|
|
+ struct completion vc_resume_complete;
|
|
+
|
|
+ rwlock_t susp_res_lock;
|
|
+ enum vc_suspend_status vc_suspend_state;
|
|
+ enum vc_resume_status vc_resume_state;
|
|
+
|
|
+ unsigned int wake_address;
|
|
+
|
|
+ struct timer_list suspend_timer;
|
|
+ int suspend_timer_timeout;
|
|
+ int suspend_timer_running;
|
|
+
|
|
+ /* Global use count for videocore.
|
|
+ ** This is equal to the sum of the use counts for all services. When
|
|
+ ** this hits zero the videocore suspend procedure will be initiated.
|
|
+ */
|
|
+ int videocore_use_count;
|
|
+
|
|
+ /* Use count to track requests from videocore peer.
|
|
+ ** This use count is not associated with a service, so needs to be
|
|
+ ** tracked separately with the state.
|
|
+ */
|
|
+ int peer_use_count;
|
|
+
|
|
+	/* Flag to indicate whether resume is blocked. This happens when the
+	** ARM is suspending.
+	*/
|
|
+ struct completion resume_blocker;
|
|
+ int resume_blocked;
|
|
+ struct completion blocked_blocker;
|
|
+ int blocked_count;
|
|
+
|
|
+ int autosuspend_override;
|
|
+
|
|
+ /* Flag to indicate that the first vchiq connect has made it through.
|
|
+ ** This means that both sides should be fully ready, and we should
|
|
+ ** be able to suspend after this point.
|
|
+ */
|
|
+ int first_connect;
|
|
+
|
|
+ unsigned long long suspend_start_time;
|
|
+ unsigned long long sleep_start_time;
|
|
+ unsigned long long resume_start_time;
|
|
+ unsigned long long last_wake_time;
|
|
+
|
|
+} VCHIQ_ARM_STATE_T;
|
|
+
|
|
+extern int vchiq_arm_log_level;
|
|
+extern int vchiq_susp_log_level;
|
|
+
|
|
+extern int __init
|
|
+vchiq_platform_init(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern void __exit
|
|
+vchiq_platform_exit(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATE_T *
|
|
+vchiq_get_state(void);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern int
|
|
+vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_arm_vcresume(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
|
|
+
|
|
+extern int
|
|
+vchiq_check_resume(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern void
|
|
+vchiq_check_suspend(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_check_service(VCHIQ_SERVICE_T *service);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_platform_suspend(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern int
|
|
+vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern int
|
|
+vchiq_platform_use_suspend_timer(void);
|
|
+
|
|
+extern void
|
|
+vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern void
|
|
+vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_ARM_STATE_T*
|
|
+vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern int
|
|
+vchiq_videocore_wanted(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
|
|
+ enum USE_TYPE_E use_type);
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
|
|
+
|
|
+void
|
|
+set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
|
|
+ enum vc_suspend_status new_state);
|
|
+
|
|
+void
|
|
+set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
|
|
+ enum vc_resume_status new_state);
|
|
+
|
|
+void
|
|
+start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
|
|
+
|
|
+extern int vchiq_proc_init(void);
|
|
+extern void vchiq_proc_deinit(void);
|
|
+extern struct proc_dir_entry *vchiq_proc_top(void);
|
|
+extern struct proc_dir_entry *vchiq_clients_top(void);
|
|
+
|
|
+
|
|
+#endif /* VCHIQ_ARM_H */
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
|
|
@@ -0,0 +1,37 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+const char *vchiq_get_build_hostname(void);
|
|
+const char *vchiq_get_build_version(void);
|
|
+const char *vchiq_get_build_time(void);
|
|
+const char *vchiq_get_build_date(void);
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
|
|
@@ -0,0 +1,60 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHIQ_CFG_H
|
|
+#define VCHIQ_CFG_H
|
|
+
|
|
+#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
|
|
+/* The version of VCHIQ - change with any non-trivial change */
|
|
+#define VCHIQ_VERSION 6
|
|
+/* The minimum compatible version - update to match VCHIQ_VERSION with any
|
|
+** incompatible change */
|
|
+#define VCHIQ_VERSION_MIN 3
|
|
+
|
|
+#define VCHIQ_MAX_STATES 1
|
|
+#define VCHIQ_MAX_SERVICES 4096
|
|
+#define VCHIQ_MAX_SLOTS 128
|
|
+#define VCHIQ_MAX_SLOTS_PER_SIDE 64
|
|
+
|
|
+#define VCHIQ_NUM_CURRENT_BULKS 32
|
|
+#define VCHIQ_NUM_SERVICE_BULKS 4
|
|
+
|
|
+#ifndef VCHIQ_ENABLE_DEBUG
|
|
+#define VCHIQ_ENABLE_DEBUG 1
|
|
+#endif
|
|
+
|
|
+#ifndef VCHIQ_ENABLE_STATS
|
|
+#define VCHIQ_ENABLE_STATS 1
|
|
+#endif
|
|
+
|
|
+#endif /* VCHIQ_CFG_H */
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
|
|
@@ -0,0 +1,119 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#include "vchiq_connected.h"
|
|
+#include "vchiq_core.h"
|
|
+#include <linux/module.h>
|
|
+#include <linux/mutex.h>
|
|
+
|
|
+#define MAX_CALLBACKS 10
|
|
+
|
|
+static int g_connected;
|
|
+static int g_num_deferred_callbacks;
|
|
+static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
|
|
+static int g_once_init;
|
|
+static struct mutex g_connected_mutex;
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* Function to initialize our lock.
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+static void connected_init(void)
|
|
+{
|
|
+ if (!g_once_init) {
|
|
+ mutex_init(&g_connected_mutex);
|
|
+ g_once_init = 1;
|
|
+ }
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* This function is used to defer initialization until the vchiq stack is
|
|
+* initialized. If the stack is already initialized, then the callback will
|
|
+* be made immediately, otherwise it will be deferred until
|
|
+* vchiq_call_connected_callbacks is called.
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
|
|
+{
|
|
+ connected_init();
|
|
+
|
|
+ if (mutex_lock_interruptible(&g_connected_mutex) != 0)
|
|
+ return;
|
|
+
|
|
+ if (g_connected)
|
|
+ /* We're already connected. Call the callback immediately. */
|
|
+
|
|
+ callback();
|
|
+ else {
|
|
+ if (g_num_deferred_callbacks >= MAX_CALLBACKS)
|
|
+			vchiq_log_error(vchiq_core_log_level,
+				"There are already %d callbacks registered - "
+				"please increase MAX_CALLBACKS",
+				g_num_deferred_callbacks);
|
|
+ else {
|
|
+ g_deferred_callback[g_num_deferred_callbacks] =
|
|
+ callback;
|
|
+ g_num_deferred_callbacks++;
|
|
+ }
|
|
+ }
|
|
+ mutex_unlock(&g_connected_mutex);
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* This function is called by the vchiq stack once it has been connected to
|
|
+* the videocore and clients can start to use the stack.
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+void vchiq_call_connected_callbacks(void)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ connected_init();
|
|
+
|
|
+ if (mutex_lock_interruptible(&g_connected_mutex) != 0)
|
|
+ return;
|
|
+
|
|
+ for (i = 0; i < g_num_deferred_callbacks; i++)
|
|
+ g_deferred_callback[i]();
|
|
+
|
|
+ g_num_deferred_callbacks = 0;
|
|
+ g_connected = 1;
|
|
+ mutex_unlock(&g_connected_mutex);
|
|
+}
|
|
+EXPORT_SYMBOL(vchiq_add_connected_callback);
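
vchiq_add_connected_callback() runs the callback immediately if the stack is already connected, otherwise it queues it until vchiq_call_connected_callbacks() fires after the first CONNECT. A sketch of a dependent driver deferring its VCHIQ-based setup this way; all names in the snippet are made up.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include "vchiq_connected.h"

static void example_on_vchiq_connected(void)
{
	pr_info("example: vchiq connected, services can be opened now\n");
}

static int __init example_client_init(void)
{
	/* Fires at once if already connected, otherwise deferred until
	 * vchiq_call_connected_callbacks(). */
	vchiq_add_connected_callback(example_on_vchiq_connected);
	return 0;
}
module_init(example_client_init);
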
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
|
|
@@ -0,0 +1,51 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHIQ_CONNECTED_H
|
|
+#define VCHIQ_CONNECTED_H
|
|
+
|
|
+/* ---- Include Files ----------------------------------------------------- */
|
|
+
|
|
+/* ---- Constants and Types ---------------------------------------------- */
|
|
+
|
|
+typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
|
|
+
|
|
+/* ---- Variable Externs ------------------------------------------------- */
|
|
+
|
|
+/* ---- Function Prototypes ---------------------------------------------- */
|
|
+
|
|
+void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
|
|
+void vchiq_call_connected_callbacks(void);
|
|
+
|
|
+#endif /* VCHIQ_CONNECTED_H */
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
|
|
@@ -0,0 +1,3818 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#include "vchiq_core.h"
|
|
+
|
|
+#define VCHIQ_SLOT_HANDLER_STACK 8192
|
|
+
|
|
+#define HANDLE_STATE_SHIFT 12
|
|
+
|
|
+#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
+#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
+#define SLOT_INDEX_FROM_DATA(state, data) \
+ (((unsigned int)((char *)data - (char *)state->slot_data)) / \
+ VCHIQ_SLOT_SIZE)
+#define SLOT_INDEX_FROM_INFO(state, info) \
+ ((unsigned int)(info - state->slot_info))
+#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
+ ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
+
+
+#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
+
+
+struct vchiq_open_payload {
+ int fourcc;
+ int client_id;
+ short version;
+ short version_min;
+};
+
+struct vchiq_openack_payload {
+ short version;
+};
+
+/* we require this for consistency between endpoints */
+vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
+vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
+vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
+vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
+vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
+vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
+
+/* Run time control of log level, based on KERN_XXX level. */
+int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
+int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
+int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
+
+static atomic_t pause_bulks_count = ATOMIC_INIT(0);
+
+static DEFINE_SPINLOCK(service_spinlock);
+DEFINE_SPINLOCK(bulk_waiter_spinlock);
+DEFINE_SPINLOCK(quota_spinlock);
+
+VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
+static unsigned int handle_seq;
+
+static const char *const srvstate_names[] = {
+ "FREE",
+ "HIDDEN",
+ "LISTENING",
+ "OPENING",
+ "OPEN",
+ "OPENSYNC",
+ "CLOSESENT",
+ "CLOSERECVD",
+ "CLOSEWAIT",
+ "CLOSED"
+};
+
+static const char *const reason_names[] = {
+ "SERVICE_OPENED",
+ "SERVICE_CLOSED",
+ "MESSAGE_AVAILABLE",
+ "BULK_TRANSMIT_DONE",
+ "BULK_RECEIVE_DONE",
+ "BULK_TRANSMIT_ABORTED",
+ "BULK_RECEIVE_ABORTED"
+};
+
+static const char *const conn_state_names[] = {
+ "DISCONNECTED",
+ "CONNECTING",
+ "CONNECTED",
+ "PAUSING",
+ "PAUSE_SENT",
+ "PAUSED",
+ "RESUMING",
+ "PAUSE_TIMEOUT",
+ "RESUME_TIMEOUT"
+};
|
|
+
|
|
+
|
|
+static void
|
|
+release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
|
|
+
|
|
+static const char *msg_type_str(unsigned int msg_type)
|
|
+{
|
|
+ switch (msg_type) {
|
|
+ case VCHIQ_MSG_PADDING: return "PADDING";
|
|
+ case VCHIQ_MSG_CONNECT: return "CONNECT";
|
|
+ case VCHIQ_MSG_OPEN: return "OPEN";
|
|
+ case VCHIQ_MSG_OPENACK: return "OPENACK";
|
|
+ case VCHIQ_MSG_CLOSE: return "CLOSE";
|
|
+ case VCHIQ_MSG_DATA: return "DATA";
|
|
+ case VCHIQ_MSG_BULK_RX: return "BULK_RX";
|
|
+ case VCHIQ_MSG_BULK_TX: return "BULK_TX";
|
|
+ case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
|
|
+ case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
|
|
+ case VCHIQ_MSG_PAUSE: return "PAUSE";
|
|
+ case VCHIQ_MSG_RESUME: return "RESUME";
|
|
+ case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
|
|
+ case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
|
|
+ case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
|
|
+ }
|
|
+ return "???";
|
|
+}
|
|
+
|
|
+static inline void
|
|
+vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
|
|
+{
|
|
+ vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
|
|
+ service->state->id, service->localport,
|
|
+ srvstate_names[service->srvstate],
|
|
+ srvstate_names[newstate]);
|
|
+ service->srvstate = newstate;
|
|
+}
|
|
+
|
|
+VCHIQ_SERVICE_T *
|
|
+find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+
|
|
+ spin_lock(&service_spinlock);
|
|
+ service = handle_to_service(handle);
|
|
+ if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
|
|
+ (service->handle == handle)) {
|
|
+ BUG_ON(service->ref_count == 0);
|
|
+ service->ref_count++;
|
|
+ } else
|
|
+ service = NULL;
|
|
+ spin_unlock(&service_spinlock);
|
|
+
|
|
+ if (!service)
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "Invalid service handle 0x%x", handle);
|
|
+
|
|
+ return service;
|
|
+}
|
|
+
|
|
+VCHIQ_SERVICE_T *
|
|
+find_service_by_port(VCHIQ_STATE_T *state, int localport)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service = NULL;
|
|
+ if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
|
|
+ spin_lock(&service_spinlock);
|
|
+ service = state->services[localport];
|
|
+ if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
|
|
+ BUG_ON(service->ref_count == 0);
|
|
+ service->ref_count++;
|
|
+ } else
|
|
+ service = NULL;
|
|
+ spin_unlock(&service_spinlock);
|
|
+ }
|
|
+
|
|
+ if (!service)
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "Invalid port %d", localport);
|
|
+
|
|
+ return service;
|
|
+}
|
|
+
|
|
+VCHIQ_SERVICE_T *
|
|
+find_service_for_instance(VCHIQ_INSTANCE_T instance,
|
|
+ VCHIQ_SERVICE_HANDLE_T handle) {
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+
|
|
+ spin_lock(&service_spinlock);
|
|
+ service = handle_to_service(handle);
|
|
+ if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
|
|
+ (service->handle == handle) &&
|
|
+ (service->instance == instance)) {
|
|
+ BUG_ON(service->ref_count == 0);
|
|
+ service->ref_count++;
|
|
+ } else
|
|
+ service = NULL;
|
|
+ spin_unlock(&service_spinlock);
|
|
+
|
|
+ if (!service)
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "Invalid service handle 0x%x", handle);
|
|
+
|
|
+ return service;
|
|
+}
|
|
+
|
|
+VCHIQ_SERVICE_T *
|
|
+next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
|
|
+ int *pidx)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service = NULL;
|
|
+ int idx = *pidx;
|
|
+
|
|
+ spin_lock(&service_spinlock);
|
|
+ while (idx < state->unused_service) {
|
|
+ VCHIQ_SERVICE_T *srv = state->services[idx++];
|
|
+ if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
|
|
+ (srv->instance == instance)) {
|
|
+ service = srv;
|
|
+ BUG_ON(service->ref_count == 0);
|
|
+ service->ref_count++;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ spin_unlock(&service_spinlock);
|
|
+
|
|
+ *pidx = idx;
|
|
+
|
|
+ return service;
|
|
+}
|
|
+
|
|
+void
|
|
+lock_service(VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ spin_lock(&service_spinlock);
|
|
+ BUG_ON(!service || (service->ref_count == 0));
|
|
+ if (service)
|
|
+ service->ref_count++;
|
|
+ spin_unlock(&service_spinlock);
|
|
+}
|
|
+
|
|
+void
|
|
+unlock_service(VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = service->state;
|
|
+ spin_lock(&service_spinlock);
|
|
+ BUG_ON(!service || (service->ref_count == 0));
|
|
+ if (service && service->ref_count) {
|
|
+ service->ref_count--;
|
|
+ if (!service->ref_count) {
|
|
+ BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
|
|
+ state->services[service->localport] = NULL;
|
|
+ } else
|
|
+ service = NULL;
|
|
+ }
|
|
+ spin_unlock(&service_spinlock);
|
|
+
|
|
+ kfree(service);
|
|
+}
|
|
+
|
|
+int
|
|
+vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
|
|
+ int id;
|
|
+
|
|
+ id = service ? service->client_id : 0;
|
|
+ if (service)
|
|
+ unlock_service(service);
|
|
+
|
|
+ return id;
|
|
+}
|
|
+
|
|
+void *
|
|
+vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service = handle_to_service(handle);
|
|
+
|
|
+ return service ? service->base.userdata : NULL;
|
|
+}
|
|
+
|
|
+int
|
|
+vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service = handle_to_service(handle);
|
|
+
|
|
+ return service ? service->base.fourcc : 0;
|
|
+}
|
|
+
|
|
+static void
|
|
+mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = service->state;
|
|
+ VCHIQ_SERVICE_QUOTA_T *service_quota;
|
|
+
|
|
+ service->closing = 1;
|
|
+
|
|
+ /* Synchronise with other threads. */
|
|
+ mutex_lock(&state->recycle_mutex);
|
|
+ mutex_unlock(&state->recycle_mutex);
|
|
+ if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
|
|
+ /* If we're pausing then the slot_mutex is held until resume
|
|
+ * by the slot handler. Therefore don't try to acquire this
|
|
+ * mutex if we're the slot handler and in the pause sent state.
|
|
+ * We don't need to in this case anyway. */
|
|
+ mutex_lock(&state->slot_mutex);
|
|
+ mutex_unlock(&state->slot_mutex);
|
|
+ }
|
|
+
|
|
+ /* Unblock any sending thread. */
|
|
+ service_quota = &state->service_quotas[service->localport];
|
|
+ up(&service_quota->quota_event);
|
|
+}
|
|
+
|
|
+static void
|
|
+mark_service_closing(VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ mark_service_closing_internal(service, 0);
|
|
+}
|
|
+
|
|
+static inline VCHIQ_STATUS_T
|
|
+make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
|
|
+ VCHIQ_HEADER_T *header, void *bulk_userdata)
|
|
+{
|
|
+ VCHIQ_STATUS_T status;
|
|
+ vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
|
|
+ service->state->id, service->localport, reason_names[reason],
|
|
+ (unsigned int)header, (unsigned int)bulk_userdata);
|
|
+ status = service->base.callback(reason, header, service->handle,
|
|
+ bulk_userdata);
|
|
+ if (status == VCHIQ_ERROR) {
|
|
+ vchiq_log_warning(vchiq_core_log_level,
|
|
+ "%d: ignoring ERROR from callback to service %x",
|
|
+ service->state->id, service->handle);
|
|
+ status = VCHIQ_SUCCESS;
|
|
+ }
|
|
+ return status;
|
|
+}
|
|
+
|
|
+inline void
|
|
+vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
|
|
+{
|
|
+ VCHIQ_CONNSTATE_T oldstate = state->conn_state;
|
|
+ vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
|
|
+ conn_state_names[oldstate],
|
|
+ conn_state_names[newstate]);
|
|
+ state->conn_state = newstate;
|
|
+ vchiq_platform_conn_state_changed(state, oldstate, newstate);
|
|
+}
|
|
+
|
|
+static inline void
|
|
+remote_event_create(REMOTE_EVENT_T *event)
|
|
+{
|
|
+ event->armed = 0;
|
|
+ /* Don't clear the 'fired' flag because it may already have been set
|
|
+ ** by the other side. */
|
|
+ sema_init(event->event, 0);
|
|
+}
|
|
+
|
|
+static inline void
|
|
+remote_event_destroy(REMOTE_EVENT_T *event)
|
|
+{
|
|
+ (void)event;
|
|
+}
|
|
+
|
|
+static inline int
|
|
+remote_event_wait(REMOTE_EVENT_T *event)
|
|
+{
|
|
+ if (!event->fired) {
|
|
+ event->armed = 1;
|
|
+ dsb();
|
|
+ if (!event->fired) {
|
|
+ if (down_interruptible(event->event) != 0) {
|
|
+ event->armed = 0;
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+ event->armed = 0;
|
|
+ wmb();
|
|
+ }
|
|
+
|
|
+ event->fired = 0;
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static inline void
|
|
+remote_event_signal_local(REMOTE_EVENT_T *event)
|
|
+{
|
|
+ event->armed = 0;
|
|
+ up(event->event);
|
|
+}
|
|
+
|
|
+static inline void
|
|
+remote_event_poll(REMOTE_EVENT_T *event)
|
|
+{
|
|
+ if (event->fired && event->armed)
|
|
+ remote_event_signal_local(event);
|
|
+}
|
|
+
|
|
+void
|
|
+remote_event_pollall(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ remote_event_poll(&state->local->sync_trigger);
|
|
+ remote_event_poll(&state->local->sync_release);
|
|
+ remote_event_poll(&state->local->trigger);
|
|
+ remote_event_poll(&state->local->recycle);
|
|
+}
|
|
+
|
|
+/* Round up message sizes so that any space at the end of a slot is always big
|
|
+** enough for a header. This relies on header size being a power of two, which
|
|
+** has been verified earlier by a static assertion. */
|
|
+
|
|
+static inline unsigned int
|
|
+calc_stride(unsigned int size)
|
|
+{
|
|
+ /* Allow room for the header */
|
|
+ size += sizeof(VCHIQ_HEADER_T);
|
|
+
|
|
+ /* Round up */
|
|
+ return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
|
|
+ - 1);
|
|
+}
|
|
+
|
|
+/* Called by the slot handler thread */
|
|
+static VCHIQ_SERVICE_T *
|
|
+get_listening_service(VCHIQ_STATE_T *state, int fourcc)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
|
|
+
|
|
+ for (i = 0; i < state->unused_service; i++) {
|
|
+ VCHIQ_SERVICE_T *service = state->services[i];
|
|
+ if (service &&
|
|
+ (service->public_fourcc == fourcc) &&
|
|
+ ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
|
|
+ ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
|
|
+ (service->remoteport == VCHIQ_PORT_FREE)))) {
|
|
+ lock_service(service);
|
|
+ return service;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/* Called by the slot handler thread */
|
|
+static VCHIQ_SERVICE_T *
|
|
+get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
|
|
+{
|
|
+ int i;
|
|
+ for (i = 0; i < state->unused_service; i++) {
|
|
+ VCHIQ_SERVICE_T *service = state->services[i];
|
|
+ if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
|
|
+ && (service->remoteport == port)) {
|
|
+ lock_service(service);
|
|
+ return service;
|
|
+ }
|
|
+ }
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+inline void
|
|
+request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
|
|
+{
|
|
+ uint32_t value;
|
|
+
|
|
+ if (service) {
|
|
+ do {
|
|
+ value = atomic_read(&service->poll_flags);
|
|
+ } while (atomic_cmpxchg(&service->poll_flags, value,
|
|
+ value | (1 << poll_type)) != value);
|
|
+
|
|
+ do {
|
|
+ value = atomic_read(&state->poll_services[
|
|
+ service->localport>>5]);
|
|
+ } while (atomic_cmpxchg(
|
|
+ &state->poll_services[service->localport>>5],
|
|
+ value, value | (1 << (service->localport & 0x1f)))
|
|
+ != value);
|
|
+ }
|
|
+
|
|
+ state->poll_needed = 1;
|
|
+ wmb();
|
|
+
|
|
+ /* ... and ensure the slot handler runs. */
|
|
+ remote_event_signal_local(&state->local->trigger);
|
|
+}
|
|
+
|
|
+/* Called from queue_message, by the slot handler and application threads,
|
|
+** with slot_mutex held */
|
|
+static VCHIQ_HEADER_T *
|
|
+reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
|
|
+{
|
|
+ VCHIQ_SHARED_STATE_T *local = state->local;
|
|
+ int tx_pos = state->local_tx_pos;
|
|
+ int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
|
|
+
|
|
+ if (space > slot_space) {
|
|
+ VCHIQ_HEADER_T *header;
|
|
+ /* Fill the remaining space with padding */
|
|
+ WARN_ON(state->tx_data == NULL);
|
|
+ header = (VCHIQ_HEADER_T *)
|
|
+ (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
|
|
+ header->msgid = VCHIQ_MSGID_PADDING;
|
|
+ header->size = slot_space - sizeof(VCHIQ_HEADER_T);
|
|
+
|
|
+ tx_pos += slot_space;
|
|
+ }
|
|
+
|
|
+ /* If necessary, get the next slot. */
|
|
+ if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
|
|
+ int slot_index;
|
|
+
|
|
+ /* If there is no free slot... */
|
|
+
|
|
+ if (down_trylock(&state->slot_available_event) != 0) {
|
|
+ /* ...wait for one. */
|
|
+
|
|
+ VCHIQ_STATS_INC(state, slot_stalls);
|
|
+
|
|
+ /* But first, flush through the last slot. */
|
|
+ state->local_tx_pos = tx_pos;
|
|
+ local->tx_pos = tx_pos;
|
|
+ remote_event_signal(&state->remote->trigger);
|
|
+
|
|
+ if (!is_blocking ||
|
|
+ (down_interruptible(
|
|
+ &state->slot_available_event) != 0))
|
|
+ return NULL; /* No space available */
|
|
+ }
|
|
+
|
|
+ BUG_ON(tx_pos ==
|
|
+ (state->slot_queue_available * VCHIQ_SLOT_SIZE));
|
|
+
|
|
+ slot_index = local->slot_queue[
|
|
+ SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
|
|
+ VCHIQ_SLOT_QUEUE_MASK];
|
|
+ state->tx_data =
|
|
+ (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
|
|
+ }
|
|
+
|
|
+ state->local_tx_pos = tx_pos + space;
|
|
+
|
|
+ return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
|
|
+}
|
|
+
|
|
+/* Called by the recycle thread. */
|
|
+static void
|
|
+process_free_queue(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_SHARED_STATE_T *local = state->local;
|
|
+ BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
|
|
+ int slot_queue_available;
|
|
+
|
|
+ /* Use a read memory barrier to ensure that any state that may have
|
|
+ ** been modified by another thread is not masked by stale prefetched
|
|
+ ** values. */
|
|
+ rmb();
|
|
+
|
|
+ /* Find slots which have been freed by the other side, and return them
|
|
+ ** to the available queue. */
|
|
+ slot_queue_available = state->slot_queue_available;
|
|
+
|
|
+ while (slot_queue_available != local->slot_queue_recycle) {
|
|
+ unsigned int pos;
|
|
+ int slot_index = local->slot_queue[slot_queue_available++ &
|
|
+ VCHIQ_SLOT_QUEUE_MASK];
|
|
+ char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
|
|
+ int data_found = 0;
|
|
+
|
|
+ vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
|
|
+ state->id, slot_index, (unsigned int)data,
|
|
+ local->slot_queue_recycle, slot_queue_available);
|
|
+
|
|
+ /* Initialise the bitmask for services which have used this
|
|
+ ** slot */
|
|
+ BITSET_ZERO(service_found);
|
|
+
|
|
+ pos = 0;
|
|
+
|
|
+ while (pos < VCHIQ_SLOT_SIZE) {
|
|
+ VCHIQ_HEADER_T *header =
|
|
+ (VCHIQ_HEADER_T *)(data + pos);
|
|
+ int msgid = header->msgid;
|
|
+ if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
|
|
+ int port = VCHIQ_MSG_SRCPORT(msgid);
|
|
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
|
|
+ &state->service_quotas[port];
|
|
+ int count;
|
|
+ spin_lock(&quota_spinlock);
|
|
+ count = service_quota->message_use_count;
|
|
+ if (count > 0)
|
|
+ service_quota->message_use_count =
|
|
+ count - 1;
|
|
+ spin_unlock(&quota_spinlock);
|
|
+
|
|
+ if (count == service_quota->message_quota)
|
|
+ /* Signal the service that it
|
|
+ ** has dropped below its quota
|
|
+ */
|
|
+ up(&service_quota->quota_event);
|
|
+ else if (count == 0) {
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "service %d "
|
|
+ "message_use_count=%d "
|
|
+ "(header %x, msgid %x, "
|
|
+ "header->msgid %x, "
|
|
+ "header->size %x)",
|
|
+ port,
|
|
+ service_quota->
|
|
+ message_use_count,
|
|
+ (unsigned int)header, msgid,
|
|
+ header->msgid,
|
|
+ header->size);
|
|
+ WARN(1, "invalid message use count\n");
|
|
+ }
|
|
+ if (!BITSET_IS_SET(service_found, port)) {
|
|
+ /* Set the found bit for this service */
|
|
+ BITSET_SET(service_found, port);
|
|
+
|
|
+ spin_lock(&quota_spinlock);
|
|
+ count = service_quota->slot_use_count;
|
|
+ if (count > 0)
|
|
+ service_quota->slot_use_count =
|
|
+ count - 1;
|
|
+ spin_unlock(&quota_spinlock);
|
|
+
|
|
+ if (count > 0) {
|
|
+ /* Signal the service in case
|
|
+ ** it has dropped below its
|
|
+ ** quota */
|
|
+ up(&service_quota->quota_event);
|
|
+ vchiq_log_trace(
|
|
+ vchiq_core_log_level,
|
|
+ "%d: pfq:%d %x@%x - "
|
|
+ "slot_use->%d",
|
|
+ state->id, port,
|
|
+ header->size,
|
|
+ (unsigned int)header,
|
|
+ count - 1);
|
|
+ } else {
|
|
+ vchiq_log_error(
|
|
+ vchiq_core_log_level,
|
|
+ "service %d "
|
|
+ "slot_use_count"
|
|
+ "=%d (header %x"
|
|
+ ", msgid %x, "
|
|
+ "header->msgid"
|
|
+ " %x, header->"
|
|
+ "size %x)",
|
|
+ port, count,
|
|
+ (unsigned int)header,
|
|
+ msgid,
|
|
+ header->msgid,
|
|
+ header->size);
|
|
+ WARN(1, "bad slot use count\n");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ data_found = 1;
|
|
+ }
|
|
+
|
|
+ pos += calc_stride(header->size);
|
|
+ if (pos > VCHIQ_SLOT_SIZE) {
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "pfq - pos %x: header %x, msgid %x, "
|
|
+ "header->msgid %x, header->size %x",
|
|
+ pos, (unsigned int)header, msgid,
|
|
+ header->msgid, header->size);
|
|
+ WARN(1, "invalid slot position\n");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (data_found) {
|
|
+ int count;
|
|
+ spin_lock(&quota_spinlock);
|
|
+ count = state->data_use_count;
|
|
+ if (count > 0)
|
|
+ state->data_use_count =
|
|
+ count - 1;
|
|
+ spin_unlock(&quota_spinlock);
|
|
+ if (count == state->data_quota)
|
|
+ up(&state->data_quota_event);
|
|
+ }
|
|
+
|
|
+ state->slot_queue_available = slot_queue_available;
|
|
+ up(&state->slot_available_event);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Called by the slot handler and application threads */
|
|
+static VCHIQ_STATUS_T
|
|
+queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
|
|
+ int msgid, const VCHIQ_ELEMENT_T *elements,
|
|
+ int count, int size, int is_blocking)
|
|
+{
|
|
+ VCHIQ_SHARED_STATE_T *local;
|
|
+ VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
|
|
+ VCHIQ_HEADER_T *header;
|
|
+ int type = VCHIQ_MSG_TYPE(msgid);
|
|
+
|
|
+ unsigned int stride;
|
|
+
|
|
+ local = state->local;
|
|
+
|
|
+ stride = calc_stride(size);
|
|
+
|
|
+ WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
|
|
+
|
|
+ if ((type != VCHIQ_MSG_RESUME) &&
|
|
+ (mutex_lock_interruptible(&state->slot_mutex) != 0))
|
|
+ return VCHIQ_RETRY;
|
|
+
|
|
+ if (type == VCHIQ_MSG_DATA) {
|
|
+ int tx_end_index;
|
|
+
|
|
+ BUG_ON(!service);
|
|
+
|
|
+ if (service->closing) {
|
|
+ /* The service has been closed */
|
|
+ mutex_unlock(&state->slot_mutex);
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+
|
|
+ service_quota = &state->service_quotas[service->localport];
|
|
+
|
|
+ spin_lock(&quota_spinlock);
|
|
+
|
|
+ /* Ensure this service doesn't use more than its quota of
|
|
+ ** messages or slots */
|
|
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
|
|
+ state->local_tx_pos + stride - 1);
|
|
+
|
|
+ /* Ensure data messages don't use more than their quota of
|
|
+ ** slots */
|
|
+ while ((tx_end_index != state->previous_data_index) &&
|
|
+ (state->data_use_count == state->data_quota)) {
|
|
+ VCHIQ_STATS_INC(state, data_stalls);
|
|
+ spin_unlock(&quota_spinlock);
|
|
+ mutex_unlock(&state->slot_mutex);
|
|
+
|
|
+ if (down_interruptible(&state->data_quota_event)
|
|
+ != 0)
|
|
+ return VCHIQ_RETRY;
|
|
+
|
|
+ mutex_lock(&state->slot_mutex);
|
|
+ spin_lock(&quota_spinlock);
|
|
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
|
|
+ state->local_tx_pos + stride - 1);
|
|
+ if ((tx_end_index == state->previous_data_index) ||
|
|
+ (state->data_use_count < state->data_quota)) {
|
|
+ /* Pass the signal on to other waiters */
|
|
+ up(&state->data_quota_event);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ while ((service_quota->message_use_count ==
|
|
+ service_quota->message_quota) ||
|
|
+ ((tx_end_index != service_quota->previous_tx_index) &&
|
|
+ (service_quota->slot_use_count ==
|
|
+ service_quota->slot_quota))) {
|
|
+ spin_unlock(&quota_spinlock);
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%d: qm:%d %s,%x - quota stall "
|
|
+ "(msg %d, slot %d)",
|
|
+ state->id, service->localport,
|
|
+ msg_type_str(type), size,
|
|
+ service_quota->message_use_count,
|
|
+ service_quota->slot_use_count);
|
|
+ VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
|
|
+ mutex_unlock(&state->slot_mutex);
|
|
+ if (down_interruptible(&service_quota->quota_event)
|
|
+ != 0)
|
|
+ return VCHIQ_RETRY;
|
|
+ if (service->closing)
|
|
+ return VCHIQ_ERROR;
|
|
+ if (mutex_lock_interruptible(&state->slot_mutex) != 0)
|
|
+ return VCHIQ_RETRY;
|
|
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
|
|
+ /* The service has been closed */
|
|
+ mutex_unlock(&state->slot_mutex);
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+ spin_lock(&quota_spinlock);
|
|
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
|
|
+ state->local_tx_pos + stride - 1);
|
|
+ }
|
|
+
|
|
+ spin_unlock(&quota_spinlock);
|
|
+ }
|
|
+
|
|
+ header = reserve_space(state, stride, is_blocking);
|
|
+
|
|
+ if (!header) {
|
|
+ if (service)
|
|
+ VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
|
|
+ mutex_unlock(&state->slot_mutex);
|
|
+ return VCHIQ_RETRY;
|
|
+ }
|
|
+
|
|
+ if (type == VCHIQ_MSG_DATA) {
|
|
+ int i, pos;
|
|
+ int tx_end_index;
|
|
+ int slot_use_count;
|
|
+
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: qm %s@%x,%x (%d->%d)",
|
|
+ state->id,
|
|
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
|
|
+ (unsigned int)header, size,
|
|
+ VCHIQ_MSG_SRCPORT(msgid),
|
|
+ VCHIQ_MSG_DSTPORT(msgid));
|
|
+
|
|
+ BUG_ON(!service);
|
|
+
|
|
+ for (i = 0, pos = 0; i < (unsigned int)count;
|
|
+ pos += elements[i++].size)
|
|
+ if (elements[i].size) {
|
|
+ if (vchiq_copy_from_user
|
|
+ (header->data + pos, elements[i].data,
|
|
+ (size_t) elements[i].size) !=
|
|
+ VCHIQ_SUCCESS) {
|
|
+ mutex_unlock(&state->slot_mutex);
|
|
+ VCHIQ_SERVICE_STATS_INC(service,
|
|
+ error_count);
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+ if (i == 0) {
|
|
+ if (vchiq_core_msg_log_level >=
|
|
+ VCHIQ_LOG_INFO)
|
|
+ vchiq_log_dump_mem("Sent", 0,
|
|
+ header->data + pos,
|
|
+ min(64u,
|
|
+ elements[0].size));
|
|
+ }
|
|
+ }
|
|
+
|
|
+ spin_lock(&quota_spinlock);
|
|
+ service_quota->message_use_count++;
|
|
+
|
|
+ tx_end_index =
|
|
+ SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
|
|
+
|
|
+ /* If this transmission can't fit in the last slot used by any
|
|
+ ** service, the data_use_count must be increased. */
|
|
+ if (tx_end_index != state->previous_data_index) {
|
|
+ state->previous_data_index = tx_end_index;
|
|
+ state->data_use_count++;
|
|
+ }
|
|
+
|
|
+ /* If this isn't the same slot last used by this service,
|
|
+ ** the service's slot_use_count must be increased. */
|
|
+ if (tx_end_index != service_quota->previous_tx_index) {
|
|
+ service_quota->previous_tx_index = tx_end_index;
|
|
+ slot_use_count = ++service_quota->slot_use_count;
|
|
+ } else {
|
|
+ slot_use_count = 0;
|
|
+ }
|
|
+
|
|
+ spin_unlock(&quota_spinlock);
|
|
+
|
|
+ if (slot_use_count)
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
|
|
+ state->id, service->localport,
|
|
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
|
|
+ slot_use_count, header);
|
|
+
|
|
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
|
|
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
|
|
+ } else {
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: qm %s@%x,%x (%d->%d)", state->id,
|
|
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
|
|
+ (unsigned int)header, size,
|
|
+ VCHIQ_MSG_SRCPORT(msgid),
|
|
+ VCHIQ_MSG_DSTPORT(msgid));
|
|
+ if (size != 0) {
|
|
+ WARN_ON(!((count == 1) && (size == elements[0].size)));
|
|
+ memcpy(header->data, elements[0].data,
|
|
+ elements[0].size);
|
|
+ }
|
|
+ VCHIQ_STATS_INC(state, ctrl_tx_count);
|
|
+ }
|
|
+
|
|
+ header->msgid = msgid;
|
|
+ header->size = size;
|
|
+
|
|
+ {
|
|
+ int svc_fourcc;
|
|
+
|
|
+ svc_fourcc = service
|
|
+ ? service->base.fourcc
|
|
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
|
|
+
|
|
+ vchiq_log_info(vchiq_core_msg_log_level,
|
|
+ "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
|
|
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
|
|
+ VCHIQ_MSG_TYPE(msgid),
|
|
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
|
|
+ VCHIQ_MSG_SRCPORT(msgid),
|
|
+ VCHIQ_MSG_DSTPORT(msgid),
|
|
+ size);
|
|
+ }
|
|
+
|
|
+ /* Make sure the new header is visible to the peer. */
|
|
+ wmb();
|
|
+
|
|
+ /* Make the new tx_pos visible to the peer. */
|
|
+ local->tx_pos = state->local_tx_pos;
|
|
+ wmb();
|
|
+
|
|
+ if (service && (type == VCHIQ_MSG_CLOSE))
|
|
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
|
|
+
|
|
+ if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
|
|
+ mutex_unlock(&state->slot_mutex);
|
|
+
|
|
+ remote_event_signal(&state->remote->trigger);
|
|
+
|
|
+ return VCHIQ_SUCCESS;
|
|
+}
|
|
+
|
|
+/* Called by the slot handler and application threads */
|
|
+static VCHIQ_STATUS_T
|
|
+queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
|
|
+ int msgid, const VCHIQ_ELEMENT_T *elements,
|
|
+ int count, int size, int is_blocking)
|
|
+{
|
|
+ VCHIQ_SHARED_STATE_T *local;
|
|
+ VCHIQ_HEADER_T *header;
|
|
+
|
|
+ local = state->local;
|
|
+
|
|
+ if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
|
|
+ (mutex_lock_interruptible(&state->sync_mutex) != 0))
|
|
+ return VCHIQ_RETRY;
|
|
+
|
|
+ remote_event_wait(&local->sync_release);
|
|
+
|
|
+ rmb();
|
|
+
|
|
+ header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
|
|
+ local->slot_sync);
|
|
+
|
|
+ {
|
|
+ int oldmsgid = header->msgid;
|
|
+ if (oldmsgid != VCHIQ_MSGID_PADDING)
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "%d: qms - msgid %x, not PADDING",
|
|
+ state->id, oldmsgid);
|
|
+ }
|
|
+
|
|
+ if (service) {
|
|
+ int i, pos;
|
|
+
|
|
+ vchiq_log_info(vchiq_sync_log_level,
|
|
+ "%d: qms %s@%x,%x (%d->%d)", state->id,
|
|
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
|
|
+ (unsigned int)header, size,
|
|
+ VCHIQ_MSG_SRCPORT(msgid),
|
|
+ VCHIQ_MSG_DSTPORT(msgid));
|
|
+
|
|
+ for (i = 0, pos = 0; i < (unsigned int)count;
|
|
+ pos += elements[i++].size)
|
|
+ if (elements[i].size) {
|
|
+ if (vchiq_copy_from_user
|
|
+ (header->data + pos, elements[i].data,
|
|
+ (size_t) elements[i].size) !=
|
|
+ VCHIQ_SUCCESS) {
|
|
+ mutex_unlock(&state->sync_mutex);
|
|
+ VCHIQ_SERVICE_STATS_INC(service,
|
|
+ error_count);
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+ if (i == 0) {
|
|
+ if (vchiq_sync_log_level >=
|
|
+ VCHIQ_LOG_TRACE)
|
|
+ vchiq_log_dump_mem("Sent Sync",
|
|
+ 0, header->data + pos,
|
|
+ min(64u,
|
|
+ elements[0].size));
|
|
+ }
|
|
+ }
|
|
+
|
|
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
|
|
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
|
|
+ } else {
|
|
+ vchiq_log_info(vchiq_sync_log_level,
|
|
+ "%d: qms %s@%x,%x (%d->%d)", state->id,
|
|
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
|
|
+ (unsigned int)header, size,
|
|
+ VCHIQ_MSG_SRCPORT(msgid),
|
|
+ VCHIQ_MSG_DSTPORT(msgid));
|
|
+ if (size != 0) {
|
|
+ WARN_ON(!((count == 1) && (size == elements[0].size)));
|
|
+ memcpy(header->data, elements[0].data,
|
|
+ elements[0].size);
|
|
+ }
|
|
+ VCHIQ_STATS_INC(state, ctrl_tx_count);
|
|
+ }
|
|
+
|
|
+ header->size = size;
|
|
+ header->msgid = msgid;
|
|
+
|
|
+ if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
|
|
+ int svc_fourcc;
|
|
+
|
|
+ svc_fourcc = service
|
|
+ ? service->base.fourcc
|
|
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
|
|
+
|
|
+ vchiq_log_trace(vchiq_sync_log_level,
|
|
+ "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
|
|
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
|
|
+ VCHIQ_MSG_TYPE(msgid),
|
|
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
|
|
+ VCHIQ_MSG_SRCPORT(msgid),
|
|
+ VCHIQ_MSG_DSTPORT(msgid),
|
|
+ size);
|
|
+ }
|
|
+
|
|
+ /* Make sure the new header is visible to the peer. */
|
|
+ wmb();
|
|
+
|
|
+ remote_event_signal(&state->remote->sync_trigger);
|
|
+
|
|
+ if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
|
|
+ mutex_unlock(&state->sync_mutex);
|
|
+
|
|
+ return VCHIQ_SUCCESS;
|
|
+}
|
|
+
|
|
+static inline void
|
|
+claim_slot(VCHIQ_SLOT_INFO_T *slot)
|
|
+{
|
|
+ slot->use_count++;
|
|
+}
|
|
+
|
|
+static void
|
|
+release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
|
|
+ VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ int release_count;
|
|
+
|
|
+ mutex_lock(&state->recycle_mutex);
|
|
+
|
|
+ if (header) {
|
|
+ int msgid = header->msgid;
|
|
+ if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
|
|
+ (service && service->closing)) {
|
|
+ mutex_unlock(&state->recycle_mutex);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Rewrite the message header to prevent a double
|
|
+ ** release */
|
|
+ header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
|
|
+ }
|
|
+
|
|
+ release_count = slot_info->release_count;
|
|
+ slot_info->release_count = ++release_count;
|
|
+
|
|
+ if (release_count == slot_info->use_count) {
|
|
+ int slot_queue_recycle;
|
|
+ /* Add to the freed queue */
|
|
+
|
|
+ /* A read barrier is necessary here to prevent speculative
|
|
+ ** fetches of remote->slot_queue_recycle from overtaking the
|
|
+ ** mutex. */
|
|
+ rmb();
|
|
+
|
|
+ slot_queue_recycle = state->remote->slot_queue_recycle;
|
|
+ state->remote->slot_queue[slot_queue_recycle &
|
|
+ VCHIQ_SLOT_QUEUE_MASK] =
|
|
+ SLOT_INDEX_FROM_INFO(state, slot_info);
|
|
+ state->remote->slot_queue_recycle = slot_queue_recycle + 1;
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: release_slot %d - recycle->%x",
|
|
+ state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
|
|
+ state->remote->slot_queue_recycle);
|
|
+
|
|
+ /* A write barrier is necessary, but remote_event_signal
|
|
+ ** contains one. */
|
|
+ remote_event_signal(&state->remote->recycle);
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&state->recycle_mutex);
|
|
+}
|
|
+
|
|
+/* Called by the slot handler - don't hold the bulk mutex */
|
|
+static VCHIQ_STATUS_T
|
|
+notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
|
|
+ int retry_poll)
|
|
+{
|
|
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
|
|
+
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%d: nb:%d %cx - p=%x rn=%x r=%x",
|
|
+ service->state->id, service->localport,
|
|
+ (queue == &service->bulk_tx) ? 't' : 'r',
|
|
+ queue->process, queue->remote_notify, queue->remove);
|
|
+
|
|
+ if (service->state->is_master) {
|
|
+ while (queue->remote_notify != queue->process) {
|
|
+ VCHIQ_BULK_T *bulk =
|
|
+ &queue->bulks[BULK_INDEX(queue->remote_notify)];
|
|
+ int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
|
|
+ VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
|
|
+ int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
|
|
+ service->remoteport);
|
|
+ VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
|
|
+ /* Only reply to non-dummy bulk requests */
|
|
+ if (bulk->remote_data) {
|
|
+ status = queue_message(service->state, NULL,
|
|
+ msgid, &element, 1, 4, 0);
|
|
+ if (status != VCHIQ_SUCCESS)
|
|
+ break;
|
|
+ }
|
|
+ queue->remote_notify++;
|
|
+ }
|
|
+ } else {
|
|
+ queue->remote_notify = queue->process;
|
|
+ }
|
|
+
|
|
+ if (status == VCHIQ_SUCCESS) {
|
|
+ while (queue->remove != queue->remote_notify) {
|
|
+ VCHIQ_BULK_T *bulk =
|
|
+ &queue->bulks[BULK_INDEX(queue->remove)];
|
|
+
|
|
+ /* Only generate callbacks for non-dummy bulk
|
|
+ ** requests, and non-terminated services */
|
|
+ if (bulk->data && service->instance) {
|
|
+ if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
|
|
+ if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
|
|
+ VCHIQ_SERVICE_STATS_INC(service,
|
|
+ bulk_tx_count);
|
|
+ VCHIQ_SERVICE_STATS_ADD(service,
|
|
+ bulk_tx_bytes,
|
|
+ bulk->actual);
|
|
+ } else {
|
|
+ VCHIQ_SERVICE_STATS_INC(service,
|
|
+ bulk_rx_count);
|
|
+ VCHIQ_SERVICE_STATS_ADD(service,
|
|
+ bulk_rx_bytes,
|
|
+ bulk->actual);
|
|
+ }
|
|
+ } else {
|
|
+ VCHIQ_SERVICE_STATS_INC(service,
|
|
+ bulk_aborted_count);
|
|
+ }
|
|
+ if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
|
|
+ struct bulk_waiter *waiter;
|
|
+ spin_lock(&bulk_waiter_spinlock);
|
|
+ waiter = bulk->userdata;
|
|
+ if (waiter) {
|
|
+ waiter->actual = bulk->actual;
|
|
+ up(&waiter->event);
|
|
+ }
|
|
+ spin_unlock(&bulk_waiter_spinlock);
|
|
+ } else if (bulk->mode ==
|
|
+ VCHIQ_BULK_MODE_CALLBACK) {
|
|
+ VCHIQ_REASON_T reason = (bulk->dir ==
|
|
+ VCHIQ_BULK_TRANSMIT) ?
|
|
+ ((bulk->actual ==
|
|
+ VCHIQ_BULK_ACTUAL_ABORTED) ?
|
|
+ VCHIQ_BULK_TRANSMIT_ABORTED :
|
|
+ VCHIQ_BULK_TRANSMIT_DONE) :
|
|
+ ((bulk->actual ==
|
|
+ VCHIQ_BULK_ACTUAL_ABORTED) ?
|
|
+ VCHIQ_BULK_RECEIVE_ABORTED :
|
|
+ VCHIQ_BULK_RECEIVE_DONE);
|
|
+ status = make_service_callback(service,
|
|
+ reason, NULL, bulk->userdata);
|
|
+ if (status == VCHIQ_RETRY)
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ queue->remove++;
|
|
+ up(&service->bulk_remove_event);
|
|
+ }
|
|
+ if (!retry_poll)
|
|
+ status = VCHIQ_SUCCESS;
|
|
+ }
|
|
+
|
|
+ if (status == VCHIQ_RETRY)
|
|
+ request_poll(service->state, service,
|
|
+ (queue == &service->bulk_tx) ?
|
|
+ VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/* Called by the slot handler thread */
|
|
+static void
|
|
+poll_services(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ int group, i;
|
|
+
|
|
+ for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
|
|
+ uint32_t flags;
|
|
+ flags = atomic_xchg(&state->poll_services[group], 0);
|
|
+ for (i = 0; flags; i++) {
|
|
+ if (flags & (1 << i)) {
|
|
+ VCHIQ_SERVICE_T *service =
|
|
+ find_service_by_port(state,
|
|
+ (group<<5) + i);
|
|
+ uint32_t service_flags;
|
|
+ flags &= ~(1 << i);
|
|
+ if (!service)
|
|
+ continue;
|
|
+ service_flags =
|
|
+ atomic_xchg(&service->poll_flags, 0);
|
|
+ if (service_flags &
|
|
+ (1 << VCHIQ_POLL_REMOVE)) {
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: ps - remove %d<->%d",
|
|
+ state->id, service->localport,
|
|
+ service->remoteport);
|
|
+
|
|
+ /* Make it look like a client, because
|
|
+ it must be removed and not left in
|
|
+ the LISTENING state. */
|
|
+ service->public_fourcc =
|
|
+ VCHIQ_FOURCC_INVALID;
|
|
+
|
|
+ if (vchiq_close_service_internal(
|
|
+ service, 0/*!close_recvd*/) !=
|
|
+ VCHIQ_SUCCESS)
|
|
+ request_poll(state, service,
|
|
+ VCHIQ_POLL_REMOVE);
|
|
+ } else if (service_flags &
|
|
+ (1 << VCHIQ_POLL_TERMINATE)) {
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: ps - terminate %d<->%d",
|
|
+ state->id, service->localport,
|
|
+ service->remoteport);
|
|
+ if (vchiq_close_service_internal(
|
|
+ service, 0/*!close_recvd*/) !=
|
|
+ VCHIQ_SUCCESS)
|
|
+ request_poll(state, service,
|
|
+ VCHIQ_POLL_TERMINATE);
|
|
+ }
|
|
+ if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
|
|
+ notify_bulks(service,
|
|
+ &service->bulk_tx,
|
|
+ 1/*retry_poll*/);
|
|
+ if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
|
|
+ notify_bulks(service,
|
|
+ &service->bulk_rx,
|
|
+ 1/*retry_poll*/);
|
|
+ unlock_service(service);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Called by the slot handler or application threads, holding the bulk mutex. */
|
|
+static int
|
|
+resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = service->state;
|
|
+ int resolved = 0;
|
|
+ int rc;
|
|
+
|
|
+ while ((queue->process != queue->local_insert) &&
|
|
+ (queue->process != queue->remote_insert)) {
|
|
+ VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
|
|
+
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%d: rb:%d %cx - li=%x ri=%x p=%x",
|
|
+ state->id, service->localport,
|
|
+ (queue == &service->bulk_tx) ? 't' : 'r',
|
|
+ queue->local_insert, queue->remote_insert,
|
|
+ queue->process);
|
|
+
|
|
+ WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
|
|
+ WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
|
|
+
|
|
+ rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
|
|
+ if (rc != 0)
|
|
+ break;
|
|
+
|
|
+ vchiq_transfer_bulk(bulk);
|
|
+ mutex_unlock(&state->bulk_transfer_mutex);
|
|
+
|
|
+ if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
|
|
+ const char *header = (queue == &service->bulk_tx) ?
|
|
+ "Send Bulk to" : "Recv Bulk from";
|
|
+ if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
|
|
+ vchiq_log_info(vchiq_core_msg_log_level,
|
|
+ "%s %c%c%c%c d:%d len:%d %x<->%x",
|
|
+ header,
|
|
+ VCHIQ_FOURCC_AS_4CHARS(
|
|
+ service->base.fourcc),
|
|
+ service->remoteport,
|
|
+ bulk->size,
|
|
+ (unsigned int)bulk->data,
|
|
+ (unsigned int)bulk->remote_data);
|
|
+ else
|
|
+ vchiq_log_info(vchiq_core_msg_log_level,
|
|
+ "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
|
|
+ " rx len:%d %x<->%x",
|
|
+ header,
|
|
+ VCHIQ_FOURCC_AS_4CHARS(
|
|
+ service->base.fourcc),
|
|
+ service->remoteport,
|
|
+ bulk->size,
|
|
+ bulk->remote_size,
|
|
+ (unsigned int)bulk->data,
|
|
+ (unsigned int)bulk->remote_data);
|
|
+ }
|
|
+
|
|
+ vchiq_complete_bulk(bulk);
|
|
+ queue->process++;
|
|
+ resolved++;
|
|
+ }
|
|
+ return resolved;
|
|
+}
|
|
+
|
|
+/* Called with the bulk_mutex held */
|
|
+static void
|
|
+abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
|
|
+{
|
|
+ int is_tx = (queue == &service->bulk_tx);
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%d: aob:%d %cx - li=%x ri=%x p=%x",
|
|
+ service->state->id, service->localport, is_tx ? 't' : 'r',
|
|
+ queue->local_insert, queue->remote_insert, queue->process);
|
|
+
|
|
+ WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
|
|
+ WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
|
|
+
|
|
+ while ((queue->process != queue->local_insert) ||
|
|
+ (queue->process != queue->remote_insert)) {
|
|
+ VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
|
|
+
|
|
+ if (queue->process == queue->remote_insert) {
|
|
+ /* fabricate a matching dummy bulk */
|
|
+ bulk->remote_data = NULL;
|
|
+ bulk->remote_size = 0;
|
|
+ queue->remote_insert++;
|
|
+ }
|
|
+
|
|
+ if (queue->process != queue->local_insert) {
|
|
+ vchiq_complete_bulk(bulk);
|
|
+
|
|
+ vchiq_log_info(vchiq_core_msg_log_level,
|
|
+ "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
|
|
+ "rx len:%d",
|
|
+ is_tx ? "Send Bulk to" : "Recv Bulk from",
|
|
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
|
|
+ service->remoteport,
|
|
+ bulk->size,
|
|
+ bulk->remote_size);
|
|
+ } else {
|
|
+ /* fabricate a matching dummy bulk */
|
|
+ bulk->data = NULL;
|
|
+ bulk->size = 0;
|
|
+ bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
|
|
+ bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
|
|
+ VCHIQ_BULK_RECEIVE;
|
|
+ queue->local_insert++;
|
|
+ }
|
|
+
|
|
+ queue->process++;
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Called from the slot handler thread */
|
|
+static void
|
|
+pause_bulks(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
|
|
+ WARN_ON_ONCE(1);
|
|
+ atomic_set(&pause_bulks_count, 1);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Block bulk transfers from all services */
|
|
+ mutex_lock(&state->bulk_transfer_mutex);
|
|
+}
|
|
+
|
|
+/* Called from the slot handler thread */
|
|
+static void
|
|
+resume_bulks(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ int i;
|
|
+ if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
|
|
+ WARN_ON_ONCE(1);
|
|
+ atomic_set(&pause_bulks_count, 0);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Allow bulk transfers from all services */
|
|
+ mutex_unlock(&state->bulk_transfer_mutex);
|
|
+
|
|
+ if (state->deferred_bulks == 0)
|
|
+ return;
|
|
+
|
|
+ /* Deal with any bulks which had to be deferred due to being in
|
|
+ * paused state. Don't try to match up to number of deferred bulks
|
|
+ * in case we've had something come and close the service in the
|
|
+ * interim - just process all bulk queues for all services */
|
|
+ vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
|
|
+ __func__, state->deferred_bulks);
|
|
+
|
|
+ for (i = 0; i < state->unused_service; i++) {
|
|
+ VCHIQ_SERVICE_T *service = state->services[i];
|
|
+ int resolved_rx = 0;
|
|
+ int resolved_tx = 0;
|
|
+ if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
|
|
+ continue;
|
|
+
|
|
+ mutex_lock(&service->bulk_mutex);
|
|
+ resolved_rx = resolve_bulks(service, &service->bulk_rx);
|
|
+ resolved_tx = resolve_bulks(service, &service->bulk_tx);
|
|
+ mutex_unlock(&service->bulk_mutex);
|
|
+ if (resolved_rx)
|
|
+ notify_bulks(service, &service->bulk_rx, 1);
|
|
+ if (resolved_tx)
|
|
+ notify_bulks(service, &service->bulk_tx, 1);
|
|
+ }
|
|
+ state->deferred_bulks = 0;
|
|
+}
|
|
+
|
|
+static int
|
|
+parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service = NULL;
|
|
+ int msgid, size;
|
|
+ int type;
|
|
+ unsigned int localport, remoteport;
|
|
+
|
|
+ msgid = header->msgid;
|
|
+ size = header->size;
|
|
+ type = VCHIQ_MSG_TYPE(msgid);
|
|
+ localport = VCHIQ_MSG_DSTPORT(msgid);
|
|
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
|
|
+ if (size >= sizeof(struct vchiq_open_payload)) {
|
|
+ const struct vchiq_open_payload *payload =
|
|
+ (struct vchiq_open_payload *)header->data;
|
|
+ unsigned int fourcc;
|
|
+
|
|
+ fourcc = payload->fourcc;
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: prs OPEN@%x (%d->'%c%c%c%c')",
|
|
+ state->id, (unsigned int)header,
|
|
+ localport,
|
|
+ VCHIQ_FOURCC_AS_4CHARS(fourcc));
|
|
+
|
|
+ service = get_listening_service(state, fourcc);
|
|
+
|
|
+ if (service) {
|
|
+ /* A matching service exists */
|
|
+ short version = payload->version;
|
|
+ short version_min = payload->version_min;
|
|
+ if ((service->version < version_min) ||
|
|
+ (version < service->version_min)) {
|
|
+ /* Version mismatch */
|
|
+ vchiq_loud_error_header();
|
|
+ vchiq_loud_error("%d: service %d (%c%c%c%c) "
|
|
+ "version mismatch - local (%d, min %d)"
|
|
+ " vs. remote (%d, min %d)",
|
|
+ state->id, service->localport,
|
|
+ VCHIQ_FOURCC_AS_4CHARS(fourcc),
|
|
+ service->version, service->version_min,
|
|
+ version, version_min);
|
|
+ vchiq_loud_error_footer();
|
|
+ unlock_service(service);
|
|
+ goto fail_open;
|
|
+ }
|
|
+ service->peer_version = version;
|
|
+
|
|
+ if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
|
|
+ struct vchiq_openack_payload ack_payload = {
|
|
+ service->version
|
|
+ };
|
|
+ VCHIQ_ELEMENT_T body = {
|
|
+ &ack_payload,
|
|
+ sizeof(ack_payload)
|
|
+ };
|
|
+
|
|
+ /* Acknowledge the OPEN */
|
|
+ if (service->sync) {
|
|
+ if (queue_message_sync(state, NULL,
|
|
+ VCHIQ_MAKE_MSG(
|
|
+ VCHIQ_MSG_OPENACK,
|
|
+ service->localport,
|
|
+ remoteport),
|
|
+ &body, 1, sizeof(ack_payload),
|
|
+ 0) == VCHIQ_RETRY)
|
|
+ goto bail_not_ready;
|
|
+ } else {
|
|
+ if (queue_message(state, NULL,
|
|
+ VCHIQ_MAKE_MSG(
|
|
+ VCHIQ_MSG_OPENACK,
|
|
+ service->localport,
|
|
+ remoteport),
|
|
+ &body, 1, sizeof(ack_payload),
|
|
+ 0) == VCHIQ_RETRY)
|
|
+ goto bail_not_ready;
|
|
+ }
|
|
+
|
|
+ /* The service is now open */
|
|
+ vchiq_set_service_state(service,
|
|
+ service->sync ? VCHIQ_SRVSTATE_OPENSYNC
|
|
+ : VCHIQ_SRVSTATE_OPEN);
|
|
+ }
|
|
+
|
|
+ service->remoteport = remoteport;
|
|
+ service->client_id = ((int *)header->data)[1];
|
|
+ if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
|
|
+ NULL, NULL) == VCHIQ_RETRY) {
|
|
+ /* Bail out if not ready */
|
|
+ service->remoteport = VCHIQ_PORT_FREE;
|
|
+ goto bail_not_ready;
|
|
+ }
|
|
+
|
|
+ /* Success - the message has been dealt with */
|
|
+ unlock_service(service);
|
|
+ return 1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+fail_open:
|
|
+ /* No available service, or an invalid request - send a CLOSE */
|
|
+ if (queue_message(state, NULL,
|
|
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
|
|
+ NULL, 0, 0, 0) == VCHIQ_RETRY)
|
|
+ goto bail_not_ready;
|
|
+
|
|
+ return 1;
|
|
+
|
|
+bail_not_ready:
|
|
+ unlock_service(service);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* Called by the slot handler thread */
|
|
+static void
|
|
+parse_rx_slots(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_SHARED_STATE_T *remote = state->remote;
|
|
+ VCHIQ_SERVICE_T *service = NULL;
|
|
+ int tx_pos;
|
|
+ DEBUG_INITIALISE(state->local)
|
|
+
|
|
+ tx_pos = remote->tx_pos;
|
|
+
|
|
+ while (state->rx_pos != tx_pos) {
|
|
+ VCHIQ_HEADER_T *header;
|
|
+ int msgid, size;
|
|
+ int type;
|
|
+ unsigned int localport, remoteport;
|
|
+
|
|
+ DEBUG_TRACE(PARSE_LINE);
|
|
+ if (!state->rx_data) {
|
|
+ int rx_index;
|
|
+ WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
|
|
+ rx_index = remote->slot_queue[
|
|
+ SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
|
|
+ VCHIQ_SLOT_QUEUE_MASK];
|
|
+ state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
|
|
+ rx_index);
|
|
+ state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
|
|
+
|
|
+ /* Initialise use_count to one, and increment
|
|
+ ** release_count at the end of the slot to avoid
|
|
+ ** releasing the slot prematurely. */
|
|
+ state->rx_info->use_count = 1;
|
|
+ state->rx_info->release_count = 0;
|
|
+ }
|
|
+
|
|
+ header = (VCHIQ_HEADER_T *)(state->rx_data +
|
|
+ (state->rx_pos & VCHIQ_SLOT_MASK));
|
|
+ DEBUG_VALUE(PARSE_HEADER, (int)header);
|
|
+ msgid = header->msgid;
|
|
+ DEBUG_VALUE(PARSE_MSGID, msgid);
|
|
+ size = header->size;
|
|
+ type = VCHIQ_MSG_TYPE(msgid);
|
|
+ localport = VCHIQ_MSG_DSTPORT(msgid);
|
|
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
|
|
+
|
|
+ if (type != VCHIQ_MSG_DATA)
|
|
+ VCHIQ_STATS_INC(state, ctrl_rx_count);
|
|
+
|
|
+ switch (type) {
|
|
+ case VCHIQ_MSG_OPENACK:
|
|
+ case VCHIQ_MSG_CLOSE:
|
|
+ case VCHIQ_MSG_DATA:
|
|
+ case VCHIQ_MSG_BULK_RX:
|
|
+ case VCHIQ_MSG_BULK_TX:
|
|
+ case VCHIQ_MSG_BULK_RX_DONE:
|
|
+ case VCHIQ_MSG_BULK_TX_DONE:
|
|
+ service = find_service_by_port(state, localport);
|
|
+ if ((!service || service->remoteport != remoteport) &&
|
|
+ (localport == 0) &&
|
|
+ (type == VCHIQ_MSG_CLOSE)) {
|
|
+ /* This could be a CLOSE from a client which
|
|
+ hadn't yet received the OPENACK - look for
|
|
+ the connected service */
|
|
+ if (service)
|
|
+ unlock_service(service);
|
|
+ service = get_connected_service(state,
|
|
+ remoteport);
|
|
+ if (service)
|
|
+ vchiq_log_warning(vchiq_core_log_level,
|
|
+ "%d: prs %s@%x (%d->%d) - "
|
|
+ "found connected service %d",
|
|
+ state->id, msg_type_str(type),
|
|
+ (unsigned int)header,
|
|
+ remoteport, localport,
|
|
+ service->localport);
|
|
+ }
|
|
+
|
|
+ if (!service) {
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "%d: prs %s@%x (%d->%d) - "
|
|
+ "invalid/closed service %d",
|
|
+ state->id, msg_type_str(type),
|
|
+ (unsigned int)header,
|
|
+ remoteport, localport, localport);
|
|
+ goto skip_message;
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
|
|
+ int svc_fourcc;
|
|
+
|
|
+ svc_fourcc = service
|
|
+ ? service->base.fourcc
|
|
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
|
|
+ vchiq_log_info(vchiq_core_msg_log_level,
|
|
+ "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
|
|
+ "len:%d",
|
|
+ msg_type_str(type), type,
|
|
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
|
|
+ remoteport, localport, size);
|
|
+ if (size > 0)
|
|
+ vchiq_log_dump_mem("Rcvd", 0, header->data,
|
|
+ min(64, size));
|
|
+ }
|
|
+
|
|
+ if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
|
|
+ > VCHIQ_SLOT_SIZE) {
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "header %x (msgid %x) - size %x too big for "
|
|
+ "slot",
|
|
+ (unsigned int)header, (unsigned int)msgid,
|
|
+ (unsigned int)size);
|
|
+ WARN(1, "oversized for slot\n");
|
|
+ }
|
|
+
|
|
+ switch (type) {
|
|
+ case VCHIQ_MSG_OPEN:
|
|
+ WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
|
|
+ if (!parse_open(state, header))
|
|
+ goto bail_not_ready;
|
|
+ break;
|
|
+ case VCHIQ_MSG_OPENACK:
|
|
+ if (size >= sizeof(struct vchiq_openack_payload)) {
|
|
+ const struct vchiq_openack_payload *payload =
|
|
+ (struct vchiq_openack_payload *)
|
|
+ header->data;
|
|
+ service->peer_version = payload->version;
|
|
+ }
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
|
|
+ state->id, (unsigned int)header, size,
|
|
+ remoteport, localport, service->peer_version);
|
|
+ if (service->srvstate ==
|
|
+ VCHIQ_SRVSTATE_OPENING) {
|
|
+ service->remoteport = remoteport;
|
|
+ vchiq_set_service_state(service,
|
|
+ VCHIQ_SRVSTATE_OPEN);
|
|
+ up(&service->remove_event);
|
|
+ } else
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "OPENACK received in state %s",
|
|
+ srvstate_names[service->srvstate]);
|
|
+ break;
|
|
+ case VCHIQ_MSG_CLOSE:
|
|
+ WARN_ON(size != 0); /* There should be no data */
|
|
+
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: prs CLOSE@%x (%d->%d)",
|
|
+ state->id, (unsigned int)header,
|
|
+ remoteport, localport);
|
|
+
|
|
+ mark_service_closing_internal(service, 1);
|
|
+
|
|
+ if (vchiq_close_service_internal(service,
|
|
+ 1/*close_recvd*/) == VCHIQ_RETRY)
|
|
+ goto bail_not_ready;
|
|
+
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "Close Service %c%c%c%c s:%u d:%d",
|
|
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
|
|
+ service->localport,
|
|
+ service->remoteport);
|
|
+ break;
|
|
+ case VCHIQ_MSG_DATA:
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%d: prs DATA@%x,%x (%d->%d)",
|
|
+ state->id, (unsigned int)header, size,
|
|
+ remoteport, localport);
|
|
+
|
|
+ if ((service->remoteport == remoteport)
|
|
+ && (service->srvstate ==
|
|
+ VCHIQ_SRVSTATE_OPEN)) {
|
|
+ header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
|
|
+ claim_slot(state->rx_info);
|
|
+ DEBUG_TRACE(PARSE_LINE);
|
|
+ if (make_service_callback(service,
|
|
+ VCHIQ_MESSAGE_AVAILABLE, header,
|
|
+ NULL) == VCHIQ_RETRY) {
|
|
+ DEBUG_TRACE(PARSE_LINE);
|
|
+ goto bail_not_ready;
|
|
+ }
|
|
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
|
|
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
|
|
+ size);
|
|
+ } else {
|
|
+ VCHIQ_STATS_INC(state, error_count);
|
|
+ }
|
|
+ break;
|
|
+ case VCHIQ_MSG_CONNECT:
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: prs CONNECT@%x",
|
|
+ state->id, (unsigned int)header);
|
|
+ up(&state->connect);
|
|
+ break;
|
|
+ case VCHIQ_MSG_BULK_RX:
|
|
+ case VCHIQ_MSG_BULK_TX: {
|
|
+ VCHIQ_BULK_QUEUE_T *queue;
|
|
+ WARN_ON(!state->is_master);
|
|
+ queue = (type == VCHIQ_MSG_BULK_RX) ?
|
|
+ &service->bulk_tx : &service->bulk_rx;
|
|
+ if ((service->remoteport == remoteport)
|
|
+ && (service->srvstate ==
|
|
+ VCHIQ_SRVSTATE_OPEN)) {
|
|
+ VCHIQ_BULK_T *bulk;
|
|
+ int resolved = 0;
|
|
+
|
|
+ DEBUG_TRACE(PARSE_LINE);
|
|
+ if (mutex_lock_interruptible(
|
|
+ &service->bulk_mutex) != 0) {
|
|
+ DEBUG_TRACE(PARSE_LINE);
|
|
+ goto bail_not_ready;
|
|
+ }
|
|
+
|
|
+ WARN_ON(!(queue->remote_insert < queue->remove +
|
|
+ VCHIQ_NUM_SERVICE_BULKS));
|
|
+ bulk = &queue->bulks[
|
|
+ BULK_INDEX(queue->remote_insert)];
|
|
+ bulk->remote_data =
|
|
+ (void *)((int *)header->data)[0];
|
|
+ bulk->remote_size = ((int *)header->data)[1];
|
|
+ wmb();
|
|
+
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: prs %s@%x (%d->%d) %x@%x",
|
|
+ state->id, msg_type_str(type),
|
|
+ (unsigned int)header,
|
|
+ remoteport, localport,
|
|
+ bulk->remote_size,
|
|
+ (unsigned int)bulk->remote_data);
|
|
+
|
|
+ queue->remote_insert++;
|
|
+
|
|
+ if (atomic_read(&pause_bulks_count)) {
|
|
+ state->deferred_bulks++;
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%s: deferring bulk (%d)",
|
|
+ __func__,
|
|
+ state->deferred_bulks);
|
|
+ if (state->conn_state !=
|
|
+ VCHIQ_CONNSTATE_PAUSE_SENT)
|
|
+ vchiq_log_error(
|
|
+ vchiq_core_log_level,
|
|
+ "%s: bulks paused in "
|
|
+ "unexpected state %s",
|
|
+ __func__,
|
|
+ conn_state_names[
|
|
+ state->conn_state]);
|
|
+ } else if (state->conn_state ==
|
|
+ VCHIQ_CONNSTATE_CONNECTED) {
|
|
+ DEBUG_TRACE(PARSE_LINE);
|
|
+ resolved = resolve_bulks(service,
|
|
+ queue);
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&service->bulk_mutex);
|
|
+ if (resolved)
|
|
+ notify_bulks(service, queue,
|
|
+ 1/*retry_poll*/);
|
|
+ }
|
|
+ } break;
|
|
+ case VCHIQ_MSG_BULK_RX_DONE:
|
|
+ case VCHIQ_MSG_BULK_TX_DONE:
|
|
+ WARN_ON(state->is_master);
|
|
+ if ((service->remoteport == remoteport)
|
|
+ && (service->srvstate !=
|
|
+ VCHIQ_SRVSTATE_FREE)) {
|
|
+ VCHIQ_BULK_QUEUE_T *queue;
|
|
+ VCHIQ_BULK_T *bulk;
|
|
+
|
|
+ queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
|
|
+ &service->bulk_rx : &service->bulk_tx;
|
|
+
|
|
+ DEBUG_TRACE(PARSE_LINE);
|
|
+ if (mutex_lock_interruptible(
|
|
+ &service->bulk_mutex) != 0) {
|
|
+ DEBUG_TRACE(PARSE_LINE);
|
|
+ goto bail_not_ready;
|
|
+ }
|
|
+ if ((int)(queue->remote_insert -
|
|
+ queue->local_insert) >= 0) {
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "%d: prs %s@%x (%d->%d) "
|
|
+ "unexpected (ri=%d,li=%d)",
|
|
+ state->id, msg_type_str(type),
|
|
+ (unsigned int)header,
|
|
+ remoteport, localport,
|
|
+ queue->remote_insert,
|
|
+ queue->local_insert);
|
|
+ mutex_unlock(&service->bulk_mutex);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ BUG_ON(queue->process == queue->local_insert);
|
|
+ BUG_ON(queue->process != queue->remote_insert);
|
|
+
|
|
+ bulk = &queue->bulks[
|
|
+ BULK_INDEX(queue->remote_insert)];
|
|
+ bulk->actual = *(int *)header->data;
|
|
+ queue->remote_insert++;
|
|
+
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: prs %s@%x (%d->%d) %x@%x",
|
|
+ state->id, msg_type_str(type),
|
|
+ (unsigned int)header,
|
|
+ remoteport, localport,
|
|
+ bulk->actual, (unsigned int)bulk->data);
|
|
+
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%d: prs:%d %cx li=%x ri=%x p=%x",
|
|
+ state->id, localport,
|
|
+ (type == VCHIQ_MSG_BULK_RX_DONE) ?
|
|
+ 'r' : 't',
|
|
+ queue->local_insert,
|
|
+ queue->remote_insert, queue->process);
|
|
+
|
|
+ DEBUG_TRACE(PARSE_LINE);
|
|
+ WARN_ON(queue->process == queue->local_insert);
|
|
+ vchiq_complete_bulk(bulk);
|
|
+ queue->process++;
|
|
+ mutex_unlock(&service->bulk_mutex);
|
|
+ DEBUG_TRACE(PARSE_LINE);
|
|
+ notify_bulks(service, queue, 1/*retry_poll*/);
|
|
+ DEBUG_TRACE(PARSE_LINE);
|
|
+ }
|
|
+ break;
|
|
+ case VCHIQ_MSG_PADDING:
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%d: prs PADDING@%x,%x",
|
|
+ state->id, (unsigned int)header, size);
|
|
+ break;
|
|
+ case VCHIQ_MSG_PAUSE:
|
|
+ /* If initiated, signal the application thread */
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%d: prs PAUSE@%x,%x",
|
|
+ state->id, (unsigned int)header, size);
|
|
+ if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "%d: PAUSE received in state PAUSED",
|
|
+ state->id);
|
|
+ break;
|
|
+ }
|
|
+ if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
|
|
+ /* Send a PAUSE in response */
|
|
+ if (queue_message(state, NULL,
|
|
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
|
|
+ NULL, 0, 0, 0) == VCHIQ_RETRY)
|
|
+ goto bail_not_ready;
|
|
+ if (state->is_master)
|
|
+ pause_bulks(state);
|
|
+ }
|
|
+ /* At this point slot_mutex is held */
|
|
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
|
|
+ vchiq_platform_paused(state);
|
|
+ break;
|
|
+ case VCHIQ_MSG_RESUME:
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%d: prs RESUME@%x,%x",
|
|
+ state->id, (unsigned int)header, size);
|
|
+ /* Release the slot mutex */
|
|
+ mutex_unlock(&state->slot_mutex);
|
|
+ if (state->is_master)
|
|
+ resume_bulks(state);
|
|
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
|
|
+ vchiq_platform_resumed(state);
|
|
+ break;
|
|
+
|
|
+ case VCHIQ_MSG_REMOTE_USE:
|
|
+ vchiq_on_remote_use(state);
|
|
+ break;
|
|
+ case VCHIQ_MSG_REMOTE_RELEASE:
|
|
+ vchiq_on_remote_release(state);
|
|
+ break;
|
|
+ case VCHIQ_MSG_REMOTE_USE_ACTIVE:
|
|
+ vchiq_on_remote_use_active(state);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "%d: prs invalid msgid %x@%x,%x",
|
|
+ state->id, msgid, (unsigned int)header, size);
|
|
+ WARN(1, "invalid message\n");
|
|
+ break;
|
|
+ }
|
|
+
|
|
+skip_message:
|
|
+ if (service) {
|
|
+ unlock_service(service);
|
|
+ service = NULL;
|
|
+ }
|
|
+
|
|
+ state->rx_pos += calc_stride(size);
|
|
+
|
|
+ DEBUG_TRACE(PARSE_LINE);
|
|
+ /* Perform some housekeeping when the end of the slot is
|
|
+ ** reached. */
|
|
+ if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
|
|
+ /* Remove the extra reference count. */
|
|
+ release_slot(state, state->rx_info, NULL, NULL);
|
|
+ state->rx_data = NULL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+bail_not_ready:
|
|
+ if (service)
|
|
+ unlock_service(service);
|
|
+}
|
|
+
|
|
+/* Called by the slot handler thread */
|
|
+static int
|
|
+slot_handler_func(void *v)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
|
|
+ VCHIQ_SHARED_STATE_T *local = state->local;
|
|
+ DEBUG_INITIALISE(local)
|
|
+
|
|
+ while (1) {
|
|
+ DEBUG_COUNT(SLOT_HANDLER_COUNT);
|
|
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
|
|
+ remote_event_wait(&local->trigger);
|
|
+
|
|
+ rmb();
|
|
+
|
|
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
|
|
+ if (state->poll_needed) {
|
|
+ /* Check if we need to suspend - may change our
|
|
+ * conn_state */
|
|
+ vchiq_platform_check_suspend(state);
|
|
+
|
|
+ state->poll_needed = 0;
|
|
+
|
|
+ /* Handle service polling and other rare conditions here
|
|
+ ** out of the mainline code */
|
|
+ switch (state->conn_state) {
|
|
+ case VCHIQ_CONNSTATE_CONNECTED:
|
|
+ /* Poll the services as requested */
|
|
+ poll_services(state);
|
|
+ break;
|
|
+
|
|
+ case VCHIQ_CONNSTATE_PAUSING:
|
|
+ if (state->is_master)
|
|
+ pause_bulks(state);
|
|
+ if (queue_message(state, NULL,
|
|
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
|
|
+ NULL, 0, 0, 0) != VCHIQ_RETRY) {
|
|
+ vchiq_set_conn_state(state,
|
|
+ VCHIQ_CONNSTATE_PAUSE_SENT);
|
|
+ } else {
|
|
+ if (state->is_master)
|
|
+ resume_bulks(state);
|
|
+ /* Retry later */
|
|
+ state->poll_needed = 1;
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case VCHIQ_CONNSTATE_PAUSED:
|
|
+ vchiq_platform_resume(state);
|
|
+ break;
|
|
+
|
|
+ case VCHIQ_CONNSTATE_RESUMING:
|
|
+ if (queue_message(state, NULL,
|
|
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
|
|
+ NULL, 0, 0, 0) != VCHIQ_RETRY) {
|
|
+ if (state->is_master)
|
|
+ resume_bulks(state);
|
|
+ vchiq_set_conn_state(state,
|
|
+ VCHIQ_CONNSTATE_CONNECTED);
|
|
+ vchiq_platform_resumed(state);
|
|
+ } else {
|
|
+ /* This should really be impossible,
|
|
+ ** since the PAUSE should have flushed
|
|
+ ** through outstanding messages. */
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "Failed to send RESUME "
|
|
+ "message");
|
|
+ BUG();
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
|
|
+ case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
|
|
+ vchiq_platform_handle_timeout(state);
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+
|
|
+ }
|
|
+
|
|
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
|
|
+ parse_rx_slots(state);
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+/* Called by the recycle thread */
|
|
+static int
|
|
+recycle_func(void *v)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
|
|
+ VCHIQ_SHARED_STATE_T *local = state->local;
|
|
+
|
|
+ while (1) {
|
|
+ remote_event_wait(&local->recycle);
|
|
+
|
|
+ process_free_queue(state);
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+/* Called by the sync thread */
|
|
+static int
|
|
+sync_func(void *v)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
|
|
+ VCHIQ_SHARED_STATE_T *local = state->local;
|
|
+ VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
|
|
+ state->remote->slot_sync);
|
|
+
|
|
+ while (1) {
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+ int msgid, size;
|
|
+ int type;
|
|
+ unsigned int localport, remoteport;
|
|
+
|
|
+ remote_event_wait(&local->sync_trigger);
|
|
+
|
|
+ rmb();
|
|
+
|
|
+ msgid = header->msgid;
|
|
+ size = header->size;
|
|
+ type = VCHIQ_MSG_TYPE(msgid);
|
|
+ localport = VCHIQ_MSG_DSTPORT(msgid);
|
|
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
|
|
+
|
|
+ service = find_service_by_port(state, localport);
|
|
+
|
|
+ if (!service) {
|
|
+ vchiq_log_error(vchiq_sync_log_level,
|
|
+ "%d: sf %s@%x (%d->%d) - "
|
|
+ "invalid/closed service %d",
|
|
+ state->id, msg_type_str(type),
|
|
+ (unsigned int)header,
|
|
+ remoteport, localport, localport);
|
|
+ release_message_sync(state, header);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
|
|
+ int svc_fourcc;
|
|
+
|
|
+ svc_fourcc = service
|
|
+ ? service->base.fourcc
|
|
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
|
|
+ vchiq_log_trace(vchiq_sync_log_level,
|
|
+ "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
|
|
+ msg_type_str(type),
|
|
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
|
|
+ remoteport, localport, size);
|
|
+ if (size > 0)
|
|
+ vchiq_log_dump_mem("Rcvd", 0, header->data,
|
|
+ min(64, size));
|
|
+ }
|
|
+
|
|
+ switch (type) {
|
|
+ case VCHIQ_MSG_OPENACK:
|
|
+ if (size >= sizeof(struct vchiq_openack_payload)) {
|
|
+ const struct vchiq_openack_payload *payload =
|
|
+ (struct vchiq_openack_payload *)
|
|
+ header->data;
|
|
+ service->peer_version = payload->version;
|
|
+ }
|
|
+ vchiq_log_info(vchiq_sync_log_level,
|
|
+ "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
|
|
+ state->id, (unsigned int)header, size,
|
|
+ remoteport, localport, service->peer_version);
|
|
+ if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
|
|
+ service->remoteport = remoteport;
|
|
+ vchiq_set_service_state(service,
|
|
+ VCHIQ_SRVSTATE_OPENSYNC);
|
|
+ up(&service->remove_event);
|
|
+ }
|
|
+ release_message_sync(state, header);
|
|
+ break;
|
|
+
|
|
+ case VCHIQ_MSG_DATA:
|
|
+ vchiq_log_trace(vchiq_sync_log_level,
|
|
+ "%d: sf DATA@%x,%x (%d->%d)",
|
|
+ state->id, (unsigned int)header, size,
|
|
+ remoteport, localport);
|
|
+
|
|
+ if ((service->remoteport == remoteport) &&
|
|
+ (service->srvstate ==
|
|
+ VCHIQ_SRVSTATE_OPENSYNC)) {
|
|
+ if (make_service_callback(service,
|
|
+ VCHIQ_MESSAGE_AVAILABLE, header,
|
|
+ NULL) == VCHIQ_RETRY)
|
|
+ vchiq_log_error(vchiq_sync_log_level,
|
|
+ "synchronous callback to "
|
|
+ "service %d returns "
|
|
+ "VCHIQ_RETRY",
|
|
+ localport);
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ vchiq_log_error(vchiq_sync_log_level,
|
|
+ "%d: sf unexpected msgid %x@%x,%x",
|
|
+ state->id, msgid, (unsigned int)header, size);
|
|
+ release_message_sync(state, header);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ unlock_service(service);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+static void
|
|
+init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
|
|
+{
|
|
+ queue->local_insert = 0;
|
|
+ queue->remote_insert = 0;
|
|
+ queue->process = 0;
|
|
+ queue->remote_notify = 0;
|
|
+ queue->remove = 0;
|
|
+}
|
|
+
|
|
+
|
|
+inline const char *
|
|
+get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
|
|
+{
|
|
+ return conn_state_names[conn_state];
|
|
+}
|
|
+
|
|
+
|
|
+VCHIQ_SLOT_ZERO_T *
|
|
+vchiq_init_slots(void *mem_base, int mem_size)
|
|
+{
|
|
+ int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
|
|
+ VCHIQ_SLOT_ZERO_T *slot_zero =
|
|
+ (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
|
|
+ int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
|
|
+ int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
|
|
+
|
|
+ /* Ensure there is enough memory to run an absolutely minimum system */
|
|
+ num_slots -= first_data_slot;
|
|
+
|
|
+ if (num_slots < 4) {
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "vchiq_init_slots - insufficient memory %x bytes",
|
|
+ mem_size);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
|
|
+
|
|
+ slot_zero->magic = VCHIQ_MAGIC;
|
|
+ slot_zero->version = VCHIQ_VERSION;
|
|
+ slot_zero->version_min = VCHIQ_VERSION_MIN;
|
|
+ slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
|
|
+ slot_zero->slot_size = VCHIQ_SLOT_SIZE;
|
|
+ slot_zero->max_slots = VCHIQ_MAX_SLOTS;
|
|
+ slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
|
|
+
|
|
+ slot_zero->master.slot_sync = first_data_slot;
|
|
+ slot_zero->master.slot_first = first_data_slot + 1;
|
|
+ slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
|
|
+ slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
|
|
+ slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
|
|
+ slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
|
|
+
|
|
+ return slot_zero;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
|
|
+ int is_master)
|
|
+{
|
|
+ VCHIQ_SHARED_STATE_T *local;
|
|
+ VCHIQ_SHARED_STATE_T *remote;
|
|
+ VCHIQ_STATUS_T status;
|
|
+ char threadname[10];
|
|
+ static int id;
|
|
+ int i;
|
|
+
|
|
+ vchiq_log_warning(vchiq_core_log_level,
|
|
+ "%s: slot_zero = 0x%08lx, is_master = %d",
|
|
+ __func__, (unsigned long)slot_zero, is_master);
|
|
+
|
|
+ /* Check the input configuration */
|
|
+
|
|
+ if (slot_zero->magic != VCHIQ_MAGIC) {
|
|
+ vchiq_loud_error_header();
|
|
+ vchiq_loud_error("Invalid VCHIQ magic value found.");
|
|
+ vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
|
|
+ (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
|
|
+ vchiq_loud_error_footer();
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+
|
|
+ if (slot_zero->version < VCHIQ_VERSION_MIN) {
|
|
+ vchiq_loud_error_header();
|
|
+ vchiq_loud_error("Incompatible VCHIQ versions found.");
|
|
+ vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
|
|
+ "(minimum %d)",
|
|
+ (unsigned int)slot_zero, slot_zero->version,
|
|
+ VCHIQ_VERSION_MIN);
|
|
+ vchiq_loud_error("Restart with a newer VideoCore image.");
|
|
+ vchiq_loud_error_footer();
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+
|
|
+ if (VCHIQ_VERSION < slot_zero->version_min) {
|
|
+ vchiq_loud_error_header();
|
|
+ vchiq_loud_error("Incompatible VCHIQ versions found.");
|
|
+ vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
|
|
+ "minimum %d)",
|
|
+ (unsigned int)slot_zero, VCHIQ_VERSION,
|
|
+ slot_zero->version_min);
|
|
+ vchiq_loud_error("Restart with a newer kernel.");
|
|
+ vchiq_loud_error_footer();
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+
|
|
+ if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
|
|
+ (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
|
|
+ (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
|
|
+ (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
|
|
+ vchiq_loud_error_header();
|
|
+ if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
|
|
+ vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
|
|
+ "(expected %x)",
|
|
+ (unsigned int)slot_zero,
|
|
+ slot_zero->slot_zero_size,
|
|
+ sizeof(VCHIQ_SLOT_ZERO_T));
|
|
+ if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
|
|
+ vchiq_loud_error("slot_zero=%x: slot_size=%d "
|
|
+				"(expected %d)",
|
|
+ (unsigned int)slot_zero, slot_zero->slot_size,
|
|
+ VCHIQ_SLOT_SIZE);
|
|
+ if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
|
|
+ vchiq_loud_error("slot_zero=%x: max_slots=%d "
|
|
+ "(expected %d)",
|
|
+ (unsigned int)slot_zero, slot_zero->max_slots,
|
|
+ VCHIQ_MAX_SLOTS);
|
|
+ if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
|
|
+ vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
|
|
+ "(expected %d)",
|
|
+ (unsigned int)slot_zero,
|
|
+ slot_zero->max_slots_per_side,
|
|
+ VCHIQ_MAX_SLOTS_PER_SIDE);
|
|
+ vchiq_loud_error_footer();
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+
|
|
+ if (is_master) {
|
|
+ local = &slot_zero->master;
|
|
+ remote = &slot_zero->slave;
|
|
+ } else {
|
|
+ local = &slot_zero->slave;
|
|
+ remote = &slot_zero->master;
|
|
+ }
|
|
+
|
|
+ if (local->initialised) {
|
|
+ vchiq_loud_error_header();
|
|
+ if (remote->initialised)
|
|
+ vchiq_loud_error("local state has already been "
|
|
+ "initialised");
|
|
+ else
|
|
+ vchiq_loud_error("master/slave mismatch - two %ss",
|
|
+ is_master ? "master" : "slave");
|
|
+ vchiq_loud_error_footer();
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+
|
|
+ memset(state, 0, sizeof(VCHIQ_STATE_T));
|
|
+
|
|
+ state->id = id++;
|
|
+ state->is_master = is_master;
|
|
+
|
|
+ /*
|
|
+ initialize shared state pointers
|
|
+ */
|
|
+
|
|
+ state->local = local;
|
|
+ state->remote = remote;
|
|
+ state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
|
|
+
|
|
+ /*
|
|
+ initialize events and mutexes
|
|
+ */
|
|
+
|
|
+ sema_init(&state->connect, 0);
|
|
+ mutex_init(&state->mutex);
|
|
+ sema_init(&state->trigger_event, 0);
|
|
+ sema_init(&state->recycle_event, 0);
|
|
+ sema_init(&state->sync_trigger_event, 0);
|
|
+ sema_init(&state->sync_release_event, 0);
|
|
+
|
|
+ mutex_init(&state->slot_mutex);
|
|
+ mutex_init(&state->recycle_mutex);
|
|
+ mutex_init(&state->sync_mutex);
|
|
+ mutex_init(&state->bulk_transfer_mutex);
|
|
+
|
|
+ sema_init(&state->slot_available_event, 0);
|
|
+ sema_init(&state->slot_remove_event, 0);
|
|
+ sema_init(&state->data_quota_event, 0);
|
|
+
|
|
+ state->slot_queue_available = 0;
|
|
+
|
|
+ for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
|
|
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
|
|
+ &state->service_quotas[i];
|
|
+ sema_init(&service_quota->quota_event, 0);
|
|
+ }
|
|
+
|
|
+ for (i = local->slot_first; i <= local->slot_last; i++) {
|
|
+ local->slot_queue[state->slot_queue_available++] = i;
|
|
+ up(&state->slot_available_event);
|
|
+ }
|
|
+
|
|
+ state->default_slot_quota = state->slot_queue_available/2;
|
|
+ state->default_message_quota =
|
|
+ min((unsigned short)(state->default_slot_quota * 256),
|
|
+ (unsigned short)~0);
|
|
+
|
|
+ state->previous_data_index = -1;
|
|
+ state->data_use_count = 0;
|
|
+ state->data_quota = state->slot_queue_available - 1;
|
|
+
|
|
+ local->trigger.event = &state->trigger_event;
|
|
+ remote_event_create(&local->trigger);
|
|
+ local->tx_pos = 0;
|
|
+
|
|
+ local->recycle.event = &state->recycle_event;
|
|
+ remote_event_create(&local->recycle);
|
|
+ local->slot_queue_recycle = state->slot_queue_available;
|
|
+
|
|
+ local->sync_trigger.event = &state->sync_trigger_event;
|
|
+ remote_event_create(&local->sync_trigger);
|
|
+
|
|
+ local->sync_release.event = &state->sync_release_event;
|
|
+ remote_event_create(&local->sync_release);
|
|
+
|
|
+ /* At start-of-day, the slot is empty and available */
|
|
+ ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
|
|
+ = VCHIQ_MSGID_PADDING;
|
|
+ remote_event_signal_local(&local->sync_release);
|
|
+
|
|
+ local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
|
|
+
|
|
+ status = vchiq_platform_init_state(state);
|
|
+
|
|
+ /*
|
|
+ bring up slot handler thread
|
|
+ */
|
|
+ snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
|
|
+ state->slot_handler_thread = kthread_create(&slot_handler_func,
|
|
+ (void *)state,
|
|
+ threadname);
|
|
+
|
|
+ if (state->slot_handler_thread == NULL) {
|
|
+ vchiq_loud_error_header();
|
|
+ vchiq_loud_error("couldn't create thread %s", threadname);
|
|
+ vchiq_loud_error_footer();
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+ set_user_nice(state->slot_handler_thread, -19);
|
|
+ wake_up_process(state->slot_handler_thread);
|
|
+
|
|
+ snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
|
|
+ state->recycle_thread = kthread_create(&recycle_func,
|
|
+ (void *)state,
|
|
+ threadname);
|
|
+ if (state->recycle_thread == NULL) {
|
|
+ vchiq_loud_error_header();
|
|
+ vchiq_loud_error("couldn't create thread %s", threadname);
|
|
+ vchiq_loud_error_footer();
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+ set_user_nice(state->recycle_thread, -19);
|
|
+ wake_up_process(state->recycle_thread);
|
|
+
|
|
+ snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
|
|
+ state->sync_thread = kthread_create(&sync_func,
|
|
+ (void *)state,
|
|
+ threadname);
|
|
+ if (state->sync_thread == NULL) {
|
|
+ vchiq_loud_error_header();
|
|
+ vchiq_loud_error("couldn't create thread %s", threadname);
|
|
+ vchiq_loud_error_footer();
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+ set_user_nice(state->sync_thread, -20);
|
|
+ wake_up_process(state->sync_thread);
|
|
+
|
|
+ BUG_ON(state->id >= VCHIQ_MAX_STATES);
|
|
+ vchiq_states[state->id] = state;
|
|
+
|
|
+ /* Indicate readiness to the other side */
|
|
+ local->initialised = 1;
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/* Called from application thread when a client or server service is created. */
|
|
+VCHIQ_SERVICE_T *
|
|
+vchiq_add_service_internal(VCHIQ_STATE_T *state,
|
|
+ const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
|
|
+ VCHIQ_INSTANCE_T instance)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+
|
|
+ service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
|
|
+ if (service) {
|
|
+ service->base.fourcc = params->fourcc;
|
|
+ service->base.callback = params->callback;
|
|
+ service->base.userdata = params->userdata;
|
|
+ service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
|
|
+ service->ref_count = 1;
|
|
+ service->srvstate = VCHIQ_SRVSTATE_FREE;
|
|
+ service->localport = VCHIQ_PORT_FREE;
|
|
+ service->remoteport = VCHIQ_PORT_FREE;
|
|
+
|
|
+ service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
|
|
+ VCHIQ_FOURCC_INVALID : params->fourcc;
|
|
+ service->client_id = 0;
|
|
+ service->auto_close = 1;
|
|
+ service->sync = 0;
|
|
+ service->closing = 0;
|
|
+ atomic_set(&service->poll_flags, 0);
|
|
+ service->version = params->version;
|
|
+ service->version_min = params->version_min;
|
|
+ service->state = state;
|
|
+ service->instance = instance;
|
|
+ service->service_use_count = 0;
|
|
+ init_bulk_queue(&service->bulk_tx);
|
|
+ init_bulk_queue(&service->bulk_rx);
|
|
+ sema_init(&service->remove_event, 0);
|
|
+ sema_init(&service->bulk_remove_event, 0);
|
|
+ mutex_init(&service->bulk_mutex);
|
|
+ memset(&service->stats, 0, sizeof(service->stats));
|
|
+ } else {
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "Out of memory");
|
|
+ }
|
|
+
|
|
+ if (service) {
|
|
+ VCHIQ_SERVICE_T **pservice = NULL;
|
|
+ int i;
|
|
+
|
|
+ /* Although it is perfectly possible to use service_spinlock
|
|
+ ** to protect the creation of services, it is overkill as it
|
|
+ ** disables interrupts while the array is searched.
|
|
+ ** The only danger is of another thread trying to create a
|
|
+ ** service - service deletion is safe.
|
|
+ ** Therefore it is preferable to use state->mutex which,
|
|
+ ** although slower to claim, doesn't block interrupts while
|
|
+ ** it is held.
|
|
+ */
|
|
+
|
|
+ mutex_lock(&state->mutex);
|
|
+
|
|
+ /* Prepare to use a previously unused service */
|
|
+ if (state->unused_service < VCHIQ_MAX_SERVICES)
|
|
+ pservice = &state->services[state->unused_service];
|
|
+
|
|
+ if (srvstate == VCHIQ_SRVSTATE_OPENING) {
|
|
+ for (i = 0; i < state->unused_service; i++) {
|
|
+ VCHIQ_SERVICE_T *srv = state->services[i];
|
|
+ if (!srv) {
|
|
+ pservice = &state->services[i];
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ for (i = (state->unused_service - 1); i >= 0; i--) {
|
|
+ VCHIQ_SERVICE_T *srv = state->services[i];
|
|
+ if (!srv)
|
|
+ pservice = &state->services[i];
|
|
+ else if ((srv->public_fourcc == params->fourcc)
|
|
+ && ((srv->instance != instance) ||
|
|
+ (srv->base.callback !=
|
|
+ params->callback))) {
|
|
+ /* There is another server using this
|
|
+ ** fourcc which doesn't match. */
|
|
+ pservice = NULL;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (pservice) {
|
|
+ service->localport = (pservice - state->services);
|
|
+ if (!handle_seq)
|
|
+ handle_seq = VCHIQ_MAX_STATES *
|
|
+ VCHIQ_MAX_SERVICES;
|
|
+ service->handle = handle_seq |
|
|
+ (state->id * VCHIQ_MAX_SERVICES) |
|
|
+ service->localport;
|
|
+ handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
|
|
+ *pservice = service;
|
|
+ if (pservice == &state->services[state->unused_service])
|
|
+ state->unused_service++;
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&state->mutex);
|
|
+
|
|
+ if (!pservice) {
|
|
+ kfree(service);
|
|
+ service = NULL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (service) {
|
|
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
|
|
+ &state->service_quotas[service->localport];
|
|
+ service_quota->slot_quota = state->default_slot_quota;
|
|
+ service_quota->message_quota = state->default_message_quota;
|
|
+ if (service_quota->slot_use_count == 0)
|
|
+ service_quota->previous_tx_index =
|
|
+ SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
|
|
+ - 1;
|
|
+
|
|
+ /* Bring this service online */
|
|
+ vchiq_set_service_state(service, srvstate);
|
|
+
|
|
+ vchiq_log_info(vchiq_core_msg_log_level,
|
|
+ "%s Service %c%c%c%c SrcPort:%d",
|
|
+ (srvstate == VCHIQ_SRVSTATE_OPENING)
|
|
+ ? "Open" : "Add",
|
|
+ VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
|
|
+ service->localport);
|
|
+ }
|
|
+
|
|
+ /* Don't unlock the service - leave it with a ref_count of 1. */
|
|
+
|
|
+ return service;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
|
|
+{
|
|
+ struct vchiq_open_payload payload = {
|
|
+ service->base.fourcc,
|
|
+ client_id,
|
|
+ service->version,
|
|
+ service->version_min
|
|
+ };
|
|
+ VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
|
|
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
|
|
+
|
|
+ service->client_id = client_id;
|
|
+ vchiq_use_service_internal(service);
|
|
+ status = queue_message(service->state, NULL,
|
|
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
|
|
+ &body, 1, sizeof(payload), 1);
|
|
+ if (status == VCHIQ_SUCCESS) {
|
|
+ if (down_interruptible(&service->remove_event) != 0) {
|
|
+ status = VCHIQ_RETRY;
|
|
+ vchiq_release_service_internal(service);
|
|
+ } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
|
|
+ (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
|
|
+ if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "%d: osi - srvstate = %s (ref %d)",
|
|
+ service->state->id,
|
|
+ srvstate_names[service->srvstate],
|
|
+ service->ref_count);
|
|
+ status = VCHIQ_ERROR;
|
|
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
|
|
+ vchiq_release_service_internal(service);
|
|
+ }
|
|
+ }
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static void
|
|
+release_service_messages(VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = service->state;
|
|
+ int slot_last = state->remote->slot_last;
|
|
+ int i;
|
|
+
|
|
+ /* Release any claimed messages */
|
|
+ for (i = state->remote->slot_first; i <= slot_last; i++) {
|
|
+ VCHIQ_SLOT_INFO_T *slot_info =
|
|
+ SLOT_INFO_FROM_INDEX(state, i);
|
|
+ if (slot_info->release_count != slot_info->use_count) {
|
|
+ char *data =
|
|
+ (char *)SLOT_DATA_FROM_INDEX(state, i);
|
|
+ unsigned int pos, end;
|
|
+
|
|
+ end = VCHIQ_SLOT_SIZE;
|
|
+ if (data == state->rx_data)
|
|
+ /* This buffer is still being read from - stop
|
|
+ ** at the current read position */
|
|
+ end = state->rx_pos & VCHIQ_SLOT_MASK;
|
|
+
|
|
+ pos = 0;
|
|
+
|
|
+ while (pos < end) {
|
|
+ VCHIQ_HEADER_T *header =
|
|
+ (VCHIQ_HEADER_T *)(data + pos);
|
|
+ int msgid = header->msgid;
|
|
+ int port = VCHIQ_MSG_DSTPORT(msgid);
|
|
+ if ((port == service->localport) &&
|
|
+ (msgid & VCHIQ_MSGID_CLAIMED)) {
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ " fsi - hdr %x",
|
|
+ (unsigned int)header);
|
|
+ release_slot(state, slot_info, header,
|
|
+ NULL);
|
|
+ }
|
|
+ pos += calc_stride(header->size);
|
|
+ if (pos > VCHIQ_SLOT_SIZE) {
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "fsi - pos %x: header %x, "
|
|
+ "msgid %x, header->msgid %x, "
|
|
+ "header->size %x",
|
|
+ pos, (unsigned int)header,
|
|
+ msgid, header->msgid,
|
|
+ header->size);
|
|
+ WARN(1, "invalid slot position\n");
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static int
|
|
+do_abort_bulks(VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ VCHIQ_STATUS_T status;
|
|
+
|
|
+ /* Abort any outstanding bulk transfers */
|
|
+ if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
|
|
+ return 0;
|
|
+ abort_outstanding_bulks(service, &service->bulk_tx);
|
|
+ abort_outstanding_bulks(service, &service->bulk_rx);
|
|
+ mutex_unlock(&service->bulk_mutex);
|
|
+
|
|
+ status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
|
|
+ if (status == VCHIQ_SUCCESS)
|
|
+ status = notify_bulks(service, &service->bulk_rx,
|
|
+ 0/*!retry_poll*/);
|
|
+ return (status == VCHIQ_SUCCESS);
|
|
+}
|
|
+
|
|
+static VCHIQ_STATUS_T
|
|
+close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
|
|
+{
|
|
+ VCHIQ_STATUS_T status;
|
|
+ int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
|
|
+ int newstate;
|
|
+
|
|
+ switch (service->srvstate) {
|
|
+ case VCHIQ_SRVSTATE_OPEN:
|
|
+ case VCHIQ_SRVSTATE_CLOSESENT:
|
|
+ case VCHIQ_SRVSTATE_CLOSERECVD:
|
|
+ if (is_server) {
|
|
+ if (service->auto_close) {
|
|
+ service->client_id = 0;
|
|
+ service->remoteport = VCHIQ_PORT_FREE;
|
|
+ newstate = VCHIQ_SRVSTATE_LISTENING;
|
|
+ } else
|
|
+ newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
|
|
+ } else
|
|
+ newstate = VCHIQ_SRVSTATE_CLOSED;
|
|
+ vchiq_set_service_state(service, newstate);
|
|
+ break;
|
|
+ case VCHIQ_SRVSTATE_LISTENING:
|
|
+ break;
|
|
+ default:
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "close_service_complete(%x) called in state %s",
|
|
+ service->handle, srvstate_names[service->srvstate]);
|
|
+ WARN(1, "close_service_complete in unexpected state\n");
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+
|
|
+ status = make_service_callback(service,
|
|
+ VCHIQ_SERVICE_CLOSED, NULL, NULL);
|
|
+
|
|
+ if (status != VCHIQ_RETRY) {
|
|
+ int uc = service->service_use_count;
|
|
+ int i;
|
|
+ /* Complete the close process */
|
|
+ for (i = 0; i < uc; i++)
|
|
+ /* cater for cases where close is forced and the
|
|
+			** client may not close all its handles */
|
|
+ vchiq_release_service_internal(service);
|
|
+
|
|
+ service->client_id = 0;
|
|
+ service->remoteport = VCHIQ_PORT_FREE;
|
|
+
|
|
+ if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
|
|
+ vchiq_free_service_internal(service);
|
|
+ else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
|
|
+ if (is_server)
|
|
+ service->closing = 0;
|
|
+
|
|
+ up(&service->remove_event);
|
|
+ }
|
|
+ } else
|
|
+ vchiq_set_service_state(service, failstate);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/* Called by the slot handler */
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = service->state;
|
|
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
|
|
+ int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
|
|
+
|
|
+ vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
|
|
+ service->state->id, service->localport, close_recvd,
|
|
+ srvstate_names[service->srvstate]);
|
|
+
|
|
+ switch (service->srvstate) {
|
|
+ case VCHIQ_SRVSTATE_CLOSED:
|
|
+ case VCHIQ_SRVSTATE_HIDDEN:
|
|
+ case VCHIQ_SRVSTATE_LISTENING:
|
|
+ case VCHIQ_SRVSTATE_CLOSEWAIT:
|
|
+ if (close_recvd)
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "vchiq_close_service_internal(1) called "
|
|
+ "in state %s",
|
|
+ srvstate_names[service->srvstate]);
|
|
+ else if (is_server) {
|
|
+ if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
|
|
+ status = VCHIQ_ERROR;
|
|
+ } else {
|
|
+ service->client_id = 0;
|
|
+ service->remoteport = VCHIQ_PORT_FREE;
|
|
+ if (service->srvstate ==
|
|
+ VCHIQ_SRVSTATE_CLOSEWAIT)
|
|
+ vchiq_set_service_state(service,
|
|
+ VCHIQ_SRVSTATE_LISTENING);
|
|
+ }
|
|
+ up(&service->remove_event);
|
|
+ } else
|
|
+ vchiq_free_service_internal(service);
|
|
+ break;
|
|
+ case VCHIQ_SRVSTATE_OPENING:
|
|
+ if (close_recvd) {
|
|
+ /* The open was rejected - tell the user */
|
|
+ vchiq_set_service_state(service,
|
|
+ VCHIQ_SRVSTATE_CLOSEWAIT);
|
|
+ up(&service->remove_event);
|
|
+ } else {
|
|
+ /* Shutdown mid-open - let the other side know */
|
|
+ status = queue_message(state, service,
|
|
+ VCHIQ_MAKE_MSG
|
|
+ (VCHIQ_MSG_CLOSE,
|
|
+ service->localport,
|
|
+ VCHIQ_MSG_DSTPORT(service->remoteport)),
|
|
+ NULL, 0, 0, 0);
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case VCHIQ_SRVSTATE_OPENSYNC:
|
|
+ mutex_lock(&state->sync_mutex);
|
|
+ /* Drop through */
|
|
+
|
|
+ case VCHIQ_SRVSTATE_OPEN:
|
|
+ if (state->is_master || close_recvd) {
|
|
+ if (!do_abort_bulks(service))
|
|
+ status = VCHIQ_RETRY;
|
|
+ }
|
|
+
|
|
+ release_service_messages(service);
|
|
+
|
|
+ if (status == VCHIQ_SUCCESS)
|
|
+ status = queue_message(state, service,
|
|
+ VCHIQ_MAKE_MSG
|
|
+ (VCHIQ_MSG_CLOSE,
|
|
+ service->localport,
|
|
+ VCHIQ_MSG_DSTPORT(service->remoteport)),
|
|
+ NULL, 0, 0, 0);
|
|
+
|
|
+ if (status == VCHIQ_SUCCESS) {
|
|
+ if (!close_recvd)
|
|
+ break;
|
|
+ } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
|
|
+ mutex_unlock(&state->sync_mutex);
|
|
+ break;
|
|
+ } else
|
|
+ break;
|
|
+
|
|
+ status = close_service_complete(service,
|
|
+ VCHIQ_SRVSTATE_CLOSERECVD);
|
|
+ break;
|
|
+
|
|
+ case VCHIQ_SRVSTATE_CLOSESENT:
|
|
+ if (!close_recvd)
|
|
+ /* This happens when a process is killed mid-close */
|
|
+ break;
|
|
+
|
|
+ if (!state->is_master) {
|
|
+ if (!do_abort_bulks(service)) {
|
|
+ status = VCHIQ_RETRY;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (status == VCHIQ_SUCCESS)
|
|
+ status = close_service_complete(service,
|
|
+ VCHIQ_SRVSTATE_CLOSERECVD);
|
|
+ break;
|
|
+
|
|
+ case VCHIQ_SRVSTATE_CLOSERECVD:
|
|
+ if (!close_recvd && is_server)
|
|
+ /* Force into LISTENING mode */
|
|
+ vchiq_set_service_state(service,
|
|
+ VCHIQ_SRVSTATE_LISTENING);
|
|
+ status = close_service_complete(service,
|
|
+ VCHIQ_SRVSTATE_CLOSERECVD);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "vchiq_close_service_internal(%d) called in state %s",
|
|
+ close_recvd, srvstate_names[service->srvstate]);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/* Called from the application process upon process death */
|
|
+void
|
|
+vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = service->state;
|
|
+
|
|
+ vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
|
|
+ state->id, service->localport, service->remoteport);
|
|
+
|
|
+ mark_service_closing(service);
|
|
+
|
|
+ /* Mark the service for removal by the slot handler */
|
|
+ request_poll(state, service, VCHIQ_POLL_REMOVE);
|
|
+}
|
|
+
|
|
+/* Called from the slot handler */
|
|
+void
|
|
+vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = service->state;
|
|
+
|
|
+ vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
|
|
+ state->id, service->localport);
|
|
+
|
|
+ switch (service->srvstate) {
|
|
+ case VCHIQ_SRVSTATE_OPENING:
|
|
+ case VCHIQ_SRVSTATE_CLOSED:
|
|
+ case VCHIQ_SRVSTATE_HIDDEN:
|
|
+ case VCHIQ_SRVSTATE_LISTENING:
|
|
+ case VCHIQ_SRVSTATE_CLOSEWAIT:
|
|
+ break;
|
|
+ default:
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "%d: fsi - (%d) in state %s",
|
|
+ state->id, service->localport,
|
|
+ srvstate_names[service->srvstate]);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
|
|
+
|
|
+ up(&service->remove_event);
|
|
+
|
|
+ /* Release the initial lock */
|
|
+ unlock_service(service);
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+ int i;
|
|
+
|
|
+ /* Find all services registered to this client and enable them. */
|
|
+ i = 0;
|
|
+ while ((service = next_service_by_instance(state, instance,
|
|
+ &i)) != NULL) {
|
|
+ if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
|
|
+ vchiq_set_service_state(service,
|
|
+ VCHIQ_SRVSTATE_LISTENING);
|
|
+ unlock_service(service);
|
|
+ }
|
|
+
|
|
+ if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
|
|
+ if (queue_message(state, NULL,
|
|
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
|
|
+ 0, 1) == VCHIQ_RETRY)
|
|
+ return VCHIQ_RETRY;
|
|
+
|
|
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
|
|
+ }
|
|
+
|
|
+ if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
|
|
+ if (down_interruptible(&state->connect) != 0)
|
|
+ return VCHIQ_RETRY;
|
|
+
|
|
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
|
|
+ up(&state->connect);
|
|
+ }
|
|
+
|
|
+ return VCHIQ_SUCCESS;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+ int i;
|
|
+
|
|
+	/* Find all services registered to this client and remove them. */
|
|
+ i = 0;
|
|
+ while ((service = next_service_by_instance(state, instance,
|
|
+ &i)) != NULL) {
|
|
+ (void)vchiq_remove_service(service->handle);
|
|
+ unlock_service(service);
|
|
+ }
|
|
+
|
|
+ return VCHIQ_SUCCESS;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_pause_internal(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
|
|
+
|
|
+ switch (state->conn_state) {
|
|
+ case VCHIQ_CONNSTATE_CONNECTED:
|
|
+ /* Request a pause */
|
|
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
|
|
+ request_poll(state, NULL, 0);
|
|
+ break;
|
|
+ default:
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "vchiq_pause_internal in state %s\n",
|
|
+ conn_state_names[state->conn_state]);
|
|
+ status = VCHIQ_ERROR;
|
|
+ VCHIQ_STATS_INC(state, error_count);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_resume_internal(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
|
|
+
|
|
+ if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
|
|
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
|
|
+ request_poll(state, NULL, 0);
|
|
+ } else {
|
|
+ status = VCHIQ_ERROR;
|
|
+ VCHIQ_STATS_INC(state, error_count);
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ /* Unregister the service */
|
|
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
|
|
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
|
|
+
|
|
+ if (!service)
|
|
+ return VCHIQ_ERROR;
|
|
+
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: close_service:%d",
|
|
+ service->state->id, service->localport);
|
|
+
|
|
+ if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
|
|
+ (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
|
|
+ (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
|
|
+ unlock_service(service);
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+
|
|
+ mark_service_closing(service);
|
|
+
|
|
+ if (current == service->state->slot_handler_thread) {
|
|
+ status = vchiq_close_service_internal(service,
|
|
+ 0/*!close_recvd*/);
|
|
+ BUG_ON(status == VCHIQ_RETRY);
|
|
+ } else {
|
|
+ /* Mark the service for termination by the slot handler */
|
|
+ request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
|
|
+ }
|
|
+
|
|
+ while (1) {
|
|
+ if (down_interruptible(&service->remove_event) != 0) {
|
|
+ status = VCHIQ_RETRY;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
|
|
+ (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
|
|
+ (service->srvstate == VCHIQ_SRVSTATE_OPEN))
|
|
+ break;
|
|
+
|
|
+ vchiq_log_warning(vchiq_core_log_level,
|
|
+ "%d: close_service:%d - waiting in state %s",
|
|
+ service->state->id, service->localport,
|
|
+ srvstate_names[service->srvstate]);
|
|
+ }
|
|
+
|
|
+ if ((status == VCHIQ_SUCCESS) &&
|
|
+ (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
|
|
+ (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
|
|
+ status = VCHIQ_ERROR;
|
|
+
|
|
+ unlock_service(service);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ /* Unregister the service */
|
|
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
|
|
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
|
|
+
|
|
+ if (!service)
|
|
+ return VCHIQ_ERROR;
|
|
+
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: remove_service:%d",
|
|
+ service->state->id, service->localport);
|
|
+
|
|
+ if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
|
|
+ unlock_service(service);
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+
|
|
+ mark_service_closing(service);
|
|
+
|
|
+ if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
|
|
+ (current == service->state->slot_handler_thread)) {
|
|
+ /* Make it look like a client, because it must be removed and
|
|
+ not left in the LISTENING state. */
|
|
+ service->public_fourcc = VCHIQ_FOURCC_INVALID;
|
|
+
|
|
+ status = vchiq_close_service_internal(service,
|
|
+ 0/*!close_recvd*/);
|
|
+ BUG_ON(status == VCHIQ_RETRY);
|
|
+ } else {
|
|
+ /* Mark the service for removal by the slot handler */
|
|
+ request_poll(service->state, service, VCHIQ_POLL_REMOVE);
|
|
+ }
|
|
+ while (1) {
|
|
+ if (down_interruptible(&service->remove_event) != 0) {
|
|
+ status = VCHIQ_RETRY;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
|
|
+ (service->srvstate == VCHIQ_SRVSTATE_OPEN))
|
|
+ break;
|
|
+
|
|
+ vchiq_log_warning(vchiq_core_log_level,
|
|
+ "%d: remove_service:%d - waiting in state %s",
|
|
+ service->state->id, service->localport,
|
|
+ srvstate_names[service->srvstate]);
|
|
+ }
|
|
+
|
|
+ if ((status == VCHIQ_SUCCESS) &&
|
|
+ (service->srvstate != VCHIQ_SRVSTATE_FREE))
|
|
+ status = VCHIQ_ERROR;
|
|
+
|
|
+ unlock_service(service);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+
|
|
+/* This function may be called by kernel threads or user threads.
|
|
+ * User threads may receive VCHIQ_RETRY to indicate that a signal has been
|
|
+ * received and the call should be retried after being returned to user
|
|
+ * context.
|
|
+ * When called in blocking mode, the userdata field points to a bulk_waiter
|
|
+ * structure.
|
|
+ */
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
|
|
+ VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
|
|
+ VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
|
|
+ VCHIQ_BULK_QUEUE_T *queue;
|
|
+ VCHIQ_BULK_T *bulk;
|
|
+ VCHIQ_STATE_T *state;
|
|
+ struct bulk_waiter *bulk_waiter = NULL;
|
|
+ const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
|
|
+ const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
|
|
+ VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
|
|
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
|
|
+
|
|
+ if (!service ||
|
|
+ (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
|
|
+ ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
|
|
+ (vchiq_check_service(service) != VCHIQ_SUCCESS))
|
|
+ goto error_exit;
|
|
+
|
|
+ switch (mode) {
|
|
+ case VCHIQ_BULK_MODE_NOCALLBACK:
|
|
+ case VCHIQ_BULK_MODE_CALLBACK:
|
|
+ break;
|
|
+ case VCHIQ_BULK_MODE_BLOCKING:
|
|
+ bulk_waiter = (struct bulk_waiter *)userdata;
|
|
+ sema_init(&bulk_waiter->event, 0);
|
|
+ bulk_waiter->actual = 0;
|
|
+ bulk_waiter->bulk = NULL;
|
|
+ break;
|
|
+ case VCHIQ_BULK_MODE_WAITING:
|
|
+ bulk_waiter = (struct bulk_waiter *)userdata;
|
|
+ bulk = bulk_waiter->bulk;
|
|
+ goto waiting;
|
|
+ default:
|
|
+ goto error_exit;
|
|
+ }
|
|
+
|
|
+ state = service->state;
|
|
+
|
|
+ queue = (dir == VCHIQ_BULK_TRANSMIT) ?
|
|
+ &service->bulk_tx : &service->bulk_rx;
|
|
+
|
|
+ if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
|
|
+ status = VCHIQ_RETRY;
|
|
+ goto error_exit;
|
|
+ }
|
|
+
|
|
+ if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
|
|
+ VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
|
|
+ do {
|
|
+ mutex_unlock(&service->bulk_mutex);
|
|
+ if (down_interruptible(&service->bulk_remove_event)
|
|
+ != 0) {
|
|
+ status = VCHIQ_RETRY;
|
|
+ goto error_exit;
|
|
+ }
|
|
+ if (mutex_lock_interruptible(&service->bulk_mutex)
|
|
+ != 0) {
|
|
+ status = VCHIQ_RETRY;
|
|
+ goto error_exit;
|
|
+ }
|
|
+ } while (queue->local_insert == queue->remove +
|
|
+ VCHIQ_NUM_SERVICE_BULKS);
|
|
+ }
|
|
+
|
|
+ bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
|
|
+
|
|
+ bulk->mode = mode;
|
|
+ bulk->dir = dir;
|
|
+ bulk->userdata = userdata;
|
|
+ bulk->size = size;
|
|
+ bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
|
|
+
|
|
+ if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
|
|
+ VCHIQ_SUCCESS)
|
|
+ goto unlock_error_exit;
|
|
+
|
|
+ wmb();
|
|
+
|
|
+ vchiq_log_info(vchiq_core_log_level,
|
|
+ "%d: bt (%d->%d) %cx %x@%x %x",
|
|
+ state->id,
|
|
+ service->localport, service->remoteport, dir_char,
|
|
+ size, (unsigned int)bulk->data, (unsigned int)userdata);
|
|
+
|
|
+ if (state->is_master) {
|
|
+ queue->local_insert++;
|
|
+ if (resolve_bulks(service, queue))
|
|
+ request_poll(state, service,
|
|
+ (dir == VCHIQ_BULK_TRANSMIT) ?
|
|
+ VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
|
|
+ } else {
|
|
+ int payload[2] = { (int)bulk->data, bulk->size };
|
|
+ VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
|
|
+
|
|
+ status = queue_message(state, NULL,
|
|
+ VCHIQ_MAKE_MSG(dir_msgtype,
|
|
+ service->localport, service->remoteport),
|
|
+ &element, 1, sizeof(payload), 1);
|
|
+ if (status != VCHIQ_SUCCESS) {
|
|
+ vchiq_complete_bulk(bulk);
|
|
+ goto unlock_error_exit;
|
|
+ }
|
|
+ queue->local_insert++;
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&service->bulk_mutex);
|
|
+
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%d: bt:%d %cx li=%x ri=%x p=%x",
|
|
+ state->id,
|
|
+ service->localport, dir_char,
|
|
+ queue->local_insert, queue->remote_insert, queue->process);
|
|
+
|
|
+waiting:
|
|
+ unlock_service(service);
|
|
+
|
|
+ status = VCHIQ_SUCCESS;
|
|
+
|
|
+ if (bulk_waiter) {
|
|
+ bulk_waiter->bulk = bulk;
|
|
+ if (down_interruptible(&bulk_waiter->event) != 0)
|
|
+ status = VCHIQ_RETRY;
|
|
+ else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
|
|
+ status = VCHIQ_ERROR;
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+
|
|
+unlock_error_exit:
|
|
+ mutex_unlock(&service->bulk_mutex);
|
|
+
|
|
+error_exit:
|
|
+ if (service)
|
|
+ unlock_service(service);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
|
|
+ const VCHIQ_ELEMENT_T *elements, unsigned int count)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
|
|
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
|
|
+
|
|
+ unsigned int size = 0;
|
|
+ unsigned int i;
|
|
+
|
|
+ if (!service ||
|
|
+ (vchiq_check_service(service) != VCHIQ_SUCCESS))
|
|
+ goto error_exit;
|
|
+
|
|
+ for (i = 0; i < (unsigned int)count; i++) {
|
|
+ if (elements[i].size) {
|
|
+ if (elements[i].data == NULL) {
|
|
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
|
|
+ goto error_exit;
|
|
+ }
|
|
+ size += elements[i].size;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (size > VCHIQ_MAX_MSG_SIZE) {
|
|
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
|
|
+ goto error_exit;
|
|
+ }
|
|
+
|
|
+ switch (service->srvstate) {
|
|
+ case VCHIQ_SRVSTATE_OPEN:
|
|
+ status = queue_message(service->state, service,
|
|
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
|
|
+ service->localport,
|
|
+ service->remoteport),
|
|
+ elements, count, size, 1);
|
|
+ break;
|
|
+ case VCHIQ_SRVSTATE_OPENSYNC:
|
|
+ status = queue_message_sync(service->state, service,
|
|
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
|
|
+ service->localport,
|
|
+ service->remoteport),
|
|
+ elements, count, size, 1);
|
|
+ break;
|
|
+ default:
|
|
+ status = VCHIQ_ERROR;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+error_exit:
|
|
+ if (service)
|
|
+ unlock_service(service);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
|
|
+ VCHIQ_SHARED_STATE_T *remote;
|
|
+ VCHIQ_STATE_T *state;
|
|
+ int slot_index;
|
|
+
|
|
+ if (!service)
|
|
+ return;
|
|
+
|
|
+ state = service->state;
|
|
+ remote = state->remote;
|
|
+
|
|
+ slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
|
|
+
|
|
+ if ((slot_index >= remote->slot_first) &&
|
|
+ (slot_index <= remote->slot_last)) {
|
|
+ int msgid = header->msgid;
|
|
+ if (msgid & VCHIQ_MSGID_CLAIMED) {
|
|
+ VCHIQ_SLOT_INFO_T *slot_info =
|
|
+ SLOT_INFO_FROM_INDEX(state, slot_index);
|
|
+
|
|
+ release_slot(state, slot_info, header, service);
|
|
+ }
|
|
+ } else if (slot_index == remote->slot_sync)
|
|
+ release_message_sync(state, header);
|
|
+
|
|
+ unlock_service(service);
|
|
+}
|
|
+
|
|
+static void
|
|
+release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
|
|
+{
|
|
+ header->msgid = VCHIQ_MSGID_PADDING;
|
|
+ wmb();
|
|
+ remote_event_signal(&state->remote->sync_release);
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
|
|
+{
|
|
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
|
|
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
|
|
+
|
|
+ if (!service ||
|
|
+ (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
|
|
+ !peer_version)
|
|
+ goto exit;
|
|
+ *peer_version = service->peer_version;
|
|
+ status = VCHIQ_SUCCESS;
|
|
+
|
|
+exit:
|
|
+ if (service)
|
|
+ unlock_service(service);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_get_config(VCHIQ_INSTANCE_T instance,
|
|
+ int config_size, VCHIQ_CONFIG_T *pconfig)
|
|
+{
|
|
+ VCHIQ_CONFIG_T config;
|
|
+
|
|
+ (void)instance;
|
|
+
|
|
+ config.max_msg_size = VCHIQ_MAX_MSG_SIZE;
|
|
+ config.bulk_threshold = VCHIQ_MAX_MSG_SIZE;
|
|
+ config.max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
|
|
+ config.max_services = VCHIQ_MAX_SERVICES;
|
|
+ config.version = VCHIQ_VERSION;
|
|
+ config.version_min = VCHIQ_VERSION_MIN;
|
|
+
|
|
+ if (config_size > sizeof(VCHIQ_CONFIG_T))
|
|
+ return VCHIQ_ERROR;
|
|
+
|
|
+ memcpy(pconfig, &config,
|
|
+ min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
|
|
+
|
|
+ return VCHIQ_SUCCESS;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
|
|
+ VCHIQ_SERVICE_OPTION_T option, int value)
|
|
+{
|
|
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
|
|
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
|
|
+
|
|
+ if (service) {
|
|
+ switch (option) {
|
|
+ case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
|
|
+ service->auto_close = value;
|
|
+ status = VCHIQ_SUCCESS;
|
|
+ break;
|
|
+
|
|
+ case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
|
|
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
|
|
+ &service->state->service_quotas[
|
|
+ service->localport];
|
|
+ if (value == 0)
|
|
+ value = service->state->default_slot_quota;
|
|
+ if ((value >= service_quota->slot_use_count) &&
|
|
+ (value < (unsigned short)~0)) {
|
|
+ service_quota->slot_quota = value;
|
|
+ if ((value >= service_quota->slot_use_count) &&
|
|
+ (service_quota->message_quota >=
|
|
+ service_quota->message_use_count)) {
|
|
+ /* Signal the service that it may have
|
|
+ ** dropped below its quota */
|
|
+ up(&service_quota->quota_event);
|
|
+ }
|
|
+ status = VCHIQ_SUCCESS;
|
|
+ }
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
|
|
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
|
|
+ &service->state->service_quotas[
|
|
+ service->localport];
|
|
+ if (value == 0)
|
|
+ value = service->state->default_message_quota;
|
|
+ if ((value >= service_quota->message_use_count) &&
|
|
+ (value < (unsigned short)~0)) {
|
|
+ service_quota->message_quota = value;
|
|
+ if ((value >=
|
|
+ service_quota->message_use_count) &&
|
|
+ (service_quota->slot_quota >=
|
|
+ service_quota->slot_use_count))
|
|
+ /* Signal the service that it may have
|
|
+ ** dropped below its quota */
|
|
+ up(&service_quota->quota_event);
|
|
+ status = VCHIQ_SUCCESS;
|
|
+ }
|
|
+ } break;
|
|
+
|
|
+ case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
|
|
+ if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
|
|
+ (service->srvstate ==
|
|
+ VCHIQ_SRVSTATE_LISTENING)) {
|
|
+ service->sync = value;
|
|
+ status = VCHIQ_SUCCESS;
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+ unlock_service(service);
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
|
|
+ VCHIQ_SHARED_STATE_T *shared, const char *label)
|
|
+{
|
|
+ static const char *const debug_names[] = {
|
|
+ "<entries>",
|
|
+ "SLOT_HANDLER_COUNT",
|
|
+ "SLOT_HANDLER_LINE",
|
|
+ "PARSE_LINE",
|
|
+ "PARSE_HEADER",
|
|
+ "PARSE_MSGID",
|
|
+ "AWAIT_COMPLETION_LINE",
|
|
+ "DEQUEUE_MESSAGE_LINE",
|
|
+ "SERVICE_CALLBACK_LINE",
|
|
+ "MSG_QUEUE_FULL_COUNT",
|
|
+ "COMPLETION_QUEUE_FULL_COUNT"
|
|
+ };
|
|
+ int i;
|
|
+
|
|
+ char buf[80];
|
|
+ int len;
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ " %s: slots %d-%d tx_pos=%x recycle=%x",
|
|
+ label, shared->slot_first, shared->slot_last,
|
|
+ shared->tx_pos, shared->slot_queue_recycle);
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ " Slots claimed:");
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+
|
|
+ for (i = shared->slot_first; i <= shared->slot_last; i++) {
|
|
+ VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
|
|
+ if (slot_info.use_count != slot_info.release_count) {
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ " %d: %d/%d", i, slot_info.use_count,
|
|
+ slot_info.release_count);
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
|
|
+ len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
|
|
+ debug_names[i], shared->debug[i], shared->debug[i]);
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+ }
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
|
|
+{
|
|
+ char buf[80];
|
|
+ int len;
|
|
+ int i;
|
|
+
|
|
+ len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
|
|
+ conn_state_names[state->conn_state]);
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ " tx_pos=%x(@%x), rx_pos=%x(@%x)",
|
|
+ state->local->tx_pos,
|
|
+ (uint32_t)state->tx_data +
|
|
+ (state->local_tx_pos & VCHIQ_SLOT_MASK),
|
|
+ state->rx_pos,
|
|
+ (uint32_t)state->rx_data +
|
|
+ (state->rx_pos & VCHIQ_SLOT_MASK));
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ " Version: %d (min %d)",
|
|
+ VCHIQ_VERSION, VCHIQ_VERSION_MIN);
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+
|
|
+ if (VCHIQ_ENABLE_STATS) {
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
|
|
+ "error_count=%d",
|
|
+ state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
|
|
+ state->stats.error_count);
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+ }
|
|
+
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ " Slots: %d available (%d data), %d recyclable, %d stalls "
|
|
+ "(%d data)",
|
|
+ ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
|
|
+ state->local_tx_pos) / VCHIQ_SLOT_SIZE,
|
|
+ state->data_quota - state->data_use_count,
|
|
+ state->local->slot_queue_recycle - state->slot_queue_available,
|
|
+ state->stats.slot_stalls, state->stats.data_stalls);
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+
|
|
+ vchiq_dump_platform_state(dump_context);
|
|
+
|
|
+ vchiq_dump_shared_state(dump_context, state, state->local, "Local");
|
|
+ vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
|
|
+
|
|
+ vchiq_dump_platform_instances(dump_context);
|
|
+
|
|
+ for (i = 0; i < state->unused_service; i++) {
|
|
+ VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
|
|
+
|
|
+ if (service) {
|
|
+ vchiq_dump_service_state(dump_context, service);
|
|
+ unlock_service(service);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
|
|
+{
|
|
+ char buf[80];
|
|
+ int len;
|
|
+
|
|
+ len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
|
|
+ service->localport, srvstate_names[service->srvstate],
|
|
+		service->ref_count - 1); /* Don't include the lock just taken */
|
|
+
|
|
+ if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
|
|
+ char remoteport[30];
|
|
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
|
|
+ &service->state->service_quotas[service->localport];
|
|
+ int fourcc = service->base.fourcc;
|
|
+ int tx_pending, rx_pending;
|
|
+ if (service->remoteport != VCHIQ_PORT_FREE) {
|
|
+ int len2 = snprintf(remoteport, sizeof(remoteport),
|
|
+ "%d", service->remoteport);
|
|
+ if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
|
|
+ snprintf(remoteport + len2,
|
|
+ sizeof(remoteport) - len2,
|
|
+ " (client %x)", service->client_id);
|
|
+ } else
|
|
+ strcpy(remoteport, "n/a");
|
|
+
|
|
+ len += snprintf(buf + len, sizeof(buf) - len,
|
|
+ " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
|
|
+ VCHIQ_FOURCC_AS_4CHARS(fourcc),
|
|
+ remoteport,
|
|
+ service_quota->message_use_count,
|
|
+ service_quota->message_quota,
|
|
+ service_quota->slot_use_count,
|
|
+ service_quota->slot_quota);
|
|
+
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+
|
|
+ tx_pending = service->bulk_tx.local_insert -
|
|
+ service->bulk_tx.remote_insert;
|
|
+
|
|
+ rx_pending = service->bulk_rx.local_insert -
|
|
+ service->bulk_rx.remote_insert;
|
|
+
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ " Bulk: tx_pending=%d (size %d),"
|
|
+ " rx_pending=%d (size %d)",
|
|
+ tx_pending,
|
|
+ tx_pending ? service->bulk_tx.bulks[
|
|
+ BULK_INDEX(service->bulk_tx.remove)].size : 0,
|
|
+ rx_pending,
|
|
+ rx_pending ? service->bulk_rx.bulks[
|
|
+ BULK_INDEX(service->bulk_rx.remove)].size : 0);
|
|
+
|
|
+ if (VCHIQ_ENABLE_STATS) {
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ " Ctrl: tx_count=%d, tx_bytes=%llu, "
|
|
+ "rx_count=%d, rx_bytes=%llu",
|
|
+ service->stats.ctrl_tx_count,
|
|
+ service->stats.ctrl_tx_bytes,
|
|
+ service->stats.ctrl_rx_count,
|
|
+ service->stats.ctrl_rx_bytes);
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ " Bulk: tx_count=%d, tx_bytes=%llu, "
|
|
+ "rx_count=%d, rx_bytes=%llu",
|
|
+ service->stats.bulk_tx_count,
|
|
+ service->stats.bulk_tx_bytes,
|
|
+ service->stats.bulk_rx_count,
|
|
+ service->stats.bulk_rx_bytes);
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+
|
|
+ len = snprintf(buf, sizeof(buf),
|
|
+ " %d quota stalls, %d slot stalls, "
|
|
+ "%d bulk stalls, %d aborted, %d errors",
|
|
+ service->stats.quota_stalls,
|
|
+ service->stats.slot_stalls,
|
|
+ service->stats.bulk_stalls,
|
|
+ service->stats.bulk_aborted_count,
|
|
+ service->stats.error_count);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ vchiq_dump(dump_context, buf, len + 1);
|
|
+
|
|
+ if (service->srvstate != VCHIQ_SRVSTATE_FREE)
|
|
+ vchiq_dump_platform_service_state(dump_context, service);
|
|
+}
|
|
+
|
|
+
|
|
+void
|
|
+vchiq_loud_error_header(void)
|
|
+{
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "============================================================"
|
|
+ "================");
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "============================================================"
|
|
+ "================");
|
|
+ vchiq_log_error(vchiq_core_log_level, "=====");
|
|
+}
|
|
+
|
|
+void
|
|
+vchiq_loud_error_footer(void)
|
|
+{
|
|
+ vchiq_log_error(vchiq_core_log_level, "=====");
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "============================================================"
|
|
+ "================");
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "============================================================"
|
|
+ "================");
|
|
+}
|
|
+
|
|
+
|
|
+VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_STATUS_T status = VCHIQ_RETRY;
|
|
+ if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
|
|
+ status = queue_message(state, NULL,
|
|
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
|
|
+ NULL, 0, 0, 0);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_STATUS_T status = VCHIQ_RETRY;
|
|
+ if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
|
|
+ status = queue_message(state, NULL,
|
|
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
|
|
+ NULL, 0, 0, 0);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
|
|
+{
|
|
+ VCHIQ_STATUS_T status = VCHIQ_RETRY;
|
|
+ if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
|
|
+ status = queue_message(state, NULL,
|
|
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
|
|
+ NULL, 0, 0, 0);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
|
|
+ size_t numBytes)
|
|
+{
|
|
+ const uint8_t *mem = (const uint8_t *)voidMem;
|
|
+ size_t offset;
|
|
+ char lineBuf[100];
|
|
+ char *s;
|
|
+
|
|
+ while (numBytes > 0) {
|
|
+ s = lineBuf;
|
|
+
|
|
+ for (offset = 0; offset < 16; offset++) {
|
|
+ if (offset < numBytes)
|
|
+ s += snprintf(s, 4, "%02x ", mem[offset]);
|
|
+ else
|
|
+ s += snprintf(s, 4, " ");
|
|
+ }
|
|
+
|
|
+ for (offset = 0; offset < 16; offset++) {
|
|
+ if (offset < numBytes) {
|
|
+ uint8_t ch = mem[offset];
|
|
+
|
|
+ if ((ch < ' ') || (ch > '~'))
|
|
+ ch = '.';
|
|
+ *s++ = (char)ch;
|
|
+ }
|
|
+ }
|
|
+ *s++ = '\0';
|
|
+
|
|
+ if ((label != NULL) && (*label != '\0'))
|
|
+ vchiq_log_trace(VCHIQ_LOG_TRACE,
|
|
+ "%s: %08x: %s", label, addr, lineBuf);
|
|
+ else
|
|
+ vchiq_log_trace(VCHIQ_LOG_TRACE,
|
|
+ "%08x: %s", addr, lineBuf);
|
|
+
|
|
+ addr += 16;
|
|
+ mem += 16;
|
|
+ if (numBytes > 16)
|
|
+ numBytes -= 16;
|
|
+ else
|
|
+ numBytes = 0;
|
|
+ }
|
|
+}
|
|
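As a hedged aside (not part of the patch): judging from the snprintf calls in vchiq_log_dump_mem above, each logged line covers up to 16 bytes, the hex field is padded with spaces for missing bytes, and non-printable bytes appear as '.' in the ASCII column. With a made-up label "buf", address 0 and the 16 bytes of "0123456789abcdef", the formatted string (before any printk prefix) would be:

buf: 00000000: 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 66 0123456789abcdef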
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
|
|
@@ -0,0 +1,703 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHIQ_CORE_H
|
|
+#define VCHIQ_CORE_H
|
|
+
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/semaphore.h>
|
|
+#include <linux/kthread.h>
|
|
+
|
|
+#include "vchiq_cfg.h"
|
|
+
|
|
+#include "vchiq.h"
|
|
+
|
|
+/* Run time control of log level, based on KERN_XXX level. */
|
|
+#define VCHIQ_LOG_DEFAULT 4
|
|
+#define VCHIQ_LOG_ERROR 3
|
|
+#define VCHIQ_LOG_WARNING 4
|
|
+#define VCHIQ_LOG_INFO 6
|
|
+#define VCHIQ_LOG_TRACE 7
|
|
+
|
|
+#define VCHIQ_LOG_PREFIX KERN_INFO "vchiq: "
|
|
+
|
|
+#ifndef vchiq_log_error
|
|
+#define vchiq_log_error(cat, fmt, ...) \
|
|
+ do { if (cat >= VCHIQ_LOG_ERROR) \
|
|
+ printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
|
|
+#endif
|
|
+#ifndef vchiq_log_warning
|
|
+#define vchiq_log_warning(cat, fmt, ...) \
|
|
+ do { if (cat >= VCHIQ_LOG_WARNING) \
|
|
+ printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
|
|
+#endif
|
|
+#ifndef vchiq_log_info
|
|
+#define vchiq_log_info(cat, fmt, ...) \
|
|
+ do { if (cat >= VCHIQ_LOG_INFO) \
|
|
+ printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
|
|
+#endif
|
|
+#ifndef vchiq_log_trace
|
|
+#define vchiq_log_trace(cat, fmt, ...) \
|
|
+ do { if (cat >= VCHIQ_LOG_TRACE) \
|
|
+ printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
|
|
+#endif
|
|
+
|
|
+#define vchiq_loud_error(...) \
|
|
+ vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
|
|
+
|
|
+#ifndef vchiq_static_assert
|
|
+#define vchiq_static_assert(cond) __attribute__((unused)) \
|
|
+ extern int vchiq_static_assert[(cond) ? 1 : -1]
|
|
+#endif
|
|
+
|
|
+#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
|
|
+
|
|
+/* Ensure that the slot size and maximum number of slots are powers of 2 */
|
|
+vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
|
|
+vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
|
|
+vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
|
|
+
|
|
+#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
|
|
+#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
|
|
+#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \
|
|
+ VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
|
|
+
|
|
+#define VCHIQ_MSG_PADDING 0 /* - */
|
|
+#define VCHIQ_MSG_CONNECT 1 /* - */
|
|
+#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
|
|
+#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
|
|
+#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
|
|
+#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
|
|
+#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
|
|
+#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
|
|
+#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
|
|
+#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
|
|
+#define VCHIQ_MSG_PAUSE 10 /* - */
|
|
+#define VCHIQ_MSG_RESUME 11 /* - */
|
|
+#define VCHIQ_MSG_REMOTE_USE 12 /* - */
|
|
+#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
|
|
+#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
|
|
+
|
|
+#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
|
|
+#define VCHIQ_PORT_FREE 0x1000
|
|
+#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
|
|
+#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
|
|
+ ((type<<24) | (srcport<<12) | (dstport<<0))
|
|
+#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
|
|
+#define VCHIQ_MSG_SRCPORT(msgid) \
|
|
+ (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
|
|
+#define VCHIQ_MSG_DSTPORT(msgid) \
|
|
+ ((unsigned short)msgid & 0xfff)
|
|
+
|
|
+#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
|
|
+ ((fourcc) >> 24) & 0xff, \
|
|
+ ((fourcc) >> 16) & 0xff, \
|
|
+ ((fourcc) >> 8) & 0xff, \
|
|
+ (fourcc) & 0xff
|
|
+
|
|
+/* Ensure the fields are wide enough */
|
|
+vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
|
|
+ == 0);
|
|
+vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
|
|
+vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
|
|
+ (unsigned int)VCHIQ_PORT_FREE);
|
|
+
|
|
+#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
|
|
+#define VCHIQ_MSGID_CLAIMED 0x40000000
|
|
+
|
|
+#define VCHIQ_FOURCC_INVALID 0x00000000
|
|
+#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
|
|
+
|
|
+#define VCHIQ_BULK_ACTUAL_ABORTED -1
|
|
+
|
|
+typedef uint32_t BITSET_T;
|
|
+
|
|
+vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
|
|
+
|
|
+#define BITSET_SIZE(b) ((b + 31) >> 5)
|
|
+#define BITSET_WORD(b) (b >> 5)
|
|
+#define BITSET_BIT(b) (1 << (b & 31))
|
|
+#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
|
|
+#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
|
|
+#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
|
|
+#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
|
|
+
|
|
+#if VCHIQ_ENABLE_STATS
|
|
+#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
|
|
+#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
|
|
+#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
|
|
+ (service->stats. stat += addend)
|
|
+#else
|
|
+#define VCHIQ_STATS_INC(state, stat) ((void)0)
|
|
+#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
|
|
+#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
|
|
+#endif
|
|
+
|
|
+enum {
|
|
+ DEBUG_ENTRIES,
|
|
+#if VCHIQ_ENABLE_DEBUG
|
|
+ DEBUG_SLOT_HANDLER_COUNT,
|
|
+ DEBUG_SLOT_HANDLER_LINE,
|
|
+ DEBUG_PARSE_LINE,
|
|
+ DEBUG_PARSE_HEADER,
|
|
+ DEBUG_PARSE_MSGID,
|
|
+ DEBUG_AWAIT_COMPLETION_LINE,
|
|
+ DEBUG_DEQUEUE_MESSAGE_LINE,
|
|
+ DEBUG_SERVICE_CALLBACK_LINE,
|
|
+ DEBUG_MSG_QUEUE_FULL_COUNT,
|
|
+ DEBUG_COMPLETION_QUEUE_FULL_COUNT,
|
|
+#endif
|
|
+ DEBUG_MAX
|
|
+};
|
|
+
|
|
+#if VCHIQ_ENABLE_DEBUG
|
|
+
|
|
+#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
|
|
+#define DEBUG_TRACE(d) \
|
|
+ do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
|
|
+#define DEBUG_VALUE(d, v) \
|
|
+ do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
|
|
+#define DEBUG_COUNT(d) \
|
|
+ do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
|
|
+
|
|
+#else /* VCHIQ_ENABLE_DEBUG */
|
|
+
|
|
+#define DEBUG_INITIALISE(local)
|
|
+#define DEBUG_TRACE(d)
|
|
+#define DEBUG_VALUE(d, v)
|
|
+#define DEBUG_COUNT(d)
|
|
+
|
|
+#endif /* VCHIQ_ENABLE_DEBUG */
|
|
+
|
|
+typedef enum {
|
|
+ VCHIQ_CONNSTATE_DISCONNECTED,
|
|
+ VCHIQ_CONNSTATE_CONNECTING,
|
|
+ VCHIQ_CONNSTATE_CONNECTED,
|
|
+ VCHIQ_CONNSTATE_PAUSING,
|
|
+ VCHIQ_CONNSTATE_PAUSE_SENT,
|
|
+ VCHIQ_CONNSTATE_PAUSED,
|
|
+ VCHIQ_CONNSTATE_RESUMING,
|
|
+ VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
|
|
+ VCHIQ_CONNSTATE_RESUME_TIMEOUT
|
|
+} VCHIQ_CONNSTATE_T;
|
|
+
|
|
+enum {
|
|
+ VCHIQ_SRVSTATE_FREE,
|
|
+ VCHIQ_SRVSTATE_HIDDEN,
|
|
+ VCHIQ_SRVSTATE_LISTENING,
|
|
+ VCHIQ_SRVSTATE_OPENING,
|
|
+ VCHIQ_SRVSTATE_OPEN,
|
|
+ VCHIQ_SRVSTATE_OPENSYNC,
|
|
+ VCHIQ_SRVSTATE_CLOSESENT,
|
|
+ VCHIQ_SRVSTATE_CLOSERECVD,
|
|
+ VCHIQ_SRVSTATE_CLOSEWAIT,
|
|
+ VCHIQ_SRVSTATE_CLOSED
|
|
+};
|
|
+
|
|
+enum {
|
|
+ VCHIQ_POLL_TERMINATE,
|
|
+ VCHIQ_POLL_REMOVE,
|
|
+ VCHIQ_POLL_TXNOTIFY,
|
|
+ VCHIQ_POLL_RXNOTIFY,
|
|
+ VCHIQ_POLL_COUNT
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ VCHIQ_BULK_TRANSMIT,
|
|
+ VCHIQ_BULK_RECEIVE
|
|
+} VCHIQ_BULK_DIR_T;
|
|
+
|
|
+typedef struct vchiq_bulk_struct {
|
|
+ short mode;
|
|
+ short dir;
|
|
+ void *userdata;
|
|
+ VCHI_MEM_HANDLE_T handle;
|
|
+ void *data;
|
|
+ int size;
|
|
+ void *remote_data;
|
|
+ int remote_size;
|
|
+ int actual;
|
|
+} VCHIQ_BULK_T;
|
|
+
|
|
+typedef struct vchiq_bulk_queue_struct {
|
|
+ int local_insert; /* Where to insert the next local bulk */
|
|
+ int remote_insert; /* Where to insert the next remote bulk (master) */
|
|
+ int process; /* Bulk to transfer next */
|
|
+ int remote_notify; /* Bulk to notify the remote client of next (mstr) */
|
|
+ int remove; /* Bulk to notify the local client of, and remove,
|
|
+ ** next */
|
|
+ VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
|
|
+} VCHIQ_BULK_QUEUE_T;
|
|
+
|
|
+typedef struct remote_event_struct {
|
|
+ int armed;
|
|
+ int fired;
|
|
+ struct semaphore *event;
|
|
+} REMOTE_EVENT_T;
|
|
+
|
|
+typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
|
|
+
|
|
+typedef struct vchiq_state_struct VCHIQ_STATE_T;
|
|
+
|
|
+typedef struct vchiq_slot_struct {
|
|
+ char data[VCHIQ_SLOT_SIZE];
|
|
+} VCHIQ_SLOT_T;
|
|
+
|
|
+typedef struct vchiq_slot_info_struct {
|
|
+ /* Use two counters rather than one to avoid the need for a mutex. */
|
|
+ short use_count;
|
|
+ short release_count;
|
|
+} VCHIQ_SLOT_INFO_T;
|
|
+
|
|
+typedef struct vchiq_service_struct {
|
|
+ VCHIQ_SERVICE_BASE_T base;
|
|
+ VCHIQ_SERVICE_HANDLE_T handle;
|
|
+ unsigned int ref_count;
|
|
+ int srvstate;
|
|
+ unsigned int localport;
|
|
+ unsigned int remoteport;
|
|
+ int public_fourcc;
|
|
+ int client_id;
|
|
+ char auto_close;
|
|
+ char sync;
|
|
+ char closing;
|
|
+ atomic_t poll_flags;
|
|
+ short version;
|
|
+ short version_min;
|
|
+ short peer_version;
|
|
+
|
|
+ VCHIQ_STATE_T *state;
|
|
+ VCHIQ_INSTANCE_T instance;
|
|
+
|
|
+ int service_use_count;
|
|
+
|
|
+ VCHIQ_BULK_QUEUE_T bulk_tx;
|
|
+ VCHIQ_BULK_QUEUE_T bulk_rx;
|
|
+
|
|
+ struct semaphore remove_event;
|
|
+ struct semaphore bulk_remove_event;
|
|
+ struct mutex bulk_mutex;
|
|
+
|
|
+ struct service_stats_struct {
|
|
+ int quota_stalls;
|
|
+ int slot_stalls;
|
|
+ int bulk_stalls;
|
|
+ int error_count;
|
|
+ int ctrl_tx_count;
|
|
+ int ctrl_rx_count;
|
|
+ int bulk_tx_count;
|
|
+ int bulk_rx_count;
|
|
+ int bulk_aborted_count;
|
|
+ uint64_t ctrl_tx_bytes;
|
|
+ uint64_t ctrl_rx_bytes;
|
|
+ uint64_t bulk_tx_bytes;
|
|
+ uint64_t bulk_rx_bytes;
|
|
+ } stats;
|
|
+} VCHIQ_SERVICE_T;
|
|
+
|
|
+/* The quota information is outside VCHIQ_SERVICE_T so that it can be
|
|
+ statically allocated, since for accounting reasons a service's slot
|
|
+ usage is carried over between users of the same port number.
|
|
+ */
|
|
+typedef struct vchiq_service_quota_struct {
|
|
+ unsigned short slot_quota;
|
|
+ unsigned short slot_use_count;
|
|
+ unsigned short message_quota;
|
|
+ unsigned short message_use_count;
|
|
+ struct semaphore quota_event;
|
|
+ int previous_tx_index;
|
|
+} VCHIQ_SERVICE_QUOTA_T;
|
|
+
|
|
+typedef struct vchiq_shared_state_struct {
|
|
+
|
|
+ /* A non-zero value here indicates that the content is valid. */
|
|
+ int initialised;
|
|
+
|
|
+ /* The first and last (inclusive) slots allocated to the owner. */
|
|
+ int slot_first;
|
|
+ int slot_last;
|
|
+
|
|
+ /* The slot allocated to synchronous messages from the owner. */
|
|
+ int slot_sync;
|
|
+
|
|
+	/* Signalling this event indicates that the owner's slot handler thread
|
|
+ ** should run. */
|
|
+ REMOTE_EVENT_T trigger;
|
|
+
|
|
+ /* Indicates the byte position within the stream where the next message
|
|
+ ** will be written. The least significant bits are an index into the
|
|
+ ** slot. The next bits are the index of the slot in slot_queue. */
|
|
+ int tx_pos;
|
|
+
|
|
+ /* This event should be signalled when a slot is recycled. */
|
|
+ REMOTE_EVENT_T recycle;
|
|
+
|
|
+ /* The slot_queue index where the next recycled slot will be written. */
|
|
+ int slot_queue_recycle;
|
|
+
|
|
+ /* This event should be signalled when a synchronous message is sent. */
|
|
+ REMOTE_EVENT_T sync_trigger;
|
|
+
|
|
+ /* This event should be signalled when a synchronous message has been
|
|
+ ** released. */
|
|
+ REMOTE_EVENT_T sync_release;
|
|
+
|
|
+ /* A circular buffer of slot indexes. */
|
|
+ int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
|
|
+
|
|
+ /* Debugging state */
|
|
+ int debug[DEBUG_MAX];
|
|
+} VCHIQ_SHARED_STATE_T;
|
|
+
|
|
+typedef struct vchiq_slot_zero_struct {
|
|
+ int magic;
|
|
+ short version;
|
|
+ short version_min;
|
|
+ int slot_zero_size;
|
|
+ int slot_size;
|
|
+ int max_slots;
|
|
+ int max_slots_per_side;
|
|
+ int platform_data[2];
|
|
+ VCHIQ_SHARED_STATE_T master;
|
|
+ VCHIQ_SHARED_STATE_T slave;
|
|
+ VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
|
|
+} VCHIQ_SLOT_ZERO_T;
|
|
+
|
|
+struct vchiq_state_struct {
|
|
+ int id;
|
|
+ int initialised;
|
|
+ VCHIQ_CONNSTATE_T conn_state;
|
|
+ int is_master;
|
|
+
|
|
+ VCHIQ_SHARED_STATE_T *local;
|
|
+ VCHIQ_SHARED_STATE_T *remote;
|
|
+ VCHIQ_SLOT_T *slot_data;
|
|
+
|
|
+ unsigned short default_slot_quota;
|
|
+ unsigned short default_message_quota;
|
|
+
|
|
+ /* Event indicating connect message received */
|
|
+ struct semaphore connect;
|
|
+
|
|
+ /* Mutex protecting services */
|
|
+ struct mutex mutex;
|
|
+ VCHIQ_INSTANCE_T *instance;
|
|
+
|
|
+ /* Processes incoming messages */
|
|
+ struct task_struct *slot_handler_thread;
|
|
+
|
|
+ /* Processes recycled slots */
|
|
+ struct task_struct *recycle_thread;
|
|
+
|
|
+ /* Processes synchronous messages */
|
|
+ struct task_struct *sync_thread;
|
|
+
|
|
+ /* Local implementation of the trigger remote event */
|
|
+ struct semaphore trigger_event;
|
|
+
|
|
+ /* Local implementation of the recycle remote event */
|
|
+ struct semaphore recycle_event;
|
|
+
|
|
+ /* Local implementation of the sync trigger remote event */
|
|
+ struct semaphore sync_trigger_event;
|
|
+
|
|
+ /* Local implementation of the sync release remote event */
|
|
+ struct semaphore sync_release_event;
|
|
+
|
|
+ char *tx_data;
|
|
+ char *rx_data;
|
|
+ VCHIQ_SLOT_INFO_T *rx_info;
|
|
+
|
|
+ struct mutex slot_mutex;
|
|
+
|
|
+ struct mutex recycle_mutex;
|
|
+
|
|
+ struct mutex sync_mutex;
|
|
+
|
|
+ struct mutex bulk_transfer_mutex;
|
|
+
|
|
+ /* Indicates the byte position within the stream from where the next
|
|
+ ** message will be read. The least significant bits are an index into
|
|
+	** the slot. The next bits are the index of the slot in
|
|
+ ** remote->slot_queue. */
|
|
+ int rx_pos;
|
|
+
|
|
+ /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
|
|
+ from remote->tx_pos. */
|
|
+ int local_tx_pos;
|
|
+
|
|
+ /* The slot_queue index of the slot to become available next. */
|
|
+ int slot_queue_available;
|
|
+
|
|
+ /* A flag to indicate if any poll has been requested */
|
|
+ int poll_needed;
|
|
+
|
|
+	/* The index of the previous slot used for data messages. */
|
|
+ int previous_data_index;
|
|
+
|
|
+ /* The number of slots occupied by data messages. */
|
|
+ unsigned short data_use_count;
|
|
+
|
|
+ /* The maximum number of slots to be occupied by data messages. */
|
|
+ unsigned short data_quota;
|
|
+
|
|
+ /* An array of bit sets indicating which services must be polled. */
|
|
+ atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
|
|
+
|
|
+ /* The number of the first unused service */
|
|
+ int unused_service;
|
|
+
|
|
+ /* Signalled when a free slot becomes available. */
|
|
+ struct semaphore slot_available_event;
|
|
+
|
|
+ struct semaphore slot_remove_event;
|
|
+
|
|
+ /* Signalled when a free data slot becomes available. */
|
|
+ struct semaphore data_quota_event;
|
|
+
|
|
+ /* Incremented when there are bulk transfers which cannot be processed
|
|
+ * whilst paused and must be processed on resume */
|
|
+ int deferred_bulks;
|
|
+
|
|
+ struct state_stats_struct {
|
|
+ int slot_stalls;
|
|
+ int data_stalls;
|
|
+ int ctrl_tx_count;
|
|
+ int ctrl_rx_count;
|
|
+ int error_count;
|
|
+ } stats;
|
|
+
|
|
+ VCHIQ_SERVICE_T * services[VCHIQ_MAX_SERVICES];
|
|
+ VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
|
|
+ VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
|
|
+
|
|
+ VCHIQ_PLATFORM_STATE_T platform_state;
|
|
+};
|
|
+
|
|
+struct bulk_waiter {
|
|
+ VCHIQ_BULK_T *bulk;
|
|
+ struct semaphore event;
|
|
+ int actual;
|
|
+};
|
|
+
|
|
+extern spinlock_t bulk_waiter_spinlock;
|
|
+
|
|
+extern int vchiq_core_log_level;
|
|
+extern int vchiq_core_msg_log_level;
|
|
+extern int vchiq_sync_log_level;
|
|
+
|
|
+extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
|
|
+
|
|
+extern const char *
|
|
+get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
|
|
+
|
|
+extern VCHIQ_SLOT_ZERO_T *
|
|
+vchiq_init_slots(void *mem_base, int mem_size);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
|
|
+ int is_master);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
|
|
+
|
|
+extern VCHIQ_SERVICE_T *
|
|
+vchiq_add_service_internal(VCHIQ_STATE_T *state,
|
|
+ const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
|
|
+ VCHIQ_INSTANCE_T instance);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
|
|
+
|
|
+extern void
|
|
+vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
|
|
+
|
|
+extern void
|
|
+vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_pause_internal(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_resume_internal(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern void
|
|
+remote_event_pollall(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
|
|
+ VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
|
|
+ VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
|
|
+
|
|
+extern void
|
|
+vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
|
|
+
|
|
+extern void
|
|
+vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
|
|
+
|
|
+extern void
|
|
+vchiq_loud_error_header(void);
|
|
+
|
|
+extern void
|
|
+vchiq_loud_error_footer(void);
|
|
+
|
|
+extern void
|
|
+request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
|
|
+
|
|
+static inline VCHIQ_SERVICE_T *
|
|
+handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
|
|
+ (VCHIQ_MAX_STATES - 1)];
|
|
+ if (!state)
|
|
+ return NULL;
|
|
+
|
|
+ return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
|
|
+}
|
|
+
|
|
+extern VCHIQ_SERVICE_T *
|
|
+find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
|
|
+
|
|
+extern VCHIQ_SERVICE_T *
|
|
+find_service_by_port(VCHIQ_STATE_T *state, int localport);
|
|
+
|
|
+extern VCHIQ_SERVICE_T *
|
|
+find_service_for_instance(VCHIQ_INSTANCE_T instance,
|
|
+ VCHIQ_SERVICE_HANDLE_T handle);
|
|
+
|
|
+extern VCHIQ_SERVICE_T *
|
|
+next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
|
|
+ int *pidx);
|
|
+
|
|
+extern void
|
|
+lock_service(VCHIQ_SERVICE_T *service);
|
|
+
|
|
+extern void
|
|
+unlock_service(VCHIQ_SERVICE_T *service);
|
|
+
|
|
+/* The following functions are called from vchiq_core, and external
|
|
+** implementations must be provided. */
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
|
|
+ VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
|
|
+
|
|
+extern void
|
|
+vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
|
|
+
|
|
+extern void
|
|
+vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_copy_from_user(void *dst, const void *src, int size);
|
|
+
|
|
+extern void
|
|
+remote_event_signal(REMOTE_EVENT_T *event);
|
|
+
|
|
+void
|
|
+vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern void
|
|
+vchiq_platform_paused(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_platform_resume(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern void
|
|
+vchiq_platform_resumed(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern void
|
|
+vchiq_dump(void *dump_context, const char *str, int len);
|
|
+
|
|
+extern void
|
|
+vchiq_dump_platform_state(void *dump_context);
|
|
+
|
|
+extern void
|
|
+vchiq_dump_platform_instances(void *dump_context);
|
|
+
|
|
+extern void
|
|
+vchiq_dump_platform_service_state(void *dump_context,
|
|
+ VCHIQ_SERVICE_T *service);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
|
|
+
|
|
+extern void
|
|
+vchiq_on_remote_use(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern void
|
|
+vchiq_on_remote_release(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_platform_init_state(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_check_service(VCHIQ_SERVICE_T *service);
|
|
+
|
|
+extern void
|
|
+vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_send_remote_use(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_send_remote_release(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern VCHIQ_STATUS_T
|
|
+vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern void
|
|
+vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
|
|
+ VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
|
|
+
|
|
+extern void
|
|
+vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
|
|
+
|
|
+extern void
|
|
+vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
|
|
+
|
|
+
|
|
+extern void
|
|
+vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
|
|
+ size_t numBytes);
|
|
+
|
|
+#endif
|
|
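As a hedged aside (not part of the patch), the message-id macros in this header pack a message type and two 12-bit port numbers into one 32-bit value. The sketch below uses arbitrary example port values and assumes only the definitions from vchiq_core.h above.

/* Illustrative only -- assumes VCHIQ_MAKE_MSG and the VCHIQ_MSG_*
 * accessors defined in vchiq_core.h above; port values are examples. */
static void msgid_example(void)
{
	int msgid   = VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, 3, 0); /* 0x02003000 */
	int type    = VCHIQ_MSG_TYPE(msgid);    /* VCHIQ_MSG_OPEN (2) */
	int srcport = VCHIQ_MSG_SRCPORT(msgid); /* 3 */
	int dstport = VCHIQ_MSG_DSTPORT(msgid); /* 0 */

	(void)type; (void)srcport; (void)dstport; /* silence unused warnings */
}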
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
|
|
@@ -0,0 +1,89 @@
|
|
+#!/usr/bin/perl -w
|
|
+
|
|
+use strict;
|
|
+
|
|
+#
|
|
+# Generate a version from available information
|
|
+#
|
|
+
|
|
+my $prefix = shift @ARGV;
|
|
+my $root = shift @ARGV;
|
|
+
|
|
+
|
|
+if ( not defined $root ) {
|
|
+ die "usage: $0 prefix root-dir\n";
|
|
+}
|
|
+
|
|
+if ( ! -d $root ) {
|
|
+ die "root directory $root not found\n";
|
|
+}
|
|
+
|
|
+my $version = "unknown";
|
|
+my $tainted = "";
|
|
+
|
|
+if ( -d "$root/.git" ) {
|
|
+ # attempt to work out git version. only do so
|
|
+ # on a linux build host, as cygwin builds are
|
|
+ # already slow enough
|
|
+
|
|
+ if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
|
|
+ if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
|
|
+ $version = "no git version";
|
|
+ }
|
|
+ else {
|
|
+ $version = <F>;
|
|
+ $version =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
|
|
+ $version =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
|
|
+ }
|
|
+
|
|
+ if (open(G, "git --git-dir $root/.git status --porcelain|")) {
|
|
+ $tainted = <G>;
|
|
+ $tainted =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
|
|
+ $tainted =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
|
|
+ if (length $tainted) {
|
|
+ $version = join ' ', $version, "(tainted)";
|
|
+ }
|
|
+ else {
|
|
+ $version = join ' ', $version, "(clean)";
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+my $hostname = `hostname`;
|
|
+$hostname =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
|
|
+$hostname =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
|
|
+
|
|
+
|
|
+print STDERR "Version $version\n";
|
|
+print <<EOF;
|
|
+#include "${prefix}_build_info.h"
|
|
+#include <linux/broadcom/vc_debug_sym.h>
|
|
+
|
|
+VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
|
|
+VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
|
|
+VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time, __TIME__ );
|
|
+VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date, __DATE__ );
|
|
+
|
|
+const char *vchiq_get_build_hostname( void )
|
|
+{
|
|
+ return vchiq_build_hostname;
|
|
+}
|
|
+
|
|
+const char *vchiq_get_build_version( void )
|
|
+{
|
|
+ return vchiq_build_version;
|
|
+}
|
|
+
|
|
+const char *vchiq_get_build_date( void )
|
|
+{
|
|
+ return vchiq_build_date;
|
|
+}
|
|
+
|
|
+const char *vchiq_get_build_time( void )
|
|
+{
|
|
+ return vchiq_build_time;
|
|
+}
|
|
+EOF
|
|
+
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
|
|
@@ -0,0 +1,41 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHIQ_VCHIQ_H
|
|
+#define VCHIQ_VCHIQ_H
|
|
+
|
|
+#include "vchiq_if.h"
|
|
+#include "vchiq_util.h"
|
|
+
|
|
+#endif
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
|
|
@@ -0,0 +1,188 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHIQ_IF_H
|
|
+#define VCHIQ_IF_H
|
|
+
|
|
+#include "interface/vchi/vchi_mh.h"
|
|
+
|
|
+#define VCHIQ_SERVICE_HANDLE_INVALID 0
|
|
+
|
|
+#define VCHIQ_SLOT_SIZE 4096
|
|
+#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
|
|
+#define VCHIQ_CHANNEL_SIZE VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
|
|
+
|
|
+#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
|
|
+ (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
|
|
+#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
|
|
+#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
|
|
+
|
|
+typedef enum {
|
|
+ VCHIQ_SERVICE_OPENED, /* service, -, - */
|
|
+ VCHIQ_SERVICE_CLOSED, /* service, -, - */
|
|
+ VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
|
|
+ VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
|
|
+ VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
|
|
+ VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
|
|
+ VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
|
|
+} VCHIQ_REASON_T;
|
|
+
|
|
+typedef enum {
|
|
+ VCHIQ_ERROR = -1,
|
|
+ VCHIQ_SUCCESS = 0,
|
|
+ VCHIQ_RETRY = 1
|
|
+} VCHIQ_STATUS_T;
|
|
+
|
|
+typedef enum {
|
|
+ VCHIQ_BULK_MODE_CALLBACK,
|
|
+ VCHIQ_BULK_MODE_BLOCKING,
|
|
+ VCHIQ_BULK_MODE_NOCALLBACK,
|
|
+ VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
|
|
+} VCHIQ_BULK_MODE_T;
|
|
+
|
|
+typedef enum {
|
|
+ VCHIQ_SERVICE_OPTION_AUTOCLOSE,
|
|
+ VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
|
|
+ VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
|
|
+ VCHIQ_SERVICE_OPTION_SYNCHRONOUS
|
|
+} VCHIQ_SERVICE_OPTION_T;
|
|
+
|
|
+typedef struct vchiq_header_struct {
|
|
+ /* The message identifier - opaque to applications. */
|
|
+ int msgid;
|
|
+
|
|
+ /* Size of message data. */
|
|
+ unsigned int size;
|
|
+
|
|
+ char data[0]; /* message */
|
|
+} VCHIQ_HEADER_T;
|
|
+
|
|
+typedef struct {
|
|
+ const void *data;
|
|
+ unsigned int size;
|
|
+} VCHIQ_ELEMENT_T;
|
|
+
|
|
+typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
|
|
+
|
|
+typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
|
|
+ VCHIQ_SERVICE_HANDLE_T, void *);
|
|
+
|
|
+typedef struct vchiq_service_base_struct {
|
|
+ int fourcc;
|
|
+ VCHIQ_CALLBACK_T callback;
|
|
+ void *userdata;
|
|
+} VCHIQ_SERVICE_BASE_T;
|
|
+
|
|
+typedef struct vchiq_service_params_struct {
|
|
+ int fourcc;
|
|
+ VCHIQ_CALLBACK_T callback;
|
|
+ void *userdata;
|
|
+ short version; /* Increment for non-trivial changes */
|
|
+ short version_min; /* Update for incompatible changes */
|
|
+} VCHIQ_SERVICE_PARAMS_T;
|
|
+
|
|
+typedef struct vchiq_config_struct {
|
|
+ unsigned int max_msg_size;
|
|
+ unsigned int bulk_threshold; /* The message size above which it
|
|
+ is better to use a bulk transfer
|
|
+ (<= max_msg_size) */
|
|
+ unsigned int max_outstanding_bulks;
|
|
+ unsigned int max_services;
|
|
+ short version; /* The version of VCHIQ */
|
|
+ short version_min; /* The minimum compatible version of VCHIQ */
|
|
+} VCHIQ_CONFIG_T;
|
|
+
|
|
+typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
|
|
+typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
|
|
+
|
|
+extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
|
|
+extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
|
|
+extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
|
|
+extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
|
|
+ const VCHIQ_SERVICE_PARAMS_T *params,
|
|
+ VCHIQ_SERVICE_HANDLE_T *pservice);
|
|
+extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
|
|
+ const VCHIQ_SERVICE_PARAMS_T *params,
|
|
+ VCHIQ_SERVICE_HANDLE_T *pservice);
|
|
+extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
|
|
+extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
|
|
+extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
|
|
+extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
|
|
+ VCHIQ_SERVICE_HANDLE_T service);
|
|
+extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
|
|
+
|
|
+extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
|
|
+ const VCHIQ_ELEMENT_T *elements, unsigned int count);
|
|
+extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
|
|
+ VCHIQ_HEADER_T *header);
|
|
+extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
|
|
+ const void *data, unsigned int size, void *userdata);
|
|
+extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
|
|
+ void *data, unsigned int size, void *userdata);
|
|
+extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
|
|
+ VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
|
|
+ const void *offset, unsigned int size, void *userdata);
|
|
+extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
|
|
+ VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
|
|
+ void *offset, unsigned int size, void *userdata);
|
|
+extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
|
|
+ const void *data, unsigned int size, void *userdata,
|
|
+ VCHIQ_BULK_MODE_T mode);
|
|
+extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
|
|
+ void *data, unsigned int size, void *userdata,
|
|
+ VCHIQ_BULK_MODE_T mode);
|
|
+extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
|
|
+ VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
|
|
+ void *userdata, VCHIQ_BULK_MODE_T mode);
|
|
+extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
|
|
+ VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
|
|
+ void *userdata, VCHIQ_BULK_MODE_T mode);
|
|
+extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
|
|
+extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
|
|
+extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
|
|
+extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
|
|
+ int config_size, VCHIQ_CONFIG_T *pconfig);
|
|
+extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
|
|
+ VCHIQ_SERVICE_OPTION_T option, int value);
|
|
+
|
|
+extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
|
|
+ VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
|
|
+extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
|
|
+
|
|
+extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
|
|
+ void *ptr, size_t num_bytes);
|
|
+
|
|
+extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
|
|
+ short *peer_version);
|
|
+
|
|
+#endif /* VCHIQ_IF_H */
|
|
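As a hedged aside (not part of the patch), the sketch below shows the shape of the kernel-side API declared in vchiq_if.h: initialise an instance, connect, open a service with a callback, queue a message, then shut down. The "DEMO" fourcc, version numbers and payload are illustrative assumptions, and the vchiq headers are assumed to be on the include path.

/* Illustrative sketch only -- not part of the patch. */
#include "vchiq_if.h"

static VCHIQ_STATUS_T demo_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *userdata)
{
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(handle, header); /* finished with this message */
	return VCHIQ_SUCCESS;
}

static VCHIQ_STATUS_T demo_open_and_send(void)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T service;
	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('D', 'E', 'M', 'O'), /* example */
		.callback    = demo_callback,
		.userdata    = NULL,
		.version     = 1, /* example */
		.version_min = 1, /* example */
	};
	const char msg[] = "hello";
	VCHIQ_ELEMENT_T element = { msg, sizeof(msg) };
	VCHIQ_STATUS_T status;

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS)
		return status;
	status = vchiq_connect(instance);
	if (status == VCHIQ_SUCCESS)
		status = vchiq_open_service(instance, &params, &service);
	if (status == VCHIQ_SUCCESS) {
		status = vchiq_queue_message(service, &element, 1);
		vchiq_close_service(service);
	}
	vchiq_shutdown(instance);
	return status;
}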
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
|
|
@@ -0,0 +1,129 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHIQ_IOCTLS_H
|
|
+#define VCHIQ_IOCTLS_H
|
|
+
|
|
+#include <linux/ioctl.h>
|
|
+#include "vchiq_if.h"
|
|
+
|
|
+#define VCHIQ_IOC_MAGIC 0xc4
|
|
+#define VCHIQ_INVALID_HANDLE (~0)
|
|
+
|
|
+typedef struct {
|
|
+ VCHIQ_SERVICE_PARAMS_T params;
|
|
+ int is_open;
|
|
+ int is_vchi;
|
|
+ unsigned int handle; /* OUT */
|
|
+} VCHIQ_CREATE_SERVICE_T;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int handle;
|
|
+ unsigned int count;
|
|
+ const VCHIQ_ELEMENT_T *elements;
|
|
+} VCHIQ_QUEUE_MESSAGE_T;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int handle;
|
|
+ void *data;
|
|
+ unsigned int size;
|
|
+ void *userdata;
|
|
+ VCHIQ_BULK_MODE_T mode;
|
|
+} VCHIQ_QUEUE_BULK_TRANSFER_T;
|
|
+
|
|
+typedef struct {
|
|
+ VCHIQ_REASON_T reason;
|
|
+ VCHIQ_HEADER_T *header;
|
|
+ void *service_userdata;
|
|
+ void *bulk_userdata;
|
|
+} VCHIQ_COMPLETION_DATA_T;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int count;
|
|
+ VCHIQ_COMPLETION_DATA_T *buf;
|
|
+ unsigned int msgbufsize;
|
|
+ unsigned int msgbufcount; /* IN/OUT */
|
|
+ void **msgbufs;
|
|
+} VCHIQ_AWAIT_COMPLETION_T;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int handle;
|
|
+ int blocking;
|
|
+ unsigned int bufsize;
|
|
+ void *buf;
|
|
+} VCHIQ_DEQUEUE_MESSAGE_T;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int config_size;
|
|
+ VCHIQ_CONFIG_T *pconfig;
|
|
+} VCHIQ_GET_CONFIG_T;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int handle;
|
|
+ VCHIQ_SERVICE_OPTION_T option;
|
|
+ int value;
|
|
+} VCHIQ_SET_SERVICE_OPTION_T;
|
|
+
|
|
+typedef struct {
|
|
+ void *virt_addr;
|
|
+ size_t num_bytes;
|
|
+} VCHIQ_DUMP_MEM_T;
|
|
+
|
|
+#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
|
|
+#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
|
|
+#define VCHIQ_IOC_CREATE_SERVICE \
|
|
+ _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
|
|
+#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
|
|
+#define VCHIQ_IOC_QUEUE_MESSAGE \
|
|
+ _IOW(VCHIQ_IOC_MAGIC, 4, VCHIQ_QUEUE_MESSAGE_T)
|
|
+#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
|
|
+ _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
|
|
+#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
|
|
+ _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
|
|
+#define VCHIQ_IOC_AWAIT_COMPLETION \
|
|
+ _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
|
|
+#define VCHIQ_IOC_DEQUEUE_MESSAGE \
|
|
+ _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
|
|
+#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
|
|
+#define VCHIQ_IOC_GET_CONFIG \
|
|
+ _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
|
|
+#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
|
|
+#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
|
|
+#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
|
|
+#define VCHIQ_IOC_SET_SERVICE_OPTION \
|
|
+ _IOW(VCHIQ_IOC_MAGIC, 14, VCHIQ_SET_SERVICE_OPTION_T)
|
|
+#define VCHIQ_IOC_DUMP_PHYS_MEM \
|
|
+ _IOW(VCHIQ_IOC_MAGIC, 15, VCHIQ_DUMP_MEM_T)
|
|
+#define VCHIQ_IOC_MAX 15
|
|
+
|
|
+#endif
|
|
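As a hedged aside (not part of the patch), a user-space caller might drive these ioctls roughly as sketched below. The /dev/vchiq device node name is an assumption made for illustration (this header only defines the ioctl numbers and argument structures), and the vchiq headers are assumed to be reachable from user space.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "vchiq_ioctl.h"

int main(void)
{
	VCHIQ_CONFIG_T config;
	VCHIQ_GET_CONFIG_T arg = { sizeof(config), &config };
	int fd = open("/dev/vchiq", O_RDWR); /* device node name is assumed */

	if (fd < 0)
		return 1;

	if (ioctl(fd, VCHIQ_IOC_GET_CONFIG, &arg) == 0)
		printf("max_msg_size=%u version=%d\n",
		       config.max_msg_size, config.version);

	if (ioctl(fd, VCHIQ_IOC_CONNECT, 0) != 0)
		printf("connect failed\n");

	close(fd);
	return 0;
}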
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
|
|
@@ -0,0 +1,454 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+/* ---- Include Files ---------------------------------------------------- */
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mutex.h>
|
|
+
|
|
+#include "vchiq_core.h"
|
|
+#include "vchiq_arm.h"
|
|
+
|
|
+/* ---- Public Variables ------------------------------------------------- */
|
|
+
|
|
+/* ---- Private Constants and Types -------------------------------------- */
|
|
+
|
|
+struct bulk_waiter_node {
|
|
+ struct bulk_waiter bulk_waiter;
|
|
+ int pid;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct vchiq_instance_struct {
|
|
+ VCHIQ_STATE_T *state;
|
|
+
|
|
+ int connected;
|
|
+
|
|
+ struct list_head bulk_waiter_list;
|
|
+ struct mutex bulk_waiter_list_mutex;
|
|
+};
|
|
+
|
|
+static VCHIQ_STATUS_T
|
|
+vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
|
|
+ unsigned int size, VCHIQ_BULK_DIR_T dir);
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_initialise
|
|
+*
|
|
+***************************************************************************/
|
|
+#define VCHIQ_INIT_RETRIES 10
|
|
+VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
|
|
+{
|
|
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
|
|
+ VCHIQ_STATE_T *state;
|
|
+ VCHIQ_INSTANCE_T instance = NULL;
|
|
+ int i;
|
|
+
|
|
+ vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
|
|
+
|
|
+ /* VideoCore may not be ready due to boot up timing.
|
|
+	   It may never be ready if the kernel and firmware are mismatched, so don't block forever. */
|
|
+ for (i=0; i<VCHIQ_INIT_RETRIES; i++) {
|
|
+ state = vchiq_get_state();
|
|
+ if (state)
|
|
+ break;
|
|
+ udelay(500);
|
|
+ }
|
|
+ if (i==VCHIQ_INIT_RETRIES) {
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "%s: videocore not initialized\n", __func__);
|
|
+ goto failed;
|
|
+ } else if (i>0) {
|
|
+ vchiq_log_warning(vchiq_core_log_level,
|
|
+ "%s: videocore initialized after %d retries\n", __func__, i);
|
|
+ }
|
|
+
|
|
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
|
|
+ if (!instance) {
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "%s: error allocating vchiq instance\n", __func__);
|
|
+ goto failed;
|
|
+ }
|
|
+
|
|
+ instance->connected = 0;
|
|
+ instance->state = state;
|
|
+ mutex_init(&instance->bulk_waiter_list_mutex);
|
|
+ INIT_LIST_HEAD(&instance->bulk_waiter_list);
|
|
+
|
|
+ *instanceOut = instance;
|
|
+
|
|
+ status = VCHIQ_SUCCESS;
|
|
+
|
|
+failed:
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%s(%p): returning %d", __func__, instance, status);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL(vchiq_initialise);
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_shutdown
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
|
|
+{
|
|
+ VCHIQ_STATUS_T status;
|
|
+ VCHIQ_STATE_T *state = instance->state;
|
|
+
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%s(%p) called", __func__, instance);
|
|
+
|
|
+ if (mutex_lock_interruptible(&state->mutex) != 0)
|
|
+ return VCHIQ_RETRY;
|
|
+
|
|
+ /* Remove all services */
|
|
+ status = vchiq_shutdown_internal(state, instance);
|
|
+
|
|
+ mutex_unlock(&state->mutex);
|
|
+
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%s(%p): returning %d", __func__, instance, status);
|
|
+
|
|
+ if (status == VCHIQ_SUCCESS) {
|
|
+ struct list_head *pos, *next;
|
|
+ list_for_each_safe(pos, next,
|
|
+ &instance->bulk_waiter_list) {
|
|
+ struct bulk_waiter_node *waiter;
|
|
+ waiter = list_entry(pos,
|
|
+ struct bulk_waiter_node,
|
|
+ list);
|
|
+ list_del(pos);
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "bulk_waiter - cleaned up %x "
|
|
+ "for pid %d",
|
|
+ (unsigned int)waiter, waiter->pid);
|
|
+ kfree(waiter);
|
|
+ }
|
|
+ kfree(instance);
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL(vchiq_shutdown);
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_is_connected
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
|
|
+{
|
|
+ return instance->connected;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_connect
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
|
|
+{
|
|
+ VCHIQ_STATUS_T status;
|
|
+ VCHIQ_STATE_T *state = instance->state;
|
|
+
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%s(%p) called", __func__, instance);
|
|
+
|
|
+ if (mutex_lock_interruptible(&state->mutex) != 0) {
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%s: call to mutex_lock failed", __func__);
|
|
+ status = VCHIQ_RETRY;
|
|
+ goto failed;
|
|
+ }
|
|
+ status = vchiq_connect_internal(state, instance);
|
|
+
|
|
+ if (status == VCHIQ_SUCCESS)
|
|
+ instance->connected = 1;
|
|
+
|
|
+ mutex_unlock(&state->mutex);
|
|
+
|
|
+failed:
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%s(%p): returning %d", __func__, instance, status);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL(vchiq_connect);
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_add_service
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+VCHIQ_STATUS_T vchiq_add_service(
|
|
+ VCHIQ_INSTANCE_T instance,
|
|
+ const VCHIQ_SERVICE_PARAMS_T *params,
|
|
+ VCHIQ_SERVICE_HANDLE_T *phandle)
|
|
+{
|
|
+ VCHIQ_STATUS_T status;
|
|
+ VCHIQ_STATE_T *state = instance->state;
|
|
+ VCHIQ_SERVICE_T *service = NULL;
|
|
+ int srvstate;
|
|
+
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%s(%p) called", __func__, instance);
|
|
+
|
|
+ *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
|
|
+
|
|
+ srvstate = vchiq_is_connected(instance)
|
|
+ ? VCHIQ_SRVSTATE_LISTENING
|
|
+ : VCHIQ_SRVSTATE_HIDDEN;
|
|
+
|
|
+ service = vchiq_add_service_internal(
|
|
+ state,
|
|
+ params,
|
|
+ srvstate,
|
|
+ instance);
|
|
+
|
|
+ if (service) {
|
|
+ *phandle = service->handle;
|
|
+ status = VCHIQ_SUCCESS;
|
|
+ } else
|
|
+ status = VCHIQ_ERROR;
|
|
+
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%s(%p): returning %d", __func__, instance, status);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL(vchiq_add_service);
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* vchiq_open_service
|
|
+*
|
|
+***************************************************************************/
|
|
+
|
|
+VCHIQ_STATUS_T vchiq_open_service(
|
|
+ VCHIQ_INSTANCE_T instance,
|
|
+ const VCHIQ_SERVICE_PARAMS_T *params,
|
|
+ VCHIQ_SERVICE_HANDLE_T *phandle)
|
|
+{
|
|
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
|
|
+ VCHIQ_STATE_T *state = instance->state;
|
|
+ VCHIQ_SERVICE_T *service = NULL;
|
|
+
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%s(%p) called", __func__, instance);
|
|
+
|
|
+ *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
|
|
+
|
|
+ if (!vchiq_is_connected(instance))
|
|
+ goto failed;
|
|
+
|
|
+ service = vchiq_add_service_internal(state,
|
|
+ params,
|
|
+ VCHIQ_SRVSTATE_OPENING,
|
|
+ instance);
|
|
+
|
|
+ if (service) {
|
|
+ status = vchiq_open_service_internal(service, current->pid);
|
|
+ if (status == VCHIQ_SUCCESS)
|
|
+ *phandle = service->handle;
|
|
+ else
|
|
+ vchiq_remove_service(service->handle);
|
|
+ }
|
|
+
|
|
+failed:
|
|
+ vchiq_log_trace(vchiq_core_log_level,
|
|
+ "%s(%p): returning %d", __func__, instance, status);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL(vchiq_open_service);
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
|
|
+ const void *data, unsigned int size, void *userdata)
|
|
+{
|
|
+ return vchiq_bulk_transfer(handle,
|
|
+ VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
|
|
+ VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
|
|
+}
|
|
+EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
|
|
+ unsigned int size, void *userdata)
|
|
+{
|
|
+ return vchiq_bulk_transfer(handle,
|
|
+ VCHI_MEM_HANDLE_INVALID, data, size, userdata,
|
|
+ VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
|
|
+}
|
|
+EXPORT_SYMBOL(vchiq_queue_bulk_receive);
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
|
|
+ unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
|
|
+{
|
|
+ VCHIQ_STATUS_T status;
|
|
+
|
|
+ switch (mode) {
|
|
+ case VCHIQ_BULK_MODE_NOCALLBACK:
|
|
+ case VCHIQ_BULK_MODE_CALLBACK:
|
|
+ status = vchiq_bulk_transfer(handle,
|
|
+ VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
|
|
+ mode, VCHIQ_BULK_TRANSMIT);
|
|
+ break;
|
|
+ case VCHIQ_BULK_MODE_BLOCKING:
|
|
+ status = vchiq_blocking_bulk_transfer(handle,
|
|
+ (void *)data, size, VCHIQ_BULK_TRANSMIT);
|
|
+ break;
|
|
+ default:
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL(vchiq_bulk_transmit);
|
|
+
|
|
+VCHIQ_STATUS_T
|
|
+vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
|
|
+ unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
|
|
+{
|
|
+ VCHIQ_STATUS_T status;
|
|
+
|
|
+ switch (mode) {
|
|
+ case VCHIQ_BULK_MODE_NOCALLBACK:
|
|
+ case VCHIQ_BULK_MODE_CALLBACK:
|
|
+ status = vchiq_bulk_transfer(handle,
|
|
+ VCHI_MEM_HANDLE_INVALID, data, size, userdata,
|
|
+ mode, VCHIQ_BULK_RECEIVE);
|
|
+ break;
|
|
+ case VCHIQ_BULK_MODE_BLOCKING:
|
|
+ status = vchiq_blocking_bulk_transfer(handle,
|
|
+ (void *)data, size, VCHIQ_BULK_RECEIVE);
|
|
+ break;
|
|
+ default:
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL(vchiq_bulk_receive);
|
|
+
|
|
+static VCHIQ_STATUS_T
|
|
+vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
|
|
+ unsigned int size, VCHIQ_BULK_DIR_T dir)
|
|
+{
|
|
+ VCHIQ_INSTANCE_T instance;
|
|
+ VCHIQ_SERVICE_T *service;
|
|
+ VCHIQ_STATUS_T status;
|
|
+ struct bulk_waiter_node *waiter = NULL;
|
|
+ struct list_head *pos;
|
|
+
|
|
+ service = find_service_by_handle(handle);
|
|
+ if (!service)
|
|
+ return VCHIQ_ERROR;
|
|
+
|
|
+ instance = service->instance;
|
|
+
|
|
+ unlock_service(service);
|
|
+
|
|
+ mutex_lock(&instance->bulk_waiter_list_mutex);
|
|
+ list_for_each(pos, &instance->bulk_waiter_list) {
|
|
+ if (list_entry(pos, struct bulk_waiter_node,
|
|
+ list)->pid == current->pid) {
|
|
+ waiter = list_entry(pos,
|
|
+ struct bulk_waiter_node,
|
|
+ list);
|
|
+ list_del(pos);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
|
|
+
|
|
+ if (waiter) {
|
|
+ VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
|
|
+ if (bulk) {
|
|
+ /* This thread has an outstanding bulk transfer. */
|
|
+ if ((bulk->data != data) ||
|
|
+ (bulk->size != size)) {
|
|
+ /* This is not a retry of the previous one.
|
|
+ ** Cancel the signal when the transfer
|
|
+ ** completes. */
|
|
+ spin_lock(&bulk_waiter_spinlock);
|
|
+ bulk->userdata = NULL;
|
|
+ spin_unlock(&bulk_waiter_spinlock);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!waiter) {
|
|
+ waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
|
|
+ if (!waiter) {
|
|
+ vchiq_log_error(vchiq_core_log_level,
|
|
+ "%s - out of memory", __func__);
|
|
+ return VCHIQ_ERROR;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
|
|
+ data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
|
|
+ dir);
|
|
+ if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
|
|
+ !waiter->bulk_waiter.bulk) {
|
|
+ VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
|
|
+ if (bulk) {
|
|
+ /* Cancel the signal when the transfer
|
|
+ ** completes. */
|
|
+ spin_lock(&bulk_waiter_spinlock);
|
|
+ bulk->userdata = NULL;
|
|
+ spin_unlock(&bulk_waiter_spinlock);
|
|
+ }
|
|
+ kfree(waiter);
|
|
+ } else {
|
|
+ waiter->pid = current->pid;
|
|
+ mutex_lock(&instance->bulk_waiter_list_mutex);
|
|
+ list_add(&waiter->list, &instance->bulk_waiter_list);
|
|
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
|
|
+ vchiq_log_info(vchiq_arm_log_level,
|
|
+ "saved bulk_waiter %x for pid %d",
|
|
+ (unsigned int)waiter, current->pid);
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
|
|
@@ -0,0 +1,71 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHIQ_MEMDRV_H
|
|
+#define VCHIQ_MEMDRV_H
|
|
+
|
|
+/* ---- Include Files ----------------------------------------------------- */
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include "vchiq_if.h"
|
|
+
|
|
+/* ---- Constants and Types ---------------------------------------------- */
|
|
+
|
|
+typedef struct {
|
|
+ void *armSharedMemVirt;
|
|
+ dma_addr_t armSharedMemPhys;
|
|
+ size_t armSharedMemSize;
|
|
+
|
|
+ void *vcSharedMemVirt;
|
|
+ dma_addr_t vcSharedMemPhys;
|
|
+ size_t vcSharedMemSize;
|
|
+} VCHIQ_SHARED_MEM_INFO_T;
|
|
+
|
|
+/* ---- Variable Externs ------------------------------------------------- */
|
|
+
|
|
+/* ---- Function Prototypes ---------------------------------------------- */
|
|
+
|
|
+void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
|
|
+
|
|
+VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
|
|
+
|
|
+VCHIQ_STATUS_T vchiq_userdrv_create_instance(
|
|
+ const VCHIQ_PLATFORM_DATA_T * platform_data);
|
|
+
|
|
+VCHIQ_STATUS_T vchiq_userdrv_suspend(
|
|
+ const VCHIQ_PLATFORM_DATA_T * platform_data);
|
|
+
|
|
+VCHIQ_STATUS_T vchiq_userdrv_resume(
|
|
+ const VCHIQ_PLATFORM_DATA_T * platform_data);
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
|
|
@@ -0,0 +1,58 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHIQ_PAGELIST_H
|
|
+#define VCHIQ_PAGELIST_H
|
|
+
|
|
+#ifndef PAGE_SIZE
|
|
+#define PAGE_SIZE 4096
|
|
+#endif
|
|
+#define CACHE_LINE_SIZE 32
|
|
+#define PAGELIST_WRITE 0
|
|
+#define PAGELIST_READ 1
|
|
+#define PAGELIST_READ_WITH_FRAGMENTS 2
|
|
+
|
|
+typedef struct pagelist_struct {
|
|
+ unsigned long length;
|
|
+ unsigned short type;
|
|
+ unsigned short offset;
|
|
+ unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
|
|
+ pages at consecutive addresses. */
|
|
+} PAGELIST_T;
|
|
+
|
|
+typedef struct fragments_struct {
|
|
+ char headbuf[CACHE_LINE_SIZE];
|
|
+ char tailbuf[CACHE_LINE_SIZE];
|
|
+} FRAGMENTS_T;
|
|
+
|
|
+#endif /* VCHIQ_PAGELIST_H */
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
|
|
@@ -0,0 +1,243 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+
|
|
+#include <linux/proc_fs.h>
|
|
+#include "vchiq_core.h"
|
|
+#include "vchiq_arm.h"
|
|
+
|
|
+struct vchiq_proc_info {
|
|
+ /* Global 'vc' proc entry used by all instances */
|
|
+ struct proc_dir_entry *vc_cfg_dir;
|
|
+
|
|
+ /* one entry per client process */
|
|
+ struct proc_dir_entry *clients;
|
|
+
|
|
+ /* log categories */
|
|
+ struct proc_dir_entry *log_categories;
|
|
+};
|
|
+
|
|
+static struct vchiq_proc_info proc_info;
|
|
+
|
|
+struct proc_dir_entry *vchiq_proc_top(void)
|
|
+{
|
|
+ BUG_ON(proc_info.vc_cfg_dir == NULL);
|
|
+ return proc_info.vc_cfg_dir;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+*
|
|
+* log category entries
|
|
+*
|
|
+***************************************************************************/
|
|
+#define PROC_WRITE_BUF_SIZE 256
|
|
+
|
|
+#define VCHIQ_LOG_ERROR_STR "error"
|
|
+#define VCHIQ_LOG_WARNING_STR "warning"
|
|
+#define VCHIQ_LOG_INFO_STR "info"
|
|
+#define VCHIQ_LOG_TRACE_STR "trace"
|
|
+
|
|
+static int log_cfg_read(char *buffer,
|
|
+ char **start,
|
|
+ off_t off,
|
|
+ int count,
|
|
+ int *eof,
|
|
+ void *data)
|
|
+{
|
|
+ int len = 0;
|
|
+ char *log_value = NULL;
|
|
+
|
|
+ switch (*((int *)data)) {
|
|
+ case VCHIQ_LOG_ERROR:
|
|
+ log_value = VCHIQ_LOG_ERROR_STR;
|
|
+ break;
|
|
+ case VCHIQ_LOG_WARNING:
|
|
+ log_value = VCHIQ_LOG_WARNING_STR;
|
|
+ break;
|
|
+ case VCHIQ_LOG_INFO:
|
|
+ log_value = VCHIQ_LOG_INFO_STR;
|
|
+ break;
|
|
+ case VCHIQ_LOG_TRACE:
|
|
+ log_value = VCHIQ_LOG_TRACE_STR;
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ len += sprintf(buffer + len,
|
|
+ "%s\n",
|
|
+ log_value ? log_value : "(null)");
|
|
+
|
|
+ return len;
|
|
+}
|
|
+
|
|
+
|
|
+static int log_cfg_write(struct file *file,
|
|
+ const char __user *buffer,
|
|
+ unsigned long count,
|
|
+ void *data)
|
|
+{
|
|
+ int *log_module = data;
|
|
+ char kbuf[PROC_WRITE_BUF_SIZE + 1];
|
|
+
|
|
+ (void)file;
|
|
+
|
|
+ memset(kbuf, 0, PROC_WRITE_BUF_SIZE + 1);
|
|
+ if (count >= PROC_WRITE_BUF_SIZE)
|
|
+ count = PROC_WRITE_BUF_SIZE;
|
|
+
|
|
+ if (copy_from_user(kbuf,
|
|
+ buffer,
|
|
+ count) != 0)
|
|
+ return -EFAULT;
|
|
+ kbuf[count - 1] = 0;
|
|
+
|
|
+ if (strncmp("error", kbuf, strlen("error")) == 0)
|
|
+ *log_module = VCHIQ_LOG_ERROR;
|
|
+ else if (strncmp("warning", kbuf, strlen("warning")) == 0)
|
|
+ *log_module = VCHIQ_LOG_WARNING;
|
|
+ else if (strncmp("info", kbuf, strlen("info")) == 0)
|
|
+ *log_module = VCHIQ_LOG_INFO;
|
|
+ else if (strncmp("trace", kbuf, strlen("trace")) == 0)
|
|
+ *log_module = VCHIQ_LOG_TRACE;
|
|
+ else
|
|
+ *log_module = VCHIQ_LOG_DEFAULT;
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+/* Log category proc entries */
|
|
+struct vchiq_proc_log_entry {
|
|
+ const char *name;
|
|
+ int *plevel;
|
|
+ struct proc_dir_entry *dir;
|
|
+};
|
|
+
|
|
+static struct vchiq_proc_log_entry vchiq_proc_log_entries[] = {
|
|
+ { "core", &vchiq_core_log_level },
|
|
+ { "msg", &vchiq_core_msg_log_level },
|
|
+ { "sync", &vchiq_sync_log_level },
|
|
+ { "susp", &vchiq_susp_log_level },
|
|
+ { "arm", &vchiq_arm_log_level },
|
|
+};
|
|
+static int n_log_entries =
|
|
+ sizeof(vchiq_proc_log_entries)/sizeof(vchiq_proc_log_entries[0]);
|
|
+
|
|
+/* create an entry under /proc/vc/log for each log category */
|
|
+static int vchiq_proc_create_log_entries(struct proc_dir_entry *top)
|
|
+{
|
|
+ struct proc_dir_entry *dir;
|
|
+ size_t i;
|
|
+ int ret = 0;
|
|
+#if 0
|
|
+ dir = proc_mkdir("log", proc_info.vc_cfg_dir);
|
|
+ if (!dir)
|
|
+ return -ENOMEM;
|
|
+ proc_info.log_categories = dir;
|
|
+
|
|
+ for (i = 0; i < n_log_entries; i++) {
|
|
+ dir = create_proc_entry(vchiq_proc_log_entries[i].name,
|
|
+ 0644,
|
|
+ proc_info.log_categories);
|
|
+ if (!dir) {
|
|
+ ret = -ENOMEM;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ dir->read_proc = &log_cfg_read;
|
|
+ dir->write_proc = &log_cfg_write;
|
|
+ dir->data = (void *)vchiq_proc_log_entries[i].plevel;
|
|
+
|
|
+ vchiq_proc_log_entries[i].dir = dir;
|
|
+ }
|
|
+#endif
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+
|
|
+int vchiq_proc_init(void)
|
|
+{
|
|
+ BUG_ON(proc_info.vc_cfg_dir != NULL);
|
|
+
|
|
+ proc_info.vc_cfg_dir = proc_mkdir("vc", NULL);
|
|
+ if (proc_info.vc_cfg_dir == NULL)
|
|
+ goto fail;
|
|
+
|
|
+ proc_info.clients = proc_mkdir("clients",
|
|
+ proc_info.vc_cfg_dir);
|
|
+ if (!proc_info.clients)
|
|
+ goto fail;
|
|
+
|
|
+ if (vchiq_proc_create_log_entries(proc_info.vc_cfg_dir) != 0)
|
|
+ goto fail;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+fail:
|
|
+ vchiq_proc_deinit();
|
|
+ vchiq_log_error(vchiq_arm_log_level,
|
|
+ "%s: failed to create proc directory",
|
|
+ __func__);
|
|
+
|
|
+ return -ENOMEM;
|
|
+}
|
|
+
|
|
+/* remove all the proc entries */
|
|
+void vchiq_proc_deinit(void)
|
|
+{
|
|
+ /* log category entries */
|
|
+#if 0
|
|
+ if (proc_info.log_categories) {
|
|
+ size_t i;
|
|
+ for (i = 0; i < n_log_entries; i++)
|
|
+ if (vchiq_proc_log_entries[i].dir)
|
|
+ remove_proc_entry(
|
|
+ vchiq_proc_log_entries[i].name,
|
|
+ proc_info.log_categories);
|
|
+
|
|
+ remove_proc_entry(proc_info.log_categories->name,
|
|
+ proc_info.vc_cfg_dir);
|
|
+ }
|
|
+ if (proc_info.clients)
|
|
+ remove_proc_entry(proc_info.clients->name,
|
|
+ proc_info.vc_cfg_dir);
|
|
+ if (proc_info.vc_cfg_dir)
|
|
+ remove_proc_entry(proc_info.vc_cfg_dir->name, NULL);
|
|
+#endif
|
|
+}
|
|
+
|
|
+struct proc_dir_entry *vchiq_clients_top(void)
|
|
+{
|
|
+ return proc_info.clients;
|
|
+}
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
|
|
@@ -0,0 +1,815 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+#include <linux/module.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include "interface/vchi/vchi.h"
|
|
+#include "vchiq.h"
|
|
+#include "vchiq_core.h"
|
|
+
|
|
+#include "vchiq_util.h"
|
|
+
|
|
+#include <stddef.h>
|
|
+
|
|
+#define vchiq_status_to_vchi(status) ((int32_t)status)
|
|
+
|
|
+typedef struct {
|
|
+ VCHIQ_SERVICE_HANDLE_T handle;
|
|
+
|
|
+ VCHIU_QUEUE_T queue;
|
|
+
|
|
+ VCHI_CALLBACK_T callback;
|
|
+ void *callback_param;
|
|
+} SHIM_SERVICE_T;
|
|
+
|
|
+/* ----------------------------------------------------------------------
|
|
+ * return pointer to the mphi message driver function table
|
|
+ * -------------------------------------------------------------------- */
|
|
+const VCHI_MESSAGE_DRIVER_T *
|
|
+vchi_mphi_message_driver_func_table(void)
|
|
+{
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/* ----------------------------------------------------------------------
|
|
+ * return a pointer to the 'single' connection driver fops
|
|
+ * -------------------------------------------------------------------- */
|
|
+const VCHI_CONNECTION_API_T *
|
|
+single_get_func_table(void)
|
|
+{
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+VCHI_CONNECTION_T *vchi_create_connection(
|
|
+ const VCHI_CONNECTION_API_T *function_table,
|
|
+ const VCHI_MESSAGE_DRIVER_T *low_level)
|
|
+{
|
|
+ (void)function_table;
|
|
+ (void)low_level;
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_msg_peek
|
|
+ *
|
|
+ * Arguments: const VCHI_SERVICE_HANDLE_T handle,
|
|
+ * void **data,
|
|
+ * uint32_t *msg_size,
|
|
+
|
|
+
|
|
+ * VCHI_FLAGS_T flags
|
|
+ *
|
|
+ * Description: Routine to return a pointer to the current message (to allow in
|
|
+ * place processing). The message can be removed using
|
|
+ * vchi_msg_remove when you're finished
|
|
+ *
|
|
+ * Returns: int32_t - success == 0
|
|
+ *
|
|
+ ***********************************************************/
|
|
+int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
|
|
+ void **data,
|
|
+ uint32_t *msg_size,
|
|
+ VCHI_FLAGS_T flags)
|
|
+{
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+ VCHIQ_HEADER_T *header;
|
|
+
|
|
+ WARN_ON((flags != VCHI_FLAGS_NONE) &&
|
|
+ (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
|
|
+
|
|
+ if (flags == VCHI_FLAGS_NONE)
|
|
+ if (vchiu_queue_is_empty(&service->queue))
|
|
+ return -1;
|
|
+
|
|
+ header = vchiu_queue_peek(&service->queue);
|
|
+
|
|
+ *data = header->data;
|
|
+ *msg_size = header->size;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_msg_peek);
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_msg_remove
|
|
+ *
|
|
+ * Arguments: const VCHI_SERVICE_HANDLE_T handle,
|
|
+ *
|
|
+ * Description: Routine to remove a message (after it has been read with
|
|
+ * vchi_msg_peek)
|
|
+ *
|
|
+ * Returns: int32_t - success == 0
|
|
+ *
|
|
+ ***********************************************************/
|
|
+int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+ VCHIQ_HEADER_T *header;
|
|
+
|
|
+ header = vchiu_queue_pop(&service->queue);
|
|
+
|
|
+ vchiq_release_message(service->handle, header);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_msg_remove);
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_msg_queue
|
|
+ *
|
|
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
|
|
+ * const void *data,
|
|
+ * uint32_t data_size,
|
|
+ * VCHI_FLAGS_T flags,
|
|
+ * void *msg_handle,
|
|
+ *
|
|
+ * Description: Thin wrapper to queue a message onto a connection
|
|
+ *
|
|
+ * Returns: int32_t - success == 0
|
|
+ *
|
|
+ ***********************************************************/
|
|
+int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
|
|
+ const void *data,
|
|
+ uint32_t data_size,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *msg_handle)
|
|
+{
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+ VCHIQ_ELEMENT_T element = {data, data_size};
|
|
+ VCHIQ_STATUS_T status;
|
|
+
|
|
+ (void)msg_handle;
|
|
+
|
|
+ WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
|
|
+
|
|
+ status = vchiq_queue_message(service->handle, &element, 1);
|
|
+
|
|
+ /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
|
|
+ ** implement a retry mechanism since this function is supposed
|
|
+ ** to block until queued
|
|
+ */
|
|
+ while (status == VCHIQ_RETRY) {
|
|
+ msleep(1);
|
|
+ status = vchiq_queue_message(service->handle, &element, 1);
|
|
+ }
|
|
+
|
|
+ return vchiq_status_to_vchi(status);
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_msg_queue);
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_bulk_queue_receive
|
|
+ *
|
|
+ * Arguments: VCHI_BULK_HANDLE_T handle,
|
|
+ * void *data_dst,
|
|
+ * const uint32_t data_size,
|
|
+ * VCHI_FLAGS_T flags
|
|
+ * void *bulk_handle
|
|
+ *
|
|
+ * Description: Routine to setup a rcv buffer
|
|
+ *
|
|
+ * Returns: int32_t - success == 0
|
|
+ *
|
|
+ ***********************************************************/
|
|
+int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
|
|
+ void *data_dst,
|
|
+ uint32_t data_size,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *bulk_handle)
|
|
+{
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+ VCHIQ_BULK_MODE_T mode;
|
|
+ VCHIQ_STATUS_T status;
|
|
+
|
|
+ switch ((int)flags) {
|
|
+ case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
|
|
+ | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
|
|
+ WARN_ON(!service->callback);
|
|
+ mode = VCHIQ_BULK_MODE_CALLBACK;
|
|
+ break;
|
|
+ case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
|
|
+ mode = VCHIQ_BULK_MODE_BLOCKING;
|
|
+ break;
|
|
+ case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
|
|
+ case VCHI_FLAGS_NONE:
|
|
+ mode = VCHIQ_BULK_MODE_NOCALLBACK;
|
|
+ break;
|
|
+ default:
|
|
+ WARN(1, "unsupported message\n");
|
|
+ return vchiq_status_to_vchi(VCHIQ_ERROR);
|
|
+ }
|
|
+
|
|
+ status = vchiq_bulk_receive(service->handle, data_dst, data_size,
|
|
+ bulk_handle, mode);
|
|
+
|
|
+ /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
|
|
+ ** implement a retry mechanism since this function is supposed
|
|
+ ** to block until queued
|
|
+ */
|
|
+ while (status == VCHIQ_RETRY) {
|
|
+ msleep(1);
|
|
+ status = vchiq_bulk_receive(service->handle, data_dst,
|
|
+ data_size, bulk_handle, mode);
|
|
+ }
|
|
+
|
|
+ return vchiq_status_to_vchi(status);
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_bulk_queue_receive);
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_bulk_queue_transmit
|
|
+ *
|
|
+ * Arguments: VCHI_BULK_HANDLE_T handle,
|
|
+ * const void *data_src,
|
|
+ * uint32_t data_size,
|
|
+ * VCHI_FLAGS_T flags,
|
|
+ * void *bulk_handle
|
|
+ *
|
|
+ * Description: Routine to transmit some data
|
|
+ *
|
|
+ * Returns: int32_t - success == 0
|
|
+ *
|
|
+ ***********************************************************/
|
|
+int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
|
|
+ const void *data_src,
|
|
+ uint32_t data_size,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *bulk_handle)
|
|
+{
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+ VCHIQ_BULK_MODE_T mode;
|
|
+ VCHIQ_STATUS_T status;
|
|
+
|
|
+ switch ((int)flags) {
|
|
+ case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
|
|
+ | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
|
|
+ WARN_ON(!service->callback);
|
|
+ mode = VCHIQ_BULK_MODE_CALLBACK;
|
|
+ break;
|
|
+ case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
|
|
+ case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
|
|
+ mode = VCHIQ_BULK_MODE_BLOCKING;
|
|
+ break;
|
|
+ case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
|
|
+ case VCHI_FLAGS_NONE:
|
|
+ mode = VCHIQ_BULK_MODE_NOCALLBACK;
|
|
+ break;
|
|
+ default:
|
|
+ WARN(1, "unsupported message\n");
|
|
+ return vchiq_status_to_vchi(VCHIQ_ERROR);
|
|
+ }
|
|
+
|
|
+ status = vchiq_bulk_transmit(service->handle, data_src, data_size,
|
|
+ bulk_handle, mode);
|
|
+
|
|
+ /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
|
|
+ ** implement a retry mechanism since this function is supposed
|
|
+ ** to block until queued
|
|
+ */
|
|
+ while (status == VCHIQ_RETRY) {
|
|
+ msleep(1);
|
|
+ status = vchiq_bulk_transmit(service->handle, data_src,
|
|
+ data_size, bulk_handle, mode);
|
|
+ }
|
|
+
|
|
+ return vchiq_status_to_vchi(status);
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_bulk_queue_transmit);
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_msg_dequeue
|
|
+ *
|
|
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
|
|
+ * void *data,
|
|
+ * uint32_t max_data_size_to_read,
|
|
+ * uint32_t *actual_msg_size
|
|
+ * VCHI_FLAGS_T flags
|
|
+ *
|
|
+ * Description: Routine to dequeue a message into the supplied buffer
|
|
+ *
|
|
+ * Returns: int32_t - success == 0
|
|
+ *
|
|
+ ***********************************************************/
|
|
+int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
|
|
+ void *data,
|
|
+ uint32_t max_data_size_to_read,
|
|
+ uint32_t *actual_msg_size,
|
|
+ VCHI_FLAGS_T flags)
|
|
+{
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+ VCHIQ_HEADER_T *header;
|
|
+
|
|
+ WARN_ON((flags != VCHI_FLAGS_NONE) &&
|
|
+ (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
|
|
+
|
|
+ if (flags == VCHI_FLAGS_NONE)
|
|
+ if (vchiu_queue_is_empty(&service->queue))
|
|
+ return -1;
|
|
+
|
|
+ header = vchiu_queue_pop(&service->queue);
|
|
+
|
|
+ memcpy(data, header->data, header->size < max_data_size_to_read ?
|
|
+ header->size : max_data_size_to_read);
|
|
+
|
|
+ *actual_msg_size = header->size;
|
|
+
|
|
+ vchiq_release_message(service->handle, header);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_msg_dequeue);
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_msg_queuev
|
|
+ *
|
|
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
|
|
+ * VCHI_MSG_VECTOR_T *vector,
|
|
+ * uint32_t count,
|
|
+ * VCHI_FLAGS_T flags,
|
|
+ * void *msg_handle
|
|
+ *
|
|
+ * Description: Thin wrapper to queue a message onto a connection
|
|
+ *
|
|
+ * Returns: int32_t - success == 0
|
|
+ *
|
|
+ ***********************************************************/
|
|
+
|
|
+vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
|
|
+vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
|
|
+ offsetof(VCHIQ_ELEMENT_T, data));
|
|
+vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
|
|
+ offsetof(VCHIQ_ELEMENT_T, size));
|
|
+
|
|
+int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
|
|
+ VCHI_MSG_VECTOR_T *vector,
|
|
+ uint32_t count,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ void *msg_handle)
|
|
+{
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+
|
|
+ (void)msg_handle;
|
|
+
|
|
+ WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
|
|
+
|
|
+ return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
|
|
+ (const VCHIQ_ELEMENT_T *)vector, count));
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_msg_queuev);
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_held_msg_release
|
|
+ *
|
|
+ * Arguments: VCHI_HELD_MSG_T *message
|
|
+ *
|
|
+ * Description: Routine to release a held message (after it has been read with
|
|
+ * vchi_msg_hold)
|
|
+ *
|
|
+ * Returns: int32_t - success == 0
|
|
+ *
|
|
+ ***********************************************************/
|
|
+int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
|
|
+{
|
|
+ vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
|
|
+ (VCHIQ_HEADER_T *)message->message);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_msg_hold
|
|
+ *
|
|
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
|
|
+ * void **data,
|
|
+ * uint32_t *msg_size,
|
|
+ * VCHI_FLAGS_T flags,
|
|
+ * VCHI_HELD_MSG_T *message_handle
|
|
+ *
|
|
+ * Description: Routine to return a pointer to the current message (to allow
|
|
+ * in place processing). The message is dequeued - don't forget
|
|
+ * to release the message using vchi_held_msg_release when you're
|
|
+ * finished.
|
|
+ *
|
|
+ * Returns: int32_t - success == 0
|
|
+ *
|
|
+ ***********************************************************/
|
|
+int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
|
|
+ void **data,
|
|
+ uint32_t *msg_size,
|
|
+ VCHI_FLAGS_T flags,
|
|
+ VCHI_HELD_MSG_T *message_handle)
|
|
+{
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+ VCHIQ_HEADER_T *header;
|
|
+
|
|
+ WARN_ON((flags != VCHI_FLAGS_NONE) &&
|
|
+ (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
|
|
+
|
|
+ if (flags == VCHI_FLAGS_NONE)
|
|
+ if (vchiu_queue_is_empty(&service->queue))
|
|
+ return -1;
|
|
+
|
|
+ header = vchiu_queue_pop(&service->queue);
|
|
+
|
|
+ *data = header->data;
|
|
+ *msg_size = header->size;
|
|
+
|
|
+ message_handle->service =
|
|
+ (struct opaque_vchi_service_t *)service->handle;
|
|
+ message_handle->message = header;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_initialise
|
|
+ *
|
|
+ * Arguments: VCHI_INSTANCE_T *instance_handle
|
|
+ * VCHI_CONNECTION_T **connections
|
|
+ * const uint32_t num_connections
|
|
+ *
|
|
+ * Description: Initialises the hardware but does not transmit anything
|
|
+ * When run as a Host App this will be called twice hence the need
|
|
+ * to malloc the state information
|
|
+ *
|
|
+ * Returns: 0 if successful, failure otherwise
|
|
+ *
|
|
+ ***********************************************************/
|
|
+
|
|
+int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
|
|
+{
|
|
+ VCHIQ_INSTANCE_T instance;
|
|
+ VCHIQ_STATUS_T status;
|
|
+
|
|
+ status = vchiq_initialise(&instance);
|
|
+
|
|
+ *instance_handle = (VCHI_INSTANCE_T)instance;
|
|
+
|
|
+ return vchiq_status_to_vchi(status);
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_initialise);
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_connect
|
|
+ *
|
|
+ * Arguments: VCHI_CONNECTION_T **connections
|
|
+ * const uint32_t num_connections
|
|
+ * VCHI_INSTANCE_T instance_handle)
|
|
+ *
|
|
+ * Description: Starts the command service on each connection,
|
|
+ * causing INIT messages to be pinged back and forth
|
|
+ *
|
|
+ * Returns: 0 if successful, failure otherwise
|
|
+ *
|
|
+ ***********************************************************/
|
|
+int32_t vchi_connect(VCHI_CONNECTION_T **connections,
|
|
+ const uint32_t num_connections,
|
|
+ VCHI_INSTANCE_T instance_handle)
|
|
+{
|
|
+ VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
|
|
+
|
|
+ (void)connections;
|
|
+ (void)num_connections;
|
|
+
|
|
+ return vchiq_connect(instance);
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_connect);
|
|
+
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_disconnect
|
|
+ *
|
|
+ * Arguments: VCHI_INSTANCE_T instance_handle
|
|
+ *
|
|
+ * Description: Stops the command service on each connection,
|
|
+ * causing DE-INIT messages to be pinged back and forth
|
|
+ *
|
|
+ * Returns: 0 if successful, failure otherwise
|
|
+ *
|
|
+ ***********************************************************/
|
|
+int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
|
|
+{
|
|
+ VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
|
|
+ return vchiq_status_to_vchi(vchiq_shutdown(instance));
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_disconnect);
|
|
+
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_service_open
|
|
+ * Name: vchi_service_create
|
|
+ *
|
|
+ * Arguments: VCHI_INSTANCE_T *instance_handle
|
|
+ * SERVICE_CREATION_T *setup,
|
|
+ * VCHI_SERVICE_HANDLE_T *handle
|
|
+ *
|
|
+ * Description: Routine to open a service
|
|
+ *
|
|
+ * Returns: int32_t - success == 0
|
|
+ *
|
|
+ ***********************************************************/
|
|
+
|
|
+static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
|
|
+ VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
|
|
+{
|
|
+ SHIM_SERVICE_T *service =
|
|
+ (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
|
|
+
|
|
+ switch (reason) {
|
|
+ case VCHIQ_MESSAGE_AVAILABLE:
|
|
+ vchiu_queue_push(&service->queue, header);
|
|
+
|
|
+ if (service->callback)
|
|
+ service->callback(service->callback_param,
|
|
+ VCHI_CALLBACK_MSG_AVAILABLE, NULL);
|
|
+ break;
|
|
+ case VCHIQ_BULK_TRANSMIT_DONE:
|
|
+ if (service->callback)
|
|
+ service->callback(service->callback_param,
|
|
+ VCHI_CALLBACK_BULK_SENT, bulk_user);
|
|
+ break;
|
|
+ case VCHIQ_BULK_RECEIVE_DONE:
|
|
+ if (service->callback)
|
|
+ service->callback(service->callback_param,
|
|
+ VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
|
|
+ break;
|
|
+ case VCHIQ_SERVICE_CLOSED:
|
|
+ if (service->callback)
|
|
+ service->callback(service->callback_param,
|
|
+ VCHI_CALLBACK_SERVICE_CLOSED, NULL);
|
|
+ break;
|
|
+ case VCHIQ_SERVICE_OPENED:
|
|
+ /* No equivalent VCHI reason */
|
|
+ break;
|
|
+ case VCHIQ_BULK_TRANSMIT_ABORTED:
|
|
+ if (service->callback)
|
|
+ service->callback(service->callback_param,
|
|
+ VCHI_CALLBACK_BULK_TRANSMIT_ABORTED, bulk_user);
|
|
+ break;
|
|
+ case VCHIQ_BULK_RECEIVE_ABORTED:
|
|
+ if (service->callback)
|
|
+ service->callback(service->callback_param,
|
|
+ VCHI_CALLBACK_BULK_RECEIVE_ABORTED, bulk_user);
|
|
+ break;
|
|
+ default:
|
|
+ WARN(1, "not supported\n");
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return VCHIQ_SUCCESS;
|
|
+}
|
|
+
|
|
+static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
|
|
+ SERVICE_CREATION_T *setup)
|
|
+{
|
|
+ SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
|
|
+
|
|
+ (void)instance;
|
|
+
|
|
+ if (service) {
|
|
+ if (vchiu_queue_init(&service->queue, 64)) {
|
|
+ service->callback = setup->callback;
|
|
+ service->callback_param = setup->callback_param;
|
|
+ } else {
|
|
+ kfree(service);
|
|
+ service = NULL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return service;
|
|
+}
|
|
+
|
|
+static void service_free(SHIM_SERVICE_T *service)
|
|
+{
|
|
+ if (service) {
|
|
+ vchiu_queue_delete(&service->queue);
|
|
+ kfree(service);
|
|
+ }
|
|
+}
|
|
+
|
|
+int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
|
|
+ SERVICE_CREATION_T *setup,
|
|
+ VCHI_SERVICE_HANDLE_T *handle)
|
|
+{
|
|
+ VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
|
|
+ SHIM_SERVICE_T *service = service_alloc(instance, setup);
|
|
+ if (service) {
|
|
+ VCHIQ_SERVICE_PARAMS_T params;
|
|
+ VCHIQ_STATUS_T status;
|
|
+
|
|
+ memset(¶ms, 0, sizeof(params));
|
|
+ params.fourcc = setup->service_id;
|
|
+ params.callback = shim_callback;
|
|
+ params.userdata = service;
|
|
+ params.version = setup->version.version;
|
|
+ params.version_min = setup->version.version_min;
|
|
+
|
|
+ status = vchiq_open_service(instance, ¶ms,
|
|
+ &service->handle);
|
|
+ if (status != VCHIQ_SUCCESS) {
|
|
+ service_free(service);
|
|
+ service = NULL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ *handle = (VCHI_SERVICE_HANDLE_T)service;
|
|
+
|
|
+ return (service != NULL) ? 0 : -1;
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_service_open);
|
|
+
|
|
+int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
|
|
+ SERVICE_CREATION_T *setup,
|
|
+ VCHI_SERVICE_HANDLE_T *handle)
|
|
+{
|
|
+ VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
|
|
+ SHIM_SERVICE_T *service = service_alloc(instance, setup);
|
|
+ if (service) {
|
|
+ VCHIQ_SERVICE_PARAMS_T params;
|
|
+ VCHIQ_STATUS_T status;
|
|
+
|
|
+ memset(¶ms, 0, sizeof(params));
|
|
+ params.fourcc = setup->service_id;
|
|
+ params.callback = shim_callback;
|
|
+ params.userdata = service;
|
|
+ params.version = setup->version.version;
|
|
+ params.version_min = setup->version.version_min;
|
|
+ status = vchiq_add_service(instance, ¶ms, &service->handle);
|
|
+
|
|
+ if (status != VCHIQ_SUCCESS) {
|
|
+ service_free(service);
|
|
+ service = NULL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ *handle = (VCHI_SERVICE_HANDLE_T)service;
|
|
+
|
|
+ return (service != NULL) ? 0 : -1;
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_service_create);
|
|
+
|
|
+int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ int32_t ret = -1;
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+ if (service) {
|
|
+ VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
|
|
+ if (status == VCHIQ_SUCCESS) {
|
|
+ service_free(service);
|
|
+ service = NULL;
|
|
+ }
|
|
+
|
|
+ ret = vchiq_status_to_vchi(status);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_service_close);
|
|
+
|
|
+int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ int32_t ret = -1;
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+ if (service) {
|
|
+ VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
|
|
+ if (status == VCHIQ_SUCCESS) {
|
|
+ service_free(service);
|
|
+ service = NULL;
|
|
+ }
|
|
+
|
|
+ ret = vchiq_status_to_vchi(status);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_service_destroy);
|
|
+
|
|
+int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle, short *peer_version )
|
|
+{
|
|
+ int32_t ret = -1;
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+ if(service)
|
|
+ {
|
|
+ VCHIQ_STATUS_T status = vchiq_get_peer_version(service->handle, peer_version);
|
|
+ ret = vchiq_status_to_vchi( status );
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_get_peer_version);
|
|
+
|
|
+/* ----------------------------------------------------------------------
|
|
+ * read a uint32_t from buffer.
|
|
+ * network format is defined to be little endian
|
|
+ * -------------------------------------------------------------------- */
|
|
+uint32_t
|
|
+vchi_readbuf_uint32(const void *_ptr)
|
|
+{
|
|
+ const unsigned char *ptr = _ptr;
|
|
+ return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
|
|
+}
|
|
+
|
|
+/* ----------------------------------------------------------------------
|
|
+ * write a uint32_t to buffer.
|
|
+ * network format is defined to be little endian
|
|
+ * -------------------------------------------------------------------- */
|
|
+void
|
|
+vchi_writebuf_uint32(void *_ptr, uint32_t value)
|
|
+{
|
|
+ unsigned char *ptr = _ptr;
|
|
+ ptr[0] = (unsigned char)((value >> 0) & 0xFF);
|
|
+ ptr[1] = (unsigned char)((value >> 8) & 0xFF);
|
|
+ ptr[2] = (unsigned char)((value >> 16) & 0xFF);
|
|
+ ptr[3] = (unsigned char)((value >> 24) & 0xFF);
|
|
+}
|
|
+
|
|
+/* ----------------------------------------------------------------------
|
|
+ * read a uint16_t from buffer.
|
|
+ * network format is defined to be little endian
|
|
+ * -------------------------------------------------------------------- */
|
|
+uint16_t
|
|
+vchi_readbuf_uint16(const void *_ptr)
|
|
+{
|
|
+ const unsigned char *ptr = _ptr;
|
|
+ return ptr[0] | (ptr[1] << 8);
|
|
+}
|
|
+
|
|
+/* ----------------------------------------------------------------------
|
|
+ * write a uint16_t into the buffer.
|
|
+ * network format is defined to be little endian
|
|
+ * -------------------------------------------------------------------- */
|
|
+void
|
|
+vchi_writebuf_uint16(void *_ptr, uint16_t value)
|
|
+{
|
|
+ unsigned char *ptr = _ptr;
|
|
+ ptr[0] = (value >> 0) & 0xFF;
|
|
+ ptr[1] = (value >> 8) & 0xFF;
|
|
+}
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_service_use
|
|
+ *
|
|
+ * Arguments: const VCHI_SERVICE_HANDLE_T handle
|
|
+ *
|
|
+ * Description: Routine to increment refcount on a service
|
|
+ *
|
|
+ * Returns: void
|
|
+ *
|
|
+ ***********************************************************/
|
|
+int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ int32_t ret = -1;
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+ if (service)
|
|
+ ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_service_use);
|
|
+
|
|
+/***********************************************************
|
|
+ * Name: vchi_service_release
|
|
+ *
|
|
+ * Arguments: const VCHI_SERVICE_HANDLE_T handle
|
|
+ *
|
|
+ * Description: Routine to decrement refcount on a service
|
|
+ *
|
|
+ * Returns: void
|
|
+ *
|
|
+ ***********************************************************/
|
|
+int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
|
|
+{
|
|
+ int32_t ret = -1;
|
|
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
|
|
+ if (service)
|
|
+ ret = vchiq_status_to_vchi(
|
|
+ vchiq_release_service(service->handle));
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL(vchi_service_release);
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
|
|
@@ -0,0 +1,120 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#include "vchiq_util.h"
|
|
+
|
|
+static inline int is_pow2(int i)
|
|
+{
|
|
+ return i && !(i & (i - 1));
|
|
+}
|
|
+
|
|
+int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
|
|
+{
|
|
+ WARN_ON(!is_pow2(size));
|
|
+
|
|
+ queue->size = size;
|
|
+ queue->read = 0;
|
|
+ queue->write = 0;
|
|
+
|
|
+ sema_init(&queue->pop, 0);
|
|
+ sema_init(&queue->push, 0);
|
|
+
|
|
+ queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
|
|
+ if (queue->storage == NULL) {
|
|
+ vchiu_queue_delete(queue);
|
|
+ return 0;
|
|
+ }
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
|
|
+{
|
|
+ if (queue->storage != NULL)
|
|
+ kfree(queue->storage);
|
|
+}
|
|
+
|
|
+int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
|
|
+{
|
|
+ return queue->read == queue->write;
|
|
+}
|
|
+
|
|
+int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
|
|
+{
|
|
+ return queue->write == queue->read + queue->size;
|
|
+}
|
|
+
|
|
+void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
|
|
+{
|
|
+ while (queue->write == queue->read + queue->size) {
|
|
+ if (down_interruptible(&queue->pop) != 0) {
|
|
+ flush_signals(current);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ queue->storage[queue->write & (queue->size - 1)] = header;
|
|
+
|
|
+ queue->write++;
|
|
+
|
|
+ up(&queue->push);
|
|
+}
|
|
+
|
|
+VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
|
|
+{
|
|
+ while (queue->write == queue->read) {
|
|
+ if (down_interruptible(&queue->push) != 0) {
|
|
+ flush_signals(current);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ up(&queue->push); // We haven't removed anything from the queue.
|
|
+ return queue->storage[queue->read & (queue->size - 1)];
|
|
+}
|
|
+
|
|
+VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
|
|
+{
|
|
+ VCHIQ_HEADER_T *header;
|
|
+
|
|
+ while (queue->write == queue->read) {
|
|
+ if (down_interruptible(&queue->push) != 0) {
|
|
+ flush_signals(current);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ header = queue->storage[queue->read & (queue->size - 1)];
|
|
+
|
|
+ queue->read++;
|
|
+
|
|
+ up(&queue->pop);
|
|
+
|
|
+ return header;
|
|
+}
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
|
|
@@ -0,0 +1,82 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+
|
|
+#ifndef VCHIQ_UTIL_H
|
|
+#define VCHIQ_UTIL_H
|
|
+
|
|
+#include <linux/types.h>
|
|
+#include <linux/semaphore.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/bitops.h>
|
|
+#include <linux/kthread.h>
|
|
+#include <linux/wait.h>
|
|
+#include <linux/vmalloc.h>
|
|
+#include <linux/jiffies.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/string.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/random.h>
|
|
+#include <linux/sched.h>
|
|
+#include <linux/ctype.h>
|
|
+#include <linux/uaccess.h>
|
|
+#include <linux/time.h> /* for time_t */
|
|
+#include <linux/slab.h>
|
|
+#include <linux/vmalloc.h>
|
|
+
|
|
+#include "vchiq_if.h"
|
|
+
|
|
+typedef struct {
|
|
+ int size;
|
|
+ int read;
|
|
+ int write;
|
|
+
|
|
+ struct semaphore pop;
|
|
+ struct semaphore push;
|
|
+
|
|
+ VCHIQ_HEADER_T **storage;
|
|
+} VCHIU_QUEUE_T;
|
|
+
|
|
+extern int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
|
|
+extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
|
|
+
|
|
+extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
|
|
+extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
|
|
+
|
|
+extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
|
|
+
|
|
+extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
|
|
+extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
|
|
+
|
|
+#endif
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
|
|
@@ -0,0 +1,59 @@
|
|
+/**
|
|
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
|
|
+ *
|
|
+ * Redistribution and use in source and binary forms, with or without
|
|
+ * modification, are permitted provided that the following conditions
|
|
+ * are met:
|
|
+ * 1. Redistributions of source code must retain the above copyright
|
|
+ * notice, this list of conditions, and the following disclaimer,
|
|
+ * without modification.
|
|
+ * 2. Redistributions in binary form must reproduce the above copyright
|
|
+ * notice, this list of conditions and the following disclaimer in the
|
|
+ * documentation and/or other materials provided with the distribution.
|
|
+ * 3. The names of the above-listed copyright holders may not be used
|
|
+ * to endorse or promote products derived from this software without
|
|
+ * specific prior written permission.
|
|
+ *
|
|
+ * ALTERNATIVELY, this software may be distributed under the terms of the
|
|
+ * GNU General Public License ("GPL") version 2, as published by the Free
|
|
+ * Software Foundation.
|
|
+ *
|
|
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
|
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
|
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
|
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
+ */
|
|
+#include "vchiq_build_info.h"
|
|
+#include <linux/broadcom/vc_debug_sym.h>
|
|
+
|
|
+VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_hostname, "dc4-arm-01" );
|
|
+VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)" );
|
|
+VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_time, __TIME__ );
|
|
+VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_date, __DATE__ );
|
|
+
|
|
+const char *vchiq_get_build_hostname( void )
|
|
+{
|
|
+ return vchiq_build_hostname;
|
|
+}
|
|
+
|
|
+const char *vchiq_get_build_version( void )
|
|
+{
|
|
+ return vchiq_build_version;
|
|
+}
|
|
+
|
|
+const char *vchiq_get_build_date( void )
|
|
+{
|
|
+ return vchiq_build_date;
|
|
+}
|
|
+
|
|
+const char *vchiq_get_build_time( void )
|
|
+{
|
|
+ return vchiq_build_time;
|
|
+}
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/Kconfig
|
|
@@ -0,0 +1,10 @@
|
|
+config BCM2708_VCHIQ
|
|
+ tristate "Videocore VCHIQ"
|
|
+ depends on MACH_BCM2708
|
|
+ default y
|
|
+ help
|
|
+ Kernel to VideoCore communication interface for the
|
|
+ BCM2708 family of products.
|
|
+ Defaults to Y when the Broadcom Videocore services
|
|
+ are included in the build, N otherwise.
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/misc/vc04_services/Makefile
|
|
@@ -0,0 +1,18 @@
|
|
+ifeq ($(CONFIG_MACH_BCM2708),y)
|
|
+
|
|
+obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o
|
|
+
|
|
+vchiq-objs := \
|
|
+ interface/vchiq_arm/vchiq_core.o \
|
|
+ interface/vchiq_arm/vchiq_arm.o \
|
|
+ interface/vchiq_arm/vchiq_kern_lib.o \
|
|
+ interface/vchiq_arm/vchiq_2835_arm.o \
|
|
+ interface/vchiq_arm/vchiq_proc.o \
|
|
+ interface/vchiq_arm/vchiq_shim.o \
|
|
+ interface/vchiq_arm/vchiq_util.o \
|
|
+ interface/vchiq_arm/vchiq_connected.o \
|
|
+
|
|
+EXTRA_CFLAGS += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
|
|
+
|
|
+endif
|
|
+
|
|
--- /dev/null
|
|
+++ b/include/linux/broadcom/vc_cma.h
|
|
@@ -0,0 +1,30 @@
|
|
+/*****************************************************************************
|
|
+* Copyright 2012 Broadcom Corporation. All rights reserved.
|
|
+*
|
|
+* Unless you and Broadcom execute a separate written software license
|
|
+* agreement governing use of this software, this software is licensed to you
|
|
+* under the terms of the GNU General Public License version 2, available at
|
|
+* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
|
|
+*
|
|
+* Notwithstanding the above, under no circumstances may you combine this
|
|
+* software in any way with any other Broadcom software provided under a
|
|
+* license other than the GPL, without Broadcom's express prior written
|
|
+* consent.
|
|
+*****************************************************************************/
|
|
+
|
|
+#if !defined( VC_CMA_H )
|
|
+#define VC_CMA_H
|
|
+
|
|
+#include <linux/ioctl.h>
|
|
+
|
|
+#define VC_CMA_IOC_MAGIC 0xc5
|
|
+
|
|
+#define VC_CMA_IOC_RESERVE _IO(VC_CMA_IOC_MAGIC, 0)
|
|
+
|
|
+#ifdef __KERNEL__
|
|
+extern void __init vc_cma_early_init(void);
|
|
+extern void __init vc_cma_reserve(void);
|
|
+#endif
|
|
+
|
|
+#endif /* VC_CMA_H */
|
|
+
|