clear-pkgs-linux-iot-lts2018/0064-media-intel-ipu4-virti...

3576 lines
95 KiB
Diff

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: "Yew, Chang Ching" <chang.ching.yew@intel.com>
Date: Tue, 23 Oct 2018 13:26:38 +0800
Subject: [PATCH] media: intel-ipu4: virtio: IPU virtio backend and frontend
drivers base-code
Base code to enable IPU virtualization for ACRN squashed from
following patches from
https://github.com/intel/linux-intel-lts/tree/4.14/base/drivers/media/pci/intel/virtio/
These patches added the virtio front end driver support for user OS and
virtio back end driver support for service OS.
ff9935 media: intel-ipu4: [VIRT] Poll function to return if put buf list is not empty
4943b7 media: intel-ipu4: [VIRT] Fxied failed to stream close properly issue.
a2d80c media: intel-ipu4: [VIRT] Adding x86_64 arch dependency.
7da2ee media: intel-ipu4: [VIRT] vhm_request struct removed the valid member.
1865e3 media: intel-ipu4: [VIRT] Fix vq get idx func handling of multi VQ concurrent request.
113db9 media: intel-ipu4: [VIRT] Fixed memory leak in BE get buf function.
35aa2a media: intel-ipu4: [VIRT] Implementing poll without wait at BE.
dd2c7a media: intel-ipu4: [VIRT] Implementation for 2 VQs
660300 media: intel-ipu4: [VIRT] Fixing synchronization with locks
a74061 media: intel-ipu4: [VIRT] Added 32-bit compatibility mode.
06ca2c media: intel-ipu4: [VIRT] Fixing buffer lookup issue
a67055 media: intel-ipu4: [VIRT] Removing compilation warnings & printk
1d0bf8 media: intel-ipu4: [VIRT] Virtio implementation of PUT_BUF & modify get buf list
71c663 media: intel-ipu4: [VIRT] Creating ipu mediation baseline v0.1
848b4f media: intel-ipu4: [VIRT] Buffer mapping between sos & uos
b5f40a media: intel-ipu4: [VIRT] Adding Buffer mapping & virtual stream initialization.
f08c9d media: intel-ipu4: [VIRT] Base code for IPU virtio support.
Change-Id: I3912b9838d7acef1806d2f9045602360b8f98212
Signed-off-by: Yew, Chang Ching <chang.ching.yew@intel.com>
---
drivers/media/pci/intel/virtio/Makefile | 10 +
drivers/media/pci/intel/virtio/Makefile.virt | 22 +
.../intel/virtio/intel-ipu4-para-virt-drv.c | 1330 +++++++++++++++++
.../intel/virtio/intel-ipu4-para-virt-drv.h | 38 +
.../virtio/intel-ipu4-virtio-be-bridge.c | 202 +++
.../virtio/intel-ipu4-virtio-be-bridge.h | 26 +
.../virtio/intel-ipu4-virtio-be-pipeline.c | 211 +++
.../virtio/intel-ipu4-virtio-be-pipeline.h | 27 +
.../virtio/intel-ipu4-virtio-be-stream.c | 389 +++++
.../virtio/intel-ipu4-virtio-be-stream.h | 26 +
.../pci/intel/virtio/intel-ipu4-virtio-be.c | 520 +++++++
.../pci/intel/virtio/intel-ipu4-virtio-be.h | 15 +
.../intel/virtio/intel-ipu4-virtio-common.c | 61 +
.../intel/virtio/intel-ipu4-virtio-common.h | 131 ++
.../virtio/intel-ipu4-virtio-fe-payload.c | 55 +
.../virtio/intel-ipu4-virtio-fe-payload.h | 14 +
.../virtio/intel-ipu4-virtio-fe-pipeline.c | 44 +
.../virtio/intel-ipu4-virtio-fe-pipeline.h | 19 +
.../pci/intel/virtio/intel-ipu4-virtio-fe.c | 243 +++
19 files changed, 3383 insertions(+)
create mode 100644 drivers/media/pci/intel/virtio/Makefile
create mode 100644 drivers/media/pci/intel/virtio/Makefile.virt
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.c
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.h
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.c
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.h
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.c
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.h
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.c
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.h
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.c
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.h
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.c
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.h
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.c
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.h
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.c
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.h
create mode 100644 drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe.c
diff --git a/drivers/media/pci/intel/virtio/Makefile b/drivers/media/pci/intel/virtio/Makefile
new file mode 100644
index 000000000000..0f4eab5addfc
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/Makefile
@@ -0,0 +1,10 @@
+ifneq ($(EXTERNAL_BUILD), 1)
+srcpath := $(srctree)
+endif
+
+IPU_STEP = bxtB0
+
+include $(srcpath)/$(src)/Makefile.virt
+
+ccflags-y += -I$(srcpath)/$(src)/../../../../../include/
+ccflags-y += -I$(srcpath)/$(src)/../
diff --git a/drivers/media/pci/intel/virtio/Makefile.virt b/drivers/media/pci/intel/virtio/Makefile.virt
new file mode 100644
index 000000000000..c3c30c4bf921
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/Makefile.virt
@@ -0,0 +1,22 @@
+ifndef IPU_STEP
+ $(error No IPU_STEP was defined. Stopping.)
+endif
+
+TARGET_MODULE:=intel-ipu-virt-$(IPU_STEP)
+
+$(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-common.o
+
+
+ifdef CONFIG_VIDEO_INTEL_IPU_VIRTIO_BE
+ $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be-pipeline.o
+ $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be-bridge.o
+ $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be.o
+ $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be-stream.o
+else
+ $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-fe-pipeline.o
+ $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-fe-payload.o
+ $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-fe.o
+ $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-para-virt-drv.o
+endif
+
+obj-$(CONFIG_VIDEO_INTEL_IPU_ACRN) := $(TARGET_MODULE).o
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.c b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.c
new file mode 100644
index 000000000000..3f6d541c87fe
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.c
@@ -0,0 +1,1330 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/kthread.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-buf.h>
+#include <linux/compat.h>
+
+#include "intel-ipu4-virtio-common.h"
+#include "intel-ipu4-para-virt-drv.h"
+#include "intel-ipu4-virtio-fe-pipeline.h"
+#include "intel-ipu4-virtio-fe-payload.h"
+#include "./ici/ici-isys-stream.h"
+#include "./ici/ici-isys-pipeline-device.h"
+
+
+static dev_t virt_pipeline_dev_t;
+static struct class *virt_pipeline_class;
+static struct ici_isys_pipeline_device *pipeline_dev;
+
+static dev_t virt_stream_dev_t;
+static struct class *virt_stream_class;
+static int virt_stream_devs_registered;
+static int stream_dev_init;
+
+static struct ipu4_virtio_ctx *g_fe_priv;
+
+struct mutex fop_mutex;
+
+#ifdef CONFIG_COMPAT
+struct timeval32 { /* 32-bit (compat) layout of the frame timestamp */
+	__u32 tv_sec;
+	__u32 tv_usec;
+} __attribute__((__packed__));
+
+struct ici_frame_plane32 { /* 32-bit (compat) layout of struct ici_frame_plane */
+	__u32 bytes_used;
+	__u32 length;
+	union {
+		compat_uptr_t userptr; /* 32-bit user pointer; widen with compat_ptr() */
+		__s32 dmafd;
+	} mem;
+	__u32 data_offset;
+	__u32 reserved[2];
+} __attribute__((__packed__));
+
+struct ici_frame_info32 { /* 32-bit (compat) layout of struct ici_frame_info */
+	__u32 frame_type;
+	__u32 field;
+	__u32 flag;
+	__u32 frame_buf_id;
+	struct timeval32 frame_timestamp;
+	__u32 frame_sequence_id;
+	__u32 mem_type; /* _DMA or _USER_PTR */
+	struct ici_frame_plane32 frame_planes[ICI_MAX_PLANES]; /* multi-planar */
+	__u32 num_planes; /* =1 single-planar, > 1 multi-planar array size */
+	__u32 reserved[2];
+} __attribute__((__packed__));
+
+#define ICI_IOC_GET_BUF32 _IOWR(MAJOR_STREAM, 3, struct ici_frame_info32)
+#define ICI_IOC_PUT_BUF32 _IOWR(MAJOR_STREAM, 4, struct ici_frame_info32)
+
+static void copy_from_user_frame_info32(struct ici_frame_info *kp, struct ici_frame_info32 __user *up)
+{
+	int i;
+	compat_uptr_t userptr;
+
+	get_user(kp->frame_type, &up->frame_type);
+	get_user(kp->field, &up->field);
+	get_user(kp->flag, &up->flag);
+	get_user(kp->frame_buf_id, &up->frame_buf_id);
+	get_user(kp->frame_timestamp.tv_sec, &up->frame_timestamp.tv_sec);
+	get_user(kp->frame_timestamp.tv_usec, &up->frame_timestamp.tv_usec);
+	get_user(kp->frame_sequence_id, &up->frame_sequence_id);
+	get_user(kp->mem_type, &up->mem_type);
+	get_user(kp->num_planes, &up->num_planes); if (kp->num_planes > ICI_MAX_PLANES) kp->num_planes = ICI_MAX_PLANES; /* fix: clamp untrusted plane count to frame_planes[] bound (was an OOB write) */
+	for (i = 0; i < kp->num_planes; i++) {
+		get_user(kp->frame_planes[i].bytes_used, &up->frame_planes[i].bytes_used);
+		get_user(kp->frame_planes[i].length, &up->frame_planes[i].length);
+		if (kp->mem_type == ICI_MEM_USERPTR) {
+			get_user(userptr, &up->frame_planes[i].mem.userptr);
+			kp->frame_planes[i].mem.userptr = (unsigned long) compat_ptr(userptr);
+		} else if (kp->mem_type == ICI_MEM_DMABUF) {
+			get_user(kp->frame_planes[i].mem.dmafd, &up->frame_planes[i].mem.dmafd);
+		} /* fix: dropped stray ';' after the brace */
+		get_user(kp->frame_planes[i].data_offset, &up->frame_planes[i].data_offset);
+	}
+}
+
+static void copy_to_user_frame_info32(struct ici_frame_info *kp, struct ici_frame_info32 __user *up)
+{
+	int i;
+	compat_uptr_t userptr;
+
+	put_user(kp->frame_type, &up->frame_type);
+	put_user(kp->field, &up->field);
+	put_user(kp->flag, &up->flag);
+	put_user(kp->frame_buf_id, &up->frame_buf_id);
+	put_user(kp->frame_timestamp.tv_sec, &up->frame_timestamp.tv_sec);
+	put_user(kp->frame_timestamp.tv_usec, &up->frame_timestamp.tv_usec);
+	put_user(kp->frame_sequence_id, &up->frame_sequence_id);
+	put_user(kp->mem_type, &up->mem_type);
+	put_user(kp->num_planes, &up->num_planes);
+	for (i = 0; i < kp->num_planes; i++) {
+		put_user(kp->frame_planes[i].bytes_used, &up->frame_planes[i].bytes_used);
+		put_user(kp->frame_planes[i].length, &up->frame_planes[i].length);
+		if (kp->mem_type == ICI_MEM_USERPTR) {
+			userptr = (unsigned long)compat_ptr(kp->frame_planes[i].mem.userptr);
+			put_user(userptr, &up->frame_planes[i].mem.userptr);
+		} else if (kp->mem_type == ICI_MEM_DMABUF) {
+			put_user(kp->frame_planes[i].mem.dmafd, &up->frame_planes[i].mem.dmafd); /* fix: was get_user(), which read user memory instead of writing the fd back */
+		}
+		put_user(kp->frame_planes[i].data_offset, &up->frame_planes[i].data_offset);
+	}
+}
+#endif
+
+static int get_userpages(struct device *dev, struct ici_frame_plane *frame_plane,
+		struct ici_kframe_plane *kframe_plane)
+{
+	unsigned long start, end, addr;
+	int npages, array_size;
+	struct page **pages;
+	int nr = 0;
+	int ret = 0;
+	struct sg_table *sgt;
+	unsigned int i;
+	u64 page_table_ref;
+	u64 *page_table;
+	addr = (unsigned long)frame_plane->mem.userptr;
+	start = addr & PAGE_MASK;
+	end = PAGE_ALIGN(addr + frame_plane->length);
+	npages = (end - start) >> PAGE_SHIFT;
+	array_size = npages * sizeof(struct page *);
+
+	if (!npages)
+		return -EINVAL;
+
+	page_table = kcalloc(npages, sizeof(*page_table), GFP_KERNEL);
+	if (!page_table) {
+		pr_err("Shared Page table for mediation failed\n");
+		return -ENOMEM;
+	}
+
+	pr_debug("%s:%d Number of Pages:%d frame_length:%d\n", __func__, __LINE__, npages, frame_plane->length);
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); /* NOTE(review): sgt is never filled or stored in kframe_plane; confirm it is needed at all */
+	if (!sgt) {
+		kfree(page_table); return -ENOMEM; } /* fix: was leaking page_table */
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, GFP_KERNEL);
+	else
+		pages = vzalloc(array_size);
+	if (!pages) {
+		kfree(sgt); kfree(page_table); return -ENOMEM; } /* fix: was leaking sgt and page_table */
+
+	down_read(&current->mm->mmap_sem);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
+	nr = get_user_pages(current, current->mm,
+			start, npages, 1, 0, pages, NULL);
+#else
+	nr = get_user_pages(start, npages, FOLL_WRITE, pages, NULL);
+#endif
+	if (nr < npages)
+		goto error_free_pages;
+	/* Share physical address of pages */
+	for (i = 0; i < npages; i++)
+		page_table[i] = page_to_phys(pages[i]);
+
+	pr_debug("UOS phy page add %lld offset:%ld\n", page_table[0], addr & ~PAGE_MASK);
+	page_table_ref = virt_to_phys(page_table);
+	kframe_plane->page_table_ref = page_table_ref;
+	kframe_plane->npages = npages;
+	up_read(&current->mm->mmap_sem);
+	kvfree(pages); return ret; /* fix: free the page-pointer array (the pages themselves stay pinned) */
+error_free_pages:
+	up_read(&current->mm->mmap_sem); if (nr < 0) nr = 0; /* fix: up_read was missing (mmap_sem leaked forever); guard negative nr before the unsigned-compare loop */
+	for (i = 0; i < nr; i++)
+		put_page(pages[i]);
+	kvfree(pages); kfree(sgt); /* fix: kzalloc/vzalloc'd array and sgt were leaked */
+	kfree(page_table);
+	return -ENOMEM;
+}
+
+static struct ici_frame_buf_wrapper *frame_buf_lookup(struct ici_isys_frame_buf_list *buf_list, struct ici_frame_info *user_frame_info) /* match a user frame against buffers already on getbuf_list; first matching plane wins */
+{
+	struct ici_frame_buf_wrapper *buf;
+	int i;
+	int mem_type = user_frame_info->mem_type;
+
+	list_for_each_entry(buf, &buf_list->getbuf_list, uos_node) {
+		for (i = 0; i < user_frame_info->num_planes; i++) {
+			struct ici_frame_plane *new_plane = &user_frame_info->frame_planes[i];
+			struct ici_frame_plane *cur_plane = &buf->frame_info.frame_planes[i];
+
+			if (buf->state != ICI_BUF_PREPARED &&
+				buf->state != ICI_BUF_DONE) /* NOTE(review): per-buffer state check sits inside the plane loop; could be hoisted */
+				continue;
+
+			switch (mem_type) {
+			case ICI_MEM_USERPTR:
+				if (new_plane->mem.userptr == cur_plane->mem.userptr)
+					return buf;
+				break;
+			case ICI_MEM_DMABUF:
+				if (new_plane->mem.dmafd == cur_plane->mem.dmafd)
+					return buf;
+				break;
+			}
+			//TODO: add multiplaner checks
+		}
+	}
+	return NULL;
+}
+static void put_userpages(struct ici_kframe_plane *kframe_plane) /* dirty + unpin the pages described by kframe_plane->sgt, then free the sgt */
+{
+	struct sg_table *sgt = kframe_plane->sgt; /* NOTE(review): get_userpages() never stores an sgt, so this can be NULL for USERPTR planes - confirm */
+	struct scatterlist *sgl;
+	unsigned int i;
+	struct mm_struct *mm = current->active_mm;
+
+	if (!mm) {
+		pr_err("Failed to get active mm_struct ptr from current process.\n");
+		return;
+	}
+
+	down_read(&mm->mmap_sem);
+	for_each_sg(sgt->sgl, sgl, sgt->orig_nents, i) {
+		struct page *page = sg_page(sgl);
+
+		unsigned int npages = PAGE_ALIGN(sgl->offset + sgl->length) >> PAGE_SHIFT;
+		unsigned int page_no;
+
+		for (page_no = 0; page_no < npages; ++page_no, ++page) {
+			set_page_dirty_lock(page);
+			put_page(page);
+		}
+	}
+
+	kfree(sgt);
+	kframe_plane->sgt = NULL;
+
+	up_read(&mm->mmap_sem);
+}
+
+static void put_dma(struct ici_kframe_plane *kframe_plane) /* undo map_dma(): vunmap, unmap the attachment, clear cached addresses */
+{
+	struct sg_table *sgt = kframe_plane->sgt;
+
+	if (WARN_ON(!kframe_plane->db_attach)) {
+		pr_err("trying to unpin a not attached buffer\n");
+		return;
+	}
+
+	if (WARN_ON(!sgt)) {
+		pr_err("dmabuf buffer is already unpinned\n");
+		return;
+	}
+
+	if (kframe_plane->kaddr) {
+		dma_buf_vunmap(kframe_plane->db_attach->dmabuf,
+			kframe_plane->kaddr);
+		kframe_plane->kaddr = NULL;
+	}
+	dma_buf_unmap_attachment(kframe_plane->db_attach, sgt,
+		DMA_BIDIRECTIONAL); /* NOTE(review): db_attach/dbdbuf are not detached/put here - confirm who releases them */
+
+	kframe_plane->dma_addr = 0;
+	kframe_plane->sgt = NULL;
+
+}
+
+static int map_dma(struct device *dev, struct ici_frame_plane *frame_plane,
+		struct ici_kframe_plane *kframe_plane) /* pin a dmabuf plane: get fd, attach, map, vmap */
+{
+
+	int ret = 0;
+	int fd = frame_plane->mem.dmafd;
+
+	kframe_plane->dbdbuf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(kframe_plane->dbdbuf)) { /* fix: dma_buf_get() returns ERR_PTR on a bad fd, never NULL, so !ptr could not catch it */
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (frame_plane->length == 0)
+		kframe_plane->length = kframe_plane->dbdbuf->size;
+	else
+		kframe_plane->length = frame_plane->length;
+
+	kframe_plane->fd = fd;
+	kframe_plane->db_attach = dma_buf_attach(kframe_plane->dbdbuf, dev);
+
+	if (IS_ERR(kframe_plane->db_attach)) {
+		ret = PTR_ERR(kframe_plane->db_attach);
+		goto error_put;
+	}
+
+	kframe_plane->sgt = dma_buf_map_attachment(kframe_plane->db_attach,
+		DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(kframe_plane->sgt)) {
+		ret = -EINVAL;
+		kframe_plane->sgt = NULL;
+		pr_err("map attachment failed\n");
+		goto error_detach;
+	}
+
+	kframe_plane->dma_addr = sg_dma_address(kframe_plane->sgt->sgl);
+	kframe_plane->kaddr = dma_buf_vmap(kframe_plane->dbdbuf);
+
+	if (!kframe_plane->kaddr) {
+		ret = -EINVAL;
+		dma_buf_unmap_attachment(kframe_plane->db_attach, kframe_plane->sgt, DMA_BIDIRECTIONAL); kframe_plane->sgt = NULL; goto error_detach; /* fix: undo the mapping before detaching (was leaked) */
+	}
+
+	pr_debug("MAPBUF: mapped fd %d\n", fd);
+
+	return 0;
+
+error_detach:
+	dma_buf_detach(kframe_plane->dbdbuf, kframe_plane->db_attach);
+error_put:
+	dma_buf_put(kframe_plane->dbdbuf);
+error:
+	return ret;
+}
+
+static void unmap_buf(struct ici_frame_buf_wrapper *buf) /* release every mapped plane of a wrapper (USERPTR or DMABUF) */
+{
+	int i;
+
+	for (i = 0; i < buf->frame_info.num_planes; i++) {
+		struct ici_kframe_plane *kframe_plane =
+			&buf->kframe_info.planes[i];
+		switch (kframe_plane->mem_type) {
+		case ICI_MEM_USERPTR:
+			put_userpages(kframe_plane);
+			break;
+		case ICI_MEM_DMABUF:
+			put_dma(kframe_plane);
+			break;
+		default:
+			pr_debug("not supported memory type: %d\n", kframe_plane->mem_type);
+			break;
+		}
+	}
+}
+struct ici_frame_buf_wrapper *get_buf(struct virtual_stream *vstream, struct ici_frame_info *frame_info) /* find or create+pin a kernel wrapper for the user frame; NULL on failure */
+{
+	int res;
+	unsigned i;
+	struct ici_frame_buf_wrapper *buf;
+
+	struct ici_kframe_plane *kframe_plane;
+	struct ici_isys_frame_buf_list *buf_list = &vstream->buf_list;
+	int mem_type = frame_info->mem_type;
+
+	if (mem_type != ICI_MEM_USERPTR && mem_type != ICI_MEM_DMABUF) {
+		pr_err("Memory type not supproted\n");
+		return NULL;
+	}
+
+	if (!frame_info->frame_planes[0].length || frame_info->num_planes > ICI_MAX_PLANES) { /* fix: also reject out-of-bounds plane counts from userspace */
+		pr_err("User length not set\n");
+		return NULL;
+	}
+
+	buf = frame_buf_lookup(buf_list, frame_info);
+	if (buf) {
+		pr_debug("Frame buffer found in the list: %ld\n", buf->frame_info.frame_planes[0].mem.userptr);
+		buf->state = ICI_BUF_PREPARED;
+		return buf;
+	}
+	pr_debug("Creating new buffer in the list\n");
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return NULL;
+
+	buf->buf_id = frame_info->frame_buf_id;
+	buf->uos_buf_list = buf_list;
+	memcpy(&buf->frame_info, frame_info, sizeof(buf->frame_info));
+
+	switch (mem_type) {
+	case ICI_MEM_USERPTR:
+		if (!frame_info->frame_planes[0].mem.userptr) {
+			pr_err("User pointer not define\n");
+			kfree(buf); return NULL; /* fix: buf was leaked on this path */
+		}
+		for (i = 0; i < frame_info->num_planes; i++) {
+			kframe_plane = &buf->kframe_info.planes[i];
+			kframe_plane->mem_type = ICI_MEM_USERPTR;
+			res = get_userpages(&vstream->strm_dev.dev, &frame_info->frame_planes[i],
+				kframe_plane);
+			if (res)
+				{ kfree(buf); return NULL; } /* fix: buf leak; NOTE earlier planes stay pinned - TODO unwind */
+		}
+		break;
+	case ICI_MEM_DMABUF:
+		for (i = 0; i < frame_info->num_planes; i++) {
+			kframe_plane = &buf->kframe_info.planes[i];
+			kframe_plane->mem_type = ICI_MEM_DMABUF;
+			res = map_dma(&vstream->strm_dev.dev, &frame_info->frame_planes[i],
+				kframe_plane);
+			if (res)
+				{ kfree(buf); return NULL; } /* fix: buf leak; NOTE earlier planes stay mapped - TODO unwind */
+		}
+
+		break;
+	}
+	mutex_lock(&buf_list->mutex);
+	buf->state = ICI_BUF_PREPARED;
+	list_add_tail(&buf->uos_node, &buf_list->getbuf_list);
+	mutex_unlock(&buf_list->mutex);
+	return buf;
+}
+
+//Call from Stream-OFF and if Stream-ON fails
+void buf_stream_cancel(struct virtual_stream *vstream) /* unlink every queued buffer from both FE-side lists */
+{
+	struct ici_isys_frame_buf_list *buf_list = &vstream->buf_list;
+	struct ici_frame_buf_wrapper *buf;
+	struct ici_frame_buf_wrapper *next_buf;
+
+	list_for_each_entry_safe(buf, next_buf,
+		&buf_list->getbuf_list, uos_node) {
+		list_del(&buf->uos_node); /* NOTE(review): wrapper is only unlinked, never unmapped/kfree'd - confirm ownership */
+	}
+	list_for_each_entry_safe(buf, next_buf,
+		&buf_list->putbuf_list, uos_node) {
+		list_del(&buf->uos_node);
+	}
+}
+
+static int virt_isys_set_format(struct file *file, void *fh,
+	struct ici_stream_format *sf) /* forward SET_FORMAT to the BE over virtio queue 0 */
+{
+	struct ici_stream_device *strm_dev = fh;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	struct ipu4_virtio_req *req;
+	int rval = 0;
+	int op[10];
+
+	pr_debug("Calling Set Format\n");
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+
+	req->payload = virt_to_phys(sf); /* BE reads sf through its guest-physical address */
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_SET_FORMAT, &op[0]);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0);
+	if (rval) {
+		dev_err(&strm_dev->dev, "Failed to open virtual device\n"); /* NOTE(review): message copy-pasted from the open path */
+		kfree(req);
+		return rval;
+	}
+	kfree(req);
+
+	return rval;
+}
+
+static int virt_isys_stream_on(struct file *file, void *fh) /* forward STREAM_ON to the BE over virtio queue 0 */
+{
+	struct ici_stream_device *strm_dev = fh;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	struct ipu4_virtio_req *req;
+	int rval = 0;
+	int op[10];
+	pr_debug("Calling Stream ON\n");
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_STREAM_ON, &op[0]);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0);
+	if (rval) {
+		dev_err(&strm_dev->dev, "Failed to stream on virtual device\n"); /* fix: message wrongly said "open" */
+		kfree(req);
+		return rval;
+	}
+	kfree(req);
+
+	/* fix: removed a trailing kcalloc(req) that was never sent or freed
+	 * (guarded by the impossible "if (!req && !fe_ctx)") - it leaked one
+	 * request per successful STREAM_ON. */
+
+	return rval;
+}
+
+static int virt_isys_stream_off(struct file *file, void *fh) /* forward STREAM_OFF to the BE, then drop all queued buffers */
+{
+	struct ici_stream_device *strm_dev = fh;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	struct ipu4_virtio_req *req;
+	int rval = 0;
+	int op[10];
+
+	pr_debug("Calling Stream OFF\n");
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_STREAM_OFF, &op[0]);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0);
+	if (rval) {
+		dev_err(&strm_dev->dev, "Failed to open virtual device\n"); /* NOTE(review): message copy-pasted from the open path */
+		kfree(req);
+		return rval;
+	}
+	kfree(req);
+
+	buf_stream_cancel(vstream); /* unlink every buffer still queued on this stream */
+
+	return rval;
+}
+
+static int virt_isys_getbuf(struct file *file, void *fh,
+	struct ici_frame_info *user_frame_info) /* map/look up the user buffer, then hand it to the BE by phys addr */
+{
+	struct ici_stream_device *strm_dev = fh;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	struct ipu4_virtio_req *req;
+	struct ici_frame_buf_wrapper *buf;
+	int rval = 0;
+	int op[3];
+
+	pr_debug("Calling Get Buffer\n");
+
+	buf = get_buf(vstream, user_frame_info);
+	if (!buf) {
+		dev_err(&strm_dev->dev, "Failed to map buffer: %d\n", rval); /* NOTE(review): rval is still 0 at this point */
+		return -ENOMEM;
+	}
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+	op[2] = user_frame_info->mem_type;
+	req->payload = virt_to_phys(buf); /* BE reads the wrapper through its guest-physical address */
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_GET_BUF, &op[0]);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0);
+	if (rval) {
+		dev_err(&strm_dev->dev, "Failed to Get Buffer\n");
+		kfree(req);
+		return rval; /* NOTE(review): buf remains on getbuf_list until stream off - confirm */
+	}
+	kfree(req);
+
+	return rval;
+}
+
+static int virt_isys_putbuf(struct file *file, void *fh,
+	struct ici_frame_info *user_frame_info) /* return a filled buffer to the BE over virtio queue 0 */
+{
+	struct ici_stream_device *strm_dev = fh;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	struct ipu4_virtio_req *req;
+	int rval = 0;
+	int op[2];
+
+	pr_debug("Calling Put Buffer\n");
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+	req->payload = virt_to_phys(user_frame_info);
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_PUT_BUF, &op[0]);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0);
+	if (rval) {
+		dev_err(&strm_dev->dev, "Failed to Put Buffer\n"); /* fix: message wrongly said "Get" */
+		kfree(req);
+		return rval;
+	}
+	kfree(req);
+
+	/* fix: removed a trailing kcalloc(req) that was never sent or freed
+	 * (guarded by the impossible "if (!req && !fe_ctx)") - it leaked one
+	 * request per successful PUT_BUF. */
+
+	return rval;
+}
+
+static unsigned int stream_fop_poll(struct file *file, struct ici_stream_device *dev) /* ask the BE for the stream's poll state over virtio queue 1 */
+{
+	struct ipu4_virtio_req *req;
+	struct virtual_stream *vstream = dev_to_vstream(dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	struct ici_stream_device *strm_dev = file->private_data;
+	int rval = 0;
+	int op[2];
+
+	dev_dbg(&strm_dev->dev, "stream_fop_poll %d\n", vstream->virt_dev_id);
+	get_device(&dev->dev); /* NOTE(review): no matching put_device() in this function - confirm */
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM; /* NOTE(review): a negative value ends up used as a poll mask - confirm callers */
+
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_POLL, &op[0]);
+
+	mutex_lock(&fop_mutex);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_1);
+	if (rval) {
+		mutex_unlock(&fop_mutex);
+		dev_err(&strm_dev->dev, "Failed to open virtual device\n");
+		kfree(req);
+		return rval;
+	}
+
+	mutex_unlock(&fop_mutex);
+
+	rval = req->func_ret; /* poll mask produced by the BE */
+	kfree(req);
+
+	return rval;
+}
+
+static int virt_stream_fop_open(struct inode *inode, struct file *file) /* open the virtual stream: notify the BE over queue 1 */
+{
+	struct ici_stream_device *strm_dev = inode_to_intel_ipu_stream_device(inode);
+	struct ipu4_virtio_req *req;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	int rval = 0;
+	int op[3];
+	pr_debug("%s %d", __func__, vstream->virt_dev_id);
+	get_device(&strm_dev->dev);
+
+	file->private_data = strm_dev;
+
+	if (!fe_ctx)
+		return -EINVAL; /* NOTE(review): get_device() above is not undone on this or later error paths - confirm */
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req) {
+		dev_err(&strm_dev->dev, "Virtio Req buffer failed\n");
+		return -ENOMEM;
+	}
+
+	op[0] = vstream->virt_dev_id;
+	op[1] = 1;
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_DEVICE_OPEN, &op[0]);
+
+	mutex_lock(&fop_mutex);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_1);
+	if (rval) {
+		mutex_unlock(&fop_mutex);
+		dev_err(&strm_dev->dev, "Failed to open virtual device\n");
+		kfree(req);
+		return rval;
+	}
+	kfree(req);
+
+	mutex_unlock(&fop_mutex);
+
+	return rval;
+}
+
+static int virt_stream_fop_release(struct inode *inode, struct file *file) /* close the virtual stream: notify the BE over queue 1 */
+{
+	struct ici_stream_device *strm_dev = inode_to_intel_ipu_stream_device(inode);
+	struct ipu4_virtio_req *req;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	int rval = 0;
+	int op[2];
+	pr_debug("%s %d", __func__, vstream->virt_dev_id);
+	put_device(&strm_dev->dev); /* balances get_device() from open */
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_DEVICE_CLOSE, &op[0]);
+
+	mutex_lock(&fop_mutex);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_1);
+	if (rval) {
+		mutex_unlock(&fop_mutex);
+		dev_err(&strm_dev->dev, "Failed to close virtual device\n");
+		kfree(req);
+		return rval;
+	}
+	kfree(req);
+
+	mutex_unlock(&fop_mutex);
+
+	return rval;
+}
+
+static unsigned int virt_stream_fop_poll(struct file *file,
+	struct poll_table_struct *poll) /* poll entry point: delegates to the BE via stream_fop_poll() */
+{
+	struct ici_stream_device *as = file->private_data;
+	unsigned int res = POLLERR | POLLHUP;
+
+	dev_dbg(&as->dev, "virt_stream_fop_poll for:%s\n", as->name);
+
+	res = stream_fop_poll(file, as); /* NOTE(review): the poll table is never registered, so this path never sleeps here */
+
+	//res = POLLIN;
+
+	dev_dbg(&as->dev, "virt_stream_fop_poll res %u\n", res);
+
+	return res;
+}
+
+static long virt_stream_ioctl32(struct file *file, unsigned int ioctl_cmd,
+	unsigned long ioctl_arg) /* compat_ioctl entry: translates 32-bit layouts, then dispatches under dev->mutex */
+{
+	union isys_ioctl_cmd_args {
+		struct ici_frame_info frame_info;
+		struct ici_stream_format sf;
+	};
+	void __user *up = compat_ptr(ioctl_arg);
+	union isys_ioctl_cmd_args *data = NULL;
+	int err = 0;
+	struct ici_stream_device *dev = file->private_data;
+
+	mutex_lock(dev->mutex);
+	switch (ioctl_cmd) {
+	case ICI_IOC_STREAM_ON:
+		pr_debug("IPU FE IOCTL STREAM_ON\n");
+		err = virt_isys_stream_on(file, dev);
+		break;
+	case ICI_IOC_STREAM_OFF:
+		pr_debug("IPU FE IOCTL STREAM_OFF\n");
+		err = virt_isys_stream_off(file, dev);
+		break;
+	case ICI_IOC_GET_BUF32:
+		pr_debug("IPU FE IOCTL GET_BUF\n");
+		data = kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL); if (!data) { mutex_unlock(dev->mutex); return -ENOMEM; } /* fix: alloc was unchecked (NULL deref) */
+		copy_from_user_frame_info32(&data->frame_info, up);
+		err = virt_isys_getbuf(file, dev, &data->frame_info);
+		copy_to_user_frame_info32(&data->frame_info, up);
+		kfree(data);
+		if (err) {
+			mutex_unlock(dev->mutex);
+			return -EFAULT;
+		}
+		break;
+	case ICI_IOC_PUT_BUF32:
+		pr_debug("IPU FE IOCTL PUT_BUF\n");
+		data = kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL); if (!data) { mutex_unlock(dev->mutex); return -ENOMEM; } /* fix: alloc was unchecked (NULL deref) */
+		copy_from_user_frame_info32(&data->frame_info, up);
+		err = virt_isys_putbuf(file, dev, &data->frame_info);
+		copy_to_user_frame_info32(&data->frame_info, up);
+		kfree(data);
+		if (err) {
+			mutex_unlock(dev->mutex);
+			return -EFAULT;
+		}
+		break;
+	case ICI_IOC_SET_FORMAT:
+		pr_debug("IPU FE IOCTL SET_FORMAT\n");
+		if (_IOC_SIZE(ioctl_cmd) > sizeof(union isys_ioctl_cmd_args)) {
+			mutex_unlock(dev->mutex);
+			return -ENOTTY;
+		}
+
+		data = kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL); if (!data) { mutex_unlock(dev->mutex); return -ENOMEM; } /* fix: alloc was unchecked (NULL deref) */
+		err = copy_from_user(data, up, _IOC_SIZE(ioctl_cmd));
+		if (err) {
+			kfree(data);
+			mutex_unlock(dev->mutex);
+			return -EFAULT;
+		}
+		err = virt_isys_set_format(file, dev, &data->sf);
+		err = copy_to_user(up, data, _IOC_SIZE(ioctl_cmd));
+		if (err) {
+			kfree(data);
+			mutex_unlock(dev->mutex);
+			return -EFAULT;
+		}
+		kfree(data);
+		break;
+
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	mutex_unlock(dev->mutex);
+
+	return err; /* fix: was "return 0", which swallowed stream on/off errors and -ENOTTY */
+}
+
+static long virt_stream_ioctl(struct file *file, unsigned int ioctl_cmd,
+	unsigned long ioctl_arg) /* native ioctl entry: copy user args, dispatch under dev->mutex, copy back */
+{
+	union isys_ioctl_cmd_args {
+		struct ici_frame_info frame_info;
+		struct ici_stream_format sf;
+	};
+	int err = 0;
+	union isys_ioctl_cmd_args *data = NULL;
+	struct ici_stream_device *dev = file->private_data;
+	void __user *up = (void __user *)ioctl_arg;
+
+	bool copy = (ioctl_cmd != ICI_IOC_STREAM_ON &&
+		ioctl_cmd != ICI_IOC_STREAM_OFF);
+
+	if (copy) {
+		if (_IOC_SIZE(ioctl_cmd) > sizeof(union isys_ioctl_cmd_args))
+			return -ENOTTY;
+
+		data = kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL); if (!data) return -ENOMEM; /* fix: alloc was unchecked (NULL deref) */
+		if (_IOC_DIR(ioctl_cmd) & _IOC_WRITE) {
+			err = copy_from_user(data, up,
+				_IOC_SIZE(ioctl_cmd));
+			if (err) {
+				kfree(data);
+				return -EFAULT;
+			}
+		}
+	}
+
+	mutex_lock(dev->mutex);
+	switch (ioctl_cmd) {
+	case ICI_IOC_STREAM_ON:
+		err = virt_isys_stream_on(file, dev);
+		break;
+	case ICI_IOC_STREAM_OFF:
+		err = virt_isys_stream_off(file, dev);
+		break;
+	case ICI_IOC_GET_BUF:
+		err = virt_isys_getbuf(file, dev, &data->frame_info);
+		break;
+	case ICI_IOC_PUT_BUF:
+		err = virt_isys_putbuf(file, dev, &data->frame_info);
+		break;
+	case ICI_IOC_SET_FORMAT:
+		err = virt_isys_set_format(file, dev, &data->sf);
+		break;
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	mutex_unlock(dev->mutex);
+
+	if (copy) {
+		if (copy_to_user(up, data, _IOC_SIZE(ioctl_cmd)) && !err) err = -EFAULT; /* fix: copy_to_user result no longer clobbers the command result */
+		kfree(data);
+	}
+	return err; /* fix: was "return 0", which swallowed every error including -ENOTTY */
+}
+
+
+static const struct file_operations virt_stream_fops = { /* char-dev entry points for each virtual stream node */
+	.owner = THIS_MODULE,
+	.open = virt_stream_fop_open, /* calls strm_dev->fops->open() */
+	.unlocked_ioctl = virt_stream_ioctl, /* calls strm_dev->ipu_ioctl_ops->() */
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = virt_stream_ioctl32,
+#endif
+	.release = virt_stream_fop_release, /* calls strm_dev->fops->release() */
+	.poll = virt_stream_fop_poll, /* calls strm_dev->fops->poll() */
+};
+
+/* Called on device_unregister */
+static void base_device_release(struct device *sd)
+{
+}
+
+int virt_frame_buf_init(struct ici_isys_frame_buf_list *buf_list) /* set up per-stream buffer bookkeeping: lists, locks, waitqueue */
+{
+	buf_list->drv_priv = NULL;
+	mutex_init(&buf_list->mutex);
+	spin_lock_init(&buf_list->lock);
+	spin_lock_init(&buf_list->short_packet_queue_lock);
+	INIT_LIST_HEAD(&buf_list->getbuf_list);
+	INIT_LIST_HEAD(&buf_list->putbuf_list);
+	INIT_LIST_HEAD(&buf_list->interlacebuf_list);
+	init_waitqueue_head(&buf_list->wait);
+	return 0;
+}
+
+static int virt_ici_stream_init(struct virtual_stream *vstream,
+	struct ici_stream_device *strm_dev) /* register one stream char device and attach a virtio FE context */
+{
+	int rval;
+	int num;
+	struct ipu4_virtio_ctx *fe_ctx;
+
+	if (!stream_dev_init) { /* first stream: allocate the chrdev region and class once */
+		virt_stream_dev_t = MKDEV(MAJOR_STREAM, 0);
+
+		rval = register_chrdev_region(virt_stream_dev_t,
+			MAX_STREAM_DEVICES, ICI_STREAM_DEVICE_NAME);
+		if (rval) {
+			pr_err("can't register virt_ici stream chrdev region (%d)\n", rval);
+			return rval;
+		}
+
+		virt_stream_class = class_create(THIS_MODULE, ICI_STREAM_DEVICE_NAME);
+		if (IS_ERR(virt_stream_class)) {
+			unregister_chrdev_region(virt_stream_dev_t, MAX_STREAM_DEVICES);
+			pr_err("Failed to register device class %s\n", ICI_STREAM_DEVICE_NAME);
+			return PTR_ERR(virt_stream_class);
+		}
+		stream_dev_init++;
+	}
+
+	num = virt_stream_devs_registered;
+	strm_dev->minor = -1;
+	cdev_init(&strm_dev->cdev, &virt_stream_fops);
+	strm_dev->cdev.owner = virt_stream_fops.owner;
+
+	rval = cdev_add(&strm_dev->cdev, MKDEV(MAJOR(virt_stream_dev_t), num), 1);
+	if (rval) {
+		pr_err("%s: failed to add cdevice\n", __func__);
+		return rval;
+	}
+
+	strm_dev->dev.class = virt_stream_class;
+	strm_dev->dev.devt = MKDEV(MAJOR(virt_stream_dev_t), num);
+	dev_set_name(&strm_dev->dev, "%s%d", ICI_STREAM_DEVICE_NAME, num);
+
+	rval = device_register(&strm_dev->dev);
+	if (rval < 0) {
+		pr_err("%s: device_register failed\n", __func__);
+		cdev_del(&strm_dev->cdev);
+		return rval;
+	}
+	strm_dev->dev.release = base_device_release; /* NOTE(review): assigned after device_register(); normally set before - confirm */
+	strlcpy(strm_dev->name, strm_dev->dev.kobj.name, sizeof(strm_dev->name));
+	strm_dev->minor = num;
+	vstream->virt_dev_id = num;
+
+	virt_stream_devs_registered++;
+
+	fe_ctx = kcalloc(1, sizeof(struct ipu4_virtio_ctx),
+		GFP_KERNEL);
+
+	if (!fe_ctx)
+		return -ENOMEM; /* NOTE(review): cdev/device registered above are not unwound on the error paths below - confirm */
+
+	fe_ctx->bknd_ops = &ipu4_virtio_bknd_ops;
+
+	if (fe_ctx->bknd_ops->init) {
+		rval = fe_ctx->bknd_ops->init();
+		if (rval < 0) {
+			pr_err("failed to initialize backend.\n");
+			return rval; /* NOTE(review): fe_ctx leaks here - confirm */
+		}
+	}
+
+	fe_ctx->domid = fe_ctx->bknd_ops->get_vm_id();
+	vstream->ctx = fe_ctx;
+	dev_dbg(&strm_dev->dev, "IPU FE registered with domid:%d\n", fe_ctx->domid);
+
+	return 0;
+}
+
+static void virt_ici_stream_exit(void) /* tear down the class and chrdev region created by virt_ici_stream_init() */
+{
+	class_unregister(virt_stream_class);
+	unregister_chrdev_region(virt_stream_dev_t, MAX_STREAM_DEVICES);
+
+	pr_notice("Virtual stream device unregistered\n");
+}
+
+static int virt_pipeline_fop_open(struct inode *inode, struct file *file)
+{
+ struct ici_isys_pipeline_device *dev = inode_to_ici_isys_pipeline_device(inode);
+ struct ipu4_virtio_req *req;
+ int rval = 0;
+ int op[2];
+ pr_debug("virt pipeline open\n");
+ get_device(&dev->dev);
+
+ file->private_data = dev;
+
+ req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ op[0] = dev->minor;
+ op[1] = 0;
+
+ intel_ipu4_virtio_create_req(req, IPU4_CMD_PIPELINE_OPEN, &op[0]);
+
+ rval = g_fe_priv->bknd_ops->send_req(g_fe_priv->domid, req, true, IPU_VIRTIO_QUEUE_1);
+ if (rval) {
+ pr_err("Failed to open virtual device\n");
+ kfree(req);
+ return rval;
+ }
+ kfree(req);
+
+ return rval;
+}
+
+static int virt_pipeline_fop_release(struct inode *inode, struct file *file)
+{
+ int rval = 0;
+ int op[2];
+ struct ipu4_virtio_req *req;
+
+ struct ici_isys_pipeline_device *pipe_dev =
+ inode_to_ici_isys_pipeline_device(inode);
+
+ put_device(&pipe_dev->dev);
+
+ req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ op[0] = pipe_dev->minor;
+ op[1] = 0;
+
+ intel_ipu4_virtio_create_req(req, IPU4_CMD_PIPELINE_CLOSE, &op[0]);
+
+ rval = g_fe_priv->bknd_ops->send_req(g_fe_priv->domid, req, true, IPU_VIRTIO_QUEUE_1);
+ if (rval) {
+ pr_err("Failed to close virtual device\n");
+ kfree(req);
+ return rval;
+ }
+ kfree(req);
+
+ return rval;
+}
+
+static long virt_pipeline_ioctl_common(void __user *up,
+ struct file *file, unsigned int ioctl_cmd,
+ unsigned long ioctl_arg)
+{
+ union isys_ioctl_cmd_args {
+ struct ici_node_desc node_desc;
+ struct ici_link_desc link;
+ struct ici_pad_framefmt pad_prop;
+ struct ici_pad_supported_format_desc
+ format_desc;
+ struct ici_links_query links_query;
+ struct ici_pad_selection pad_sel;
+ };
+ int err = 0;
+ union isys_ioctl_cmd_args *data = NULL;
+ struct ici_isys_pipeline_device *dev = file->private_data;
+
+ if (_IOC_SIZE(ioctl_cmd) > sizeof(union isys_ioctl_cmd_args))
+ return -ENOTTY;
+
+ data = (union isys_ioctl_cmd_args *) kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL);
+ if (_IOC_DIR(ioctl_cmd) & _IOC_WRITE) {
+ err = copy_from_user(data, up,
+ _IOC_SIZE(ioctl_cmd));
+ if (err) {
+ kfree(data);
+ return -EFAULT;
+ }
+ }
+ mutex_lock(&dev->mutex);
+ switch (ioctl_cmd) {
+ case ICI_IOC_ENUM_NODES:
+ err = process_pipeline(file, g_fe_priv,
+ (void *)&data->node_desc, IPU4_CMD_ENUM_NODES);
+ break;
+ case ICI_IOC_ENUM_LINKS:
+ pr_debug("virt_pipeline_ioctl: ICI_IOC_ENUM_LINKS\n");
+ err = process_pipeline(file, g_fe_priv, (void *)&data->links_query, IPU4_CMD_ENUM_LINKS);
+ break;
+ case ICI_IOC_SETUP_PIPE:
+ pr_debug("virt_pipeline_ioctl: ICI_IOC_SETUP_PIPE\n");
+ err = process_pipeline(file, g_fe_priv,
+ (void *)&data->link, IPU4_CMD_SETUP_PIPE);
+ break;
+ case ICI_IOC_SET_FRAMEFMT:
+ pr_debug("virt_pipeline_ioctl: ICI_IOC_SET_FRAMEFMT\n");
+ err = process_pipeline(file, g_fe_priv,
+ (void *)&data->pad_prop, IPU4_CMD_SET_FRAMEFMT);
+ break;
+ case ICI_IOC_GET_FRAMEFMT:
+ pr_debug("virt_pipeline_ioctl: ICI_IOC_GET_FRAMEFMT\n");
+ err = process_pipeline(file, g_fe_priv,
+ (void *)&data->pad_prop, IPU4_CMD_GET_FRAMEFMT);
+ break;
+ case ICI_IOC_GET_SUPPORTED_FRAMEFMT:
+ pr_debug("virt_pipeline_ioctl: ICI_IOC_GET_SUPPORTED_FRAMEFMT\n");
+ err = process_pipeline(file, g_fe_priv,
+ (void *)&data->format_desc, IPU4_CMD_GET_SUPPORTED_FRAMEFMT);
+ break;
+ case ICI_IOC_SET_SELECTION:
+ pr_debug("virt_pipeline_ioctl: ICI_IOC_SET_SELECTION\n");
+ err = process_pipeline(file, g_fe_priv,
+ (void *)&data->pad_sel, IPU4_CMD_SET_SELECTION);
+ break;
+ case ICI_IOC_GET_SELECTION:
+ pr_debug("virt_pipeline_ioctl: ICI_IOC_GET_SELECTION\n");
+ err = process_pipeline(file, g_fe_priv,
+ (void *)&data->pad_sel, IPU4_CMD_GET_SELECTION);
+ break;
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ mutex_unlock(&dev->mutex);
+ if (err < 0) {
+ kfree(data);
+ return err;
+ }
+
+ if (_IOC_DIR(ioctl_cmd) & _IOC_READ) {
+ err = copy_to_user(up, data,
+ _IOC_SIZE(ioctl_cmd));
+ if (err) {
+ kfree(data);
+ return -EFAULT;
+ }
+ }
+ kfree(data);
+
+ return 0;
+}
+
+static long virt_pipeline_ioctl(struct file *file, unsigned int ioctl_cmd,
+ unsigned long ioctl_arg)
+{
+ void __user *up = (void __user *)ioctl_arg;
+ return virt_pipeline_ioctl_common(up, file, ioctl_cmd, ioctl_arg);
+}
+
+static long virt_pipeline_ioctl32(struct file *file, unsigned int ioctl_cmd,
+ unsigned long ioctl_arg)
+{
+ void __user *up = compat_ptr(ioctl_arg);
+ return virt_pipeline_ioctl_common(up, file, ioctl_cmd, ioctl_arg);
+}
+
+static const struct file_operations virt_pipeline_fops = {
+ .owner = THIS_MODULE,
+ .open = virt_pipeline_fop_open,
+ .unlocked_ioctl = virt_pipeline_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = virt_pipeline_ioctl32,
+#endif
+ .release = virt_pipeline_fop_release,
+};
+
+static int virt_fe_init(void)
+{
+ int rval;
+
+ g_fe_priv = kcalloc(1, sizeof(struct ipu4_virtio_ctx),
+ GFP_KERNEL);
+
+ if (!g_fe_priv)
+ return -ENOMEM;
+
+ g_fe_priv->bknd_ops = &ipu4_virtio_bknd_ops;
+
+ if (g_fe_priv->bknd_ops->init) {
+ rval = g_fe_priv->bknd_ops->init();
+ if (rval < 0) {
+ pr_err("failed to initialize backend.\n");
+ return rval;
+ }
+ }
+
+ g_fe_priv->domid = g_fe_priv->bknd_ops->get_vm_id();
+
+ pr_debug("FE registered with domid:%d\n", g_fe_priv->domid);
+
+ return 0;
+}
+
+static int virt_ici_pipeline_init(void)
+{
+ int rval;
+ pr_notice("Initializing pipeline\n");
+ virt_pipeline_dev_t = MKDEV(MAJOR_PIPELINE, 0);
+
+ rval = register_chrdev_region(virt_pipeline_dev_t,
+ MAX_PIPELINE_DEVICES, ICI_PIPELINE_DEVICE_NAME);
+ if (rval) {
+ pr_err("can't register virt_ici stream chrdev region (%d)\n",
+ rval);
+ return rval;
+ }
+
+ virt_pipeline_class = class_create(THIS_MODULE, ICI_PIPELINE_DEVICE_NAME);
+ if (IS_ERR(virt_pipeline_class)) {
+ unregister_chrdev_region(virt_pipeline_dev_t, MAX_PIPELINE_DEVICES);
+ pr_err("Failed to register device class %s\n", ICI_PIPELINE_DEVICE_NAME);
+ return PTR_ERR(virt_pipeline_class);
+ }
+
+ pipeline_dev = kzalloc(sizeof(*pipeline_dev), GFP_KERNEL);
+ if (!pipeline_dev)
+ return -ENOMEM;
+ pipeline_dev->minor = -1;
+ cdev_init(&pipeline_dev->cdev, &virt_pipeline_fops);
+ pipeline_dev->cdev.owner = virt_pipeline_fops.owner;
+
+ rval = cdev_add(&pipeline_dev->cdev, MKDEV(MAJOR_PIPELINE, MINOR_PIPELINE), 1);
+ if (rval) {
+ pr_err("%s: failed to add cdevice\n", __func__);
+ return rval;
+ }
+
+ pipeline_dev->dev.class = virt_pipeline_class;
+ pipeline_dev->dev.devt = MKDEV(MAJOR_PIPELINE, MINOR_PIPELINE);
+ dev_set_name(&pipeline_dev->dev, "%s", ICI_PIPELINE_DEVICE_NAME);
+
+ rval = device_register(&pipeline_dev->dev);
+ if (rval < 0) {
+ pr_err("%s: device_register failed\n", __func__);
+ cdev_del(&pipeline_dev->cdev);
+ return rval;
+ }
+ pipeline_dev->dev.release = base_device_release;
+ strlcpy(pipeline_dev->name, pipeline_dev->dev.kobj.name, sizeof(pipeline_dev->name));
+ pipeline_dev->minor = MINOR_PIPELINE;
+
+ return 0;
+}
+
+static int __init virt_ici_init(void)
+{
+ struct virtual_stream *vstream;
+ int rval = 0, i;
+ pr_notice("Initializing IPU Para virtual driver\n");
+ for (i = 0; i < MAX_ISYS_VIRT_STREAM; i++) {
+
+ vstream = kzalloc(sizeof(*vstream), GFP_KERNEL);
+ if (!vstream)
+ return -ENOMEM;
+ mutex_init(&vstream->mutex);
+ mutex_init(&fop_mutex);
+ vstream->strm_dev.mutex = &vstream->mutex;
+
+ rval = virt_frame_buf_init(&vstream->buf_list);
+ if (rval)
+ goto init_fail;
+
+ dev_set_drvdata(&vstream->strm_dev.dev, vstream);
+
+ mutex_lock(&vstream->mutex);
+ rval = virt_ici_stream_init(vstream, &vstream->strm_dev);
+ mutex_unlock(&vstream->mutex);
+
+ if (rval)
+ goto init_fail;
+ }
+
+ rval = virt_ici_pipeline_init();
+ if (rval)
+ goto init_fail;
+
+ rval = virt_fe_init();
+ return rval;
+
+init_fail:
+ mutex_destroy(&vstream->mutex);
+ mutex_destroy(&fop_mutex);
+ kfree(vstream);
+ return rval;
+}
+
+static void virt_ici_pipeline_exit(void)
+{
+ class_unregister(virt_pipeline_class);
+ unregister_chrdev_region(virt_pipeline_dev_t, MAX_PIPELINE_DEVICES);
+ if (pipeline_dev)
+ kfree((void *)pipeline_dev);
+ if (g_fe_priv)
+ kfree((void *)g_fe_priv);
+
+ pr_notice("virt_ici pipeline device unregistered\n");
+}
+
+static void __exit virt_ici_exit(void)
+{
+ virt_ici_stream_exit();
+ virt_ici_pipeline_exit();
+}
+
+module_init(virt_ici_init);
+module_exit(virt_ici_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Intel IPU Para virtualize ici input system driver");
+MODULE_AUTHOR("Kushal Bandi <kushal.bandi@intel.com>");
+
+
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.h b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.h
new file mode 100644
index 000000000000..f44954b03be2
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef INTEL_IPU4_PARA_VIRT_H
+#define INTEL_IPU4_PARA_VIRT_H
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include "./ici/ici-isys-stream-device.h"
+#include "./ici/ici-isys-frame-buf.h"
+#include "intel-ipu4-virtio-common.h"
+
+#define MAX_STREAM_DEVICES 64
+#define MAX_PIPELINE_DEVICES 1
+#define MAX_ISYS_VIRT_STREAM 34
+
+struct virtual_stream {
+ struct mutex mutex;
+ struct ici_stream_device strm_dev;
+ int virt_dev_id;
+ int actual_fd;
+ struct ipu4_virtio_ctx *ctx;
+ struct ici_isys_frame_buf_list buf_list;
+};
+
+
+#define dev_to_vstream(dev) \
+ container_of(dev, struct virtual_stream, strm_dev)
+
+#endif
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.c
new file mode 100644
index 000000000000..b88fe6d75bc2
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/syscalls.h>
+
+#include "intel-ipu4-virtio-be-bridge.h"
+#include "./ici/ici-isys-frame-buf.h"
+#include "intel-ipu4-virtio-be-pipeline.h"
+#include "intel-ipu4-virtio-be-stream.h"
+
+int intel_ipu4_virtio_msg_parse(int domid, struct ipu4_virtio_req *req)
+{
+ int ret = 0;
+ if (!req) {
+ pr_err("IPU mediator: request is NULL\n");
+ return -EINVAL;
+ }
+ if ((req->cmd < IPU4_CMD_DEVICE_OPEN) ||
+ (req->cmd >= IPU4_CMD_GET_N)) {
+ pr_err("IPU mediator: invalid command\n");
+ return -EINVAL;
+ }
+ switch (req->cmd) {
+ case IPU4_CMD_POLL:
+		/*
+		 * Poll a video device node for buffer availability
+		 * op0 - virtual device node number
+		 * op1 - Actual device fd. By default set to 0
+		 */
+ pr_debug("%s: process_poll pre", __func__);
+ req->stat = process_poll(domid, req);
+ pr_debug("%s: process_poll post", __func__);
+ break;
+ case IPU4_CMD_DEVICE_OPEN:
+ /*
+ * Open video device node
+ * op0 - virtual device node number
+ * op1 - Actual device fd. By default set to 0
+ */
+ pr_debug("DEVICE_OPEN: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]);
+ ret = process_device_open(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_DEVICE_CLOSE:
+ /*
+ * Close video device node
+ * op0 - virtual device node number
+ * op1 - Actual device fd. By default set to 0
+ */
+ pr_debug("DEVICE_CLOSE: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]);
+ ret = process_device_close(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_STREAM_ON:
+ /* Start Stream
+ * op0 - virtual device node number
+ * op1 - Actual device fd. By default set to 0
+ */
+ pr_debug("STREAM ON: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]);
+ ret = process_stream_on(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_STREAM_OFF:
+ /* Stop Stream
+ * op0 - virtual device node number
+ * op1 - Actual device fd. By default set to 0
+ */
+ pr_debug("STREAM OFF: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]);
+ ret = process_stream_off(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_GET_BUF:
+		/* Get buffer for a given video node
+		 * op0 - virtual device node number
+		 * op1 - Actual device fd. By default set to 0
+		 * op2 - Memory Type 1: USER_PTR 2: DMA_PTR
+		 * op3 - Number of planes
+		 * op4 - Buffer ID
+		 * op5 - Length of Buffer
+		 */
+
+ ret = process_get_buf(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_PUT_BUF:
+		/* Put buffer for a given video node
+		 * op0 - virtual device node number
+		 * op1 - Actual device fd. By default set to 0
+		 * op2 - Memory Type 1: USER_PTR 2: DMA_PTR
+		 */
+ ret = process_put_buf(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_SET_FORMAT:
+ ret = process_set_format(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_PIPELINE_OPEN:
+ ret = process_pipeline_open(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_PIPELINE_CLOSE:
+ ret = process_pipeline_close(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_ENUM_NODES:
+ ret = process_enum_nodes(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_ENUM_LINKS:
+ ret = process_enum_links(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_SETUP_PIPE:
+ ret = process_setup_pipe(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_SET_FRAMEFMT:
+ ret = process_set_framefmt(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_GET_FRAMEFMT:
+ ret = process_get_framefmt(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_GET_SUPPORTED_FRAMEFMT:
+ ret = process_get_supported_framefmt(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_SET_SELECTION:
+ ret = process_pad_set_sel(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ case IPU4_CMD_GET_SELECTION:
+ ret = process_pad_get_sel(domid, req);
+ if (ret)
+ req->stat = IPU4_REQ_ERROR;
+ else
+ req->stat = IPU4_REQ_PROCESSED;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.h
new file mode 100644
index 000000000000..25238f29bc33
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef __IPU4_VIRTIO_BE_BRIDGE__
+#define __IPU4_VIRTIO_BE_BRIDGE__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/vhm/acrn_vhm_mm.h>
+
+#include "intel-ipu4-virtio-common.h"
+
+int intel_ipu4_virtio_msg_parse(int domid, struct ipu4_virtio_req *req);
+
+void intel_ipu4_virtio_create_req(struct ipu4_virtio_req *req,
+ enum intel_ipu4_virtio_command cmd, int *op);
+
+int intel_ipu4_virtio_msg_parse(int domid, struct ipu4_virtio_req *req);
+
+
+#endif
+
+
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.c
new file mode 100644
index 000000000000..3adf5b4c9640
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/file.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+
+#include <media/ici.h>
+#include <linux/vhm/acrn_vhm_mm.h>
+#include "intel-ipu4-virtio-be-pipeline.h"
+#include "./ici/ici-isys-pipeline.h"
+#include "./ici/ici-isys-pipeline-device.h"
+
+static struct file *pipeline;
+static int guestID = -1;
+
+int process_pipeline_open(int domid, struct ipu4_virtio_req *req)
+{
+ if (guestID != -1 && guestID != domid) {
+ pr_err("%s: pipeline device already opened by other guest! %d %d", __func__, guestID, domid);
+ return -1;
+ }
+
+ pr_info("process_device_open: /dev/intel_pipeline");
+ pipeline = filp_open("/dev/intel_pipeline", O_RDWR | O_NONBLOCK, 0);
+ guestID = domid;
+
+ return 0;
+}
+
+int process_pipeline_close(int domid, struct ipu4_virtio_req *req)
+{
+ pr_info("%s: %d", __func__, req->op[0]);
+
+ filp_close(pipeline, 0);
+ guestID = -1;
+
+ return 0;
+}
+
+int process_enum_nodes(int domid, struct ipu4_virtio_req *req)
+{
+ int err = 0;
+ struct ici_isys_pipeline_device *dev = pipeline->private_data;
+ struct ici_node_desc *host_virt;
+
+ pr_debug("%s\n", __func__);
+
+ host_virt = (struct ici_node_desc *)map_guest_phys(domid, req->payload, PAGE_SIZE);
+ if (host_virt == NULL) {
+ pr_err("process_enum_nodes: NULL host_virt");
+ return 0;
+ }
+
+ err = dev->pipeline_ioctl_ops->pipeline_enum_nodes(pipeline, dev, host_virt);
+
+ return err;
+}
+
+int process_enum_links(int domid, struct ipu4_virtio_req *req)
+{
+ int err = 0;
+ struct ici_isys_pipeline_device *dev = pipeline->private_data;
+ struct ici_links_query *host_virt;
+
+ pr_debug("%s\n", __func__);
+
+ host_virt = (struct ici_links_query *)map_guest_phys(domid, req->payload, PAGE_SIZE);
+ if (host_virt == NULL) {
+ pr_err("%s: NULL host_virt\n", __func__);
+ return 0;
+ }
+ err = dev->pipeline_ioctl_ops->pipeline_enum_links(pipeline, dev, host_virt);
+
+ return err;
+}
+int process_get_supported_framefmt(int domid, struct ipu4_virtio_req *req)
+{
+ int err = 0;
+ struct ici_isys_pipeline_device *dev = pipeline->private_data;
+ struct ici_pad_supported_format_desc *host_virt;
+
+ pr_debug("%s\n", __func__);
+
+ host_virt = (struct ici_pad_supported_format_desc *)map_guest_phys(domid, req->payload, PAGE_SIZE);
+ if (host_virt == NULL) {
+ pr_err("%s: NULL host_virt\n", __func__);
+ return 0;
+ }
+ err = dev->pipeline_ioctl_ops->pad_get_supported_format(pipeline, dev, host_virt);
+
+ return err;
+}
+
+int process_set_framefmt(int domid, struct ipu4_virtio_req *req)
+{
+ int err = 0;
+ struct ici_isys_pipeline_device *dev = pipeline->private_data;
+ struct ici_pad_framefmt *host_virt;
+
+ pr_debug("%s\n", __func__);
+
+ host_virt = (struct ici_pad_framefmt *)map_guest_phys(domid, req->payload, PAGE_SIZE);
+ if (host_virt == NULL) {
+ pr_err("%s: NULL host_virt\n", __func__);
+ return 0;
+ }
+ err = dev->pipeline_ioctl_ops->pad_set_ffmt(pipeline, dev, host_virt);
+
+ return err;
+}
+
+int process_get_framefmt(int domid, struct ipu4_virtio_req *req)
+{
+ int err = 0;
+ struct ici_isys_pipeline_device *dev = pipeline->private_data;
+ struct ici_pad_framefmt *host_virt;
+
+ pr_debug("%s\n", __func__);
+
+ host_virt = (struct ici_pad_framefmt *)map_guest_phys(domid, req->payload, PAGE_SIZE);
+ if (host_virt == NULL) {
+ pr_err("%s: NULL host_virt\n", __func__);
+ return 0;
+ }
+ err = dev->pipeline_ioctl_ops->pad_get_ffmt(pipeline, dev, host_virt);
+
+ return err;
+}
+
+int process_setup_pipe(int domid, struct ipu4_virtio_req *req)
+{
+ int err = 0;
+ struct ici_isys_pipeline_device *dev = pipeline->private_data;
+ struct ici_link_desc *host_virt;
+
+ pr_debug("%s\n", __func__);
+
+ host_virt = (struct ici_link_desc *)map_guest_phys(domid, req->payload, PAGE_SIZE);
+ if (host_virt == NULL) {
+ pr_err("%s: NULL host_virt\n", __func__);
+ return 0;
+ }
+ err = dev->pipeline_ioctl_ops->pipeline_setup_pipe(pipeline, dev, host_virt);
+
+ return err;
+}
+
+int process_pad_set_sel(int domid, struct ipu4_virtio_req *req)
+{
+ int err = 0;
+ struct ici_isys_pipeline_device *dev = pipeline->private_data;
+ struct ici_pad_selection *host_virt;
+
+ pr_debug("%s\n", __func__);
+
+ host_virt = (struct ici_pad_selection *)map_guest_phys(domid, req->payload, PAGE_SIZE);
+ if (host_virt == NULL) {
+ pr_err("%s: NULL host_virt\n", __func__);
+ return 0;
+ }
+ err = dev->pipeline_ioctl_ops->pad_set_sel(pipeline, dev, host_virt);
+
+ return err;
+}
+
+int process_pad_get_sel(int domid, struct ipu4_virtio_req *req)
+{
+ int err = 0;
+ struct ici_isys_pipeline_device *dev = pipeline->private_data;
+ struct ici_pad_selection *host_virt;
+
+ pr_debug("%s\n", __func__);
+
+ host_virt = (struct ici_pad_selection *)map_guest_phys(domid, req->payload, PAGE_SIZE);
+ if (host_virt == NULL) {
+ pr_err("%s: NULL host_virt\n", __func__);
+ return 0;
+ }
+ err = dev->pipeline_ioctl_ops->pad_get_sel(pipeline, dev, host_virt);
+
+ return err;
+}
+
+/*
+ union isys_ioctl_cmd_args {
+ struct ici_node_desc node_desc;
+ struct ici_link_desc link;
+ struct ici_pad_framefmt pad_prop;
+ struct ici_pad_supported_format_desc
+ format_desc;
+ struct ici_links_query links_query;
+ struct ici_pad_selection pad_sel;
+ };
+
+ .pipeline_setup_pipe = ici_setup_link,
+ .pipeline_enum_nodes = pipeline_enum_nodes,
+ .pipeline_enum_links = pipeline_enum_links,
+ .pad_set_ffmt = ici_pipeline_set_ffmt,
+ .pad_get_ffmt = ici_pipeline_get_ffmt,
+ .pad_get_supported_format =
+ ici_pipeline_get_supported_format,
+ .pad_set_sel = ici_pipeline_set_sel,
+ .pad_get_sel = ici_pipeline_get_sel,
+
+*/
+
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.h
new file mode 100644
index 000000000000..df65e88050ea
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef __IPU4_VIRTIO_BE_PIPELINE__
+#define __IPU4_VIRTIO_BE_PIPELINE__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include "intel-ipu4-virtio-common.h"
+
+int process_pipeline_open(int domid, struct ipu4_virtio_req *req);
+int process_pipeline_close(int domid, struct ipu4_virtio_req *req);
+int process_enum_nodes(int domid, struct ipu4_virtio_req *req);
+int process_enum_links(int domid, struct ipu4_virtio_req *req);
+int process_get_supported_framefmt(int domid, struct ipu4_virtio_req *req);
+int process_set_framefmt(int domid, struct ipu4_virtio_req *req);
+int process_get_framefmt(int domid, struct ipu4_virtio_req *req);
+int process_pad_set_sel(int domid, struct ipu4_virtio_req *req);
+int process_pad_get_sel(int domid, struct ipu4_virtio_req *req);
+int process_setup_pipe(int domid, struct ipu4_virtio_req *req);
+
+#endif
+
+
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.c
new file mode 100644
index 000000000000..9c6bbf6fb5be
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/file.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+
+#include <linux/hashtable.h>
+#include <linux/pagemap.h>
+#include <media/ici.h>
+#include <linux/vhm/acrn_vhm_mm.h>
+#include "./ici/ici-isys-stream-device.h"
+#include "./ici/ici-isys-stream.h"
+#include "./ici/ici-isys-frame-buf.h"
+#include "intel-ipu4-virtio-be-stream.h"
+#include "intel-ipu4-virtio-be.h"
+
+#define MAX_SIZE 6 // max 2^6
+
+#define dev_to_stream(dev) \
+ container_of(dev, struct ici_isys_stream, strm_dev)
+
+DECLARE_HASHTABLE(STREAM_NODE_HASH, MAX_SIZE);
+static bool hash_initialised;
+
+struct stream_node {
+ int client_id;
+ struct file *f;
+ struct hlist_node node;
+};
+
+int frame_done_callback(void)
+{
+ notify_fe();
+ return 0;
+}
+
+int process_device_open(int domid, struct ipu4_virtio_req *req)
+{
+ char node_name[25];
+ struct stream_node *sn = NULL;
+
+ if (!hash_initialised) {
+ hash_init(STREAM_NODE_HASH);
+ hash_initialised = true;
+ }
+ hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) {
+ if (sn != NULL) {
+ if (sn->client_id != domid) {
+ pr_err("process_device_open: stream device %d already opened by other guest!", sn->client_id);
+ return -EBUSY;
+ }
+ pr_info("process_device_open: stream device %d already opened by client %d", req->op[0], domid);
+ return 0;
+ }
+ }
+
+ sprintf(node_name, "/dev/intel_stream%d", req->op[0]);
+ pr_info("process_device_open: %s", node_name);
+ sn = kzalloc(sizeof(struct stream_node), GFP_KERNEL);
+ sn->f = filp_open(node_name, O_RDWR | O_NONBLOCK, 0);
+ sn->client_id = domid;
+
+ hash_add(STREAM_NODE_HASH, &sn->node, req->op[0]);
+
+ return 0;
+}
+
+int process_device_close(int domid, struct ipu4_virtio_req *req)
+{
+ struct stream_node *sn = NULL;
+ if (!hash_initialised)
+ return 0; //no node has been opened, do nothing
+
+ pr_info("process_device_close: %d", req->op[0]);
+
+ hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) {
+ if (sn != NULL) {
+ pr_err("process_device_close: %d closed", req->op[0]);
+ hash_del(&sn->node);
+ filp_close(sn->f, 0);
+ kfree(sn);
+ }
+ }
+
+ return 0;
+}
+
+int process_set_format(int domid, struct ipu4_virtio_req *req)
+{
+ struct stream_node *sn = NULL;
+ struct ici_stream_device *strm_dev;
+ struct ici_stream_format *host_virt;
+ int err, found;
+
+ pr_debug("process_set_format: %d %d", hash_initialised, req->op[0]);
+
+ if (!hash_initialised)
+ return -1;
+
+ found = 0;
+ hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) {
+ if (sn != NULL) {
+ pr_err("process_set_format: node %d %p", req->op[0], sn);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("%s: stream not found %d\n", __func__, req->op[0]);
+ return -1;
+ }
+
+ strm_dev = sn->f->private_data;
+ if (strm_dev == NULL) {
+ pr_err("Native IPU stream device not found\n");
+ return -1;
+ }
+
+ host_virt = (struct ici_stream_format *)map_guest_phys(domid, req->payload, PAGE_SIZE);
+ if (host_virt == NULL) {
+ pr_err("process_set_format: NULL host_virt");
+ return -1;
+ }
+
+ err = strm_dev->ipu_ioctl_ops->ici_set_format(sn->f, strm_dev, host_virt);
+
+ if (err)
+ pr_err("intel_ipu4_pvirt: internal set fmt failed\n");
+
+ return 0;
+}
+
+int process_poll(int domid, struct ipu4_virtio_req *req)
+{
+ struct stream_node *sn = NULL;
+ struct ici_isys_stream *as;
+ bool found, empty;
+ unsigned long flags = 0;
+
+ pr_debug("%s: %d %d", __func__, hash_initialised, req->op[0]);
+
+ if (!hash_initialised)
+ return -1;
+
+ found = false;
+ hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) {
+ if (sn != NULL) {
+ pr_debug("process_put_buf: node %d %p", req->op[0], sn);
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("%s: stream not found %d\n", __func__, req->op[0]);
+ return -1;
+ }
+
+ as = dev_to_stream(sn->f->private_data);
+ spin_lock_irqsave(&as->buf_list.lock, flags);
+ empty = list_empty(&as->buf_list.putbuf_list);
+ spin_unlock_irqrestore(&as->buf_list.lock, flags);
+ if (!empty) {
+ req->func_ret = 1;
+ return IPU4_REQ_PROCESSED;
+ } else
+ return IPU4_REQ_NEEDS_FOLLOW_UP;
+}
+
+int process_put_buf(int domid, struct ipu4_virtio_req *req)
+{
+ struct stream_node *sn = NULL;
+ struct ici_stream_device *strm_dev;
+ struct ici_frame_info *host_virt;
+ int err, found;
+
+ pr_debug("process_put_buf: %d %d", hash_initialised, req->op[0]);
+
+ if (!hash_initialised)
+ return -1;
+
+ found = 0;
+ hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) {
+ if (sn != NULL) {
+ pr_debug("process_put_buf: node %d %p", req->op[0], sn);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("%s: stream not found %d\n", __func__, req->op[0]);
+ return -1;
+ }
+
+ strm_dev = sn->f->private_data;
+ if (strm_dev == NULL) {
+ pr_err("Native IPU stream device not found\n");
+ return -1;
+ }
+
+ host_virt = (struct ici_frame_info *)map_guest_phys(domid, req->payload, PAGE_SIZE);
+ if (host_virt == NULL) {
+ pr_err("process_put_buf: NULL host_virt");
+ return -1;
+ }
+ err = strm_dev->ipu_ioctl_ops->ici_put_buf(sn->f, strm_dev, host_virt);
+
+ if (err)
+ pr_err("process_put_buf: ici_put_buf failed\n");
+
+ return 0;
+}
+
+int process_get_buf(int domid, struct ipu4_virtio_req *req)
+{
+ struct stream_node *sn = NULL;
+ struct ici_frame_buf_wrapper *shared_buf;
+ struct ici_stream_device *strm_dev;
+ int k, i = 0;
+ void *pageaddr;
+ u64 *page_table = NULL;
+ struct page **data_pages = NULL;
+ int err, found;
+
+ pr_debug("process_get_buf: %d %d", hash_initialised, req->op[0]);
+
+ if (!hash_initialised)
+ return -1;
+
+ found = 0;
+ hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) {
+ if (sn != NULL) {
+ pr_debug("process_get_buf: node %d %p", req->op[0], sn);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("%s: stream not found %d\n", __func__, req->op[0]);
+ return -1;
+ }
+
+ pr_debug("GET_BUF: Mapping buffer\n");
+ shared_buf = (struct ici_frame_buf_wrapper *)map_guest_phys(domid, req->payload, PAGE_SIZE);
+ if (!shared_buf) {
+ pr_err("SOS Failed to map Buffer from UserOS\n");
+ req->stat = IPU4_REQ_ERROR;
+ }
+ data_pages = kcalloc(shared_buf->kframe_info.planes[0].npages, sizeof(struct page *), GFP_KERNEL);
+ if (data_pages == NULL) {
+ pr_err("SOS Failed alloc data page set\n");
+ req->stat = IPU4_REQ_ERROR;
+ }
+ pr_debug("Total number of pages:%d\n", shared_buf->kframe_info.planes[0].npages);
+
+ page_table = (u64 *)map_guest_phys(domid, shared_buf->kframe_info.planes[0].page_table_ref, PAGE_SIZE);
+
+ if (page_table == NULL) {
+ pr_err("SOS Failed to map page table\n");
+ req->stat = IPU4_REQ_ERROR;
+ kfree(data_pages);
+ return -1;
+ }
+
+ else {
+ pr_debug("SOS first page %lld\n", page_table[0]);
+ k = 0;
+ for (i = 0; i < shared_buf->kframe_info.planes[0].npages; i++) {
+ pageaddr = map_guest_phys(domid, page_table[i], PAGE_SIZE);
+ if (pageaddr == NULL) {
+ pr_err("Cannot map pages from UOS\n");
+ req->stat = IPU4_REQ_ERROR;
+ break;
+ }
+
+ data_pages[k] = virt_to_page(pageaddr);
+ k++;
+ }
+ }
+
+ strm_dev = sn->f->private_data;
+ if (strm_dev == NULL) {
+ pr_err("Native IPU stream device not found\n");
+ kfree(data_pages);
+ return -1;
+ }
+ err = strm_dev->ipu_ioctl_ops->ici_get_buf_virt(sn->f, strm_dev, shared_buf, data_pages);
+
+ if (err)
+ pr_err("process_get_buf: ici_get_buf_virt failed\n");
+
+ kfree(data_pages);
+ return 0;
+}
+
+int process_stream_on(int domid, struct ipu4_virtio_req *req)
+{
+ struct stream_node *sn = NULL;
+ struct ici_isys_stream *as;
+ struct ici_stream_device *strm_dev;
+ int err, found;
+
+ pr_debug("process_stream_on: %d %d", hash_initialised, req->op[0]);
+
+ if (!hash_initialised)
+ return -1;
+
+ found = 0;
+ hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) {
+ if (sn != NULL) {
+ pr_err("process_stream_on: node %d %p", req->op[0], sn);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("%s: stream not found %d\n", __func__, req->op[0]);
+ return -1;
+ }
+
+ strm_dev = sn->f->private_data;
+ if (strm_dev == NULL) {
+ pr_err("Native IPU stream device not found\n");
+ return -1;
+ }
+
+ as = dev_to_stream(strm_dev);
+ as->frame_done_notify_queue = frame_done_callback;
+
+ err = strm_dev->ipu_ioctl_ops->ici_stream_on(sn->f, strm_dev);
+
+ if (err)
+ pr_err("process_stream_on: stream on failed\n");
+
+ return 0;
+}
+
+int process_stream_off(int domid, struct ipu4_virtio_req *req)
+{
+ struct stream_node *sn = NULL;
+ struct ici_stream_device *strm_dev;
+ struct ici_isys_stream *as;
+ int err, found;
+
+ pr_debug("process_stream_off: %d %d", hash_initialised, req->op[0]);
+
+ if (!hash_initialised)
+ return -1;
+
+ found = 0;
+ hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) {
+ if (sn != NULL) {
+ pr_err("process_stream_off: node %d %p", req->op[0], sn);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("%s: stream not found %d\n", __func__, req->op[0]);
+ return -1;
+ }
+
+ strm_dev = sn->f->private_data;
+ if (strm_dev == NULL) {
+ pr_err("Native IPU stream device not found\n");
+ return -1;
+ }
+
+ err = strm_dev->ipu_ioctl_ops->ici_stream_off(sn->f, strm_dev);
+
+ if (err)
+ pr_err("process_stream_off: stream off failed\n");
+
+ as = dev_to_stream(strm_dev);
+ as->frame_done_notify_queue();
+ as->frame_done_notify_queue = NULL;
+
+ return 0;
+}
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.h
new file mode 100644
index 000000000000..0d85b3561274
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef __IPU4_VIRTIO_BE_STREAM__
+#define __IPU4_VIRTIO_BE_STREAM__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include "intel-ipu4-virtio-common.h"
+
+int process_set_format(int domid, struct ipu4_virtio_req *req);
+int process_device_open(int domid, struct ipu4_virtio_req *req);
+int process_device_close(int domid, struct ipu4_virtio_req *req);
+int process_poll(int domid, struct ipu4_virtio_req *req);
+int process_put_buf(int domid, struct ipu4_virtio_req *req);
+int process_stream_on(int domid, struct ipu4_virtio_req *req);
+int process_stream_off(int domid, struct ipu4_virtio_req *req);
+int process_get_buf(int domid, struct ipu4_virtio_req *req);
+
+
+#endif
+
+
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.c
new file mode 100644
index 000000000000..aa64d09adb35
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <linux/vbs/vq.h>
+#include <linux/hashtable.h>
+
+#include "intel-ipu4-virtio-common.h"
+#include "intel-ipu4-virtio-be-bridge.h"
+#include "intel-ipu4-virtio-be.h"
+
+/**
+ * struct ipu4_virtio_be_priv - IPU virtio backend state based on VBS-K
+ *
+ * @dev : instance of struct virtio_dev_info
+ * @vqs : one struct virtio_vq_info per virtqueue
+ * @busy : backend busy flag
+ * @pending_tx_req : request buffer held for deferred completion
+ * @node : hashtable node maintaining multiple connections
+ * from multiple guests/devices
+ */
+struct ipu4_virtio_be_priv {
+ struct virtio_dev_info dev;
+ struct virtio_vq_info vqs[IPU_VIRTIO_QUEUE_MAX];
+ bool busy;
+ struct ipu4_virtio_req *pending_tx_req;
+ struct mutex lock;
+ /*
+ * Each VBS-K module might serve multiple connections
+ * from multiple guests/device models/VBS-Us, so better
+ * to maintain the connections in a list, and here we
+ * use hashtable as an example.
+ */
+ struct hlist_node node;
+};
+
+struct vq_request_data {
+ struct virtio_vq_info *vq;
+ struct ipu4_virtio_req *req;
+ int len;
+ uint16_t idx;
+};
+
+struct vq_request_data vq_req;
+
+#define RNG_MAX_HASH_BITS 4 /* MAX is 2^4 */
+#define HASH_NAME vbs_hash
+
+DECLARE_HASHTABLE(HASH_NAME, RNG_MAX_HASH_BITS);
+static int ipu_vbk_hash_initialized;
+static int ipu_vbk_connection_cnt;
+
+/* function declarations */
+static int handle_kick(int client_id, long unsigned int *req_cnt);
+static void ipu_vbk_reset(struct ipu4_virtio_be_priv *rng);
+static void ipu_vbk_stop(struct ipu4_virtio_be_priv *rng);
+static void ipu_vbk_flush(struct ipu4_virtio_be_priv *rng);
+
+#ifdef RUNTIME_CTRL
+static int ipu_vbk_enable_vq(struct ipu4_virtio_be_priv *rng,
+ struct virtio_vq_info *vq);
+static void ipu_vbk_disable_vq(struct ipu4_virtio_be_priv *rng,
+ struct virtio_vq_info *vq);
+static void ipu_vbk_stop_vq(struct ipu4_virtio_be_priv *rng,
+ struct virtio_vq_info *vq);
+static void ipu_vbk_flush_vq(struct ipu4_virtio_be_priv *rng, int index);
+#endif
+
+/* hash table related functions */
+/* Initialize the connection hash table; safe to call more than once. */
+static void ipu_vbk_hash_init(void)
+{
+	if (!ipu_vbk_hash_initialized) {
+		hash_init(HASH_NAME);
+		ipu_vbk_hash_initialized = 1;
+	}
+}
+
+/* Track a new backend connection, keyed by its VHM client id.
+ * Returns 0 on success, -1 when the table was never initialized.
+ */
+static int ipu_vbk_hash_add(struct ipu4_virtio_be_priv *entry)
+{
+	if (!ipu_vbk_hash_initialized) {
+		/* was "RNG hash table": leftover from the virtio-rng template */
+		pr_err("IPU hash table not initialized!\n");
+		return -1;
+	}
+
+	hash_add(HASH_NAME, &entry->node, virtio_dev_client_id(&entry->dev));
+	return 0;
+}
+
+/* Look up the backend connection matching @client_id; NULL if absent. */
+static struct ipu4_virtio_be_priv *ipu_vbk_hash_find(int client_id)
+{
+	struct ipu4_virtio_be_priv *entry;
+	int bkt;
+
+	if (!ipu_vbk_hash_initialized) {
+		/* was "RNG hash table": leftover from the virtio-rng template */
+		pr_err("IPU hash table not initialized!\n");
+		return NULL;
+	}
+
+	hash_for_each(HASH_NAME, bkt, entry, node)
+		if (virtio_dev_client_id(&entry->dev) == client_id)
+			return entry;
+
+	pr_err("No item matching client_id!\n");
+	return NULL;
+}
+
+/* Remove the connection matching @client_id from the hash table.
+ * Returns 0 on success, -1 when uninitialized or no match exists.
+ */
+static int ipu_vbk_hash_del(int client_id)
+{
+	struct ipu4_virtio_be_priv *entry;
+	int bkt;
+
+	if (!ipu_vbk_hash_initialized) {
+		/* was "RNG hash table": leftover from the virtio-rng template */
+		pr_err("IPU hash table not initialized!\n");
+		return -1;
+	}
+
+	hash_for_each(HASH_NAME, bkt, entry, node)
+		if (virtio_dev_client_id(&entry->dev) == client_id) {
+			/* returning right away: no iteration past the delete */
+			hash_del(&entry->node);
+			return 0;
+		}
+
+	pr_err("%s failed, not found matching client_id!\n",
+		__func__);
+	return -1;
+}
+
+/* Drop every connection from the table.
+ * Must use the _safe iterator: hash_del() poisons the node links that
+ * a plain hash_for_each() would read to advance to the next entry.
+ */
+static int ipu_vbk_hash_del_all(void)
+{
+	struct ipu4_virtio_be_priv *entry;
+	struct hlist_node *tmp;
+	int bkt;
+
+	if (!ipu_vbk_hash_initialized) {
+		pr_err("IPU hash table not initialized!\n");
+		return -1;
+	}
+
+	hash_for_each_safe(HASH_NAME, bkt, tmp, entry, node)
+		hash_del(&entry->node);
+
+	return 0;
+}
+
+/* Drain all pending descriptors on virtqueue @vq_idx of @priv.
+ * Three kinds of buffers arrive:
+ *  - sizeof(int): the frontend's vmid handshake (see virt_scan on the
+ *    FE side) — answered inline with the vmid;
+ *  - sizeof(struct ipu4_virtio_req): a mediation request, parsed by
+ *    intel_ipu4_virtio_msg_parse();
+ *  - anything else: rejected with len 0.
+ * Requests marked IPU4_REQ_NEEDS_FOLLOW_UP are NOT released here; they
+ * are parked in the global vq_req and completed later by notify_fe().
+ */
+static void handle_vq_kick(struct ipu4_virtio_be_priv *priv, int vq_idx)
+{
+	struct iovec iov;
+	struct ipu4_virtio_be_priv *be;
+	struct virtio_vq_info *vq;
+	struct ipu4_virtio_req *req = NULL;
+	int len;
+	int ret;
+	uint16_t idx;
+
+	pr_debug("%s: vq_idx %d\n", __func__, vq_idx);
+
+	be = priv;
+
+	if (!be) {
+		/* "rng" wording is template residue from virtio-rng */
+		pr_err("rng is NULL! Cannot proceed!\n");
+		return;
+	}
+
+	vq = &(be->vqs[vq_idx]);
+
+	while (virtio_vq_has_descs(vq)) {
+		virtio_vq_getchain(vq, &idx, &iov, 1, NULL);
+
+		/* device specific operations, for example: */
+		pr_debug("iov base %p len %lx\n", iov.iov_base, iov.iov_len);
+
+		if (iov.iov_len != sizeof(struct ipu4_virtio_req)) {
+			if (iov.iov_len == sizeof(int)) {
+				/* vmid handshake; NOTE(review): vmid is
+				 * hard-coded to 1 here — confirm intent.
+				 */
+				*((int *)iov.iov_base) = 1;
+				len = iov.iov_len;
+				printk(KERN_NOTICE "IPU VBK handle kick from vmid:%d\n", 1);
+			} else {
+				len = 0;
+				printk(KERN_WARNING "received request with wrong size");
+				printk(KERN_WARNING "%zu != %zu\n",
+					iov.iov_len,
+					sizeof(struct ipu4_virtio_req));
+			}
+
+			pr_debug("vtrnd: vtrnd_notify(): %d\r\n", len);
+			virtio_vq_relchain(vq, idx, len);
+			continue;
+		}
+
+		req = (struct ipu4_virtio_req *)iov.iov_base;
+		ret = intel_ipu4_virtio_msg_parse(1, req);
+		len = iov.iov_len;
+
+		if (req->stat == IPU4_REQ_NEEDS_FOLLOW_UP) {
+			/* defer completion until notify_fe() is called */
+			vq_req.vq = vq;
+			vq_req.req = req;
+			vq_req.idx = idx;
+			vq_req.len = len;
+		} else
+			virtio_vq_relchain(vq, idx, len);
+	}
+	pr_debug("IPU VBK data process on VQ Done\n");
+	/* don't signal the guest yet if the last request was deferred */
+	if (req && req->stat != IPU4_REQ_NEEDS_FOLLOW_UP)
+		virtio_vq_endchains(vq, 1);
+}
+
+/* VHM kick handler: map the pending I/O requests of @client_id to
+ * virtqueue indexes and service each kicked queue.
+ */
+static int handle_kick(int client_id, long unsigned *ioreqs_map)
+{
+	struct ipu4_virtio_be_priv *priv;
+	int vq_idx[IPU_VIRTIO_QUEUE_MAX];
+	int n, i;
+
+	if (unlikely(bitmap_empty(ioreqs_map, VHM_REQUEST_MAX)))
+		return -EINVAL;
+
+	pr_debug("%s: IPU VBK handle kick!\n", __func__);
+
+	priv = ipu_vbk_hash_find(client_id);
+	if (!priv) {
+		pr_err("%s: client %d not found!\n",
+			__func__, client_id);
+		return -EINVAL;
+	}
+
+	n = ipu_virtio_vqs_index_get(&priv->dev, ioreqs_map, vq_idx,
+			IPU_VIRTIO_QUEUE_MAX);
+	for (i = 0; i < n; i++)
+		if (vq_idx[i] >= 0)
+			handle_vq_kick(priv, vq_idx[i]);
+
+	return 0;
+}
+
+/* Open handler for /dev/vbs_ipu: allocate per-connection state, wire
+ * up the virtqueues and register the kick callback.
+ */
+static int ipu_vbk_open(struct inode *inode, struct file *f)
+{
+	struct ipu4_virtio_be_priv *priv;
+	struct virtio_dev_info *dev;
+	struct virtio_vq_info *vqs;
+	int i;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (priv == NULL) {
+		pr_err("Failed to allocate memory for ipu4_virtio_be_priv!\n");
+		return -ENOMEM;
+	}
+
+	/* was unchecked: a failed allocation would be dereferenced when
+	 * a request is later deferred
+	 */
+	priv->pending_tx_req = kzalloc(sizeof(*priv->pending_tx_req),
+			GFP_KERNEL);
+	if (priv->pending_tx_req == NULL) {
+		kfree(priv);
+		return -ENOMEM;
+	}
+
+	vqs = &priv->vqs[0];
+	dev = &priv->dev;
+
+	strncpy(dev->name, "vbs_ipu", VBS_NAME_LEN);
+	dev->dev_notify = handle_kick;
+
+	for (i = 0; i < IPU_VIRTIO_QUEUE_MAX; i++) {
+		vqs[i].dev = dev;
+		vqs[i].vq_notify = NULL;
+	}
+
+	/* link dev and vqs */
+	dev->vqs = vqs;
+	virtio_dev_init(dev, vqs, IPU_VIRTIO_QUEUE_MAX);
+
+	mutex_init(&priv->lock);
+
+	f->private_data = priv;
+
+	/* init a hash table to maintain multi-connections */
+	ipu_vbk_hash_init();
+
+	return 0;
+}
+
+/* Release handler: stop servicing, reset the virtqueues, drop the
+ * connection from the hash table and free per-connection memory.
+ */
+static int ipu_vbk_release(struct inode *inode, struct file *f)
+{
+	struct ipu4_virtio_be_priv *priv = f->private_data;
+	int i;
+
+	/* was: logged NULL and then dereferenced it anyway */
+	if (!priv) {
+		pr_err("%s: UNLIKELY priv NULL!\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	ipu_vbk_stop(priv);
+	ipu_vbk_flush(priv);
+	for (i = 0; i < IPU_VIRTIO_QUEUE_MAX; i++)
+		virtio_vq_reset(&(priv->vqs[i]));
+
+	/* device specific release */
+	ipu_vbk_reset(priv);
+
+	pr_debug("ipu_vbk_connection cnt is %d\n",
+		ipu_vbk_connection_cnt);
+
+	/* was "if (priv && ipu_vbk_connection_cnt--)": post-decrement
+	 * underflowed the counter to -1 when it was already zero
+	 */
+	if (ipu_vbk_connection_cnt > 0) {
+		ipu_vbk_connection_cnt--;
+		ipu_vbk_hash_del(virtio_dev_client_id(&priv->dev));
+	}
+	if (!ipu_vbk_connection_cnt) {
+		pr_debug("ipu4_virtio_be_priv remove all hash entries\n");
+		ipu_vbk_hash_del_all();
+	}
+
+	/* was leaked: allocated in ipu_vbk_open() */
+	kfree(priv->pending_tx_req);
+	kfree(priv);
+
+	pr_debug("%s done\n", __func__);
+	return 0;
+}
+
+/* ioctl entry for /dev/vbs_ipu. VBS_SET_VQ is intercepted so the VHM
+ * client can be registered (and the connection tracked) right after
+ * the virtqueues are configured; everything else is forwarded to the
+ * generic VBS-K dev/vq ioctl handlers.
+ */
+static long ipu_vbk_ioctl(struct file *f, unsigned int ioctl,
+			unsigned long arg)
+{
+	struct ipu4_virtio_be_priv *priv = f->private_data;
+	void __user *argp = (void __user *)arg;
+	/*u64 __user *featurep = argp;*/
+	/*u64 features;*/
+	int r;
+
+	if (priv == NULL) {
+		pr_err("No IPU backend private data\n");
+		return -EINVAL;
+	}
+	switch (ioctl) {
+/*
+ * case VHOST_GET_FEATURES:
+ *	features = VHOST_NET_FEATURES;
+ *	if (copy_to_user(featurep, &features, sizeof features))
+ *		return -EFAULT;
+ *	return 0;
+ * case VHOST_SET_FEATURES:
+ *	if (copy_from_user(&features, featurep, sizeof features))
+ *		return -EFAULT;
+ *	if (features & ~VHOST_NET_FEATURES)
+ *		return -EOPNOTSUPP;
+ *	return vhost_net_set_features(n, features);
+ */
+	case VBS_SET_VQ:
+		/*
+		 * we handle this here because we want to register VHM client
+		 * after handling VBS_K_SET_VQ request
+		 */
+		r = virtio_vqs_ioctl(&priv->dev, ioctl, argp);
+		if (r == -ENOIOCTLCMD) {
+			pr_err("VBS_K_SET_VQ: virtio_vqs_ioctl failed!\n");
+			return -EFAULT;
+		}
+		/* Register VHM client */
+		if (virtio_dev_register(&priv->dev) < 0) {
+			pr_err("failed to register VHM client!\n");
+			return -EFAULT;
+		}
+		/* Added to local hash table */
+		if (ipu_vbk_hash_add(priv) < 0) {
+			pr_err("failed to add to hashtable!\n");
+			return -EFAULT;
+		}
+		/* Increment counter */
+		ipu_vbk_connection_cnt++;
+		return r;
+	default:
+		/*mutex_lock(&n->dev.mutex);*/
+		/* try device-level ioctl first, then fall back to vq-level */
+		r = virtio_dev_ioctl(&priv->dev, ioctl, argp);
+		if (r == -ENOIOCTLCMD)
+			r = virtio_vqs_ioctl(&priv->dev, ioctl, argp);
+		else
+			ipu_vbk_flush(priv);
+		/*mutex_unlock(&n->dev.mutex);*/
+		return r;
+	}
+}
+
+/* Complete the request parked by handle_vq_kick() (the
+ * IPU4_REQ_NEEDS_FOLLOW_UP case) and signal the frontend.
+ */
+int notify_fe(void)
+{
+	if (!vq_req.vq) {
+		pr_debug("%s: NULL vq!", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: notifying fe", __func__);
+	vq_req.req->func_ret = 1;
+	virtio_vq_relchain(vq_req.vq, vq_req.idx, vq_req.len);
+	virtio_vq_endchains(vq_req.vq, 1);
+	vq_req.vq = NULL;
+
+	return 0;
+}
+
+/* Walk the vcpus flagged in @ioreqs_map, consume each pending VHM
+ * request belonging to this device's client and collect the virtqueue
+ * index the guest wrote into @vqs_index (at most @max_vqs_index).
+ * Returns the number of indexes collected, or -EINVAL on NULL @dev.
+ */
+int ipu_virtio_vqs_index_get(struct virtio_dev_info *dev, unsigned long *ioreqs_map,
+			int *vqs_index, int max_vqs_index)
+{
+	int idx = 0;
+	struct vhm_request *req;
+	int vcpu;
+
+	if (dev == NULL) {
+		pr_err("%s: dev is NULL!\n", __func__);
+		return -EINVAL;
+	}
+
+	while (idx < max_vqs_index) {
+		/* find_first_bit == max_vcpu means no vcpu left to serve */
+		vcpu = find_first_bit(ioreqs_map, dev->_ctx.max_vcpu);
+		if (vcpu == dev->_ctx.max_vcpu)
+			break;
+		req = &dev->_ctx.req_buf[vcpu];
+		if (atomic_read(&req->processed) == REQ_STATE_PROCESSING &&
+			req->client == dev->_ctx.vhm_client_id) {
+			if (req->reqs.pio_request.direction == REQUEST_READ) {
+				/* currently we handle kick only,
+				 * so read will return 0
+				 */
+				pr_debug("%s: read request!\n", __func__);
+				if (dev->io_range_type == PIO_RANGE)
+					req->reqs.pio_request.value = 0;
+				else
+					req->reqs.mmio_request.value = 0;
+			} else {
+				pr_debug("%s: write request! type %d\n",
+						__func__, req->type);
+				/* the written value is the kicked vq index */
+				if (dev->io_range_type == PIO_RANGE)
+					vqs_index[idx++] = req->reqs.pio_request.value;
+				else
+					vqs_index[idx++] = req->reqs.mmio_request.value;
+			}
+			/* order the value update above before marking the
+			 * request complete for the hypervisor
+			 */
+			smp_mb();
+			atomic_set(&req->processed, REQ_STATE_COMPLETE);
+			acrn_ioreq_complete_request(req->client, vcpu);
+		}
+	}
+
+	return idx;
+}
+
+/* device specific cleanup: placeholder, no device state to reset yet */
+static void ipu_vbk_reset(struct ipu4_virtio_be_priv *rng)
+{
+}
+
+/* device specific stop: deregister this device from the VBS-K framework */
+static void ipu_vbk_stop(struct ipu4_virtio_be_priv *rng)
+{
+	virtio_dev_deregister(&rng->dev);
+}
+
+/* device specific flush: placeholder, nothing buffered to flush yet */
+static void ipu_vbk_flush(struct ipu4_virtio_be_priv *rng)
+{
+}
+
+/* Runtime per-VQ control hooks, compiled only with RUNTIME_CTRL.
+ * All are placeholders at this stage of the mediation driver.
+ */
+#ifdef RUNTIME_CTRL
+/* device specific function: enable one virtqueue (no-op) */
+static int ipu_vbk_enable_vq(struct ipu4_virtio_be_priv *rng,
+			struct virtio_vq_info *vq)
+{
+	return 0;
+}
+
+/* device specific function: disable one virtqueue (no-op) */
+static void ipu_vbk_disable_vq(struct ipu4_virtio_be_priv *rng,
+			struct virtio_vq_info *vq)
+{
+}
+
+/* device specific function: stop one virtqueue (no-op) */
+static void ipu_vbk_stop_vq(struct ipu4_virtio_be_priv *rng,
+			struct virtio_vq_info *vq)
+{
+}
+
+/* device specific function: flush one virtqueue (no-op) */
+static void ipu_vbk_flush_vq(struct ipu4_virtio_be_priv *rng, int index)
+{
+}
+
+/* Set feature bits in kernel side device (no-op) */
+static int ipu_vbk_set_features(struct ipu4_virtio_be_priv *rng, u64 features)
+{
+	return 0;
+}
+#endif
+
+/* char-device entry points for the backend control node */
+static const struct file_operations vbs_fops = {
+	.owner = THIS_MODULE,
+	.release = ipu_vbk_release,
+	.unlocked_ioctl = ipu_vbk_ioctl,
+	.open = ipu_vbk_open,
+	.llseek = noop_llseek,
+};
+
+/* /dev/vbs_ipu, dynamically-numbered misc device */
+static struct miscdevice vbs_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "vbs_ipu",
+	.fops = &vbs_fops,
+};
+
+/* Module init: expose the backend as the /dev/vbs_ipu misc device. */
+static int ipu_vbk_init(void)
+{
+	return misc_register(&vbs_misc);
+}
+module_init(ipu_vbk_init);
+
+/* Module exit: remove the misc device. */
+static void ipu_vbk_exit(void)
+{
+	misc_deregister(&vbs_misc);
+}
+module_exit(ipu_vbk_exit);
+
+MODULE_VERSION("0.1");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("IPU4 virtio driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.h
new file mode 100644
index 000000000000..999b543b58f6
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef __IPU4_VIRTIO_BE__
+#define __IPU4_VIRTIO_BE__
+
+#include <linux/vbs/vbs.h>
+
+int notify_fe(void);
+int ipu_virtio_vqs_index_get(struct virtio_dev_info *dev, unsigned long *ioreqs_map,
+ int *vqs_index, int max_vqs_index);
+
+#endif
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.c
new file mode 100644
index 000000000000..457c6bdf78a8
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/hashtable.h>
+
+#include "intel-ipu4-virtio-common.h"
+
+DECLARE_HASHTABLE(ipu4_virtio_fe_hash, MAX_ENTRY_FE);
+
+/* Initialize the frontend-info hash table (empty on creation). */
+void ipu4_virtio_fe_table_init(void)
+{
+	hash_init(ipu4_virtio_fe_hash);
+}
+
+/* Register @fe_info in the frontend table, keyed by its client id.
+ * Returns 0 on success, -ENOMEM if the entry cannot be allocated.
+ */
+int ipu4_virtio_fe_add(struct ipu4_virtio_fe_info *fe_info)
+{
+	struct ipu4_virtio_fe_info_entry *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->info = fe_info;
+	hash_add(ipu4_virtio_fe_hash, &entry->node, fe_info->client_id);
+
+	return 0;
+}
+
+/* Return the frontend info registered for @client_id, or NULL. */
+struct ipu4_virtio_fe_info *ipu4_virtio_fe_find(int client_id)
+{
+	struct ipu4_virtio_fe_info_entry *entry;
+	int bkt;
+
+	hash_for_each(ipu4_virtio_fe_hash, bkt, entry, node) {
+		if (entry->info->client_id == client_id)
+			return entry->info;
+	}
+
+	return NULL;
+}
+
+/* Return the frontend info registered for VM @vmid, or NULL. */
+struct ipu4_virtio_fe_info *ipu4_virtio_fe_find_by_vmid(int vmid)
+{
+	struct ipu4_virtio_fe_info_entry *entry;
+	int bkt;
+
+	hash_for_each(ipu4_virtio_fe_hash, bkt, entry, node) {
+		if (entry->info->vmid == vmid)
+			return entry->info;
+	}
+
+	return NULL;
+}
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.h
new file mode 100644
index 000000000000..8b2260b46169
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef __IPU4_VIRTIO_COMMON_H__
+#define __IPU4_VIRTIO_COMMON_H__
+
+
+/*
+ * CWP uses physical addresses for memory sharing,
+ * so the size of one page ref will be 64 bits
+ */
+
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(u64))
+
+/* Defines size of requests circular buffer */
+#define REQ_RING_SIZE 128
+
+#define MAX_NUMBER_OF_OPERANDS 64
+
+#define MAX_ENTRY_FE 7
+
+enum virio_queue_type {
+ IPU_VIRTIO_QUEUE_0 = 0,
+ IPU_VIRTIO_QUEUE_1,
+ IPU_VIRTIO_QUEUE_MAX
+};
+
+struct ipu4_virtio_req {
+ unsigned int req_id;
+ unsigned int stat;
+ unsigned int cmd;
+ unsigned int func_ret;
+ unsigned int op[MAX_NUMBER_OF_OPERANDS];
+ u64 payload;
+};
+struct test_payload {
+ unsigned int data1;
+ long int data2;
+ char name[256];
+};
+/*Not used*/
+struct ipu4_virtio_resp {
+ unsigned int resp_id;
+ unsigned int stat;
+ unsigned int cmd;
+ unsigned int op[MAX_NUMBER_OF_OPERANDS];
+};
+
+/*Not used*/
+struct ipu4_virtio_fe_info {
+ struct ipu4_virtio_be_priv *priv;
+ int client_id;
+ int vmid;
+ int max_vcpu;
+ struct vhm_request *req_buf;
+};
+
+/*Not used*/
+struct ipu4_virtio_fe_info_entry {
+ struct ipu4_virtio_fe_info *info;
+ struct hlist_node node;
+};
+
+struct ipu4_bknd_ops {
+	/* backend initialization routine */
+ int (*init)(void);
+
+	/* backend cleanup routine */
+ void (*cleanup)(void);
+
+	/* retrieving the id of the current virtual machine */
+ int (*get_vm_id)(void);
+
+ int (*send_req)(int, struct ipu4_virtio_req *, int, int);
+};
+
+struct ipu4_virtio_ctx {
+ /* VM(domain) id of current VM instance */
+ int domid;
+
+ /* backend ops - hypervisor specific */
+ struct ipu4_bknd_ops *bknd_ops;
+
+ /* flag that shows whether backend is initialized */
+ bool initialized;
+
+ /* device global lock */
+ struct mutex lock;
+};
+
+enum intel_ipu4_virtio_command {
+ IPU4_CMD_DEVICE_OPEN = 0x1,
+ IPU4_CMD_DEVICE_CLOSE,
+ IPU4_CMD_STREAM_ON,
+ IPU4_CMD_STREAM_OFF,
+ IPU4_CMD_GET_BUF,
+ IPU4_CMD_PUT_BUF,
+ IPU4_CMD_SET_FORMAT,
+ IPU4_CMD_ENUM_NODES,
+ IPU4_CMD_ENUM_LINKS,
+ IPU4_CMD_SETUP_PIPE,
+ IPU4_CMD_SET_FRAMEFMT,
+ IPU4_CMD_GET_FRAMEFMT,
+ IPU4_CMD_GET_SUPPORTED_FRAMEFMT,
+ IPU4_CMD_SET_SELECTION,
+ IPU4_CMD_GET_SELECTION,
+ IPU4_CMD_POLL,
+ IPU4_CMD_PIPELINE_OPEN,
+ IPU4_CMD_PIPELINE_CLOSE,
+ IPU4_CMD_GET_N
+};
+
+enum intel_ipu4_virtio_req_feedback {
+ IPU4_REQ_PROCESSED,
+ IPU4_REQ_NEEDS_FOLLOW_UP,
+ IPU4_REQ_ERROR,
+ IPU4_REQ_NOT_RESPONDED
+};
+extern struct ipu4_bknd_ops ipu4_virtio_bknd_ops;
+
+void ipu4_virtio_fe_table_init(void);
+
+int ipu4_virtio_fe_add(struct ipu4_virtio_fe_info *fe_info);
+
+struct ipu4_virtio_fe_info *ipu4_virtio_fe_find(int client_id);
+
+struct ipu4_virtio_fe_info *ipu4_virtio_fe_find_by_vmid(int vmid);
+
+#endif
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.c
new file mode 100644
index 000000000000..44edf7414a15
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/syscalls.h>
+
+#include "intel-ipu4-virtio-common.h"
+#include "intel-ipu4-virtio-fe-payload.h"
+
+/* Populate @req for command @cmd, copying the command-specific number
+ * of operands from @op. Unknown commands leave the operands untouched.
+ */
+void intel_ipu4_virtio_create_req(struct ipu4_virtio_req *req,
+			enum intel_ipu4_virtio_command cmd, int *op)
+{
+	int i, nops;
+
+	req->stat = IPU4_REQ_NOT_RESPONDED;
+	req->cmd = cmd;
+
+	switch (cmd) {
+	case IPU4_CMD_POLL:
+	case IPU4_CMD_DEVICE_OPEN:
+	case IPU4_CMD_DEVICE_CLOSE:
+	case IPU4_CMD_STREAM_ON:
+	case IPU4_CMD_STREAM_OFF:
+	case IPU4_CMD_PUT_BUF:
+	case IPU4_CMD_SET_FORMAT:
+	case IPU4_CMD_ENUM_NODES:
+	case IPU4_CMD_ENUM_LINKS:
+	case IPU4_CMD_SETUP_PIPE:
+	case IPU4_CMD_SET_FRAMEFMT:
+	case IPU4_CMD_GET_FRAMEFMT:
+	case IPU4_CMD_GET_SUPPORTED_FRAMEFMT:
+	case IPU4_CMD_SET_SELECTION:
+	case IPU4_CMD_GET_SELECTION:
+		/* op0 - virtual device node number
+		 * op1 - actual device fd (0 by default)
+		 */
+		nops = 2;
+		break;
+	case IPU4_CMD_GET_BUF:
+		nops = 3;
+		break;
+	default:
+		return;
+	}
+
+	for (i = 0; i < nops; i++)
+		req->op[i] = op[i];
+}
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.h
new file mode 100644
index 000000000000..173c31a54692
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef __IPU4_VIRTIO_FE_PAYLOAD__
+#define __IPU4_VIRTIO_FE_PAYLOAD__
+
+#include "intel-ipu4-virtio-common.h"
+
+void intel_ipu4_virtio_create_req(struct ipu4_virtio_req *req,
+ enum intel_ipu4_virtio_command cmd, int *op);
+
+#endif
\ No newline at end of file
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.c
new file mode 100644
index 000000000000..0f5d8b6f83ec
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.c
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include <linux/ioctl.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "intel-ipu4-virtio-fe-payload.h"
+#include "intel-ipu4-virtio-fe-pipeline.h"
+
+/* Forward a pipeline ioctl to the backend over VQ 0 and wait for the
+ * reply. @data travels by physical address in the request payload.
+ */
+int process_pipeline(struct file *file, struct ipu4_virtio_ctx *fe_priv,
+			void *data, int cmd)
+{
+	struct ipu4_virtio_req *req;
+	int op[10] = {0};
+	int rval;
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->payload = virt_to_phys(data);
+	intel_ipu4_virtio_create_req(req, cmd, &op[0]);
+
+	rval = fe_priv->bknd_ops->send_req(fe_priv->domid, req, true,
+			IPU_VIRTIO_QUEUE_0);
+	if (rval)
+		pr_err("Failed to send request to BE\n");
+
+	kfree(req);
+	return rval;
+}
+
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.h
new file mode 100644
index 000000000000..d1fbe106beda
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef IPU4_VIRTIO_FE_PIPELINE_H
+#define IPU4_VIRTIO_FE_PIPELINE_H
+
+#include <media/ici.h>
+
+#include "virtio/intel-ipu4-virtio-common.h"
+
+int process_pipeline(struct file *file,
+ struct ipu4_virtio_ctx *fe_priv,
+ void *data,
+ int cmd);
+
+
+#endif
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe.c
new file mode 100644
index 000000000000..d95e52a09b32
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/module.h>
+#include "intel-ipu4-virtio-common.h"
+
+static DEFINE_IDA(index_ida);
+
+struct ipu4_virtio_uos {
+ struct virtqueue *vq[IPU_VIRTIO_QUEUE_MAX];
+ struct completion have_data;
+ char name[25];
+ unsigned int data_avail;
+ int index;
+ bool busy;
+ int vmid;
+};
+
+struct completion completion_queue[IPU_VIRTIO_QUEUE_MAX];
+
+/* Assuming there will be one FE instance per VM */
+static struct ipu4_virtio_uos *ipu4_virtio_fe;
+
+/* Used-buffer callback for VQ 0: wake the sender blocked on queue 0. */
+static void ipu_virtio_fe_tx_done_vq_0(struct virtqueue *vq)
+{
+	struct ipu4_virtio_uos *priv = vq->vdev->priv;
+
+	/* Spurious callbacks (shared IRQs, virtio_pci) carry no buffer. */
+	if (!virtqueue_get_buf(vq, &priv->data_avail))
+		return;
+
+	pr_debug("IPU FE:%s vmid:%d TX for VQ 0 done\n", __func__, priv->vmid);
+	complete(&completion_queue[0]);
+}
+
+/* Used-buffer callback for VQ 1: wake the sender blocked on queue 1. */
+static void ipu_virtio_fe_tx_done_vq_1(struct virtqueue *vq)
+{
+	struct ipu4_virtio_uos *priv = vq->vdev->priv;
+
+	/* Spurious callbacks (shared IRQs, virtio_pci) carry no buffer. */
+	if (!virtqueue_get_buf(vq, &priv->data_avail))
+		return;
+
+	pr_debug("IPU FE:%s vmid:%d TX for VQ 1 done\n", __func__, priv->vmid);
+	complete(&completion_queue[1]);
+}
+
+/* Post @buf (size @size) on virtqueue @nqueue so the backend can read
+ * and complete it, then kick the queue.
+ */
+static void ipu_virtio_fe_register_buffer(struct ipu4_virtio_uos *vi, void *buf, size_t size,
+				int nqueue)
+{
+	struct scatterlist sg;
+	int err;
+
+	if (nqueue >= IPU_VIRTIO_QUEUE_MAX) {
+		pr_debug("Number of queue exceeding max queue number\n");
+		return;
+	}
+
+	sg_init_one(&sg, buf, size);
+
+	/* was unchecked: "should always be room", but don't assume it */
+	err = virtqueue_add_inbuf(vi->vq[nqueue], &sg, 1, buf, GFP_KERNEL);
+	if (err) {
+		pr_err("%s: failed to add buffer to vq %d: %d\n",
+			__func__, nqueue, err);
+		return;
+	}
+
+	virtqueue_kick(vi->vq[nqueue]);
+}
+
+/* Common probe path (also used by PM restore): allocate the frontend
+ * state, find the two virtqueues and publish the instance.
+ */
+static int ipu_virtio_fe_probe_common(struct virtio_device *vdev)
+{
+	int err, index, i;
+	struct ipu4_virtio_uos *priv = NULL;
+	vq_callback_t *callbacks[] = {ipu_virtio_fe_tx_done_vq_0,
+					ipu_virtio_fe_tx_done_vq_1};
+	static const char *names[] = {"csi_queue_0", "csi_queue_1"};
+
+	priv = kzalloc(sizeof(struct ipu4_virtio_uos), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->index = index = ida_simple_get(&index_ida, 0, 0, GFP_KERNEL);
+	if (index < 0) {
+		err = index;
+		goto err_ida;
+	}
+	/* was sprintf: bound the write to the fixed-size name buffer */
+	snprintf(priv->name, sizeof(priv->name), "virtio_.%d", index);
+
+	for (i = 0; i < IPU_VIRTIO_QUEUE_MAX; i++)
+		init_completion(&completion_queue[i]);
+
+	priv->vmid = -1;
+	vdev->priv = priv;
+	err = virtio_find_vqs(vdev, IPU_VIRTIO_QUEUE_MAX,
+			priv->vq, callbacks, names, NULL);
+	if (err)
+		goto err_find;
+
+	ipu4_virtio_fe = priv;
+
+	return 0;
+
+err_find:
+	ida_simple_remove(&index_ida, index);
+err_ida:
+	kfree(priv);
+	return err;
+}
+
+/* Common teardown (remove and PM freeze): wake any blocked senders,
+ * reset the device and free the per-device state.
+ */
+static void ipu_virtio_fe_remove_common(struct virtio_device *vdev)
+{
+	struct ipu4_virtio_uos *priv = vdev->priv;
+	int i;
+
+	priv->data_avail = 0;
+	/* unblock anyone stuck in wait_for_completion() in send_req */
+	for (i = 0; i < IPU_VIRTIO_QUEUE_MAX; i++)
+		complete(&completion_queue[i]);
+	vdev->config->reset(vdev);
+	priv->busy = false;
+
+	vdev->config->del_vqs(vdev);
+	/* NOTE(review): the IDA index is never returned here, so indexes
+	 * leak across probe/remove cycles — confirm before re-enabling.
+	 */
+	//ida_simple_remove(&index_ida, priv->index);
+	kfree(priv);
+}
+
+/* Send one request to the backend on queue @idx and block until the
+ * backend completes it.
+ * NOTE(review): the @wait argument is currently ignored — every call
+ * blocks; confirm whether a fire-and-forget mode was intended.
+ */
+static int ipu_virtio_fe_send_req(int vmid, struct ipu4_virtio_req *req,
+	int wait, int idx)
+{
+	struct ipu4_virtio_uos *priv = ipu4_virtio_fe;
+	int ret = 0;
+	pr_debug("IPU FE:%s\n", __func__);
+	if (priv == NULL) {
+		pr_err("IPU Backend not connected\n");
+		return -ENOENT;
+	}
+
+	/* NOTE(review): init_completion() re-initializes the whole object;
+	 * reinit_completion() is the documented API for reuse — confirm no
+	 * concurrent user of completion_queue[idx] exists at this point.
+	 */
+	init_completion(&completion_queue[idx]);
+	ipu_virtio_fe_register_buffer(ipu4_virtio_fe, req, sizeof(*req), idx);
+	wait_for_completion(&completion_queue[idx]);
+
+	return ret;
+}
+/* Return the VM id learned during scan, or -1 when no BE is attached. */
+static int ipu_virtio_fe_get_vmid(void)
+{
+	struct ipu4_virtio_uos *priv = ipu4_virtio_fe;
+
+	if (!priv) {
+		pr_err("IPU Backend not connected\n");
+		return -1;
+	}
+
+	return priv->vmid;
+}
+
+/* Backend-ops init hook: nothing to set up for the virtio transport. */
+int ipu_virtio_fe_register(void)
+{
+	pr_debug("IPU FE:%s\n", __func__);
+	return 0;
+}
+
+/* Backend-ops cleanup hook: nothing to tear down. */
+void ipu_virtio_fe_unregister(void)
+{
+	pr_debug("IPU FE:%s\n", __func__);
+	return;
+}
+/* virtio driver probe: delegate to the common probe path. */
+static int virt_probe(struct virtio_device *vdev)
+{
+	return ipu_virtio_fe_probe_common(vdev);
+}
+
+/* virtio driver remove: delegate to the common teardown path. */
+static void virt_remove(struct virtio_device *vdev)
+{
+	ipu_virtio_fe_remove_common(vdev);
+}
+
+/* Post our vmid field as an inbuf so the backend can write the guest's
+ * vmid back, then poll (up to ~100 ms) for it to arrive.
+ */
+static void virt_scan(struct virtio_device *vdev)
+{
+	struct ipu4_virtio_uos *vi = (struct ipu4_virtio_uos *)vdev->priv;
+	int timeout = 1000;
+
+	if (vi == NULL) {
+		pr_err("IPU No frontend private data\n");
+		return;
+	}
+	ipu_virtio_fe_register_buffer(vi, &vi->vmid, sizeof(vi->vmid),
+		IPU_VIRTIO_QUEUE_0);
+
+	/* after a break, timeout >= 0; only full exhaustion leaves -1 */
+	while (timeout--) {
+		if (vi->vmid > 0)
+			break;
+		usleep_range(100, 120);
+	}
+	pr_debug("IPU FE:%s vmid:%d\n", __func__, vi->vmid);
+
+	if (timeout < 0)
+		pr_err("IPU Cannot query vmid\n");
+
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* PM freeze: tear down exactly like remove; restore re-probes. */
+static int virt_freeze(struct virtio_device *vdev)
+{
+	ipu_virtio_fe_remove_common(vdev);
+	return 0;
+}
+
+/* PM restore: rebuild the frontend via the common probe path. */
+static int virt_restore(struct virtio_device *vdev)
+{
+	return ipu_virtio_fe_probe_common(vdev);
+}
+#endif
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_IPU, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+struct ipu4_bknd_ops ipu4_virtio_bknd_ops = {
+ .init = ipu_virtio_fe_register,
+ .cleanup = ipu_virtio_fe_unregister,
+ .get_vm_id = ipu_virtio_fe_get_vmid,
+ .send_req = ipu_virtio_fe_send_req
+};
+
+static struct virtio_driver virtio_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virt_probe,
+ .remove = virt_remove,
+ .scan = virt_scan,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virt_freeze,
+ .restore = virt_restore,
+#endif
+};
+
+
+module_virtio_driver(virtio_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("IPU4 virtio driver");
+MODULE_LICENSE("Dual BSD/GPL");
--
https://clearlinux.org