mirror of https://github.com/armbian/build
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Patrick Yavitz <pyavitz@armbian.com>
Date: Fri, 21 Jun 2024 11:54:06 -0400
Subject: add spacemit patch set

source: https://gitee.com/bianbu-linux/linux-6.1

Signed-off-by: Patrick Yavitz <pyavitz@armbian.com>
---
drivers/media/platform/spacemit/vpu_k1x/Kbuild | 58 +
drivers/media/platform/spacemit/vpu_k1x/Kconfig | 26 +
drivers/media/platform/spacemit/vpu_k1x/Makefile | 11 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_dev.c | 758 ++
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_dev.h | 77 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg.c | 469 ++
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg.h | 230 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v500.c | 65 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v500.h | 48 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v52_v76.c | 97 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v52_v76.h | 49 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v550.c | 82 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v550.h | 48 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v61.c | 95 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v61.h | 49 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_lsid.c | 342 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_lsid.h | 166 +
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_scheduler.c | 944 +++
drivers/media/platform/spacemit/vpu_k1x/dev/mvx_scheduler.h | 201 +
drivers/media/platform/spacemit/vpu_k1x/external/fw_v2/mve_protocol_def.h | 1776 +++++
drivers/media/platform/spacemit/vpu_k1x/external/fw_v3/mve_protocol_def.h | 1741 +++++
drivers/media/platform/spacemit/vpu_k1x/if/mvx_bitops.h | 91 +
drivers/media/platform/spacemit/vpu_k1x/if/mvx_buffer.c | 524 ++
drivers/media/platform/spacemit/vpu_k1x/if/mvx_buffer.h | 413 ++
drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware.c | 600 ++
drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware.h | 921 +++
drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_cache.c | 800 ++
drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_cache.h | 246 +
drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_priv.h | 163 +
drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_v2.c | 3410 +++++++++
drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_v3.c | 171 +
drivers/media/platform/spacemit/vpu_k1x/if/mvx_if.c | 239 +
drivers/media/platform/spacemit/vpu_k1x/if/mvx_if.h | 471 ++
drivers/media/platform/spacemit/vpu_k1x/if/mvx_mmu.c | 1366 ++++
drivers/media/platform/spacemit/vpu_k1x/if/mvx_mmu.h | 492 ++
drivers/media/platform/spacemit/vpu_k1x/if/mvx_secure.c | 425 ++
drivers/media/platform/spacemit/vpu_k1x/if/mvx_secure.h | 139 +
drivers/media/platform/spacemit/vpu_k1x/if/mvx_session.c | 3635 ++++++++++
drivers/media/platform/spacemit/vpu_k1x/if/mvx_session.h | 1144 +++
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx-v4l2-controls.h | 478 ++
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_ext_if.h | 87 +
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_ext_v4l2.c | 182 +
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_buffer.c | 515 ++
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_buffer.h | 167 +
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_ctrls.c | 1446 ++++
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_ctrls.h | 64 +
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_fops.c | 208 +
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_fops.h | 64 +
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_session.c | 620 ++
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_session.h | 243 +
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_vidioc.c | 1663 +++++
drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_vidioc.h | 147 +
drivers/media/platform/spacemit/vpu_k1x/mvx_driver.c | 70 +
drivers/media/platform/spacemit/vpu_k1x/mvx_dvfs.c | 1341 ++++
drivers/media/platform/spacemit/vpu_k1x/mvx_dvfs.h | 101 +
drivers/media/platform/spacemit/vpu_k1x/mvx_log.c | 931 +++
drivers/media/platform/spacemit/vpu_k1x/mvx_log.h | 386 +
drivers/media/platform/spacemit/vpu_k1x/mvx_log_group.c | 168 +
drivers/media/platform/spacemit/vpu_k1x/mvx_log_group.h | 68 +
drivers/media/platform/spacemit/vpu_k1x/mvx_log_ram.h | 212 +
drivers/media/platform/spacemit/vpu_k1x/mvx_pm_runtime.c | 85 +
drivers/media/platform/spacemit/vpu_k1x/mvx_pm_runtime.h | 67 +
drivers/media/platform/spacemit/vpu_k1x/mvx_seq.c | 95 +
drivers/media/platform/spacemit/vpu_k1x/mvx_seq.h | 94 +
drivers/media/platform/spacemit/vpu_k1x/sconscript | 36 +
65 files changed, 32120 insertions(+)

diff --git a/drivers/media/platform/spacemit/vpu_k1x/Kbuild b/drivers/media/platform/spacemit/vpu_k1x/Kbuild
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/Kbuild
|
|
@@ -0,0 +1,58 @@
|
|
+###########################################################
|
|
+# Set the include-path according to the defined interface.
|
|
+###########################################################
|
|
+
|
|
+ccflags-y += -I$(srctree)/drivers/media/platform/spacemit/vpu_k1x
|
|
+ccflags-y += -I$(srctree)/drivers/media/platform/spacemit/vpu_k1x/if
|
|
+ccflags-y += -I$(srctree)/drivers/media/platform/spacemit/vpu_k1x/dev
|
|
+ccflags-y += -I$(srctree)/drivers/media/platform/spacemit/vpu_k1x/if/v4l2
|
|
+ccflags-y += -I$(srctree)/drivers/media/platform/spacemit/vpu_k1x/external
|
|
+
|
|
+ccflags-$(CONFIG_VIDEO_LINLON_K1X_FTRACE) += -DMVX_LOG_FTRACE_ENABLE
|
|
+ccflags-$(CONFIG_VIDEO_LINLON_K1X_PRINT_FILE) += -DMVX_LOG_PRINT_FILE_ENABLE
|
|
+ccflags-y += $(EXTRA_CCFLAGS)
|
|
+
|
|
+###########################################################
|
|
+# Define build targets and what files to include.
|
|
+###########################################################
|
|
+
|
|
+# Amvx module
|
|
+obj-$(CONFIG_VIDEO_LINLON_K1X) := amvx.o
|
|
+
|
|
+# Add objects for if module.
|
|
+if-y := if/mvx_if.o \
|
|
+ if/mvx_buffer.o \
|
|
+ if/mvx_firmware_cache.o \
|
|
+ if/mvx_firmware.o \
|
|
+ if/mvx_firmware_v2.o \
|
|
+ if/mvx_firmware_v3.o \
|
|
+ if/mvx_mmu.o \
|
|
+ if/mvx_secure.o \
|
|
+ if/mvx_session.o
|
|
+
|
|
+# Add external interface.
|
|
+if-y += if/v4l2/mvx_ext_v4l2.o \
|
|
+ if/v4l2/mvx_v4l2_buffer.o \
|
|
+ if/v4l2/mvx_v4l2_session.o \
|
|
+ if/v4l2/mvx_v4l2_vidioc.o \
|
|
+ if/v4l2/mvx_v4l2_fops.o \
|
|
+ if/v4l2/mvx_v4l2_ctrls.o
|
|
+
|
|
+# Add objects for dev module.
|
|
+dev-y := dev/mvx_dev.o \
|
|
+ dev/mvx_hwreg.o \
|
|
+ dev/mvx_hwreg_v500.o \
|
|
+ dev/mvx_hwreg_v550.o \
|
|
+ dev/mvx_hwreg_v61.o \
|
|
+ dev/mvx_hwreg_v52_v76.o \
|
|
+ dev/mvx_lsid.o \
|
|
+ dev/mvx_scheduler.o \
|
|
+ mvx_pm_runtime.o
|
|
+
|
|
+# Add driver objects.
|
|
+amvx-y := mvx_driver.o \
|
|
+ mvx_seq.o \
|
|
+ mvx_log.o \
|
|
+ mvx_log_group.o \
|
|
+ mvx_dvfs.o \
|
|
+ $(if-y) $(dev-y)
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/Kconfig b/drivers/media/platform/spacemit/vpu_k1x/Kconfig
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/Kconfig
|
|
@@ -0,0 +1,26 @@
|
|
+config VIDEO_LINLON_K1X
|
|
+ tristate "Linlon VPU support."
|
|
+ depends on VIDEO_DEV && SOC_SPACEMIT_K1X
|
|
+# depends on VIDEO_V4L2
|
|
+ select VIDEOBUF2_VMALLOC
|
|
+ select VIDEOBUF2_CORE
|
|
+ select DMA_SHARED_BUFFER
|
|
+ select VIDEOBUF2_MEMOPS
|
|
+ select VIDEOBUF2_DMA_SG
|
|
+ default n
|
|
+ help
|
|
+ This enables support for the Linlon VPU family.
|
|
+
|
|
+config VIDEO_LINLON_K1X_FTRACE
|
|
+ depends on VIDEO_LINLON_K1X
|
|
+ bool "Send kernel space logs to ftrace."
|
|
+ default n
|
|
+ help
|
|
+ Send kernel space logs to ftrace.
|
|
+
|
|
+config VIDEO_LINLON_K1X_PRINT_FILE
|
|
+ depends on VIDEO_LINLON_K1X
|
|
+ bool "Append file and line number to kernel space log messages."
|
|
+ default y
|
|
+ help
|
|
+ Append file and line number to kernel space log messages.
|
|
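
The two logging options above only toggle compile-time defines: the Kbuild file maps them to -DMVX_LOG_FTRACE_ENABLE and -DMVX_LOG_PRINT_FILE_ENABLE. The macros that consume those defines live in mvx_log.h / mvx_log_group.h, which are not part of this excerpt, so the sketch below is only a hypothetical illustration of how such a switch is typically consumed; mvx_log_example() is an invented name, not part of the patch.

    /* Hypothetical sketch: how a print-file/line build switch is commonly
     * consumed. The real macros are defined in mvx_log.h (not shown here).
     */
    #include <linux/printk.h>

    #ifdef MVX_LOG_PRINT_FILE_ENABLE
    #define mvx_log_example(fmt, ...) \
            pr_info("%s:%d " fmt "\n", __FILE__, __LINE__, ##__VA_ARGS__)
    #else
    #define mvx_log_example(fmt, ...) \
            pr_info(fmt "\n", ##__VA_ARGS__)
    #endif
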
diff --git a/drivers/media/platform/spacemit/vpu_k1x/Makefile b/drivers/media/platform/spacemit/vpu_k1x/Makefile
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/Makefile
|
|
@@ -0,0 +1,11 @@
|
|
+all: mono_v4l2
|
|
+
|
|
+mono_v4l2:
|
|
+ @env CONFIG_VIDEO_LINLON=m CONFIG_VIDEO_LINLON_MONO=y CONFIG_VIDEO_LINLON_IF_V4L2=y $(MAKE) -C $(KDIR) M=$(CURDIR) modules
|
|
+
|
|
+clean:
|
|
+ @rm -rf *.ko
|
|
+ @find . -type f -name '*.o' -delete
|
|
+ @rm -rf *.mod.c
|
|
+ @rm -f Module.symvers
|
|
+ @rm -f modules.order
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_dev.c b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_dev.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_dev.c
|
|
@@ -0,0 +1,758 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/bitops.h>
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/err.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/pm_runtime.h>
|
|
+#include <linux/of_address.h>
|
|
+#include <linux/of_device.h>
|
|
+#include <linux/of_irq.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/pci.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/pm_runtime.h>
|
|
+#include <linux/pm.h>
|
|
+#include <linux/printk.h>
|
|
+#include <linux/workqueue.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/delay.h>
|
|
+#include "mvx_bitops.h"
|
|
+#include "mvx_dev.h"
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_session.h"
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx_pm_runtime.h"
|
|
+#include "mvx_dvfs.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * Name of the MVx dev device.
|
|
+ */
|
|
+#define MVX_DEV_NAME "amvx_dev"
|
|
+
|
|
+#define MVX_PCI_VENDOR 0x13b5
|
|
+#define MVX_PCI_DEVICE 0x0001
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * struct mvx_client_session - Device session.
|
|
+ *
|
|
+ * When the if module registers a session this structure is returned.
|
|
+ */
|
|
+struct mvx_client_session {
|
|
+ struct mvx_dev_ctx *ctx;
|
|
+ struct mvx_sched_session session;
|
|
+};
|
|
+/****************************************************************************
|
|
+ * Static variables and functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+static struct mvx_dev_ctx *client_ops_to_ctx(struct mvx_client_ops *client)
|
|
+{
|
|
+ return container_of(client, struct mvx_dev_ctx, client_ops);
|
|
+}
|
|
+
|
|
+static void get_hw_ver(struct mvx_client_ops *client,
|
|
+ struct mvx_hw_ver *hw_ver)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = client_ops_to_ctx(client);
|
|
+
|
|
+ hw_ver->id = ctx->hw_id;
|
|
+ hw_ver->revision = ctx->hw_revision;
|
|
+ hw_ver->patch = ctx->hw_patch;
|
|
+}
|
|
+
|
|
+static void get_formats(struct mvx_client_ops *client,
|
|
+ enum mvx_direction direction,
|
|
+ uint64_t *formats)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = client_ops_to_ctx(client);
|
|
+ uint32_t fuses;
|
|
+
|
|
+ *formats = 0;
|
|
+
|
|
+ ctx->hwreg.ops.get_formats(direction, formats);
|
|
+
|
|
+ /* Remove formats based on fuses. */
|
|
+ fuses = ctx->fuses;
|
|
+ if (fuses & MVX_HWREG_FUSE_DISABLE_AFBC) {
|
|
+ mvx_clear_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
|
|
+ mvx_clear_bit(MVX_FORMAT_YUV420_AFBC_10, formats);
|
|
+ mvx_clear_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
|
|
+ mvx_clear_bit(MVX_FORMAT_YUV422_AFBC_10, formats);
|
|
+ }
|
|
+
|
|
+ if (fuses & MVX_HWREG_FUSE_DISABLE_REAL)
|
|
+ mvx_clear_bit(MVX_FORMAT_RV, formats);
|
|
+
|
|
+ if (fuses & MVX_HWREG_FUSE_DISABLE_VPX) {
|
|
+ mvx_clear_bit(MVX_FORMAT_VP8, formats);
|
|
+ mvx_clear_bit(MVX_FORMAT_VP9, formats);
|
|
+ }
|
|
+
|
|
+ if (fuses & MVX_HWREG_FUSE_DISABLE_HEVC)
|
|
+ mvx_clear_bit(MVX_FORMAT_HEVC, formats);
|
|
+}
|
|
+
|
|
+static unsigned int get_ncores(struct mvx_client_ops *client)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = client_ops_to_ctx(client);
|
|
+ uint32_t ncores;
|
|
+
|
|
+ ncores = ctx->ncores;
|
|
+
|
|
+ return ncores;
|
|
+}
|
|
+
|
|
+static struct mvx_client_session *register_session(
|
|
+ struct mvx_client_ops *client,
|
|
+ struct mvx_if_session *isession)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = client_ops_to_ctx(client);
|
|
+ struct mvx_client_session *csession;
|
|
+ int ret;
|
|
+
|
|
+ csession = devm_kzalloc(ctx->dev, sizeof(*csession), GFP_KERNEL);
|
|
+ if (csession == NULL)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ csession->ctx = ctx;
|
|
+
|
|
+ ret = mvx_pm_runtime_get_sync(ctx->dev);
|
|
+ if (ret < 0)
|
|
+ goto free_session;
|
|
+
|
|
+ ret = mvx_sched_session_construct(&csession->session, isession);
|
|
+ if (ret != 0)
|
|
+ goto runtime_put;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO,
|
|
+ "Register client session. csession=0x%p, isession=0x%p.",
|
|
+ csession, isession);
|
|
+
|
|
+ return csession;
|
|
+
|
|
+runtime_put:
|
|
+ mvx_pm_runtime_put_sync(csession->ctx->dev);
|
|
+free_session:
|
|
+ devm_kfree(ctx->dev, csession);
|
|
+
|
|
+ return ERR_PTR(ret);
|
|
+}
|
|
+
|
|
+static void unregister_session(struct mvx_client_session *csession)
|
|
+{
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO,
|
|
+ "%p Unregister client session. csession=0x%p.",
|
|
+ mvx_if_session_to_session(csession->session.isession),
|
|
+ csession);
|
|
+
|
|
+ mvx_sched_terminate(&csession->ctx->scheduler, &csession->session);
|
|
+ mvx_sched_session_destruct(&csession->session);
|
|
+
|
|
+ mvx_pm_runtime_put_sync(csession->ctx->dev);
|
|
+
|
|
+ devm_kfree(csession->ctx->dev, csession);
|
|
+}
|
|
+
|
|
+static int switch_in(struct mvx_client_session *csession)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = csession->ctx;
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_sched_switch_in(&ctx->scheduler, &csession->session);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int send_irq(struct mvx_client_session *csession)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = csession->ctx;
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_sched_send_irq(&ctx->scheduler, &csession->session);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int flush_mmu(struct mvx_client_session *csession)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = csession->ctx;
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_sched_flush_mmu(&ctx->scheduler, &csession->session);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void print_debug(struct mvx_client_session *csession)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = csession->ctx;
|
|
+
|
|
+ mvx_sched_print_debug(&ctx->scheduler, &csession->session);
|
|
+}
|
|
+
|
|
+static void wait_session_idle(struct mvx_client_session *csession)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = csession->ctx;
|
|
+
|
|
+ mvx_sched_wait_session_idle(&ctx->scheduler, &csession->session);
|
|
+}
|
|
+
|
|
+
|
|
+static struct mvx_dev_ctx *work_to_ctx(struct work_struct *work)
|
|
+{
|
|
+ return container_of(work, struct mvx_dev_ctx, work);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * irq_bottom() - Handle IRQ bottom.
|
|
+ * @work: Work struct that is part of the context structure.
|
|
+ *
|
|
+ * This function is called from a work queue and does the actual work of
|
|
+ * handling the interrupt.
|
|
+ */
|
|
+static void irq_bottom(struct work_struct *work)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = work_to_ctx(work);
|
|
+ uint32_t nlsid;
|
|
+ uint32_t i;
|
|
+
|
|
+ nlsid = ctx->scheduler.nlsid;
|
|
+ for (i = 0; i < nlsid; i++)
|
|
+ if (test_and_clear_bit(i, &ctx->irqve))
|
|
+ mvx_sched_handle_irq(&ctx->scheduler, i);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * irq_top() - Handle IRQ top.
|
|
+ * @irq: IRQ number.
|
|
+ * @dev_id: Pointer to context.
|
|
+ *
|
|
+ * This function is called in interrupt context. It should be short and must not
|
|
+ * block.
|
|
+ *
|
|
+ * Return: IRQ status if the IRQ was handled or not.
|
|
+ */
|
|
+static irqreturn_t irq_top(int irq,
|
|
+ void *dev_id)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = dev_id;
|
|
+ uint32_t nlsid;
|
|
+ uint32_t irqve;
|
|
+ int ret = IRQ_NONE;
|
|
+
|
|
+ nlsid = mvx_hwreg_read(&ctx->hwreg, MVX_HWREG_NLSID);
|
|
+ irqve = mvx_hwreg_read(&ctx->hwreg, MVX_HWREG_IRQVE);
|
|
+ while (nlsid-- > 0)
|
|
+ if ((irqve >> nlsid) & 0x1) {
|
|
+ mvx_hwreg_write_lsid(&ctx->hwreg,
|
|
+ nlsid,
|
|
+ MVX_HWREG_LIRQVE,
|
|
+ 0);
|
|
+ wmb(); //make sure the LIRQVE is cleared.
|
|
+ set_bit(nlsid, &ctx->irqve);
|
|
+ ret = IRQ_HANDLED;
|
|
+ }
|
|
+
|
|
+ queue_work(ctx->work_queue, &ctx->work);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void mvx_pm_disable_clk(struct device *dev)
|
|
+{
|
|
+ struct clk* clock;
|
|
+ struct mvx_dev_ctx *ctx;
|
|
+
|
|
+ ctx = dev_get_drvdata(dev);
|
|
+ clock = ctx->clock;
|
|
+
|
|
+ if (!IS_ERR_OR_NULL(clock))
|
|
+ {
|
|
+ clk_disable_unprepare(clock);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void mvx_pm_enable_clk(struct device *dev)
|
|
+{
|
|
+ struct clk* clock;
|
|
+ struct mvx_dev_ctx *ctx;
|
|
+
|
|
+ ctx = dev_get_drvdata(dev);
|
|
+ clock = ctx->clock;
|
|
+
|
|
+ if (!IS_ERR_OR_NULL(clock))
|
|
+ {
|
|
+ clk_prepare_enable(clock);
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+static int mvx_dev_probe(struct device *dev,
|
|
+ struct resource *iores,
|
|
+ struct resource *irqres)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx;
|
|
+ int ret;
|
|
+
|
|
+ /* Create device context and store pointer in device private data. */
|
|
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
|
|
+ if (ctx == NULL)
|
|
+ return -EINVAL;
|
|
+
|
|
+ ctx->dev = dev;
|
|
+ ctx->clock = devm_clk_get(dev, NULL);
|
|
+ mutex_init(&ctx->pm_mutex);
|
|
+
|
|
+ dev_set_drvdata(dev, ctx);
|
|
+ mvx_dvfs_init(dev);
|
|
+
|
|
+ ret = mvx_pm_runtime_get_sync(ctx->dev);
|
|
+ if (ret < 0)
|
|
+ goto free_ctx;
|
|
+
|
|
+ /* handle reset for k1x */
|
|
+ ctx->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
|
|
+ if (IS_ERR(ctx->rst))
|
|
+ goto exit_reset;
|
|
+
|
|
+ reset_control_deassert(ctx->rst);
|
|
+ clk_set_rate(ctx->clock, 819200000);
|
|
+
|
|
+ /* Setup client ops callbacks. */
|
|
+ ctx->client_ops.get_hw_ver = get_hw_ver;
|
|
+ ctx->client_ops.get_formats = get_formats;
|
|
+ ctx->client_ops.get_ncores = get_ncores;
|
|
+ ctx->client_ops.register_session = register_session;
|
|
+ ctx->client_ops.unregister_session = unregister_session;
|
|
+ ctx->client_ops.switch_in = switch_in;
|
|
+ ctx->client_ops.send_irq = send_irq;
|
|
+ ctx->client_ops.flush_mmu = flush_mmu;
|
|
+ ctx->client_ops.print_debug = print_debug;
|
|
+ ctx->client_ops.wait_session_idle = wait_session_idle;
|
|
+
|
|
+ /* Create if context. */
|
|
+ ctx->if_ops = mvx_if_create(dev, &ctx->client_ops, ctx);
|
|
+ if (IS_ERR(ctx->if_ops))
|
|
+ goto runtime_put;
|
|
+
|
|
+ /* Create debugfs entry */
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ char name[20];
|
|
+
|
|
+ scnprintf(name, sizeof(name), "%s%u", MVX_DEV_NAME, dev->id);
|
|
+ ctx->dentry = debugfs_create_dir(name, NULL);
|
|
+ if (IS_ERR_OR_NULL(ctx->dentry)) {
|
|
+ ret = -EINVAL;
|
|
+ goto destroy_if;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Construct hw register context. */
|
|
+ ret = mvx_hwreg_construct(&ctx->hwreg, dev, iores, ctx->dentry);
|
|
+ if (ret != 0)
|
|
+ goto destruct_dentry;
|
|
+
|
|
+ ret = mvx_sched_construct(&ctx->scheduler, dev, ctx->if_ops,
|
|
+ &ctx->hwreg, ctx->dentry);
|
|
+ if (ret != 0)
|
|
+ goto destruct_hwreg;
|
|
+
|
|
+ ctx->fuses = mvx_hwreg_read(&ctx->hwreg, MVX_HWREG_FUSE);
|
|
+ ctx->ncores = mvx_hwreg_read(&ctx->hwreg, MVX_HWREG_NCORES);
|
|
+
|
|
+ /* Create work queue for IRQ handler. */
|
|
+ ctx->work_queue = alloc_workqueue(dev_name(dev), WQ_UNBOUND, 1);
|
|
+ if (ctx->work_queue == NULL) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR,
|
|
+ "Failed to create work queue.");
|
|
+ ret = -EINVAL;
|
|
+ goto destruct_sched;
|
|
+ }
|
|
+
|
|
+ INIT_WORK(&ctx->work, irq_bottom);
|
|
+
|
|
+ /* Request IRQ handler. */
|
|
+ ctx->irq = irqres->start;
|
|
+ ret = request_irq(ctx->irq, irq_top,
|
|
+ IRQF_SHARED | (irqres->flags & IRQF_TRIGGER_MASK),
|
|
+ dev_name(dev), ctx);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR,
|
|
+ "Failed to request IRQ. irq=%u, ret=%d.",
|
|
+ ctx->irq,
|
|
+ ret);
|
|
+ goto workqueue_destroy;
|
|
+ }
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "Linlon v%x identified. cores=%u, nlsid=%u, id=%u, fuse=%08x, hw_id=%08x, hw_rev=%08x",
|
|
+ mvx_hwreg_get_hw_id(&ctx->hwreg, NULL, NULL),
|
|
+ mvx_hwreg_read(&ctx->hwreg, MVX_HWREG_NCORES),
|
|
+ mvx_hwreg_read(&ctx->hwreg, MVX_HWREG_NLSID),
|
|
+ dev->id, ctx->fuses, ctx->hw_id, ctx->hw_revision);
|
|
+
|
|
+ ret = mvx_pm_runtime_put_sync(ctx->dev);
|
|
+ if (ret < 0)
|
|
+ goto irq_free;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+irq_free:
|
|
+ free_irq(ctx->irq, ctx);
|
|
+
|
|
+workqueue_destroy:
|
|
+ destroy_workqueue(ctx->work_queue);
|
|
+
|
|
+destruct_sched:
|
|
+ mvx_sched_destruct(&ctx->scheduler);
|
|
+
|
|
+destruct_hwreg:
|
|
+ mvx_hwreg_destruct(&ctx->hwreg);
|
|
+
|
|
+destruct_dentry:
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ debugfs_remove_recursive(ctx->dentry);
|
|
+
|
|
+destroy_if:
|
|
+ mvx_if_destroy(ctx->if_ops);
|
|
+
|
|
+runtime_put:
|
|
+ pm_runtime_put_sync(ctx->dev);
|
|
+
|
|
+exit_reset:
|
|
+ reset_control_assert(ctx->rst);
|
|
+
|
|
+free_ctx:
|
|
+ devm_kfree(dev, ctx);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int mvx_dev_remove(struct mvx_dev_ctx *ctx)
|
|
+{
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "remove");
|
|
+ mvx_dvfs_deinit(ctx->dev);
|
|
+
|
|
+ mvx_if_destroy(ctx->if_ops);
|
|
+ free_irq(ctx->irq, ctx);
|
|
+ destroy_workqueue(ctx->work_queue);
|
|
+ mvx_sched_destruct(&ctx->scheduler);
|
|
+ mvx_hwreg_destruct(&ctx->hwreg);
|
|
+ dev_set_drvdata(ctx->dev, NULL);
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ debugfs_remove_recursive(ctx->dentry);
|
|
+
|
|
+ mvx_pm_disable_clk(ctx->dev);
|
|
+ reset_control_assert(ctx->rst);
|
|
+
|
|
+ devm_kfree(ctx->dev, ctx);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Platform driver
|
|
+ ****************************************************************************/
|
|
+
|
|
+static int mvx_pdev_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct resource iores;
|
|
+ struct resource irqres;
|
|
+ int irq;
|
|
+ int ret;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "probe");
|
|
+
|
|
+ /* Get resource. */
|
|
+ ret = of_address_to_resource(pdev->dev.of_node, 0, &iores);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR,
|
|
+ "Failed to get address of resource. ret=%d.",
|
|
+ ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ irq = platform_get_irq(pdev, 0);
|
|
+
|
|
+ if (irq < 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR,
|
|
+ "Failed to get IRQ resource.");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ irqres.start = irq;
|
|
+ irqres.end = irq;
|
|
+ irqres.flags = 0;
|
|
+
|
|
+ pm_runtime_enable(&pdev->dev);
|
|
+
|
|
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));
|
|
+
|
|
+ ret = mvx_dev_probe(&pdev->dev, &iores, &irqres);
|
|
+ if (ret != 0)
|
|
+ pm_runtime_disable(&pdev->dev);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int mvx_pdev_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = platform_get_drvdata(pdev);
|
|
+ int ret;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "pdev remove");
|
|
+
|
|
+ ret = mvx_dev_remove(ctx);
|
|
+
|
|
+ pm_runtime_disable(&pdev->dev);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PM
|
|
+static void reset_hw(struct device *dev)
|
|
+{
|
|
+ uint32_t ncores;
|
|
+ uint32_t corelsid_mask;
|
|
+ struct mvx_dev_ctx *ctx;
|
|
+ ctx = dev_get_drvdata(dev);
|
|
+
|
|
+ if (ctx->hwreg.registers != NULL) {
|
|
+ ncores = mvx_hwreg_read(&ctx->hwreg, MVX_HWREG_NCORES);
|
|
+ corelsid_mask = 0;
|
|
+ for (; ncores > 0; --ncores) {
|
|
+ corelsid_mask = (corelsid_mask << 4) | 0xF;
|
|
+ }
|
|
+ mvx_hwreg_write(&ctx->hwreg, MVX_HWREG_RESET, 1);
|
|
+ while (corelsid_mask != mvx_hwreg_read(&ctx->hwreg, MVX_HWREG_CORELSID)){}
|
|
+ mvx_hwreg_write(&ctx->hwreg, MVX_HWREG_CLKFORCE, 0);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int b_backto_active;
|
|
+
|
|
+static int mvx_pm_poweron(struct device *dev)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = dev_get_drvdata(dev);
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "mvx_pm_poweron");
|
|
+ mvx_pm_enable_clk(dev);
|
|
+ reset_hw(dev);
|
|
+ mvx_sched_resume(&ctx->scheduler);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mvx_pm_poweroff(struct device *dev)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = dev_get_drvdata(dev);
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "mvx_pm_poweroff");
|
|
+ mvx_sched_suspend(&ctx->scheduler);
|
|
+ mvx_pm_disable_clk(dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mvx_pm_suspend(struct device *dev)
|
|
+{
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "mvx_pm_suspend start. b_backto_active=%d", b_backto_active);
|
|
+
|
|
+ if (!pm_runtime_status_suspended(dev)) {
|
|
+ mvx_pm_poweroff(dev);
|
|
+ b_backto_active = true;
|
|
+ } else {
|
|
+ b_backto_active = false;
|
|
+ }
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "mvx_pm_suspend exit. b_backto_active=%d", b_backto_active);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mvx_pm_resume(struct device *dev)
|
|
+{
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "mvx_pm_resume start. b_backto_active=%d", b_backto_active);
|
|
+
|
|
+ if (b_backto_active) {
|
|
+ mvx_pm_poweron(dev);
|
|
+ }
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "mvx_pm_resume exit. b_backto_active=%d", b_backto_active);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mvx_pm_runtime_suspend(struct device *dev)
|
|
+{
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "mvx_pm_runtime_suspend");
|
|
+ mvx_pm_disable_clk(dev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mvx_pm_runtime_resume(struct device *dev)
|
|
+{
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "mvx_pm_runtime_resume");
|
|
+ mvx_pm_enable_clk(dev);
|
|
+ reset_hw(dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mvx_pm_runtime_idle(struct device *dev)
|
|
+{
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "mvx_pm_runtime_idle");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct dev_pm_ops mvx_dev_pm_ops = {
|
|
+ .suspend = mvx_pm_suspend,
|
|
+ .resume = mvx_pm_resume,
|
|
+ .runtime_suspend = mvx_pm_runtime_suspend,
|
|
+ .runtime_resume = mvx_pm_runtime_resume,
|
|
+ .runtime_idle = mvx_pm_runtime_idle,
|
|
+};
|
|
+#endif /* CONFIG_PM */
|
|
+
|
|
+static const struct of_device_id mvx_dev_match_table[] = {
|
|
+ { .compatible = "arm,mali-mve" },
|
|
+ { .compatible = "arm,mali-v500" },
|
|
+ { .compatible = "arm,mali-v550" },
|
|
+ { .compatible = "arm,mali-v61" },
|
|
+ { .compatible = "arm china,linlon-v5" },
|
|
+ { .compatible = "arm china,linlon-v7" },
|
|
+ { { 0 } }
|
|
+};
|
|
+
|
|
+static struct platform_driver mvx_dev_driver = {
|
|
+ .probe = mvx_pdev_probe,
|
|
+ .remove = mvx_pdev_remove,
|
|
+ .driver = {
|
|
+ .name = MVX_DEV_NAME,
|
|
+ .owner = THIS_MODULE,
|
|
+ .of_match_table = mvx_dev_match_table,
|
|
+#ifdef CONFIG_PM
|
|
+ .pm = &mvx_dev_pm_ops
|
|
+#endif /* CONFIG_PM */
|
|
+ }
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * PCI driver
|
|
+ ****************************************************************************/
|
|
+
|
|
+/* LCOV_EXCL_START */
|
|
+static int mvx_pci_probe(struct pci_dev *pdev,
|
|
+ const struct pci_device_id *id)
|
|
+{
|
|
+ static unsigned int dev_id;
|
|
+ struct resource irqres = {
|
|
+ .start = pdev->irq,
|
|
+ .end = pdev->irq,
|
|
+ .flags = 0
|
|
+ };
|
|
+ pdev->dev.id = dev_id++;
|
|
+ return mvx_dev_probe(&pdev->dev, &pdev->resource[1], &irqres);
|
|
+}
|
|
+
|
|
+static void mvx_pci_remove(struct pci_dev *pdev)
|
|
+{
|
|
+ struct mvx_dev_ctx *ctx = pci_get_drvdata(pdev);
|
|
+
|
|
+ mvx_dev_remove(ctx);
|
|
+}
|
|
+
|
|
+static struct pci_device_id mvx_pci_device_id[] = {
|
|
+ { PCI_DEVICE(MVX_PCI_VENDOR,
|
|
+ MVX_PCI_DEVICE) },
|
|
+ { 0, }
|
|
+};
|
|
+
|
|
+MODULE_DEVICE_TABLE(pci, mvx_pci_device_id);
|
|
+
|
|
+static struct pci_driver mvx_pci_driver = {
|
|
+ .name = MVX_DEV_NAME,
|
|
+ .id_table = mvx_pci_device_id,
|
|
+ .probe = mvx_pci_probe,
|
|
+ .remove = mvx_pci_remove
|
|
+};
|
|
+/* LCOV_EXCL_STOP */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported variables and functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_dev_init(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = platform_driver_register(&mvx_dev_driver);
|
|
+ if (ret != 0) {
|
|
+ pr_err("mvx_dev: Failed to register driver.\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /* LCOV_EXCL_START */
|
|
+ ret = pci_register_driver(&mvx_pci_driver);
|
|
+ if (ret != 0) {
|
|
+ pr_err("mvx_dev: Failed to register PCI driver.\n");
|
|
+ goto unregister_driver;
|
|
+ }
|
|
+
|
|
+ /* LCOV_EXCL_STOP */
|
|
+
|
|
+ return 0;
|
|
+
|
|
+unregister_driver:
|
|
+ platform_driver_unregister(&mvx_dev_driver); /* LCOV_EXCL_LINE */
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void mvx_dev_exit(void)
|
|
+{
|
|
+ pci_unregister_driver(&mvx_pci_driver); /* LCOV_EXCL_LINE */
|
|
+ platform_driver_unregister(&mvx_dev_driver);
|
|
+}
|
|
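
The struct mvx_client_ops table filled in by mvx_dev_probe() is the contract between this device layer and the if/ layer (mvx_if.c and mvx_session.c, which are part of the patch but not shown in this excerpt). The sketch below only illustrates how a holder of that table would drive it, assuming the declarations from mvx_if.h; example_query_and_register() and its local variables are invented names.

    /* Illustrative only: driving the client_ops table populated above.
     * Assumes the types from mvx_if.h; real call sites are in if/mvx_if.c.
     */
    #include <linux/err.h>
    #include "mvx_if.h"

    static int example_query_and_register(struct mvx_client_ops *client,
                                          struct mvx_if_session *isession)
    {
            struct mvx_client_session *csession;
            struct mvx_hw_ver ver;
            uint64_t formats = 0;

            client->get_hw_ver(client, &ver);
            client->get_formats(client, MVX_DIR_INPUT, &formats);

            csession = client->register_session(client, isession);
            if (IS_ERR(csession))
                    return PTR_ERR(csession);

            /* ... enqueue firmware messages, then ask to be scheduled. */
            client->switch_in(csession);

            client->unregister_session(csession);
            return 0;
    }
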
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_dev.h b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_dev.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_dev.h
|
|
@@ -0,0 +1,77 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_DEV_H_
|
|
+#define _MVX_DEV_H_
|
|
+
|
|
+#include <linux/reset.h>
|
|
+#include "mvx_hwreg.h"
|
|
+#include "mvx_scheduler.h"
|
|
+
|
|
+/**
|
|
+ * struct mvx_dev_ctx - Private context for the MVx dev device.
|
|
+ */
|
|
+struct mvx_dev_ctx {
|
|
+ struct device *dev;
|
|
+ struct mvx_if_ops *if_ops;
|
|
+ struct mvx_client_ops client_ops;
|
|
+ struct mvx_hwreg hwreg;
|
|
+ struct mvx_sched scheduler;
|
|
+ unsigned int irq;
|
|
+ struct workqueue_struct *work_queue;
|
|
+ struct work_struct work;
|
|
+ unsigned long irqve;
|
|
+ struct dentry *dentry;
|
|
+ struct clk* clock;
|
|
+ struct mutex pm_mutex;
|
|
+ uint32_t fuses;
|
|
+ uint32_t ncores;
|
|
+ enum mvx_hw_id hw_id;
|
|
+ uint32_t hw_revision;
|
|
+ uint32_t hw_patch;
|
|
+ struct reset_control *rst;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_dev_init() - Initialize the dev device.
|
|
+ */
|
|
+int mvx_dev_init(void);
|
|
+
|
|
+/**
|
|
+ * mvx_dev_exit() - Remove and exit the dev device.
|
|
+ */
|
|
+void mvx_dev_exit(void);
|
|
+
|
|
+#endif /* _MVX_DEV_H_ */
|
|
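
mvx_dev_init() and mvx_dev_exit() are called from mvx_driver.c, which is part of this patch but not included in this excerpt (it also brings up the log and if modules). A minimal module wrapper built on just these two entry points would look roughly like the sketch below; the function names are invented.

    /* Minimal sketch of a module wrapper around mvx_dev_init()/mvx_dev_exit().
     * The real entry point is mvx_driver.c, which also initializes the log
     * and if modules (not shown in this excerpt).
     */
    #include <linux/module.h>
    #include "mvx_dev.h"

    static int __init example_amvx_init(void)
    {
            return mvx_dev_init();
    }

    static void __exit example_amvx_exit(void)
    {
            mvx_dev_exit();
    }

    module_init(example_amvx_init);
    module_exit(example_amvx_exit);
    MODULE_LICENSE("GPL");
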
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg.c b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg.c
|
|
@@ -0,0 +1,469 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/device.h>
|
|
+#include <linux/of_address.h>
|
|
+#include <linux/clk.h>
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx_hwreg.h"
|
|
+#include "mvx_hwreg_v500.h"
|
|
+#include "mvx_hwreg_v550.h"
|
|
+#include "mvx_hwreg_v61.h"
|
|
+#include "mvx_hwreg_v52_v76.h"
|
|
+#include "mvx_pm_runtime.h"
|
|
+#include "mvx_dev.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+static unsigned int get_offset(enum mvx_hwreg_what what)
|
|
+{
|
|
+ switch (what) {
|
|
+ case MVX_HWREG_HARDWARE_ID:
|
|
+ return 0x0;
|
|
+ case MVX_HWREG_ENABLE:
|
|
+ return 0x4;
|
|
+ case MVX_HWREG_NCORES:
|
|
+ return 0x8;
|
|
+ case MVX_HWREG_NLSID:
|
|
+ return 0xc;
|
|
+ case MVX_HWREG_CORELSID:
|
|
+ return 0x10;
|
|
+ case MVX_HWREG_JOBQUEUE:
|
|
+ return 0x14;
|
|
+ case MVX_HWREG_IRQVE:
|
|
+ return 0x18;
|
|
+ case MVX_HWREG_CLKFORCE:
|
|
+ return 0x24;
|
|
+ case MVX_HWREG_FUSE:
|
|
+ return 0x34;
|
|
+ case MVX_HWREG_CONFIG:
|
|
+ return 0x38;
|
|
+ case MVX_HWREG_PROTCTRL:
|
|
+ return 0x40;
|
|
+ case MVX_HWREG_RESET:
|
|
+ return 0x50;
|
|
+ default:
|
|
+ return 0;
|
|
+ }
|
|
+}
|
|
+
|
|
+static unsigned int get_lsid_offset(unsigned int lsid,
|
|
+ enum mvx_hwreg_lsid what)
|
|
+{
|
|
+ unsigned int offset = 0x0200 + 0x40 * lsid;
|
|
+
|
|
+ switch (what) {
|
|
+ case MVX_HWREG_CTRL:
|
|
+ offset += 0x0;
|
|
+ break;
|
|
+ case MVX_HWREG_MMU_CTRL:
|
|
+ offset += 0x4;
|
|
+ break;
|
|
+ case MVX_HWREG_NPROT:
|
|
+ offset += 0x8;
|
|
+ break;
|
|
+ case MVX_HWREG_ALLOC:
|
|
+ offset += 0xc;
|
|
+ break;
|
|
+ case MVX_HWREG_FLUSH_ALL:
|
|
+ offset += 0x10;
|
|
+ break;
|
|
+ case MVX_HWREG_SCHED:
|
|
+ offset += 0x14;
|
|
+ break;
|
|
+ case MVX_HWREG_TERMINATE:
|
|
+ offset += 0x18;
|
|
+ break;
|
|
+ case MVX_HWREG_LIRQVE:
|
|
+ offset += 0x1c;
|
|
+ break;
|
|
+ case MVX_HWREG_IRQHOST:
|
|
+ offset += 0x20;
|
|
+ break;
|
|
+ case MVX_HWREG_INTSIG:
|
|
+ offset += 0x24;
|
|
+ break;
|
|
+ case MVX_HWREG_STREAMID:
|
|
+ offset += 0x2c;
|
|
+ break;
|
|
+ case MVX_HWREG_BUSATTR_0:
|
|
+ offset += 0x30;
|
|
+ break;
|
|
+ case MVX_HWREG_BUSATTR_1:
|
|
+ offset += 0x34;
|
|
+ break;
|
|
+ case MVX_HWREG_BUSATTR_2:
|
|
+ offset += 0x38;
|
|
+ break;
|
|
+ case MVX_HWREG_BUSATTR_3:
|
|
+ offset += 0x3c;
|
|
+ break;
|
|
+ default:
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ return offset;
|
|
+}
|
|
+
|
|
+static enum mvx_hw_id get_hw_id(void *registers,
|
|
+ uint32_t *revision,
|
|
+ uint32_t *patch)
|
|
+{
|
|
+ uint32_t value;
|
|
+
|
|
+ value = readl(registers);
|
|
+
|
|
+ if (revision != NULL)
|
|
+ *revision = (value >> 8) & 0xff;
|
|
+
|
|
+ if (patch != NULL)
|
|
+ *patch = value & 0xff;
|
|
+
|
|
+ switch (value >> 16) {
|
|
+ case 0x5650:
|
|
+ return MVE_v500;
|
|
+ case 0x5655:
|
|
+ return MVE_v550;
|
|
+ case 0x5660:
|
|
+ case 0x5661:
|
|
+ return MVE_v61;
|
|
+ case 0x5662:
|
|
+ case 0x5663:
|
|
+ return MVE_v52_v76;
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR,
|
|
+ "Unknown hardware version. version=0x%08x.",
|
|
+ value);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return MVE_Unknown;
|
|
+}
|
|
+
|
|
+static int regs_show(struct seq_file *s,
|
|
+ void *v)
|
|
+{
|
|
+ struct mvx_hwreg *hwreg = (struct mvx_hwreg *)s->private;
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_pm_runtime_get_sync(hwreg->dev);
|
|
+ if (ret < 0)
|
|
+ return 0;
|
|
+
|
|
+ seq_printf(s, "HARDWARE_ID = 0x%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_HARDWARE_ID));
|
|
+ seq_printf(s, "ENABLE = 0x%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_ENABLE));
|
|
+ seq_printf(s, "NCORES = 0x%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_NCORES));
|
|
+ seq_printf(s, "NLSID = 0x%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_NLSID));
|
|
+ seq_printf(s, "CORELSID = 0x%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_CORELSID));
|
|
+ seq_printf(s, "JOBQUEUE = 0x%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_JOBQUEUE));
|
|
+ seq_printf(s, "IRQVE = 0x%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_IRQVE));
|
|
+ seq_printf(s, "CLKFORCE = 0x%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_CLKFORCE));
|
|
+ seq_printf(s, "FUSE = 0x%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_FUSE));
|
|
+ seq_printf(s, "CONFIG = 0x%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_CONFIG));
|
|
+ seq_printf(s, "PROTCTRL = 0x%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_PROTCTRL));
|
|
+ seq_printf(s, "RESET = 0x%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_RESET));
|
|
+ seq_puts(s, "\n");
|
|
+
|
|
+ mvx_pm_runtime_put_sync(hwreg->dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int regs_open(struct inode *inode,
|
|
+ struct file *file)
|
|
+{
|
|
+ return single_open(file, regs_show, inode->i_private);
|
|
+}
|
|
+
|
|
+static const struct file_operations regs_fops = {
|
|
+ .open = regs_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+static int regs_debugfs_init(struct mvx_hwreg *hwreg,
|
|
+ struct dentry *parent)
|
|
+{
|
|
+ struct dentry *dentry;
|
|
+
|
|
+ dentry = debugfs_create_file("regs", 0400, parent, hwreg,
|
|
+ &regs_fops);
|
|
+ if (IS_ERR_OR_NULL(dentry))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int lsid_regs_show(struct seq_file *s,
|
|
+ void *v)
|
|
+{
|
|
+ struct mvx_lsid_hwreg *lsid_hwreg = (struct mvx_lsid_hwreg *)s->private;
|
|
+ struct mvx_hwreg *hwreg = lsid_hwreg->hwreg;
|
|
+ int lsid = lsid_hwreg->lsid;
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_pm_runtime_get_sync(hwreg->dev);
|
|
+ if (ret < 0)
|
|
+ return 0;
|
|
+
|
|
+ seq_printf(s, "CTRL = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_CTRL));
|
|
+ seq_printf(s, "MMU_CTRL = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_MMU_CTRL));
|
|
+ seq_printf(s, "NPROT = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_NPROT));
|
|
+ seq_printf(s, "ALLOC = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_ALLOC));
|
|
+ seq_printf(s, "FLUSH_ALL = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_FLUSH_ALL));
|
|
+ seq_printf(s, "SCHED = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_SCHED));
|
|
+ seq_printf(s, "TERMINATE = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_TERMINATE));
|
|
+ seq_printf(s, "LIRQVE = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_LIRQVE));
|
|
+ seq_printf(s, "IRQHOST = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_IRQHOST));
|
|
+ seq_printf(s, "INTSIG = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_INTSIG));
|
|
+ seq_printf(s, "STREAMID = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_STREAMID));
|
|
+ seq_printf(s, "BUSATTR_0 = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_BUSATTR_0));
|
|
+ seq_printf(s, "BUSATTR_1 = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_BUSATTR_1));
|
|
+ seq_printf(s, "BUSATTR_2 = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_BUSATTR_2));
|
|
+ seq_printf(s, "BUSATTR_3 = 0x%08x\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_BUSATTR_3));
|
|
+ seq_puts(s, "\n");
|
|
+
|
|
+ mvx_pm_runtime_put_sync(hwreg->dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int lsid_regs_open(struct inode *inode,
|
|
+ struct file *file)
|
|
+{
|
|
+ return single_open(file, lsid_regs_show, inode->i_private);
|
|
+}
|
|
+
|
|
+static const struct file_operations lsid_regs_fops = {
|
|
+ .open = lsid_regs_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+static int lsid_regs_debugfs_init(struct mvx_lsid_hwreg *lsid_hwreg,
|
|
+ struct dentry *parent)
|
|
+{
|
|
+ struct dentry *dentry;
|
|
+ char name[20];
|
|
+
|
|
+ scnprintf(name, sizeof(name), "lsid%u_regs", lsid_hwreg->lsid);
|
|
+
|
|
+ dentry = debugfs_create_file(name, 0400, parent, lsid_hwreg,
|
|
+ &lsid_regs_fops);
|
|
+ if (IS_ERR_OR_NULL(dentry))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int debugfs_init(struct mvx_hwreg *hwreg,
|
|
+ struct dentry *parent)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ int lsid;
|
|
+
|
|
+ ret = regs_debugfs_init(hwreg, parent);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ for (lsid = 0; lsid < MVX_LSID_MAX; ++lsid) {
|
|
+ ret = lsid_regs_debugfs_init(&hwreg->lsid_hwreg[lsid],
|
|
+ parent);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_hwreg_construct(struct mvx_hwreg *hwreg,
|
|
+ struct device *dev,
|
|
+ struct resource *res,
|
|
+ struct dentry *parent)
|
|
+{
|
|
+ char const *name = dev_name(dev);
|
|
+ enum mvx_hw_id hw_id;
|
|
+ int ret;
|
|
+ int lsid;
|
|
+ struct mvx_dev_ctx *ctx = dev_get_drvdata(dev);
|
|
+
|
|
+ hwreg->dev = dev;
|
|
+
|
|
+ hwreg->res = request_mem_region(res->start, resource_size(res), name);
|
|
+ if (hwreg->res == NULL) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR,
|
|
+ "Failed to request mem region. start=0x%llx, size=0x%llx.",
|
|
+ res->start, resource_size(res));
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ hwreg->registers = ioremap(res->start, resource_size(res));
|
|
+ if (hwreg->registers == NULL) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR,
|
|
+ "Failed to iomap region. start=0x%llx, size=0x%llx.",
|
|
+ res->start, resource_size(res));
|
|
+ ret = -ENOMEM;
|
|
+ goto release_mem;
|
|
+ }
|
|
+
|
|
+ hw_id = get_hw_id(hwreg->registers, &ctx->hw_revision, &ctx->hw_patch);
|
|
+ switch (hw_id) {
|
|
+ case MVE_v500:
|
|
+ hwreg->ops.get_formats = mvx_hwreg_get_formats_v500;
|
|
+ break;
|
|
+ case MVE_v550:
|
|
+ hwreg->ops.get_formats = mvx_hwreg_get_formats_v550;
|
|
+ break;
|
|
+ case MVE_v61:
|
|
+ hwreg->ops.get_formats = mvx_hwreg_get_formats_v61;
|
|
+ break;
|
|
+ case MVE_v52_v76:
|
|
+ hwreg->ops.get_formats = mvx_hwreg_get_formats_v52_v76;
|
|
+ break;
|
|
+ default:
|
|
+ ret = -EINVAL;
|
|
+ goto unmap_io;
|
|
+ }
|
|
+
|
|
+ ctx->hw_id = hw_id;
|
|
+
|
|
+ for (lsid = 0; lsid < MVX_LSID_MAX; ++lsid) {
|
|
+ hwreg->lsid_hwreg[lsid].hwreg = hwreg;
|
|
+ hwreg->lsid_hwreg[lsid].lsid = lsid;
|
|
+ }
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ ret = debugfs_init(hwreg, parent);
|
|
+ if (ret != 0)
|
|
+ goto unmap_io;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+unmap_io:
|
|
+ iounmap(hwreg->registers);
|
|
+
|
|
+release_mem:
|
|
+ release_mem_region(res->start, resource_size(res));
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void mvx_hwreg_destruct(struct mvx_hwreg *hwreg)
|
|
+{
|
|
+ iounmap(hwreg->registers);
|
|
+ release_mem_region(hwreg->res->start, resource_size(hwreg->res));
|
|
+}
|
|
+
|
|
+uint32_t mvx_hwreg_read(struct mvx_hwreg *hwreg,
|
|
+ enum mvx_hwreg_what what)
|
|
+{
|
|
+ unsigned int offset = get_offset(what);
|
|
+
|
|
+ return readl(hwreg->registers + offset);
|
|
+}
|
|
+
|
|
+void mvx_hwreg_write(struct mvx_hwreg *hwreg,
|
|
+ enum mvx_hwreg_what what,
|
|
+ uint32_t value)
|
|
+{
|
|
+ unsigned int offset = get_offset(what);
|
|
+
|
|
+ writel(value, hwreg->registers + offset);
|
|
+}
|
|
+
|
|
+uint32_t mvx_hwreg_read_lsid(struct mvx_hwreg *hwreg,
|
|
+ unsigned int lsid,
|
|
+ enum mvx_hwreg_lsid what)
|
|
+{
|
|
+ unsigned int offset = get_lsid_offset(lsid, what);
|
|
+
|
|
+ return readl(hwreg->registers + offset);
|
|
+}
|
|
+
|
|
+void mvx_hwreg_write_lsid(struct mvx_hwreg *hwreg,
|
|
+ unsigned int lsid,
|
|
+ enum mvx_hwreg_lsid what,
|
|
+ uint32_t value)
|
|
+{
|
|
+ unsigned int offset = get_lsid_offset(lsid, what);
|
|
+
|
|
+ writel(value, hwreg->registers + offset);
|
|
+}
|
|
+
|
|
+enum mvx_hw_id mvx_hwreg_get_hw_id(struct mvx_hwreg *hwreg,
|
|
+ uint32_t *revision,
|
|
+ uint32_t *patch)
|
|
+{
|
|
+ return get_hw_id(hwreg->registers, revision, patch);
|
|
+}
|
|
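
get_offset() and get_lsid_offset() above encode the register map used by the read/write helpers: global registers sit at small fixed offsets, and each LSID owns a 0x40-byte window starting at 0x0200. For example, the LIRQVE register of LSID 2 resolves to 0x0200 + 0x40 * 2 + 0x1c = 0x29c, which mvx_hwreg_write_lsid() adds to the mapped base before calling writel(). The snippet below merely restates that arithmetic and is not part of the patch; the names are invented.

    /* Worked example of the per-LSID register layout from get_lsid_offset().
     * Offsets copied from the function above.
     */
    #define EXAMPLE_LSID_BASE    0x0200
    #define EXAMPLE_LSID_STRIDE  0x40
    #define EXAMPLE_LIRQVE_OFF   0x1c

    static unsigned int example_lirqve_offset(unsigned int lsid)
    {
            /* lsid = 2  ->  0x0200 + 0x80 + 0x1c = 0x29c */
            return EXAMPLE_LSID_BASE + EXAMPLE_LSID_STRIDE * lsid +
                   EXAMPLE_LIRQVE_OFF;
    }
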
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg.h b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg.h
|
|
@@ -0,0 +1,230 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_HW_REG_
|
|
+#define _MVX_HW_REG_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/ioport.h>
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_lsid.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define MVX_HWREG_FUSE_DISABLE_AFBC (1 << 0)
|
|
+#define MVX_HWREG_FUSE_DISABLE_REAL (1 << 1)
|
|
+#define MVX_HWREG_FUSE_DISABLE_VPX (1 << 2)
|
|
+#define MVX_HWREG_FUSE_DISABLE_HEVC (1 << 3)
|
|
+
|
|
+#define MVE_JOBQUEUE_JOB_BITS 8
|
|
+#define MVE_JOBQUEUE_JOB_MASK ((1 << MVE_JOBQUEUE_JOB_BITS) - 1)
|
|
+#define MVE_JOBQUEUE_JOB_INVALID 0xf
|
|
+#define MVE_JOBQUEUE_NJOBS 4
|
|
+#define MVE_JOBQUEUE_LSID_SHIFT 0
|
|
+#define MVE_JOBQUEUE_LSID_BITS 4
|
|
+#define MVE_JOBQUEUE_LSID_MASK ((1 << MVE_JOBQUEUE_LSID_BITS) - 1)
|
|
+#define MVE_JOBQUEUE_NCORES_SHIFT 4
|
|
+#define MVE_JOBQUEUE_NCORES_BITS 4
|
|
+
|
|
+#define MVE_CORELSID_LSID_BITS 4
|
|
+#define MVX_CORELSID_LSID_MASK ((1 << MVE_CORELSID_LSID_BITS) - 1)
|
|
+
|
|
+#define MVE_CTRL_DISALLOW_SHIFT 0
|
|
+#define MVE_CTRL_DISALLOW_BITS 8
|
|
+#define MVE_CTRL_DISALLOW_MASK ((1 << MVE_CTRL_DISALLOW_BITS) - 1)
|
|
+#define MVE_CTRL_MAXCORES_SHIFT 8
|
|
+#define MVE_CTRL_MAXCORES_BITS 4
|
|
+#define MVE_CTRL_MAXCORES_MASK ((1 << MVE_CTRL_MAXCORES_BITS) - 1)
|
|
+
|
|
+#define MVE_ALLOC_FREE 0
|
|
+#define MVE_ALLOC_NON_PROTECTED 1
|
|
+#define MVE_ALLOC_PROTECTED 2
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct device;
|
|
+
|
|
+/**
|
|
+ * enum mvx_hwreg_what - Hardware registers that can be read or written.
|
|
+ */
|
|
+enum mvx_hwreg_what {
|
|
+ MVX_HWREG_HARDWARE_ID,
|
|
+ MVX_HWREG_ENABLE,
|
|
+ MVX_HWREG_NCORES,
|
|
+ MVX_HWREG_NLSID,
|
|
+ MVX_HWREG_CORELSID,
|
|
+ MVX_HWREG_JOBQUEUE,
|
|
+ MVX_HWREG_IRQVE,
|
|
+ MVX_HWREG_CLKFORCE,
|
|
+ MVX_HWREG_FUSE,
|
|
+ MVX_HWREG_PROTCTRL,
|
|
+ MVX_HWREG_RESET,
|
|
+ MVX_HWREG_CONFIG,
|
|
+ MVX_HWREG_WHAT_MAX
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_hwreg_lsid - Hardware registers per LSID.
|
|
+ */
|
|
+enum mvx_hwreg_lsid {
|
|
+ MVX_HWREG_CTRL,
|
|
+ MVX_HWREG_MMU_CTRL,
|
|
+ MVX_HWREG_NPROT,
|
|
+ MVX_HWREG_ALLOC,
|
|
+ MVX_HWREG_FLUSH_ALL,
|
|
+ MVX_HWREG_SCHED,
|
|
+ MVX_HWREG_TERMINATE,
|
|
+ MVX_HWREG_LIRQVE,
|
|
+ MVX_HWREG_IRQHOST,
|
|
+ MVX_HWREG_INTSIG,
|
|
+ MVX_HWREG_STREAMID,
|
|
+ MVX_HWREG_BUSATTR_0,
|
|
+ MVX_HWREG_BUSATTR_1,
|
|
+ MVX_HWREG_BUSATTR_2,
|
|
+ MVX_HWREG_BUSATTR_3,
|
|
+ MVX_HWREG_LSID_MAX
|
|
+};
|
|
+
|
|
+struct mvx_hwreg;
|
|
+
|
|
+/**
|
|
+ * struct mvx_lsid_hwreg - Helper struct used for debugfs reading of lsid
|
|
+ * dependent registers.
|
|
+ */
|
|
+struct mvx_lsid_hwreg {
|
|
+ struct mvx_hwreg *hwreg;
|
|
+ unsigned int lsid;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_hwreg - Context class for the hardware register interface.
|
|
+ */
|
|
+struct mvx_hwreg {
|
|
+ struct device *dev;
|
|
+ struct resource *res;
|
|
+ void *registers;
|
|
+ struct mvx_lsid_hwreg lsid_hwreg[MVX_LSID_MAX];
|
|
+ struct {
|
|
+ void (*get_formats)(enum mvx_direction direction,
|
|
+ uint64_t *formats);
|
|
+ } ops;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_hwreg_construct() - Construct the hardware register object.
|
|
+ * @hwreg: Pointer to hwreg object.
|
|
+ * @dev: Pointer to device struct.
|
|
+ * @res: Memory resource.
|
|
+ * @parent: Parent debugfs directory entry.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_hwreg_construct(struct mvx_hwreg *hwreg,
|
|
+ struct device *dev,
|
|
+ struct resource *res,
|
|
+ struct dentry *parent);
|
|
+
|
|
+/**
|
|
+ * mvx_hwreg_destruct() - Destroy the hardware register object.
|
|
+ * @hwreg: Pointer to hwreg object.
|
|
+ */
|
|
+void mvx_hwreg_destruct(struct mvx_hwreg *hwreg);
|
|
+
|
|
+/**
|
|
+ * mvx_hwreg_read() - Read hardware register.
|
|
+ * @hwreg: Pointer to hwreg object.
|
|
+ * @what: Which register to read.
|
|
+ *
|
|
+ * Return: Value of register.
|
|
+ */
|
|
+uint32_t mvx_hwreg_read(struct mvx_hwreg *hwreg,
|
|
+ enum mvx_hwreg_what what);
|
|
+
|
|
+/**
|
|
+ * mvx_hwreg_write() - Write hardware register.
|
|
+ * @hwreg: Pointer to hwreg object.
|
|
+ * @what: Which register to write.
|
|
+ * @value: Value to write.
|
|
+ */
|
|
+void mvx_hwreg_write(struct mvx_hwreg *hwreg,
|
|
+ enum mvx_hwreg_what what,
|
|
+ uint32_t value);
|
|
+
|
|
+/**
|
|
+ * mvx_hwreg_read_lsid() - Read LSID hardware register.
|
|
+ * @hwreg: Pointer to hwreg object.
|
|
+ * @lsid: LSID register index.
|
|
+ * @what: Which register to read.
|
|
+ *
|
|
+ * Return: Value of register.
|
|
+ */
|
|
+uint32_t mvx_hwreg_read_lsid(struct mvx_hwreg *hwreg,
|
|
+ unsigned int lsid,
|
|
+ enum mvx_hwreg_lsid what);
|
|
+
|
|
+/**
|
|
+ * mvx_hwreg_write_lsid() - Write LSID hardware register.
|
|
+ * @hwreg: Pointer to hwreg object.
|
|
+ * @lsid: LSID register index.
|
|
+ * @what: Which register to write.
|
|
+ * @value: Value to write.
|
|
+ */
|
|
+void mvx_hwreg_write_lsid(struct mvx_hwreg *hwreg,
|
|
+ unsigned int lsid,
|
|
+ enum mvx_hwreg_lsid what,
|
|
+ uint32_t value);
|
|
+
|
|
+/**
|
|
+ * mvx_hwreg_get_hw_id() - Get hardware id.
|
|
+ * @hwreg: Pointer to hwreg object.
|
|
+ * @revision: Hardware revision.
|
|
+ * @patch: Hardware patch revision.
|
|
+ *
|
|
+ * Return: Hardware id.
|
|
+ */
|
|
+enum mvx_hw_id mvx_hwreg_get_hw_id(struct mvx_hwreg *hwreg,
|
|
+ uint32_t *revision,
|
|
+ uint32_t *patch);
|
|
+
|
|
+#endif /* _MVX_HW_REG_ */
|
|
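
The accessors declared above are the only path the rest of the driver uses to touch VPU registers; mvx_dev_probe() and the scheduler are the real callers. The sketch below condenses the construct/read/destruct cycle for illustration only. It assumes the device's drvdata already points at the struct mvx_dev_ctx (mvx_hwreg_construct() stores the detected hardware version there), as arranged in mvx_dev_probe(); example_dump_fuses() is an invented name.

    /* Illustrative use of the hwreg API declared above. Assumes drvdata is
     * already a struct mvx_dev_ctx, as set up by mvx_dev_probe().
     */
    #include <linux/device.h>
    #include "mvx_hwreg.h"

    static int example_dump_fuses(struct device *dev, struct resource *iores,
                                  struct dentry *parent)
    {
            struct mvx_hwreg hwreg;
            uint32_t rev, patch;
            int ret;

            ret = mvx_hwreg_construct(&hwreg, dev, iores, parent);
            if (ret != 0)
                    return ret;

            dev_info(dev, "hw_id=%d rev=%u patch=%u fuse=0x%08x\n",
                     mvx_hwreg_get_hw_id(&hwreg, &rev, &patch), rev, patch,
                     mvx_hwreg_read(&hwreg, MVX_HWREG_FUSE));

            mvx_hwreg_destruct(&hwreg);
            return 0;
    }
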
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v500.c b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v500.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v500.c
|
|
@@ -0,0 +1,65 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include "mvx_bitops.h"
|
|
+#include "mvx_hwreg_v500.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+void mvx_hwreg_get_formats_v500(enum mvx_direction direction,
|
|
+ uint64_t *formats)
|
|
+{
|
|
+ if (direction == MVX_DIR_INPUT) {
|
|
+ mvx_set_bit(MVX_FORMAT_H263, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_H264, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_MPEG2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_MPEG4, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_RV, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VC1, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VP8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
|
|
+ } else {
|
|
+ mvx_set_bit(MVX_FORMAT_H264, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_HEVC, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VP8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
|
|
+ }
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v500.h b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v500.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v500.h
|
|
@@ -0,0 +1,48 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_HWREG_V500_H_
|
|
+#define _MVX_HWREG_V500_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include "mvx_if.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+void mvx_hwreg_get_formats_v500(enum mvx_direction direction,
|
|
+ uint64_t *formats);
|
|
+
|
|
+#endif /* _MVX_HWREG_V500_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v52_v76.c b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v52_v76.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v52_v76.c
|
|
@@ -0,0 +1,97 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/io.h>
|
|
+#include "mvx_bitops.h"
|
|
+#include "mvx_hwreg_v52_v76.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+void mvx_hwreg_get_formats_v52_v76(enum mvx_direction direction,
|
|
+ uint64_t *formats)
|
|
+{
|
|
+ if (direction == MVX_DIR_INPUT) {
|
|
+ mvx_set_bit(MVX_FORMAT_AVS, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_AVS2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_H263, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_H264, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_HEVC, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_JPEG, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_MPEG2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_MPEG4, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_RV, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VC1, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VP8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VP9, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AFBC_10, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_AFBC_10, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV21, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_P010, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_Y0L2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AQB1, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_YUY2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_UYVY, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_Y210, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_RGBA_8888, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_BGRA_8888, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_ARGB_8888, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_ABGR_8888, formats);
|
|
+ } else {
|
|
+ mvx_set_bit(MVX_FORMAT_H264, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_HEVC, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_JPEG, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VP8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VP9, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AFBC_10, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_AFBC_10, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV21, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_P010, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_Y0L2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AQB1, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_YUY2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_UYVY, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_Y210, formats);
|
|
+ }
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v52_v76.h b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v52_v76.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v52_v76.h
|
|
@@ -0,0 +1,49 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_HWREG_V52_V76_H_
|
|
+#define _MVX_HWREG_V52_V76_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include "mvx_hwreg.h"
|
|
+#include "mvx_if.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+void mvx_hwreg_get_formats_v52_v76(enum mvx_direction direction,
|
|
+ uint64_t *formats);
|
|
+
|
|
+#endif /* _MVX_HWREG_V52_V76_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v550.c b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v550.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v550.c
|
|
@@ -0,0 +1,82 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include "mvx_bitops.h"
|
|
+#include "mvx_hwreg_v550.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+void mvx_hwreg_get_formats_v550(enum mvx_direction direction,
|
|
+ uint64_t *formats)
|
|
+{
|
|
+ if (direction == MVX_DIR_INPUT) {
|
|
+ mvx_set_bit(MVX_FORMAT_H263, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_H264, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_HEVC, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_JPEG, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_MPEG2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_MPEG4, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_RV, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VC1, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VP8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV21, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_P010, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_Y0L2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AQB1, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_YUY2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_UYVY, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_Y210, formats);
|
|
+ } else {
|
|
+ mvx_set_bit(MVX_FORMAT_H264, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VP8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AFBC_10, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_AFBC_10, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV21, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_P010, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_Y0L2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AQB1, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_YUY2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_UYVY, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_Y210, formats);
|
|
+ }
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v550.h b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v550.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v550.h
|
|
@@ -0,0 +1,48 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_HWREG_V550_H_
|
|
+#define _MVX_HWREG_V550_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include "mvx_if.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+void mvx_hwreg_get_formats_v550(enum mvx_direction direction,
|
|
+ uint64_t *formats);
|
|
+
|
|
+#endif /* _MVX_HWREG_V550_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v61.c b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v61.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v61.c
|
|
@@ -0,0 +1,95 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/io.h>
|
|
+#include "mvx_bitops.h"
|
|
+#include "mvx_hwreg_v61.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+void mvx_hwreg_get_formats_v61(enum mvx_direction direction,
|
|
+ uint64_t *formats)
|
|
+{
|
|
+ if (direction == MVX_DIR_INPUT) {
|
|
+ mvx_set_bit(MVX_FORMAT_H263, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_H264, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_HEVC, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_JPEG, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_MPEG2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_MPEG4, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_RV, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VC1, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VP8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VP9, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AFBC_10, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_AFBC_10, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV21, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_P010, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_Y0L2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AQB1, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_YUY2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_UYVY, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_Y210, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_RGBA_8888, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_BGRA_8888, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_ARGB_8888, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_ABGR_8888, formats);
|
|
+ } else {
|
|
+ mvx_set_bit(MVX_FORMAT_H264, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_HEVC, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_JPEG, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VP8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_VP9, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AFBC_10, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_AFBC_10, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_NV21, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_P010, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_Y0L2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV420_AQB1, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_YUY2, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_UYVY, formats);
|
|
+ mvx_set_bit(MVX_FORMAT_YUV422_Y210, formats);
|
|
+ }
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v61.h b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v61.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_hwreg_v61.h
|
|
@@ -0,0 +1,49 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_HWREG_V61_H_
|
|
+#define _MVX_HWREG_V61_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include "mvx_hwreg.h"
|
|
+#include "mvx_if.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+void mvx_hwreg_get_formats_v61(enum mvx_direction direction,
|
|
+ uint64_t *formats);
|
|
+
|
|
+#endif /* _MVX_HWREG_V61_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_lsid.c b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_lsid.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_lsid.c
|
|
@@ -0,0 +1,342 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/device.h>
|
|
+#include <linux/of.h>
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_hwreg.h"
|
|
+#include "mvx_lsid.h"
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx_session.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Private functions
|
|
+ ****************************************************************************/
|
|
+
|
|
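+/* Return true if this LSID is currently allocated, i.e. not marked as free. */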
+static bool is_alloc(struct mvx_lsid *lsid)
|
|
+{
|
|
+ uint32_t alloc;
|
|
+
|
|
+ alloc = mvx_hwreg_read_lsid(lsid->hwreg, lsid->lsid,
|
|
+ MVX_HWREG_ALLOC);
|
|
+
|
|
+ return alloc != MVE_ALLOC_FREE;
|
|
+}
|
|
+
|
|
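+/* Extract the LSID assigned to the given core from a CORELSID register value. */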
+static uint32_t get_core_lsid(uint32_t reg,
|
|
+ unsigned int core)
|
|
+{
|
|
+ return (reg >> (MVE_CORELSID_LSID_BITS * core)) &
|
|
+ MVX_CORELSID_LSID_MASK;
|
|
+}
|
|
+
|
|
+static uint32_t get_jobqueue_job(uint32_t reg,
|
|
+ unsigned int nr)
|
|
+{
|
|
+ return (reg >> (MVE_JOBQUEUE_JOB_BITS * nr)) & MVE_JOBQUEUE_JOB_MASK;
|
|
+}
|
|
+
|
|
+static uint32_t set_jobqueue_job(uint32_t reg,
|
|
+ unsigned int nr,
|
|
+ uint32_t job)
|
|
+{
|
|
+ reg &= ~(MVE_JOBQUEUE_JOB_MASK << (nr * MVE_JOBQUEUE_JOB_BITS));
|
|
+ reg |= job << (MVE_JOBQUEUE_JOB_BITS * nr);
|
|
+ return reg;
|
|
+}
|
|
+
|
|
+static uint32_t get_jobqueue_lsid(uint32_t reg,
|
|
+ unsigned int nr)
|
|
+{
|
|
+ return (reg >> (MVE_JOBQUEUE_JOB_BITS * nr + MVE_JOBQUEUE_LSID_SHIFT)) &
|
|
+ MVE_JOBQUEUE_LSID_MASK;
|
|
+}
|
|
+
|
|
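+/* Encode an LSID and core count into job queue entry 'nr'. */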
+static uint32_t set_lsid_ncores(uint32_t reg,
|
|
+ unsigned int nr,
|
|
+ unsigned int lsid,
|
|
+ unsigned int ncores)
|
|
+{
|
|
+ reg &= ~(MVE_JOBQUEUE_JOB_MASK << (nr * MVE_JOBQUEUE_JOB_BITS));
|
|
+ reg |= ((lsid << MVE_JOBQUEUE_LSID_SHIFT) |
|
|
+ ((ncores - 1) << MVE_JOBQUEUE_NCORES_SHIFT)) <<
|
|
+ (nr * MVE_JOBQUEUE_JOB_BITS);
|
|
+
|
|
+ return reg;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_lsid_construct(struct mvx_lsid *lsid,
|
|
+ struct device *dev,
|
|
+ struct mvx_hwreg *hwreg,
|
|
+ unsigned int id)
|
|
+{
|
|
+ lsid->dev = dev;
|
|
+ lsid->hwreg = hwreg;
|
|
+ lsid->session = NULL;
|
|
+ lsid->lsid = id;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mvx_lsid_destruct(struct mvx_lsid *lsid)
|
|
+{}
|
|
+
|
|
+int mvx_lsid_map(struct mvx_lsid *lsid,
|
|
+ struct mvx_lsid_pcb *pcb)
|
|
+{
|
|
+ struct mvx_hwreg *hwreg = lsid->hwreg;
|
|
+ uint32_t alloc;
|
|
+ uint32_t busattr[4];
|
|
+ int ret;
|
|
+
|
|
+ /* Check that the LSID is not already allocated. */
|
|
+ if (is_alloc(lsid)) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "Failed to map session to LSID. LSID already allocated. lsid=%u.",
|
|
+ lsid->lsid);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ /* Allocate LSID. */
|
|
+ alloc = pcb->nprot == 0 ? MVE_ALLOC_PROTECTED : MVE_ALLOC_NON_PROTECTED;
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_ALLOC, alloc);
|
|
+
|
|
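+ /* Request termination and busy-wait until the hardware has cleared the bit. */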
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_TERMINATE, 1);
|
|
+ do {
|
|
+ ret = mvx_hwreg_read_lsid(hwreg, lsid->lsid,
|
|
+ MVX_HWREG_TERMINATE);
|
|
+ } while (ret != 0);
|
|
+
|
|
+ /* Configure number of cores to use and which cores to disable. */
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_CTRL,
|
|
+ pcb->ctrl);
|
|
+
|
|
+ /* Configure MMU L0 entry and flush MMU tables. */
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_MMU_CTRL,
|
|
+ pcb->mmu_ctrl);
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_FLUSH_ALL, 0);
|
|
+
|
|
+ if (of_property_read_u32_array(lsid->dev->of_node, "busattr", busattr,
|
|
+ ARRAY_SIZE(busattr))) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_VERBOSE,
|
|
+ "busattr in of_node is not available.");
|
|
+
|
|
+ /* We apply default values in this case. */
|
|
+ busattr[0] = 0;
|
|
+ busattr[1] = 0;
|
|
+ busattr[2] = 0x33;
|
|
+ busattr[3] = 0x33;
|
|
+ } else {
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(busattr); i++)
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_VERBOSE,
|
|
+ "busattr[%d] = 0x%x.", i,
|
|
+ busattr[i]);
|
|
+ }
|
|
+
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_BUSATTR_0,
|
|
+ busattr[0]);
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_BUSATTR_1,
|
|
+ busattr[1]);
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_BUSATTR_2,
|
|
+ busattr[2]);
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_BUSATTR_3,
|
|
+ busattr[3]);
|
|
+
|
|
+ /* Restore interrupt registers. */
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_LIRQVE, 0);
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_IRQHOST,
|
|
+ pcb->irqhost);
|
|
+
|
|
+ /*
|
|
+ * Make sure all register writes have completed before scheduling is
|
|
+ * enabled.
|
|
+ */
|
|
+ wmb();
|
|
+
|
|
+ /* Enable scheduling. */
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_SCHED, 1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mvx_lsid_unmap(struct mvx_lsid *lsid,
|
|
+ struct mvx_lsid_pcb *pcb)
|
|
+{
|
|
+ struct mvx_hwreg *hwreg = lsid->hwreg;
|
|
+
|
|
+ if (!is_alloc(lsid)) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "LSID was not allocated. lsid=%u.",
|
|
+ lsid->lsid);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Disable scheduling. */
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_SCHED, 0);
|
|
+
|
|
+ /* Store registers in process control block. */
|
|
+ pcb->ctrl = mvx_hwreg_read_lsid(hwreg, lsid->lsid, MVX_HWREG_CTRL);
|
|
+ pcb->mmu_ctrl = mvx_hwreg_read_lsid(hwreg, lsid->lsid,
|
|
+ MVX_HWREG_MMU_CTRL);
|
|
+ pcb->irqhost = mvx_hwreg_read_lsid(hwreg, lsid->lsid,
|
|
+ MVX_HWREG_IRQHOST);
|
|
+ pcb->nprot = mvx_hwreg_read_lsid(hwreg, lsid->lsid, MVX_HWREG_NPROT);
|
|
+
|
|
+ /* Deallocate LSID. */
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_ALLOC,
|
|
+ MVE_ALLOC_FREE);
|
|
+}
|
|
+
|
|
+int mvx_lsid_jobqueue_add(struct mvx_lsid *lsid,
|
|
+ unsigned int ncores)
|
|
+{
|
|
+ struct mvx_hwreg *hwreg = lsid->hwreg;
|
|
+ uint32_t jobqueue;
|
|
+ int i;
|
|
+
|
|
+ /* Disable scheduling. */
|
|
+ mvx_hwreg_write(hwreg, MVX_HWREG_ENABLE, 0);
|
|
+
|
|
+ jobqueue = mvx_hwreg_read(hwreg, MVX_HWREG_JOBQUEUE);
|
|
+
|
|
+ /* Search if the LSID is already in the job queue. */
|
|
+ for (i = 0; i < MVE_JOBQUEUE_NJOBS; i++)
|
|
+ if (get_jobqueue_lsid(jobqueue, i) == lsid->lsid)
|
|
+ goto jobqueue_enable;
|
|
+
|
|
+ /* Search for a free slot in the job queue. */
|
|
+ for (i = 0; i < MVE_JOBQUEUE_NJOBS; i++)
|
|
+ if (get_jobqueue_lsid(jobqueue, i) ==
|
|
+ MVE_JOBQUEUE_JOB_INVALID) {
|
|
+ jobqueue = set_lsid_ncores(jobqueue, i, lsid->lsid,
|
|
+ ncores);
|
|
+ mvx_hwreg_write(hwreg, MVX_HWREG_JOBQUEUE, jobqueue);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+jobqueue_enable:
|
|
+ /* Reenable scheduling. */
|
|
+ mvx_hwreg_write(hwreg, MVX_HWREG_ENABLE, 1);
|
|
+
|
|
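+ /* Return -EAGAIN if no free job queue slot was found. */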
+ return i < MVE_JOBQUEUE_NJOBS ? 0 : -EAGAIN;
|
|
+}
|
|
+
|
|
+void mvx_lsid_send_irq(struct mvx_lsid *lsid)
|
|
+{
|
|
+ struct mvx_hwreg *hwreg = lsid->hwreg;
|
|
+
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_IRQHOST, 1);
|
|
+}
|
|
+
|
|
+void mvx_lsid_flush_mmu(struct mvx_lsid *lsid)
|
|
+{
|
|
+ struct mvx_hwreg *hwreg = lsid->hwreg;
|
|
+
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_FLUSH_ALL, 0);
|
|
+}
|
|
+
|
|
+void mvx_lsid_terminate(struct mvx_lsid *lsid)
|
|
+{
|
|
+ struct mvx_hwreg *hwreg = lsid->hwreg;
|
|
+ uint32_t ret;
|
|
+
|
|
+ mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_TERMINATE, 1);
|
|
+
|
|
+ do {
|
|
+ ret = mvx_hwreg_read_lsid(hwreg, lsid->lsid,
|
|
+ MVX_HWREG_TERMINATE);
|
|
+ } while (ret != 0);
|
|
+}
|
|
+
|
|
+void mvx_lsid_jobqueue_remove(struct mvx_lsid *lsid)
|
|
+{
|
|
+ struct mvx_hwreg *hwreg = lsid->hwreg;
|
|
+ uint32_t jobqueue;
|
|
+ int i;
|
|
+ int j;
|
|
+ uint32_t ncores = mvx_hwreg_read(hwreg, MVX_HWREG_NCORES);
|
|
+
|
|
+ /* Disable scheduling. */
|
|
+ mvx_hwreg_write(hwreg, MVX_HWREG_ENABLE, 0);
|
|
+
|
|
+ jobqueue = mvx_hwreg_read(hwreg, MVX_HWREG_JOBQUEUE);
|
|
+
|
|
+ /* Copy job entries that do not match the LSID to be removed. */
|
|
+ for (i = 0, j = 0; i < MVE_JOBQUEUE_NJOBS; i++)
|
|
+ if (get_jobqueue_lsid(jobqueue, i) != lsid->lsid)
|
|
+ jobqueue = set_jobqueue_job(
|
|
+ jobqueue, j++, get_jobqueue_job(jobqueue, i));
|
|
+
|
|
+ /* Blank out remaining job entries. */
|
|
+ for (; j < MVE_JOBQUEUE_NJOBS; j++)
|
|
+ jobqueue = set_lsid_ncores(jobqueue, j,
|
|
+ MVE_JOBQUEUE_JOB_INVALID, ncores);
|
|
+
|
|
+ mvx_hwreg_write(hwreg, MVX_HWREG_JOBQUEUE, jobqueue);
|
|
+
|
|
+ /* Reenable scheduling. */
|
|
+ mvx_hwreg_write(hwreg, MVX_HWREG_ENABLE, 1);
|
|
+}
|
|
+
|
|
+bool mvx_lsid_idle(struct mvx_lsid *lsid)
|
|
+{
|
|
+ struct mvx_hwreg *hwreg = lsid->hwreg;
|
|
+ uint32_t jobqueue;
|
|
+ uint32_t corelsid;
|
|
+ uint32_t ncores;
|
|
+ uint32_t i;
|
|
+
|
|
+ jobqueue = mvx_hwreg_read(hwreg, MVX_HWREG_JOBQUEUE);
|
|
+ corelsid = mvx_hwreg_read(hwreg, MVX_HWREG_CORELSID);
|
|
+ ncores = mvx_hwreg_read(hwreg, MVX_HWREG_NCORES);
|
|
+
|
|
+ /* Check if LSID is found in job queue. */
|
|
+ for (i = 0; i < MVE_JOBQUEUE_NJOBS; i++)
|
|
+ if (get_jobqueue_lsid(jobqueue, i) == lsid->lsid)
|
|
+ return false;
|
|
+
|
|
+ /* Check if LSID is found in core lsid. */
|
|
+ for (i = 0; i < ncores; i++)
|
|
+ if (get_core_lsid(corelsid, i) == lsid->lsid)
|
|
+ return false;
|
|
+
|
|
+ return true;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_lsid.h b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_lsid.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_lsid.h
|
|
@@ -0,0 +1,166 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_LSID_H_
|
|
+#define _MVX_LSID_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/types.h>
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define MVX_LSID_MAX 4
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct device;
|
|
+struct mvx_hwreg;
|
|
+struct mvx_sched_session;
|
|
+
|
|
+/**
|
|
+ * struct mvx_lsid_pcb - LSID process control block.
|
|
+ *
|
|
+ * This structure is used to store the register map when a session is unmapped
|
|
+ * from an LSID, so it can be restored again when the session is remapped.
|
|
+ */
|
|
+struct mvx_lsid_pcb {
|
|
+ uint32_t ctrl;
|
|
+ uint32_t mmu_ctrl;
|
|
+ uint32_t irqhost;
|
|
+ uint32_t nprot;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_lsid - LSID class.
|
|
+ */
|
|
+struct mvx_lsid {
|
|
+ struct device *dev;
|
|
+ struct mvx_hwreg *hwreg;
|
|
+ struct mvx_sched_session *session;
|
|
+ unsigned int lsid;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_lsid_construct() - Construct the LSID object.
|
|
+ * @lsid: Pointer to LSID object.
|
|
+ * @dev: Pointer to device.
|
|
+ * @hwreg: Pointer to hwreg object.
|
|
+ * @id: LSID number.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_lsid_construct(struct mvx_lsid *lsid,
|
|
+ struct device *dev,
|
|
+ struct mvx_hwreg *hwreg,
|
|
+ unsigned int id);
|
|
+
|
|
+/**
|
|
+ * mvx_lsid_destruct() - Destruct the LSID object.
|
|
+ * @lsid: Pointer to LSID object.
|
|
+ */
|
|
+void mvx_lsid_destruct(struct mvx_lsid *lsid);
|
|
+
|
|
+/**
|
|
+ * mvx_lsid_map() - Map a session to this LSID.
|
|
+ * @lsid: Pointer to LSID object.
|
|
+ * @pcb: Process control block to be restored.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_lsid_map(struct mvx_lsid *lsid,
|
|
+ struct mvx_lsid_pcb *pcb);
|
|
+
|
|
+/**
|
|
+ * mvx_lsid_unmap() - Unmap session from LSID.
|
|
+ * @lsid: Pointer to LSID object.
|
|
+ * @pcb: Process control block where the registers are stored.
|
|
+ *
|
|
+ * An LSID must not be unmapped if it is present in the job queue or core LSID.
|
|
+ * It is the responsibility of the scheduler to guarantee that the LSID is idle
|
|
+ * before it is unmapped.
|
|
+ */
|
|
+void mvx_lsid_unmap(struct mvx_lsid *lsid,
|
|
+ struct mvx_lsid_pcb *pcb);
|
|
+
|
|
+/**
|
|
+ * mvx_lsid_jobqueue_add() - Add LSID to job queue.
|
|
+ * @lsid: Pointer to LSID object.
|
|
+ * @ncores: Number of cores to request.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_lsid_jobqueue_add(struct mvx_lsid *lsid,
|
|
+ unsigned int ncores);
|
|
+
|
|
+/**
|
|
+ * mvx_lsid_send_irq() - Send IRQ to firmware.
|
|
+ * @lsid: Pointer to LSID object.
|
|
+ */
|
|
+void mvx_lsid_send_irq(struct mvx_lsid *lsid);
|
|
+
|
|
+/**
|
|
+ * mvx_lsid_flush_mmu() - Flush MMU tables.
|
|
+ * @lsid: Pointer to LSID object.
|
|
+ */
|
|
+void mvx_lsid_flush_mmu(struct mvx_lsid *lsid);
|
|
+
|
|
+/**
|
|
+ * mvx_lsid_terminate() - Terminate the LSID.
|
|
+ * @lsid: Pointer to LSID object.
|
|
+ */
|
|
+void mvx_lsid_terminate(struct mvx_lsid *lsid);
|
|
+
|
|
+/**
|
|
+ * mvx_lsid_jobqueue_remove() - Remove LSID from job queue.
|
|
+ * @lsid: Pointer to LSID object.
|
|
+ */
|
|
+void mvx_lsid_jobqueue_remove(struct mvx_lsid *lsid);
|
|
+
|
|
+/**
|
|
+ * mvx_lsid_idle() - Check if LSID is idle.
|
|
+ * @lsid: Pointer to LSID object.
|
|
+ *
|
|
+ * Return: true if LSID is idle, else false.
|
|
+ */
|
|
+bool mvx_lsid_idle(struct mvx_lsid *lsid);
|
|
+
|
|
+#endif /* _MVX_LSID_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_scheduler.c b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_scheduler.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_scheduler.c
|
|
@@ -0,0 +1,944 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/errno.h>
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/workqueue.h>
|
|
+#include <linux/delay.h>
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_hwreg.h"
|
|
+#include "mvx_mmu.h"
|
|
+#include "mvx_scheduler.h"
|
|
+#include "mvx_session.h"
|
|
+#include "mvx_seq.h"
|
|
+#include "mvx_pm_runtime.h"
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx_dvfs.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+static struct mvx_lsid *find_free_lsid(struct mvx_sched *sched)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ for (i = 0; i < sched->nlsid; i++)
|
|
+ if (sched->lsid[i].session == NULL)
|
|
+ return &sched->lsid[i];
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
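+/* Find an LSID that is neither waiting in the job queue nor running on any core. */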
+static struct mvx_lsid *find_idle_lsid(struct mvx_sched *sched)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ for (i = 0; i < sched->nlsid; i++) {
|
|
+ bool idle;
|
|
+
|
|
+ idle = mvx_lsid_idle(&sched->lsid[i]);
|
|
+ if (idle != false)
|
|
+ return &sched->lsid[i];
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static int map_session(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session,
|
|
+ struct mvx_lsid *lsid)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO,
|
|
+ "%p Map LSID. lsid=%u, jobqueue=%08x, corelsid=%08x.",
|
|
+ mvx_if_session_to_session(session->isession),
|
|
+ lsid->lsid,
|
|
+ mvx_hwreg_read(sched->hwreg, MVX_HWREG_JOBQUEUE),
|
|
+ mvx_hwreg_read(sched->hwreg, MVX_HWREG_CORELSID));
|
|
+
|
|
+ ret = mvx_lsid_map(lsid, &session->pcb);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ session->lsid = lsid;
|
|
+ lsid->session = session;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void unmap_session(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session)
|
|
+{
|
|
+ struct mvx_lsid *lsid = session->lsid;
|
|
+
|
|
+ if (lsid == NULL)
|
|
+ return;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO,
|
|
+ "%p Unmap LSID. lsid=%u, jobqueue=%08x, corelsid=%08x.",
|
|
+ mvx_if_session_to_session(session->isession),
|
|
+ lsid->lsid,
|
|
+ mvx_hwreg_read(sched->hwreg, MVX_HWREG_JOBQUEUE),
|
|
+ mvx_hwreg_read(sched->hwreg, MVX_HWREG_CORELSID));
|
|
+
|
|
+ mvx_lsid_unmap(lsid, &session->pcb);
|
|
+ session->lsid = NULL;
|
|
+ lsid->session = NULL;
|
|
+}
|
|
+
|
|
+static struct list_head *list_find_node(struct list_head *list,
|
|
+ struct list_head *node)
|
|
+{
|
|
+ struct list_head *i;
|
|
+
|
|
+ list_for_each(i, list) {
|
|
+ if (i == node)
|
|
+ return i;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * pending list is only updated when sched is locked.
|
|
+ * a session can only be added once
|
|
+ *
|
|
+ * notify_list = []
|
|
+ * lock_sched
|
|
+ * for pending in pending_list:
|
|
+ * if is_mapped(pending):
|
|
+ * jobqueue.add(pending)
|
|
+ * pending_list.remove(pending)
|
|
+ * continue
|
|
+ *
|
|
+ * l = free_lsid
|
|
+ * if l is Nul:
|
|
+ * l = idle_lsid
|
|
+ * if l is Nul:
|
|
+ * break
|
|
+ * if is_mapped(l):
|
|
+ * s = session[l]
|
|
+ * unmap(s)
|
|
+ * notify_list.add(s)
|
|
+ *
|
|
+ * map(pending)
|
|
+ * jobqueue.add(pending)
|
|
+ * pending_list.remove(pending)
|
|
+ * unlock_sched
|
|
+ *
|
|
+ * for s in notify_list:
|
|
+ * session_notify(s)
|
|
+ * notify_list.remove(s)
|
|
+ */
|
|
+static void sched_task(struct work_struct *ws)
|
|
+{
|
|
+ struct mvx_sched *sched =
|
|
+ container_of(ws, struct mvx_sched, sched_task);
|
|
+ struct mvx_sched_session *pending;
|
|
+ struct mvx_sched_session *unmapped;
|
|
+ struct mvx_sched_session *tmp;
|
|
+ struct mvx_session *m_session;
|
|
+ LIST_HEAD(notify_list);
|
|
+ int ret;
|
|
+
|
|
+ mvx_pm_runtime_get_sync(sched->dev);
|
|
+ ret = mutex_lock_interruptible(&sched->mutex);
|
|
+ if (ret != 0) {
|
|
+ mvx_pm_runtime_put_sync(sched->dev);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Try to map sessions from pending queue while possible.
|
|
+ */
|
|
+ list_for_each_entry_safe(pending, tmp, &sched->pending, pending) {
|
|
+ struct mvx_lsid *lsid;
|
|
+
|
|
+ /*
|
|
+ * This session is already mapped to LSID.
|
|
+ * Just make sure it is scheduled.
|
|
+ */
|
|
+ if (pending->lsid != NULL) {
|
|
+ m_session = mvx_if_session_to_session(pending->isession);
|
|
+ if (sched->is_suspend == true && m_session != NULL && m_session->switched_in == false) {
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ ret = mvx_lsid_jobqueue_add(pending->lsid,
|
|
+ pending->isession->ncores);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "Cannot add pending session to job queue. csession=%p, mvx_session=%p",
|
|
+ pending,
|
|
+ mvx_if_session_to_session(
|
|
+ pending->isession));
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ pending->in_pending = false;
|
|
+ list_del(&pending->pending);
|
|
+ continue;
|
|
+ } else if (sched->is_suspend == true) {
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Find LSID to be used for the pending session. */
|
|
+ lsid = find_free_lsid(sched);
|
|
+ if (lsid == NULL)
|
|
+ lsid = find_idle_lsid(sched);
|
|
+
|
|
+ if (lsid == NULL)
|
|
+ break;
|
|
+
|
|
+ /*
|
|
+ * This LSID was mapped to some session. We have to notify
|
|
+ * the session about an irq in case there are messages in
|
|
+ * a message queue.
|
|
+ *
|
|
+ * Notifications are done after pending list is processed.
|
|
+ */
|
|
+ if (lsid->session != NULL) {
|
|
+ struct mvx_sched_session *unmapped = lsid->session;
|
|
+
|
|
+ unmap_session(sched, unmapped);
|
|
+
|
|
+ /*
|
|
+ * If the reference count is 0, then the session is
|
|
+ * about to be removed and should be ignored.
|
|
+ */
|
|
+ ret = kref_get_unless_zero(&unmapped->isession->kref);
|
|
+ if (ret != 0) {
|
|
+ if (list_find_node(&notify_list,
|
|
+ &unmapped->notify))
|
|
+ /*
|
|
+ * Consider a situation when a session
|
|
+ * that was unmapped from an LSID and added to
|
|
+ * notify_list was also present in the
|
|
+ * pending_list. It is possible that
|
|
+ * such a session will be mapped to the
|
|
+ * new LSID, executed by the hardware
|
|
+ * and switched to idle state while
|
|
+ * this function is still looping
|
|
+ * through the pending list.
|
|
+ *
|
|
+ * If it happens, then this session
|
|
+ * might be unmapped again in order to
|
|
+ * make room for another pending
|
|
+ * session. As a result we will try to
|
|
+ * add this session to notify_list
|
|
+ * again. This will break the notify list
|
|
+ * and could lead to crashes or hangs.
|
|
+ *
|
|
+ * However, it is safe just to skip
|
|
+ * adding the session to notify_list if
|
|
+ * it is already there, because it will
|
|
+ * be processed anyway.
|
|
+ */
|
|
+ kref_put(&unmapped->isession->kref,
|
|
+ unmapped->isession->release);
|
|
+ else
|
|
+ list_add_tail(&unmapped->notify,
|
|
+ &notify_list);
|
|
+ } else {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO,
|
|
+ "Ref is zero. csession=%p",
|
|
+ unmapped);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ ret = map_session(sched, pending, lsid);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "Cannot map pending session. csession=%p, mvx_session=%p",
|
|
+ pending,
|
|
+ mvx_if_session_to_session(
|
|
+ pending->isession));
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ ret = mvx_lsid_jobqueue_add(lsid, pending->isession->ncores);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "Cannot add pending session to job queue. csession=%p, mvx_session=%p",
|
|
+ pending,
|
|
+ mvx_if_session_to_session(
|
|
+ pending->isession));
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ pending->in_pending = false;
|
|
+ list_del(&pending->pending);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * It is important that the scheduler mutex is released before the
|
|
+ * callbacks to the if-module are invoked. The if-module may issue
|
|
+ * requests to the dev-module (for example switch_in()) that would
|
|
+ * otherwise deadlock.
|
|
+ */
|
|
+ mutex_unlock(&sched->mutex);
|
|
+
|
|
+ list_for_each_entry_safe(unmapped, tmp, &notify_list, notify) {
|
|
+ struct mvx_if_session *iunmapped = unmapped->isession;
|
|
+
|
|
+ list_del(&unmapped->notify);
|
|
+
|
|
+ mutex_lock(iunmapped->mutex);
|
|
+ sched->if_ops->irq(iunmapped);
|
|
+ ret = kref_put(&iunmapped->kref, iunmapped->release);
|
|
+ if (ret == 0)
|
|
+ mutex_unlock(iunmapped->mutex);
|
|
+ }
|
|
+
|
|
+ mvx_pm_runtime_put_sync(sched->dev);
|
|
+}
|
|
+
|
|
+static void sched_session_print(struct seq_file *s,
|
|
+ struct mvx_sched_session *session,
|
|
+ struct mvx_hwreg *hwreg,
|
|
+ int ind)
|
|
+{
|
|
+ struct mvx_lsid *lsid;
|
|
+
|
|
+ if (session == NULL)
|
|
+ return;
|
|
+
|
|
+ mvx_seq_printf(s, "Client session", ind, "%p\n", session->isession);
|
|
+ mvx_seq_printf(s, "Dev session", ind, "%p\n", session);
|
|
+ mvx_seq_printf(s, "MVX session", ind, "%p\n",
|
|
+ mvx_if_session_to_session(session->isession));
|
|
+
|
|
+ lsid = session->lsid;
|
|
+ if (lsid == NULL)
|
|
+ return;
|
|
+
|
|
+ mvx_seq_printf(s, "IRQ host", ind, "%d\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid->lsid,
|
|
+ MVX_HWREG_IRQHOST));
|
|
+ mvx_seq_printf(s, "IRQ MVE", ind, "%d\n",
|
|
+ mvx_hwreg_read_lsid(hwreg, lsid->lsid,
|
|
+ MVX_HWREG_LIRQVE));
|
|
+}
|
|
+
|
|
+static int sched_show(struct seq_file *s,
|
|
+ void *v)
|
|
+{
|
|
+ struct mvx_sched *sched = (struct mvx_sched *)s->private;
|
|
+ struct mvx_hwreg *hwreg = sched->hwreg;
|
|
+ struct mvx_sched_session *session;
|
|
+ int i;
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_pm_runtime_get_sync(hwreg->dev);
|
|
+ if (ret < 0)
|
|
+ return 0;
|
|
+
|
|
+ ret = mutex_lock_interruptible(&sched->mutex);
|
|
+ if (ret != 0) {
|
|
+ mvx_pm_runtime_put_sync(hwreg->dev);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ mvx_seq_printf(s, "Core LSID", 0, "%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_CORELSID));
|
|
+ mvx_seq_printf(s, "Job queue", 0, "%08x\n",
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_JOBQUEUE));
|
|
+ seq_puts(s, "\n");
|
|
+
|
|
+ seq_puts(s, "scheduled:\n");
|
|
+ for (i = 0; i < sched->nlsid; ++i) {
|
|
+ mvx_seq_printf(s, "LSID", 1, "%d\n", i);
|
|
+ session = sched->lsid[i].session;
|
|
+ sched_session_print(s, session, hwreg, 2);
|
|
+ }
|
|
+
|
|
+ seq_puts(s, "pending:\n");
|
|
+ i = 0;
|
|
+ list_for_each_entry(session, &sched->pending, pending) {
|
|
+ char tmp[10];
|
|
+
|
|
+ scnprintf(tmp, sizeof(tmp), "%d", i++);
|
|
+ mvx_seq_printf(s, tmp, 1, "\n");
|
|
+ sched_session_print(s, session, hwreg, 2);
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&sched->mutex);
|
|
+ mvx_pm_runtime_put_sync(hwreg->dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int sched_open(struct inode *inode,
|
|
+ struct file *file)
|
|
+{
|
|
+ return single_open(file, sched_show, inode->i_private);
|
|
+}
|
|
+
|
|
+static const struct file_operations sched_fops = {
|
|
+ .open = sched_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+int sched_debugfs_init(struct mvx_sched *sched,
|
|
+ struct dentry *parent)
|
|
+{
|
|
+ struct dentry *dentry;
|
|
+
|
|
+ dentry = debugfs_create_file("sched", 0400, parent, sched,
|
|
+ &sched_fops);
|
|
+ if (IS_ERR_OR_NULL(dentry))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_sched_construct(struct mvx_sched *sched,
|
|
+ struct device *dev,
|
|
+ struct mvx_if_ops *if_ops,
|
|
+ struct mvx_hwreg *hwreg,
|
|
+ struct dentry *parent)
|
|
+{
|
|
+ unsigned int lsid;
|
|
+ int ret;
|
|
+
|
|
+ sched->dev = dev;
|
|
+ sched->hwreg = hwreg;
|
|
+ sched->if_ops = if_ops;
|
|
+ sched->is_suspend = false;
|
|
+ mutex_init(&sched->mutex);
|
|
+ INIT_LIST_HEAD(&sched->pending);
|
|
+ INIT_WORK(&sched->sched_task, sched_task);
|
|
+ sched->sched_queue = create_singlethread_workqueue("mvx_sched");
|
|
+ if (!sched->sched_queue) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "Cannot create work queue");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ sched->nlsid = mvx_hwreg_read(hwreg, MVX_HWREG_NLSID);
|
|
+
|
|
+ for (lsid = 0; lsid < sched->nlsid; lsid++) {
|
|
+ ret = mvx_lsid_construct(&sched->lsid[lsid], dev, hwreg, lsid);
|
|
+ if (ret != 0)
|
|
+ goto destruct_lsid;
|
|
+ }
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ ret = sched_debugfs_init(sched, parent);
|
|
+ if (ret != 0)
|
|
+ goto destruct_lsid;
|
|
+ }
|
|
+
|
|
+ mvx_hwreg_write(hwreg, MVX_HWREG_RESET, 1);
|
|
+ mvx_hwreg_write(hwreg, MVX_HWREG_CLKFORCE, 0);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+destruct_lsid:
|
|
+ while (lsid-- > 0)
|
|
+ mvx_lsid_destruct(&sched->lsid[lsid]);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void mvx_sched_destruct(struct mvx_sched *sched)
|
|
+{
|
|
+ destroy_workqueue(sched->sched_queue);
|
|
+
|
|
+ while (sched->nlsid-- > 0)
|
|
+ mvx_lsid_destruct(&sched->lsid[sched->nlsid]);
|
|
+}
|
|
+
|
|
+int mvx_sched_session_construct(struct mvx_sched_session *session,
|
|
+ struct mvx_if_session *isession)
|
|
+{
|
|
+ uint32_t disallow;
|
|
+ uint32_t maxcores;
|
|
+
|
|
+ session->isession = isession;
|
|
+ INIT_LIST_HEAD(&session->pending);
|
|
+ INIT_LIST_HEAD(&session->notify);
|
|
+ session->lsid = NULL;
|
|
+ session->in_pending = false;
|
|
+
|
|
+ memset(&session->pcb, 0, sizeof(session->pcb));
|
|
+
|
|
+ disallow = (0xffffffff << isession->ncores) & MVE_CTRL_DISALLOW_MASK;
|
|
+ maxcores = isession->ncores & MVE_CTRL_MAXCORES_MASK;
|
|
+ session->pcb.ctrl = (disallow << MVE_CTRL_DISALLOW_SHIFT) |
|
|
+ (maxcores << MVE_CTRL_MAXCORES_SHIFT);
|
|
+
|
|
+ session->pcb.mmu_ctrl = isession->l0_pte;
|
|
+ session->pcb.nprot = isession->securevideo == false;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mvx_sched_session_destruct(struct mvx_sched_session *session)
|
|
+{}
|
|
+
|
|
+int mvx_sched_switch_in(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO,
|
|
+ "%p Switch in session. jobqueue=%08x, corelsid=%08x.",
|
|
+ mvx_if_session_to_session(session->isession),
|
|
+ mvx_hwreg_read(sched->hwreg, MVX_HWREG_JOBQUEUE),
|
|
+ mvx_hwreg_read(sched->hwreg, MVX_HWREG_CORELSID));
|
|
+
|
|
+ ret = mutex_lock_interruptible(&sched->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ if (session->in_pending) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_VERBOSE,
|
|
+ "Session is already in pending state.");
|
|
+ goto unlock_mutex;
|
|
+ }
|
|
+
|
|
+ session->in_pending = true;
|
|
+ list_add_tail(&session->pending, &sched->pending);
|
|
+ queue_work(sched->sched_queue, &sched->sched_task);
|
|
+
|
|
+unlock_mutex:
|
|
+ mutex_unlock(&sched->mutex);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_sched_send_irq(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session)
|
|
+{
|
|
+ mutex_lock(&sched->mutex);
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_DEBUG,
|
|
+ "%p Send irq. lsid=%d, jobqueue=%08x, corelsid=%08x.",
|
|
+ mvx_if_session_to_session(session->isession),
|
|
+ session->lsid == NULL ? -1 : session->lsid->lsid,
|
|
+ mvx_hwreg_read(sched->hwreg, MVX_HWREG_JOBQUEUE),
|
|
+ mvx_hwreg_read(sched->hwreg, MVX_HWREG_CORELSID));
|
|
+
|
|
+ if (session->lsid == NULL)
|
|
+ session->pcb.irqhost = 1;
|
|
+ else
|
|
+ mvx_lsid_send_irq(session->lsid);
|
|
+
|
|
+ mutex_unlock(&sched->mutex);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_sched_flush_mmu(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session)
|
|
+{
|
|
+ mutex_lock(&sched->mutex);
|
|
+
|
|
+ if (session->lsid != NULL)
|
|
+ mvx_lsid_flush_mmu(session->lsid);
|
|
+
|
|
+ mutex_unlock(&sched->mutex);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void print_session(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session,
|
|
+ struct mvx_session *s)
|
|
+{
|
|
+ int lsid = -1;
|
|
+ uint32_t irqve = 0;
|
|
+ uint32_t irqhost = 0;
|
|
+
|
|
+ if (session != NULL && session->lsid != NULL) {
|
|
+ struct mvx_hwreg *hwreg = sched->hwreg;
|
|
+
|
|
+ lsid = session->lsid->lsid;
|
|
+ irqve = mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_LIRQVE);
|
|
+ irqhost = mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_IRQHOST);
|
|
+ }
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "%p session=%p, lsid=%d, irqve=%08x, irqhost=%08x",
|
|
+ s, mvx_if_session_to_session(session->isession), lsid,
|
|
+ irqve, irqhost);
|
|
+}
|
|
+
|
|
+void mvx_sched_wait_session_idle(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session)
|
|
+{
|
|
+ bool is_idle = 0;
|
|
+ int wait_count = 20;
|
|
+
|
|
+ mutex_lock(&sched->mutex);
|
|
+
|
|
+ if (session->lsid != NULL) {
|
|
+ is_idle = mvx_lsid_idle(session->lsid);
|
|
+ if (is_idle == 0) {
|
|
+ do {
|
|
+ mutex_unlock(&sched->mutex);
|
|
+ msleep(50);
|
|
+ mutex_lock(&sched->mutex);
|
|
+ if (session->lsid != NULL) {
|
|
+ is_idle = mvx_lsid_idle(session->lsid);
|
|
+ } else {
|
|
+ is_idle = 1;
|
|
+ }
|
|
+ } while(wait_count-- && is_idle != 1);
|
|
+ }
|
|
+
|
|
+ if (is_idle == 0) {
|
|
+ mvx_lsid_terminate(session->lsid);
|
|
+ }
|
|
+ }
|
|
+ mutex_unlock(&sched->mutex);
|
|
+}
|
|
+
|
|
+void mvx_sched_print_debug(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session)
|
|
+{
|
|
+ struct mvx_hwreg *hwreg = sched->hwreg;
|
|
+ struct mvx_sched_session *pending;
|
|
+ struct mvx_sched_session *tmp;
|
|
+ struct mvx_session *s = mvx_if_session_to_session(session->isession);
|
|
+ unsigned int i;
|
|
+ int ret;
|
|
+
|
|
+ mvx_pm_runtime_get_sync(sched->dev);
|
|
+
|
|
+ ret = mutex_lock_interruptible(&sched->mutex);
|
|
+ if (ret != 0) {
|
|
+ mvx_pm_runtime_put_sync(sched->dev);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "%p Current session:", s);
|
|
+ print_session(sched, session, s);
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "%p Pending queue:", s);
|
|
+ list_for_each_entry_safe(pending, tmp, &sched->pending, pending) {
|
|
+ print_session(sched, pending, s);
|
|
+ }
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "%p Print register:", s);
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "%p jobqueue=%08x, corelsid=%08x, irqve=%08x,HARDWARE_ID=0x%08x,ENABLE=0x%08x,NCORES=0x%08x,NLSID=0x%08x,CLKFORCE=0x%08x,PROTCTRL=0x%08x,RESET=0x%08x,FUSE=0x%08x",
|
|
+ s,
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_JOBQUEUE),
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_CORELSID),
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_IRQVE),
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_HARDWARE_ID),
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_ENABLE),
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_NCORES),
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_NLSID),
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_CLKFORCE),
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_PROTCTRL),
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_RESET),
|
|
+ mvx_hwreg_read(hwreg, MVX_HWREG_FUSE));
|
|
+
|
|
+ for (i = 0; i < sched->nlsid; i++) {
|
|
+ struct mvx_sched_session *ss = sched->lsid[i].session;
|
|
+ struct mvx_session *ls = NULL;
|
|
+
|
|
+ if (ss != NULL)
|
|
+ ls = mvx_if_session_to_session(ss->isession);
|
|
+
|
|
+ MVX_LOG_PRINT(
|
|
+ &mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "%p lsid=%u, session=%p, irqve=%08x, irqhost=%08x,CTRL=0x%08x,MMU_CTRL=0x%08x,NPROT=0x%08x,ALLOC=0x%08x,FLUSH_ALL=0x%08x,SCHED=0x%08x,TERMINATE=0x%08x,INTSIG=0x%08x,STREAMID=0x%08x",
|
|
+ s, i, ls,
|
|
+ mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_LIRQVE),
|
|
+ mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_IRQHOST),
|
|
+ mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_CTRL),
|
|
+ mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_MMU_CTRL),
|
|
+ mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_NPROT),
|
|
+ mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_ALLOC),
|
|
+ mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_FLUSH_ALL),
|
|
+ mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_SCHED),
|
|
+ mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_TERMINATE),
|
|
+ mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_INTSIG),
|
|
+ mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_STREAMID));
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&sched->mutex);
|
|
+
|
|
+ mvx_pm_runtime_put_sync(sched->dev);
|
|
+}
|
|
+
|
|
+void mvx_sched_handle_irq(struct mvx_sched *sched,
|
|
+ unsigned int lsid)
|
|
+{
|
|
+ struct mvx_sched_session *session;
|
|
+ struct mvx_if_session *isession = NULL;
|
|
+ int ret;
|
|
+
|
|
+ ret = mutex_lock_interruptible(&sched->mutex);
|
|
+ if (ret != 0)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * If a session has been terminated/unmapped just before the IRQ bottom
|
|
+ * handler has been executed, then the session pointer will be NULL or
|
|
+ * may even point at a different session. This is a harmless
|
|
+ * situation.
|
|
+ *
|
|
+ * If the reference count is 0, then the session is about to be removed
|
|
+ * and should be ignored.
|
|
+ */
|
|
+ session = sched->lsid[lsid].session;
|
|
+ if (session != NULL) {
|
|
+ ret = kref_get_unless_zero(&session->isession->kref);
|
|
+ if (ret != 0)
|
|
+ isession = session->isession;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * It is important that the scheduler mutex is released before the
|
|
+ * callbacks to the if-module are invoked. The if-module may issue
|
|
+ * requests to the dev-module (for example switch_in()) that would
|
|
+ * otherwise deadlock.
|
|
+ */
|
|
+ mutex_unlock(&sched->mutex);
|
|
+
|
|
+ /* Inform if-session that an IRQ was received. */
|
|
+ if (isession != NULL) {
|
|
+ mutex_lock(isession->mutex);
|
|
+ sched->if_ops->irq(isession);
|
|
+ ret = kref_put(&isession->kref, isession->release);
|
|
+
|
|
+ if (ret == 0)
|
|
+ mutex_unlock(isession->mutex);
|
|
+ }
|
|
+
|
|
+ ret = mutex_lock_interruptible(&sched->mutex);
|
|
+ if (ret != 0)
|
|
+ return;
|
|
+
|
|
+ if (!list_empty(&sched->pending)) {
|
|
+ queue_work(sched->sched_queue, &sched->sched_task);
|
|
+ }
|
|
+ mutex_unlock(&sched->mutex);
|
|
+}
|
|
+
|
|
+void mvx_sched_terminate(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session)
|
|
+{
|
|
+ struct list_head *head;
|
|
+ struct list_head *tmp;
|
|
+
|
|
+ mutex_lock(&sched->mutex);
|
|
+
|
|
+ if (session->lsid != NULL) {
|
|
+ mvx_lsid_jobqueue_remove(session->lsid);
|
|
+ mvx_lsid_terminate(session->lsid);
|
|
+ unmap_session(sched, session);
|
|
+ }
|
|
+
|
|
+ list_for_each_safe(head, tmp, &sched->pending) {
|
|
+ if (head == &session->pending) {
|
|
+ list_del(head);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&sched->mutex);
|
|
+}
|
|
+
|
|
+static void suspend_session(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session)
|
|
+{
|
|
+ struct mvx_lsid *lsid = session->lsid;
|
|
+
|
|
+ if (lsid == NULL)
|
|
+ return;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "%p suspend_session LSID. lsid=%u, jobqueue=%08x, corelsid=%08x.",
|
|
+ mvx_if_session_to_session(session->isession),
|
|
+ lsid->lsid,
|
|
+ mvx_hwreg_read(sched->hwreg, MVX_HWREG_JOBQUEUE),
|
|
+ mvx_hwreg_read(sched->hwreg, MVX_HWREG_CORELSID));
|
|
+
|
|
+ mvx_lsid_unmap(lsid, &session->pcb);
|
|
+}
|
|
+
|
|
+static int resume_session(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session,
|
|
+ struct mvx_lsid *lsid)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "%p resume_session LSID. lsid=%u, jobqueue=%08x, corelsid=%08x.",
|
|
+ mvx_if_session_to_session(session->isession),
|
|
+ lsid->lsid,
|
|
+ mvx_hwreg_read(sched->hwreg, MVX_HWREG_JOBQUEUE),
|
|
+ mvx_hwreg_read(sched->hwreg, MVX_HWREG_CORELSID));
|
|
+
|
|
+ ret = mvx_lsid_map(lsid, &session->pcb);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void switch_out_session(struct mvx_sched *sched, unsigned int lsid_id)
|
|
+{
|
|
+ int i;
|
|
+ int ret;
|
|
+ struct mvx_session *session = NULL;
|
|
+ struct mvx_if_session *isession = NULL;
|
|
+ struct mvx_sched_session *sched_session = sched->lsid[lsid_id].session;
|
|
+ int wait_count = 20;
|
|
+
|
|
+ if (sched_session != NULL) {
|
|
+ ret = kref_get_unless_zero(&sched_session->isession->kref);
|
|
+ if (ret != 0)
|
|
+ isession = sched_session->isession;
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&sched->mutex);
|
|
+
|
|
+ if (isession != NULL) {
|
|
+ session = mvx_if_session_to_session(sched_session->isession);
|
|
+ mutex_lock(isession->mutex);
|
|
+ if (session != NULL) {
|
|
+ session->is_suspend = true;
|
|
+ }
|
|
+ ret = kref_put(&isession->kref, isession->release);
|
|
+
|
|
+ if (ret == 0)
|
|
+ mutex_unlock(isession->mutex);
|
|
+ }
|
|
+
|
|
+ mutex_lock(&sched->mutex);
|
|
+ for (i = 0; i < wait_count; i++) {
|
|
+ if (sched->lsid[lsid_id].session != NULL && session != NULL && session->switched_in == false) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "%p finish switch_out session LSID. lsid=%u.", session, sched_session->lsid->lsid);
|
|
+ break;
|
|
+ }
|
|
+ mutex_unlock(&sched->mutex);
|
|
+ msleep(10);
|
|
+
|
|
+ mutex_lock(&sched->mutex);
|
|
+
|
|
+ if (sched->lsid[lsid_id].session != NULL && session != NULL) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "%p wait switch_out session LSID. lsid=%u. loop=%d, switch_in=%d", session, sched_session->lsid->lsid, i, session->switched_in);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static int switch_in_session(struct mvx_sched_session *sched_session)
|
|
+{
|
|
+ int ret;
|
|
+ struct mvx_session *session;
|
|
+ session = mvx_if_session_to_session(sched_session->isession);
|
|
+ if (session == NULL) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "session is null when switch in.");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ session->is_suspend = false;
|
|
+ if (session->fw.msg_pending > 0) {
|
|
+ session->idle_count = 0;
|
|
+
|
|
+ if (session->switched_in != false)
|
|
+ return 0;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "switch_in_session.");
|
|
+
|
|
+ ret = session->client_ops->switch_in(session->csession);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "Failed to switch in session.");
|
|
+ session->error = ret;
|
|
+ wake_up(&session->waitq);
|
|
+ session->event(session, MVX_SESSION_EVENT_ERROR, (void *)session->error);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ session->switched_in = true;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mvx_sched_suspend(struct mvx_sched *sched)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ mutex_lock(&sched->mutex);
|
|
+ sched->is_suspend = true;
|
|
+ mvx_dvfs_suspend_session();
|
|
+
|
|
+ for (i = 0; i < sched->nlsid; i++) {
|
|
+ if (sched->lsid[i].session != NULL) {
|
|
+ switch_out_session(sched, i);
|
|
+ if (sched->lsid[i].session != NULL) {
|
|
+ mvx_lsid_terminate(sched->lsid[i].session->lsid);
|
|
+ suspend_session(sched, sched->lsid[i].session);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&sched->mutex);
|
|
+}
|
|
+
|
|
+void mvx_sched_resume(struct mvx_sched *sched)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ mutex_lock(&sched->mutex);
|
|
+
|
|
+ for (i = 0; i < sched->nlsid; i++) {
|
|
+ if (sched->lsid[i].session != NULL) {
|
|
+ resume_session(sched, sched->lsid[i].session, &(sched->lsid[i]));
|
|
+ switch_in_session(sched->lsid[i].session);
|
|
+ }
|
|
+ }
|
|
+ sched->is_suspend = false;
|
|
+ mvx_dvfs_resume_session();
|
|
+ mutex_unlock(&sched->mutex);
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_scheduler.h b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_scheduler.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/dev/mvx_scheduler.h
|
|
@@ -0,0 +1,201 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_SCHEDULER_H_
|
|
+#define _MVX_SCHEDULER_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/kref.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
|
|
+#include "mvx_lsid.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct mvx_if_ops;
|
|
+struct mvx_hwreg;
|
|
+
|
|
+/**
|
|
+ * struct mvx_sched - Scheduler class.
|
|
+ * @dev: Pointer to device.
|
|
+ * @if_ops: Pointer to if module operations.
|
|
+ * @hwreg: Pointer to hwreg.
|
|
+ * @mutex: Mutex protecting the scheduler.
|
|
+ * @pending: List of sessions pending scheduling.
|
|
+ * @nlsid: Number of LSID.
|
|
+ * @lsid: Array of LSID instances.
+ * @sched_task: Work item that runs the scheduling task.
+ * @sched_queue: Single threaded work queue that executes @sched_task.
+ * @is_suspend: True while the scheduler is suspended.
|
|
+ */
|
|
+struct mvx_sched {
|
|
+ struct device *dev;
|
|
+ struct mvx_if_ops *if_ops;
|
|
+ struct mvx_hwreg *hwreg;
|
|
+ struct mutex mutex;
|
|
+ struct list_head pending;
|
|
+ unsigned int nlsid;
|
|
+ struct mvx_lsid lsid[MVX_LSID_MAX];
|
|
+ struct work_struct sched_task;
|
|
+ struct workqueue_struct *sched_queue;
|
|
+ bool is_suspend;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_sched_session - Client session class.
|
|
+ * @isession: Pointer to if session.
|
|
+ * @pending: List head used to insert the session into the scheduler
+ *           pending list.
+ * @notify: List head used to queue the session for IRQ notification to the
+ *          if-module.
|
|
+ * @lsid: Pointer to LSID the session is mapped to.
|
|
+ * @pcb: LSID pcb.
+ * @in_pending: True while the session is queued on the scheduler pending list.
|
|
+ *
|
|
+ * This struct is used to keep track of session-specific information.
|
|
+ */
|
|
+struct mvx_sched_session {
|
|
+ struct mvx_if_session *isession;
|
|
+ struct list_head pending;
|
|
+ struct list_head notify;
|
|
+ struct mvx_lsid *lsid;
|
|
+ struct mvx_lsid_pcb pcb;
|
|
+ bool in_pending;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_sched_construct() - Construct the scheduler object.
|
|
+ * @sched: Pointer to scheduler object.
|
|
+ * @dev: Pointer to device.
|
|
+ * @if_ops: Pointer to if ops.
|
|
+ * @hwreg: Pointer to hwreg.
|
|
+ * @parent: Pointer to parent debugfs directory entry.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_sched_construct(struct mvx_sched *sched,
|
|
+ struct device *dev,
|
|
+ struct mvx_if_ops *if_ops,
|
|
+ struct mvx_hwreg *hwreg,
|
|
+ struct dentry *parent);
|
|
+
|
|
+/**
|
|
+ * mvx_sched_destruct() - Destruct the scheduler object.
|
|
+ * @sched: Pointer to scheduler object.
|
|
+ */
|
|
+void mvx_sched_destruct(struct mvx_sched *sched);
|
|
+
|
|
+/**
|
|
+ * mvx_sched_session_construct() - Construct the scheduler session object.
|
|
|
|
+ * @session: Pointer to session object.
|
|
+ * @isession: Pointer to if session.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_sched_session_construct(struct mvx_sched_session *session,
|
|
+ struct mvx_if_session *isession);
|
|
+
|
|
+/**
|
|
+ * mvx_sched_session_destruct() - Destruct the scheduler session object.
|
|
+ * @session: Pointer to session object.
|
|
+ *
|
|
+ * The client must make sure the session is terminated before the destructor
|
|
+ * is called.
|
|
+ */
|
|
+void mvx_sched_session_destruct(struct mvx_sched_session *session);
|
|
+
|
|
+/**
|
|
+ * mvx_sched_switch_in() - Switch in a session.
|
|
+ * @sched: Pointer to scheduler object.
|
|
+ * @session: Pointer to session object.
|
|
+ *
|
|
+ * Map a session to a LSID and schedule session for execution. If no LSID
|
|
+ * is available the session is placed in the pending queue.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_sched_switch_in(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session);
|
|
+
|
|
+/**
|
|
+ * mvx_sched_send_irq() - Send IRQ to session.
|
|
+ * @sched: Pointer to scheduler object.
|
|
+ * @session: Pointer to session object.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_sched_send_irq(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session);
|
|
+
|
|
+/**
|
|
+ * mvx_sched_flush_mmu() - Flush MMU tables.
|
|
+ * @sched: Pointer to scheduler object.
|
|
+ * @session: Pointer to session object.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_sched_flush_mmu(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session);
|
|
+
|
|
+/**
|
|
+ * mvx_sched_handle_irq() - Handle interrupt for a LSID.
|
|
+ * @sched: Pointer to scheduler object.
|
|
+ * @lsid: LSID number.
|
|
+ */
|
|
+void mvx_sched_handle_irq(struct mvx_sched *sched,
|
|
+ unsigned int lsid);
|
|
+
|
|
+/**
|
|
+ * mvx_sched_terminate() - Terminate a session.
|
|
+ * @sched: Pointer to scheduler object.
|
|
+ * @session: Pointer to session object.
|
|
+ */
|
|
+void mvx_sched_terminate(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session);
|
|
+
|
|
+/**
|
|
+ * mvx_sched_print_debug() - Print debug information.
|
|
+ * @sched: Pointer to scheduler object.
|
|
+ * @session: Pointer to session object.
|
|
+ */
|
|
+void mvx_sched_print_debug(struct mvx_sched *sched,
|
|
+ struct mvx_sched_session *session);
|
|
+
|
|
+/**
+ * mvx_sched_suspend() - Switch out, terminate and unmap all scheduled sessions.
+ * @sched: Pointer to scheduler object.
+ */
+void mvx_sched_suspend(struct mvx_sched *sched);
|
|
+
|
|
+/**
+ * mvx_sched_resume() - Remap and switch previously suspended sessions back in.
+ * @sched: Pointer to scheduler object.
+ */
+void mvx_sched_resume(struct mvx_sched *sched);
|
|
+
|
|
+/**
+ * mvx_sched_wait_session_idle() - Wait for the session's LSID to become idle.
+ * @sched: Pointer to scheduler object.
+ * @session: Pointer to session object.
+ *
+ * The LSID is terminated if it does not become idle within the wait period.
+ */
+void mvx_sched_wait_session_idle(struct mvx_sched *sched, struct mvx_sched_session *session);
|
|
+
|
|
+#endif /* _MVX_SCHEDULER_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/external/fw_v2/mve_protocol_def.h b/drivers/media/platform/spacemit/vpu_k1x/external/fw_v2/mve_protocol_def.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/external/fw_v2/mve_protocol_def.h
|
|
@@ -0,0 +1,1776 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/*
|
|
+ * Copyright:
|
|
+ * ----------------------------------------------------------------------------
|
|
+ * This confidential and proprietary software may be used only as authorized
|
|
+ * by a licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * The entire notice above must be reproduced on all authorized copies and
|
|
+ * copies may only be made to the extent permitted by a licensing agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ * ----------------------------------------------------------------------------
|
|
+ */
|
|
+#ifndef __FW_INCLUDE__MVE_PROTOCOL_DEF_H__
|
|
+#define __FW_INCLUDE__MVE_PROTOCOL_DEF_H__
|
|
+
|
|
+#ifdef __cplusplus
|
|
+extern "C" {
|
|
+#endif
|
|
+
|
|
+#ifdef __KERNEL__
|
|
+#include <linux/types.h>
|
|
+#else
|
|
+#include <stdint.h>
|
|
+#endif
|
|
+
|
|
+/*****************************************************************************
|
|
+ *
|
|
+ * Communication protocol between the host/driver and the MVE firmware,
|
|
+ * the 'host interface'.
|
|
+ *
|
|
+ * MVE == LINLON Video Engine
|
|
+ *
|
|
+ * Protocol version 2.5
|
|
+ *
|
|
+ * Note: Message structs may be expanded in the future; the host should
|
|
+ * use the 'size' of the message to determine how many bytes to
|
|
+ * read from the message queue, rather than a sizeof(struct).
|
|
+ *
|
|
+ ****************************************************************************/
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/*****************************************************************************
|
|
+ *
|
|
+ * Virtual memory regions
|
|
+ *
|
|
+ * ..._ADDR_BEGIN gives the starting virtual address of the region,
|
|
+ * and ..._ADDR_END the (non-inclusive) ending address, such that
|
|
+ * the size of the region is obtained with the subtraction
|
|
+ * (..._ADDR_END - ..._ADDR_BEGIN).
|
|
+ *
|
|
+ ****************************************************************************/
|
|
+
|
|
+/* Memory region for first firmware instance */
|
|
+#define MVE_MEM_REGION_FW_INSTANCE0_ADDR_BEGIN (0x00000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE0_ADDR_END (0x000FFFFFu + 1)
|
|
+
|
|
+/*
|
|
+ * Areas for communication between host and MVE are placed in the interval
|
|
+ * 0x10079000 - 0x1007FFFF, see special defines further down.
|
|
+ */
|
|
+
|
|
+/* PROTECTED virtual memory region */
|
|
+#define MVE_MEM_REGION_PROTECTED_ADDR_BEGIN (0x20000000u)
|
|
+#define MVE_MEM_REGION_PROTECTED_ADDR_END (0x4FFFFFFFu + 1)
|
|
+
|
|
+/* FRAMEBUF virtual memory region */
|
|
+#define MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN (0x50000000u)
|
|
+#define MVE_MEM_REGION_FRAMEBUF_ADDR_END (0x7FFFFFFFu + 1)
|
|
+
|
|
+/* Memory regions for other firmware instances */
|
|
+#define MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN (0x80000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE1_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+#define MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN (0x90000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE2_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+#define MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN (0xA0000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE3_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+#define MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN (0xB0000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE4_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+#define MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN (0xC0000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE5_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+#define MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN (0xD0000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE6_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+#define MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN (0xE0000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE7_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+/* 0xF0000000 - 0xFFFFFFFF is used internally in MVE */
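A small worked example of the ..._ADDR_BEGIN/..._ADDR_END convention described above: the size of a region is simply the difference of the two constants. The macro below is not part of the protocol header, it is only illustrative.

```c
/* Illustrative helper only, not defined by the protocol header. */
#define MVE_MEM_REGION_SIZE(name) \
	(MVE_MEM_REGION_##name##_ADDR_END - MVE_MEM_REGION_##name##_ADDR_BEGIN)

/* MVE_MEM_REGION_SIZE(FW_INSTANCE0) == 0x00100000 (1 MiB)   */
/* MVE_MEM_REGION_SIZE(PROTECTED)    == 0x30000000 (768 MiB) */
/* MVE_MEM_REGION_SIZE(FRAMEBUF)     == 0x30000000 (768 MiB) */
```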
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/*****************************************************************************
|
|
+ *
|
|
+ * Communication queues between HOST/DRIVER and MVE
|
|
+ *
|
|
+ * Address for queue for messages in to MVE,
|
|
+ * one struct mve_comm_area_host located here
|
|
+ *
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define MVE_COMM_MSG_INQ_ADDR (0x10079000u)
|
|
+
|
|
+/* Address for queue for messages out from MVE,
|
|
+ * one struct mve_comm_area_mve located here
|
|
+ */
|
|
+#define MVE_COMM_MSG_OUTQ_ADDR (0x1007A000u)
|
|
+
|
|
+/* Address for queue for input buffers in to MVE,
|
|
+ * one struct mve_comm_area_host located here
|
|
+ */
|
|
+#define MVE_COMM_BUF_INQ_ADDR (0x1007B000u)
|
|
+
|
|
+/* Address for queue for input buffers returned from MVE,
|
|
+ * one struct mve_comm_area_mve located here
|
|
+ */
|
|
+#define MVE_COMM_BUF_INRQ_ADDR (0x1007C000u)
|
|
+
|
|
+/* Address for queue for output buffers in to MVE,
|
|
+ * one struct mve_comm_area_host located here
|
|
+ */
|
|
+#define MVE_COMM_BUF_OUTQ_ADDR (0x1007D000u)
|
|
+
|
|
+/* Address for queue for output buffers returned from MVE,
|
|
+ * one struct mve_comm_area_mve located here
|
|
+ */
|
|
+#define MVE_COMM_BUF_OUTRQ_ADDR (0x1007E000u)
|
|
+
|
|
+/* One struct mve_rpc_communication_area located here */
|
|
+#define MVE_COMM_RPC_ADDR (0x1007F000u)
|
|
+
|
|
+/* Address for ram_print buffer in FW */
|
|
+#define MVE_FW_PRINT_RAM_ADDR (0x10100000u)
|
|
+#define MVE_FW_PRINT_RAM_SIZE (0x80000u)
|
|
+
|
|
+/* One page of memory (4 kB) is used for each queue,
|
|
+ * so maximum 1024 words, but need room for some counters as well,
|
|
+ * see structs mve_comm_area_mve and mve_comm_area_host below.
|
|
+ */
|
|
+#define MVE_COMM_QUEUE_SIZE_IN_WORDS 1020
|
|
+
|
|
+/* This is the part of the message area that is written by host. */
|
|
+struct mve_comm_area_host
|
|
+{
|
|
+ volatile uint16_t out_rpos;
|
|
+ volatile uint16_t in_wpos;
|
|
+ volatile uint32_t reserved[ 3 ];
|
|
+ /*
|
|
+ * Queue of messages to MVE, each block of data prefixed with
|
|
+ * a mve_msg_header
|
|
+ */
|
|
+ volatile uint32_t in_data[ MVE_COMM_QUEUE_SIZE_IN_WORDS ];
|
|
+};
|
|
+
|
|
+/* This is the part of the message area that is written by MVE. */
|
|
+struct mve_comm_area_mve
|
|
+{
|
|
+ volatile uint16_t out_wpos;
|
|
+ volatile uint16_t in_rpos;
|
|
+ volatile uint32_t reserved[ 3 ];
|
|
+ /*
|
|
+ * Queue of messages to host, each block of data prefixed with
|
|
+ * a mve_msg_header
|
|
+ */
|
|
+ volatile uint32_t out_data[ MVE_COMM_QUEUE_SIZE_IN_WORDS ];
|
|
+};
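The two structs above split the bookkeeping so that each side only ever writes its own page: the host owns in_wpos/out_rpos, the firmware owns out_wpos/in_rpos. Below is a minimal sketch of how occupancy could be derived from that split, assuming a word-granular ring of MVE_COMM_QUEUE_SIZE_IN_WORDS entries; the helper names and the "keep one word free" convention are illustrative assumptions, not part of the protocol.

```c
#include <stdint.h>

/*
 * Illustrative helpers, not part of the protocol header. Number of 32-bit
 * words currently queued in the host->MVE message queue: the host only
 * writes in_wpos (mve_comm_area_host), the firmware only writes in_rpos
 * (mve_comm_area_mve). The out_* pair works the same way for the
 * MVE->host direction, as do the buffer queues.
 */
static inline uint16_t mve_in_queue_used_words(const struct mve_comm_area_host *host,
					       const struct mve_comm_area_mve *mve)
{
	uint16_t wpos = host->in_wpos;
	uint16_t rpos = mve->in_rpos;

	if (wpos >= rpos)
		return wpos - rpos;
	return MVE_COMM_QUEUE_SIZE_IN_WORDS - rpos + wpos;
}

/*
 * Free words, assuming the producer keeps one word unused so that
 * wpos == rpos can always be read as "queue empty".
 */
static inline uint16_t mve_in_queue_free_words(const struct mve_comm_area_host *host,
					       const struct mve_comm_area_mve *mve)
{
	return (MVE_COMM_QUEUE_SIZE_IN_WORDS - 1) -
	       mve_in_queue_used_words(host, mve);
}
```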
|
|
+
|
|
+#define MVE_RPC_AREA_SIZE_IN_WORDS 256
|
|
+#define MVE_RPC_DATA_SIZE_IN_WORDS (MVE_RPC_AREA_SIZE_IN_WORDS - 3)
|
|
+union mve_rpc_params
|
|
+{
|
|
+ volatile uint32_t data[ MVE_RPC_DATA_SIZE_IN_WORDS ];
|
|
+ struct
|
|
+ {
|
|
+ char string[ MVE_RPC_DATA_SIZE_IN_WORDS * 4 ];
|
|
+ } debug_print;
|
|
+ struct
|
|
+ {
|
|
+ uint32_t size;
|
|
+ uint32_t max_size;
|
|
+ uint8_t region; /* Memory region selection */
|
|
+ #define MVE_MEM_REGION_PROTECTED (0)
|
|
+ #define MVE_MEM_REGION_OUTBUF (1)
|
|
+ #define MVE_MEM_REGION_FRAMEBUF (MVE_MEM_REGION_OUTBUF)
|
|
+
|
|
+ /* The newly allocated memory must be placed
|
|
+ * on (at least) a 2^(log2_alignment) boundary
|
|
+ */
|
|
+ uint8_t log2_alignment;
|
|
+ } mem_alloc;
|
|
+ struct
|
|
+ {
|
|
+ uint32_t ve_pointer;
|
|
+ uint32_t new_size;
|
|
+ } mem_resize;
|
|
+ struct
|
|
+ {
|
|
+ uint32_t ve_pointer;
|
|
+ } mem_free;
|
|
+};
|
|
+
|
|
+struct mve_rpc_communication_area
|
|
+{
|
|
+ volatile uint32_t state;
|
|
+ #define MVE_RPC_STATE_FREE (0)
|
|
+ #define MVE_RPC_STATE_PARAM (1)
|
|
+ #define MVE_RPC_STATE_RETURN (2)
|
|
+ volatile uint32_t call_id;
|
|
+ #define MVE_RPC_FUNCTION_DEBUG_PRINTF (1)
|
|
+ #define MVE_RPC_FUNCTION_MEM_ALLOC (2)
|
|
+ #define MVE_RPC_FUNCTION_MEM_RESIZE (3)
|
|
+ #define MVE_RPC_FUNCTION_MEM_FREE (4)
|
|
+ volatile uint32_t size;
|
|
+ union mve_rpc_params params;
|
|
+};
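A minimal sketch of how a host might service the RPC area, assuming the firmware raises state to MVE_RPC_STATE_PARAM and the host hands the result back by setting MVE_RPC_STATE_RETURN; the function name is hypothetical, and memory barriers, interrupt plumbing and the actual allocation logic are omitted.

```c
#include <stdio.h>

/*
 * Hypothetical polling handler; a real driver does this from its interrupt
 * bottom half and adds the required memory barriers. Only the debug-printf
 * case is fleshed out here.
 */
static void mve_rpc_poll(struct mve_rpc_communication_area *rpc)
{
	if (rpc->state != MVE_RPC_STATE_PARAM)
		return;		/* nothing to service */

	switch (rpc->call_id) {
	case MVE_RPC_FUNCTION_DEBUG_PRINTF:
		printf("FW: %.*s", (int)rpc->size,
		       rpc->params.debug_print.string);
		rpc->size = 0;	/* no return data */
		break;
	case MVE_RPC_FUNCTION_MEM_ALLOC:
		/*
		 * Allocate params.mem_alloc.size bytes in the requested
		 * region, aligned to 2^log2_alignment, map it for the VPU
		 * and place the resulting VPU virtual address in params
		 * (driver specific, omitted here).
		 */
		break;
	default:
		break;
	}

	/* Hand the area back to the firmware. */
	rpc->state = MVE_RPC_STATE_RETURN;
}
```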
|
|
+
|
|
+struct mve_fw_ram_print_head_aera
|
|
+{
|
|
+ volatile uint32_t rd_cnt;
|
|
+ volatile uint32_t reserved0[15];
|
|
+
|
|
+ volatile uint32_t flag;
|
|
+ volatile uint32_t index;
|
|
+ volatile uint32_t wr_cnt;
|
|
+ volatile uint32_t reserved1[13];
|
|
+};
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/*********************************************************************
|
|
+ *
|
|
+ * Message codes
|
|
+ *
|
|
+ *********************************************************************/
|
|
+
|
|
+/* Messages consist of one struct mve_msg_header, possibly followed
|
|
+ * by extra data.
|
|
+ */
|
|
+struct mve_msg_header
|
|
+{
|
|
+ uint16_t code;
|
|
+ /* REQUESTs are messages from the
|
|
+ * host/driver to the firmware: Code: Extra data in message: */
|
|
+ #define MVE_REQUEST_CODE_GO (1001) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_STOP (1002) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_INPUT_FLUSH (1003) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_OUTPUT_FLUSH (1004) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_SWITCH (1005) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_PING (1006) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_DUMP (1008) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_JOB (1009) /* struct mve_request_job */
|
|
+ #define MVE_REQUEST_CODE_SET_OPTION (1010) /* struct mve_request_set_option (variable size) */
|
|
+ #define MVE_REQUEST_CODE_RELEASE_REF_FRAME (1011) /* struct mve_request_release_ref_frame */
|
|
+ #define MVE_REQUEST_CODE_IDLE_ACK (1012) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_DEBUG (1013) /* level: 0 for disable, refer to fw_log_level */
|
|
+ /* RESPONSEs are messages from
|
|
+ * the firmware to the host: */
|
|
+ #define MVE_RESPONSE_CODE_SWITCHED_IN (2001) /* struct mve_response_switched_in */
|
|
+ #define MVE_RESPONSE_CODE_SWITCHED_OUT (2002) /* struct mve_response_switched_out */
|
|
+ #define MVE_RESPONSE_CODE_SET_OPTION_CONFIRM (2003) /* no extra data */
|
|
+ #define MVE_RESPONSE_CODE_JOB_DEQUEUED (2004) /* struct mve_response_job_dequeued */
|
|
+ #define MVE_RESPONSE_CODE_INPUT (2005) /* no extra data, but buffer placed in buffer queue */
|
|
+ #define MVE_RESPONSE_CODE_OUTPUT (2006) /* no extra data, but buffer placed in buffer queue */
|
|
+ #define MVE_RESPONSE_CODE_INPUT_FLUSHED (2007) /* no extra data */
|
|
+ #define MVE_RESPONSE_CODE_OUTPUT_FLUSHED (2008) /* no extra data */
|
|
+ #define MVE_RESPONSE_CODE_PONG (2009) /* no extra data */
|
|
+ #define MVE_RESPONSE_CODE_ERROR (2010) /* struct mve_response_error */
|
|
+ #define MVE_RESPONSE_CODE_STATE_CHANGE (2011) /* struct mve_response_state_change */
|
|
+ #define MVE_RESPONSE_CODE_DUMP (2012) /* no extra data */
|
|
+ #define MVE_RESPONSE_CODE_IDLE (2013) /* no extra data */
|
|
+ #define MVE_RESPONSE_CODE_FRAME_ALLOC_PARAM (2014) /* struct mve_response_frame_alloc_parameters */
|
|
+ #define MVE_RESPONSE_CODE_SEQUENCE_PARAMETERS (2015) /* struct mve_response_sequence_parameters */
|
|
+ #define MVE_RESPONSE_CODE_EVENT (2016) /* struct mve_response_event (variable size) */
|
|
+ #define MVE_RESPONSE_CODE_SET_OPTION_FAIL (2017) /* struct mve_response_set_option_failed */
|
|
+ #define MVE_RESPONSE_CODE_REF_FRAME_UNUSED (2018) /* struct mve_response_ref_frame_unused */
|
|
+ #define MVE_RESPONSE_CODE_DEBUG (2019) /* no extra data */
|
|
+ /* BUFFERs are sent from host to firmware,
|
|
+ * and then return at some time: */
|
|
+ #define MVE_BUFFER_CODE_FRAME (3001) /* struct mve_buffer_frame */
|
|
+ #define MVE_BUFFER_CODE_BITSTREAM (3002) /* struct mve_buffer_bitstream */
|
|
+ #define MVE_BUFFER_CODE_PARAM (3003) /* struct mve_buffer_param */
|
|
+ #define MVE_BUFFER_CODE_GENERAL (3004) /* struct mve_buffer_general */
|
|
+
|
|
+ uint16_t size; /* size in bytes of trailing data, 0 if none */
|
|
+};
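A minimal sketch of draining the MVE-to-host message queue, following the note at the top of this header that the 'size' field, not sizeof(struct), decides how much payload to read. It assumes header and payload are stored as consecutive 32-bit words that wrap at the end of out_data[]; the function name is hypothetical.

```c
#include <stdint.h>
#include <string.h>

/*
 * Hypothetical dequeue: copies one message (header plus payload) out of the
 * MVE->host queue into 'buf'. Returns the payload size, or -1 if the queue
 * is empty or 'buf' is too small.
 */
static int mve_read_msg(struct mve_comm_area_mve *mve,
			struct mve_comm_area_host *host,
			struct mve_msg_header *header,
			void *buf, size_t buf_size)
{
	uint16_t rpos = host->out_rpos;
	uint32_t word;
	unsigned int i, words;

	if (rpos == mve->out_wpos)
		return -1;	/* queue is empty */

	word = mve->out_data[rpos];
	rpos = (rpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
	memcpy(header, &word, sizeof(*header));

	/* Use header->size, not sizeof(struct ...), as the protocol note
	 * at the top of this header recommends. */
	words = (header->size + 3) / 4;
	if ((size_t)words * 4 > buf_size)
		return -1;

	for (i = 0; i < words; i++) {
		word = mve->out_data[rpos];
		rpos = (rpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
		memcpy((uint8_t *)buf + 4 * i, &word, 4);
	}

	host->out_rpos = rpos;	/* publish consumption last */
	return header->size;
}
```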
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/*********************************************************************
|
|
+ *
|
|
+ * REQUESTs are messages from the host to the firmware
|
|
+ *
|
|
+ * Some of the MVE_REQUEST_CODE_ codes are followed by one of the
|
|
+ * structs below.
|
|
+ *
|
|
+ *********************************************************************/
|
|
+
|
|
+struct mve_request_job
|
|
+{
|
|
+ uint16_t cores; /* >= 1, number of cores to use, must match request to HW scheduler */
|
|
+ uint16_t frames; /* number of frames to process, zero means infinite */
|
|
+ uint32_t flags; /* can be zero */
|
|
+ #define MVE_JOB_FLAG_DISABLE_BNDMGR (0x01)
|
|
+};
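For example, a job request for a single core that runs until stopped could be filled in as below; it is sent through the message queue with code MVE_REQUEST_CODE_JOB. The function name is illustrative only.

```c
/* Illustrative only: a job request for one core that runs until stopped. */
static void example_build_job_request(struct mve_msg_header *hdr,
				      struct mve_request_job *job)
{
	hdr->code = MVE_REQUEST_CODE_JOB;
	hdr->size = sizeof(*job);	/* 8 bytes of trailing data */

	job->cores  = 1;	/* must match the request made to the HW scheduler */
	job->frames = 0;	/* zero means "process frames indefinitely" */
	job->flags  = 0;
}
```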
|
|
+
|
|
+struct mve_request_set_option
|
|
+{
|
|
+ uint32_t index;
|
|
+ #define MVE_SET_OPT_INDEX_NALU_FORMAT (1) /* see arg, MVE_OPT_NALU_FORMAT_ */
|
|
+ #define MVE_SET_OPT_INDEX_STREAM_ESCAPING (2) /* arg=1 to enable (default), arg=0 to disable */
|
|
+ #define MVE_SET_OPT_INDEX_PROFILE_LEVEL (3) /* data.profile_level */
|
|
+ #define MVE_SET_OPT_INDEX_HOST_PROTOCOL_PRINTS (4) /* arg=1 to enable, arg=0 to disable (default) */
|
|
+ #define MVE_SET_OPT_INDEX_PROFILING (5) /* arg=1 to enable, arg=0 to disable (default) */
|
|
+ #define MVE_SET_OPT_INDEX_DISABLE_FEATURES (6) /* see arg, MVE_OPT_DISABLE_FEATURE_ */
|
|
+ #define MVE_SET_OPT_INDEX_IGNORE_STREAM_HEADERS (7) /* decode, arg=1 to enable,
|
|
+ * arg=0 to disable (default) */
|
|
+ #define MVE_SET_OPT_INDEX_FRAME_REORDERING (8) /* decode, arg=1 to enable (default),
|
|
+ * arg=0 to disable */
|
|
+ #define MVE_SET_OPT_INDEX_INTBUF_SIZE (9) /* decode, arg = suggested limit of intermediate
|
|
+ * buffer allocation */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_P_FRAMES (16) /* encode, arg = nPFrames */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_B_FRAMES (17) /* encode, arg = number of B frames */
|
|
+ #define MVE_SET_OPT_INDEX_GOP_TYPE (18) /* encode, see arg */
|
|
+ #define MVE_SET_OPT_INDEX_INTRA_MB_REFRESH (19) /* encode, arg */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_CONSTR_IPRED (20) /* encode, arg = 0 or 1 */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_ENTROPY_SYNC (21) /* encode, arg = 0 or 1 */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_TEMPORAL_MVP (22) /* encode, arg = 0 or 1 */
|
|
+ #define MVE_SET_OPT_INDEX_TILES (23) /* encode, data.tiles */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_MIN_LUMA_CB_SIZE (24) /* HEVC encode, arg = 8 or 16,
|
|
+ * for sizes 8x8 or 16x16 */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_MB_TYPE_ENABLE (25) /* encode, see arg */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_MB_TYPE_DISABLE (26) /* encode, see arg */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_H264_CABAC (27) /* encode, arg = 0 or 1, enabled by default */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_SLICE_SPACING (28) /* encode, arg = suggested number of
|
|
+ * CTUs/macroblocks in a slice */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_VP9_PROB_UPDATE (30) /* VP9 encode, see arg */
|
|
+ #define MVE_SET_OPT_INDEX_RESYNC_INTERVAL (31) /* JPEG encode, arg = nRestartInterval
|
|
+ * = nResynchMarkerSpacing */
|
|
+ #define MVE_SET_OPT_INDEX_HUFFMAN_TABLE (32) /* JPEG encode, data.huffman_table */
|
|
+ #define MVE_SET_OPT_INDEX_QUANT_TABLE (33) /* JPEG encode, data.quant_table */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_EXPOSE_REF_FRAMES (34) /* encode only, disabled by default */
|
|
+ #define MVE_SET_OPT_INDEX_MBINFO_OUTPUT (35) /* encode, arg=1 to enable,
|
|
+ * arg=0 to disable (default) */
|
|
+ #define MVE_SET_OPT_INDEX_MV_SEARCH_RANGE (36) /* encode, data,motion_vector_search_range */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_STREAM_BITDEPTH (38) /* encode, data.bitdepth, to set other bitdepth
|
|
+ * of encoded stream than of input frames */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_STREAM_CHROMA_FORMAT (39) /* encode, arg, to set other chroma format of
|
|
+ * encoded stream than of input frames */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_RGB_TO_YUV_MODE (40) /* encode, arg, select which way RGB is converted
|
|
+ * to YUV before encoding */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_BANDWIDTH_LIMIT (41) /* encode, arg, the maximum bandwidth limit defined
|
|
+ * by host */
|
|
+ #define MVE_SET_OPT_INDEX_WATCHDOG_TIMEOUT (42) /* arg=timeout, arg=0 to disable */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_CABAC_INIT_IDC (43) /* encode, arg; 0,1,2 for H264; 0,1 for HEVC */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_ADPTIVE_QUANTISATION (44) /* encode (h264 and hevc) */
|
|
+ #define MVE_SET_OPT_INDEX_QP_DELTA_I_P (45)
|
|
+ #define MVE_SET_OPT_INDEX_QP_DELTA_I_B_REF (46)
|
|
+ #define MVE_SET_OPT_INDEX_QP_DELTA_I_B_NONREF (47)
|
|
+ #define MVE_SET_OPT_INDEX_CB_QP_OFFSET (48)
|
|
+ #define MVE_SET_OPT_INDEX_CR_QP_OFFSET (49)
|
|
+ #define MVE_SET_OPT_INDEX_LAMBDA_SCALE (50) /* encode, data.lambda_scale */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_MAX_NUM_CORES (51) /* maximum number of cores */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_EXTRA_REFS (52) /* configure number of extra ref buffers */
|
|
+ #define MVE_SET_OPT_INDEX_QP_DELTA_RAW_I_P (53)
|
|
+ #define MVE_SET_OPT_INDEX_QP_DELTA_RAW_I_B_REF (54)
|
|
+ #define MVE_SET_OPT_INDEX_QP_DELTA_RAW_I_B_NONREF (55)
|
|
+ #define MVE_SET_OPT_INDEX_ENC_FIXED_QP (56)
|
|
+ /* ARBITRARY_DOWNSCALE */
|
|
+ #define MVE_SET_OPT_INDEX_DEC_DOWNSCALE (57) /* decode, set downscaled width and height */
|
|
+ #define MVE_SET_OPT_INDEX_FLUSHLESS_REFBANK (58) /* configure AFBC ref bank for individual buffer
|
|
+ * allocation. Forced internally for flushless
|
|
+ * resolution change codecs */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_LEFT (62)
|
|
+ #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_RIGHT (63)
|
|
+ #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_TOP (64)
|
|
+ #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_BOTTOM (65)
|
|
+ /* LONG_TERM_REFERENCE */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_LTR_MODE (66)
|
|
+ #define MVE_SET_OPT_INDEX_ENC_LTR_PERIOD (67)
|
|
+ #define MVE_SET_OPT_INDEX_DEC_DOWNSCALE_POS_MODE (69)
|
|
+
|
|
+ union
|
|
+ {
|
|
+ uint32_t arg; /* Most options only need a uint32_t as argument */
|
|
+ /* For option MVE_SET_OPT_INDEX_NALU_FORMAT, arg should
|
|
+ * be one of these: */
|
|
+ #define MVE_OPT_NALU_FORMAT_START_CODES (1)
|
|
+ #define MVE_OPT_NALU_FORMAT_ONE_NALU_PER_BUFFER (2)
|
|
+ #define MVE_OPT_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD (4)
|
|
+ #define MVE_OPT_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD (8)
|
|
+ #define MVE_OPT_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD (16)
|
|
+ /* For option MVE_SET_OPT_INDEX_GOP_TYPE, arg should
|
|
+ * be one of these: */
|
|
+ #define MVE_OPT_GOP_TYPE_BIDIRECTIONAL (1)
|
|
+ #define MVE_OPT_GOP_TYPE_LOW_DELAY (2)
|
|
+ #define MVE_OPT_GOP_TYPE_PYRAMID (3)
|
|
+ /* For option MVE_SET_OPT_INDEX_ENC_VP9_PROB_UPDATE,
|
|
+ * arg should be one of these: */
|
|
+ #define MVE_OPT_VP9_PROB_UPDATE_DISABLED (0)
|
|
+ #define MVE_OPT_VP9_PROB_UPDATE_IMPLICIT (1)
|
|
+ #define MVE_OPT_VP9_PROB_UPDATE_EXPLICIT (2)
|
|
+ /* For option MVE_SET_OPT_INDEX_DISABLE_FEATURES, arg
|
|
+ * should be a bitmask with features to disable: */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_AFBC_COMP (0x00000001) /* VDMA AFBC Compression */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_REF_CACHE (0x00000002) /* REF caching */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_DEBLOCK (0x00000004) /* Deblocking */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_SAO (0x00000008) /* SAO */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_PIC_OUTPUT (0x00000020) /* Picture Output Removal */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_PIPE (0x00000040) /* Pipe (i.e. parser-only) */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_SLEEP (0x00000080) /* Clock gating
|
|
+ * (SOC_SYSCTRL.SLEEP bit) */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_AFBC_LEGACY_REF (0x00000100) /* Enables tiled AFBC format in
|
|
+ * reference buffers. Ignored
|
|
+ * for decode AFBC output */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_REF_PICS (0x00000400) /* Forces use of static 16x16
|
|
+ * reference pics */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_CHNG_RECT_WA (0x00000800) /* Disables workaround */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_REFSZ_LIMIT (0x00001000) /* Disable REFSZ bw limit */
|
|
+ /* For options MVE_SET_OPT_INDEX_ENC_MB_TYPE_ENABLE
|
|
+ * and MVE_SET_OPT_INDEX_ENC_MB_TYPE_DISABLE, arg
|
|
+ * should be a bitmask of MVE_MBTYPEs: */
|
|
+ #define MVE_MBTYPE_4x4 (0x00000001) /* 4x4 inter */
|
|
+ #define MVE_MBTYPE_4x8 (0x00000002) /* 4x8 inter */
|
|
+ #define MVE_MBTYPE_8x4 (0x00000004) /* 8x4 inter */
|
|
+ #define MVE_MBTYPE_8x8 (0x00000008) /* 8x8 inter */
|
|
+ #define MVE_MBTYPE_8x16 (0x00000010) /* 8x16 inter */
|
|
+ #define MVE_MBTYPE_16x8 (0x00000020) /* 16x8 inter */
|
|
+ #define MVE_MBTYPE_16x16 (0x00000040) /* 16x16 inter */
|
|
+ #define MVE_MBTYPE_PSKIP (0x00000080) /* P Skip inter */
|
|
+ #define MVE_MBTYPE_I4x4 (0x00000100) /* 4x4 intra */
|
|
+ #define MVE_MBTYPE_I8x8 (0x00000200) /* 8x8 intra */
|
|
+ #define MVE_MBTYPE_I16x16 (0x00000400) /* 16x16 intra */
|
|
+ #define MVE_MBTYPE_I32x32 (0x00000800) /* 32x32 intra */
|
|
+ #define MVE_MBTYPE_16x32 (0x00001000) /* 16x32 inter */
|
|
+ #define MVE_MBTYPE_32x16 (0x00002000) /* 32x16 inter */
|
|
+ #define MVE_MBTYPE_32x32 (0x00004000) /* 32x32 inter */
|
|
+ /* For option MVE_SET_OPT_INDEX_ENC_RGB_TO_YUV_MODE,
|
|
+ * arg should be one of these: */
|
|
+ #define MVE_OPT_RGB_TO_YUV_BT601_STUDIO (0)
|
|
+ #define MVE_OPT_RGB_TO_YUV_BT601_FULL (1)
|
|
+ #define MVE_OPT_RGB_TO_YUV_BT709_STUDIO (2)
|
|
+ #define MVE_OPT_RGB_TO_YUV_BT709_FULL (3)
|
|
+ /* For option MVE_SET_OPT_INDEX_ENC_EXPOSE_REF_FRAMES,
|
|
+ * arg should be one of: */
|
|
+ #define MVE_OPT_REF_OUTPUT_NONE (0) /* No REF output */
|
|
+ #define MVE_OPT_REF_OUTPUT_USED_FOR_REF (1) /* Output reference frames */
|
|
+ #define MVE_OPT_REF_OUTPUT_ALL (2) /* Output/reconstruct all frames */
|
|
+ struct
|
|
+ {
|
|
+ uint16_t profile;
|
|
+ /* AVC/H.264 profiles */
|
|
+ #define MVE_OPT_PROFILE_H264_BASELINE (1)
|
|
+ #define MVE_OPT_PROFILE_H264_MAIN (2)
|
|
+ #define MVE_OPT_PROFILE_H264_HIGH (3)
|
|
+ /* HEVC/H.265 profiles */
|
|
+ #define MVE_OPT_PROFILE_H265_MAIN (1)
|
|
+ #define MVE_OPT_PROFILE_H265_MAIN_STILL (2)
|
|
+ #define MVE_OPT_PROFILE_H265_MAIN_INTRA (3)
|
|
+ #define MVE_OPT_PROFILE_H265_MAIN_10 (4)
|
|
+ /* VC-1 profiles */
|
|
+ #define MVE_OPT_PROFILE_VC1_SIMPLE (1)
|
|
+ #define MVE_OPT_PROFILE_VC1_MAIN (2)
|
|
+ #define MVE_OPT_PROFILE_VC1_ADVANCED (3)
|
|
+ /* VP8 profiles */
|
|
+ #define MVE_OPT_PROFILE_VP8_MAIN (1)
|
|
+ uint16_t level;
|
|
+ /* AVC/H.264 levels */
|
|
+ #define MVE_OPT_LEVEL_H264_1 (1)
|
|
+ #define MVE_OPT_LEVEL_H264_1b (2)
|
|
+ #define MVE_OPT_LEVEL_H264_11 (3)
|
|
+ #define MVE_OPT_LEVEL_H264_12 (4)
|
|
+ #define MVE_OPT_LEVEL_H264_13 (5)
|
|
+ #define MVE_OPT_LEVEL_H264_2 (6)
|
|
+ #define MVE_OPT_LEVEL_H264_21 (7)
|
|
+ #define MVE_OPT_LEVEL_H264_22 (8)
|
|
+ #define MVE_OPT_LEVEL_H264_3 (9)
|
|
+ #define MVE_OPT_LEVEL_H264_31 (10)
|
|
+ #define MVE_OPT_LEVEL_H264_32 (11)
|
|
+ #define MVE_OPT_LEVEL_H264_4 (12)
|
|
+ #define MVE_OPT_LEVEL_H264_41 (13)
|
|
+ #define MVE_OPT_LEVEL_H264_42 (14)
|
|
+ #define MVE_OPT_LEVEL_H264_5 (15)
|
|
+ #define MVE_OPT_LEVEL_H264_51 (16)
|
|
+ #define MVE_OPT_LEVEL_H264_52 (17)
|
|
+ #define MVE_OPT_LEVEL_H264_6 (18)
|
|
+ #define MVE_OPT_LEVEL_H264_61 (19)
|
|
+ #define MVE_OPT_LEVEL_H264_62 (20)
|
|
+ #define MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE (32)
|
|
+ /* The value (MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE + level_idc) encodes a user
|
|
+ * supplied level_idc value in the range 0 to 255 inclusive. If the host supplies a level_idc
|
|
+ * value by this method then the encoder will encode this level_idc value in the bitstream
|
|
+ * without checking the validity of the level_idc value
|
|
+ */
|
|
+ #define MVE_OPT_LEVEL_H264_USER_SUPPLIED_MAX (MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE + 255)
|
|
+ /* HEVC/H.265 levels */
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_1 (1)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_1 (2)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_2 (3)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_2 (4)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_21 (5)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_21 (6)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_3 (7)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_3 (8)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_31 (9)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_31 (10)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_4 (11)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_4 (12)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_41 (13)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_41 (14)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_5 (15)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_5 (16)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_51 (17)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_51 (18)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_52 (19)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_52 (20)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_6 (21)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_6 (22)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_61 (23)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_61 (24)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_62 (25)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_62 (26)
|
|
+ } profile_level;
|
|
+ struct
|
|
+ {
|
|
+ int32_t mv_search_range_x;
|
|
+ int32_t mv_search_range_y;
|
|
+ } motion_vector_search_range;
|
|
+ struct
|
|
+ {
|
|
+ uint32_t type;
|
|
+ #define MVE_OPT_HUFFMAN_TABLE_DC_LUMA (1)
|
|
+ #define MVE_OPT_HUFFMAN_TABLE_AC_LUMA (2)
|
|
+ #define MVE_OPT_HUFFMAN_TABLE_DC_CHROMA (3)
|
|
+ #define MVE_OPT_HUFFMAN_TABLE_AC_CHROMA (4)
|
|
+ uint8_t number_of_huffman_of_code_length[ 16 ];
|
|
+ uint8_t table[ 162 ]; /* 12 are used for DC, 162 for AC */
|
|
+ } huffman_table;
|
|
+ struct
|
|
+ {
|
|
+ uint32_t type;
|
|
+ #define MVE_OPT_QUANT_TABLE_LUMA (1)
|
|
+ #define MVE_OPT_QUANT_TABLE_CHROMA (2)
|
|
+ uint8_t matrix[ 64 ];
|
|
+ } quant_table;
|
|
+ struct
|
|
+ {
|
|
+ /* For HEVC, tile_cols must be zero. For VP9, tile_rows
|
|
+ * and tile_cols must be powers of 2. */
|
|
+ uint16_t tile_rows;
|
|
+ uint16_t tile_cols;
|
|
+ } tiles;
|
|
+ struct
|
|
+ {
|
|
+ uint16_t luma_bitdepth;
|
|
+ uint16_t chroma_bitdepth;
|
|
+ } bitdepth;
|
|
+ struct
|
|
+ {
|
|
+ /* Scale factors, and their square roots, for the lambda
|
|
+ * coefficients used by the encoder, in unsigned Q8 fixed-point
|
|
+ * format. Default (no scaling) is 1.0 (so 0x0100 in hex).
|
|
+ */
|
|
+ uint16_t lambda_scale_i_q8;
|
|
+ uint16_t lambda_scale_sqrt_i_q8;
|
|
+ uint16_t lambda_scale_p_q8;
|
|
+ uint16_t lambda_scale_sqrt_p_q8;
|
|
+ uint16_t lambda_scale_b_ref_q8;
|
|
+ uint16_t lambda_scale_sqrt_b_ref_q8;
|
|
+ uint16_t lambda_scale_b_nonref_q8;
|
|
+ uint16_t lambda_scale_sqrt_b_nonref_q8;
|
|
+ } lambda_scale;
|
|
+ /* ARBITRARY_DOWNSCALE */
|
|
+ struct
|
|
+ {
|
|
+ uint16_t width;
|
|
+ uint16_t height;
|
|
+ } downscaled_frame;
|
|
+ struct
|
|
+ {
|
|
+ uint32_t mode;
|
|
+ } dsl_pos;
|
|
+ } data;
|
|
+};
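Because MVE_REQUEST_CODE_SET_OPTION carries a variable-sized struct, only the used part of the union needs to be shipped. Below is a sketch for a plain uint32 option (NALU framing); the idea that 'size' may cover just offsetof(data) plus four bytes is an assumption for illustration, a driver may equally send the whole struct.

```c
#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative only: request start-code NALU framing. Returns the number of
 * trailing bytes to advertise in the accompanying mve_msg_header.
 */
static uint16_t example_build_nalu_option(struct mve_request_set_option *opt)
{
	opt->index    = MVE_SET_OPT_INDEX_NALU_FORMAT;
	opt->data.arg = MVE_OPT_NALU_FORMAT_START_CODES;

	return (uint16_t)(offsetof(struct mve_request_set_option, data) +
			  sizeof(uint32_t));
}
```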
|
|
+
|
|
+struct mve_request_release_ref_frame
|
|
+{
|
|
+ /* Decode: For a frame buffer that MVE has returned
|
|
+ * marked as _REF_FRAME, the host can send this message
|
|
+ * to ask the MVE to release the buffer as soon as it is
|
|
+ * no longer used as reference anymore. (Otherwise, in
|
|
+ * normal operation, the host would re-enqueue the buffer
|
|
+ * to the MVE when it has been displayed and can be over-
|
|
+ * written with a new frame.)
|
|
+ *
|
|
+ * Note: when a frame stops being used as a reference depends
|
|
+ * on the stream being decoded, and there is no way to
|
|
+ * guarantee a short response time; the response may not
|
|
+ * come until the end of the stream.
|
|
+ *
|
|
+ * Encode: Return this reference buffer to the firmware
|
|
+ * so it can be reused. This is only useful when the
|
|
+ * MVE_SET_OPT_INDEX_ENC_EXPOSE_REF_FRAMES is used and reference
|
|
+ * frames are reported by events (and must be returned).
|
|
+ */
|
|
+ uint32_t buffer_address;
|
|
+};
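A sketch of the request half of this handshake; the helper name is hypothetical, and the assumption is that buffer_address is the same VPU virtual address the frame buffer was originally enqueued with, which the later MVE_RESPONSE_CODE_REF_FRAME_UNUSED message echoes back.

```c
#include <stdint.h>

/*
 * Illustrative only: ask the firmware to drop a reference frame as soon as
 * it stops using it. 'frame_va' is assumed to be the VPU virtual address
 * the frame buffer was originally enqueued with.
 */
static void example_request_release(struct mve_msg_header *hdr,
				    struct mve_request_release_ref_frame *req,
				    uint32_t frame_va)
{
	hdr->code = MVE_REQUEST_CODE_RELEASE_REF_FRAME;
	hdr->size = sizeof(*req);
	req->buffer_address = frame_va;
	/*
	 * When (if ever) the frame stops being referenced, the firmware sends
	 * MVE_RESPONSE_CODE_REF_FRAME_UNUSED with the same address; only then
	 * should the host recycle the memory.
	 */
}
```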
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/*********************************************************************
|
|
+ *
|
|
+ * RESPONSEs are messages from the firmware to the host
|
|
+ *
|
|
+ * Some of the MVE_RESPONSE_CODE_ codes are followed by one of the
|
|
+ * structs below.
|
|
+ *
|
|
+ *********************************************************************/
|
|
+
|
|
+/* Sent when firmware has booted.
|
|
+ */
|
|
+struct mve_response_switched_in
|
|
+{
|
|
+ uint32_t core;
|
|
+};
|
|
+
|
|
+/* Sent when last core in a session has switched out.
|
|
+ */
|
|
+struct mve_response_switched_out
|
|
+{
|
|
+ uint32_t core;
|
|
+ uint32_t reason;
|
|
+ uint32_t sub_reason;
|
|
+};
|
|
+
|
|
+/* Response confirming state transition after either GO or STOP
|
|
+ * command from host.
|
|
+ */
|
|
+struct mve_response_state_change
|
|
+{
|
|
+ uint32_t new_state;
|
|
+ #define MVE_STATE_STOPPED (0)
|
|
+ #define MVE_STATE_RUNNING (2)
|
|
+};
|
|
+
|
|
+/* Message sent when all cores in the session have dequeued a
|
|
+ * job from the firmware job queue.
|
|
+ */
|
|
+struct mve_response_job_dequeued
|
|
+{
|
|
+ uint32_t valid_job;
|
|
+};
|
|
+
|
|
+/* Fatal error message from firmware, if sent then no further
|
|
+ * operation is possible.
|
|
+ */
|
|
+struct mve_response_error
|
|
+{
|
|
+ uint32_t error_code;
|
|
+ #define MVE_ERROR_ABORT (1)
|
|
+ #define MVE_ERROR_OUT_OF_MEMORY (2)
|
|
+ #define MVE_ERROR_ASSERT (3)
|
|
+ #define MVE_ERROR_UNSUPPORTED (4)
|
|
+ #define MVE_ERROR_INVALID_BUFFER (6)
|
|
+ #define MVE_ERROR_INVALID_STATE (8)
|
|
+ #define MVE_ERROR_WATCHDOG (9)
|
|
+
|
|
+ #define MVE_MAX_ERROR_MESSAGE_SIZE (128)
|
|
+ char message[ MVE_MAX_ERROR_MESSAGE_SIZE ];
|
|
+};
|
|
+
|
|
+/* When a set-option succeeds, a confirmation message is
|
|
+ * sent, including the index-code for that particular option.
|
|
+ */
|
|
+struct mve_response_set_option_confirm
|
|
+{
|
|
+ uint32_t index; /* Same as 'index' in struct mve_request_set_option */
|
|
+};
|
|
+
|
|
+/* If a set-option request fails, this message is returned.
|
|
+ * This is not a fatal error. The set-option had no effect,
|
|
+ * and the session is still alive.
|
|
+ * For example, trying to set an option with a too large
|
|
+ * or small parameter would result in this message.
|
|
+ * The included text string is meant for development and
|
|
+ * debugging purposes only.
|
|
+ * (When a set-option succeeds the set-option-confirm
|
|
+ * message code is sent instead.)
|
|
+ */
|
|
+struct mve_response_set_option_fail
|
|
+{
|
|
+ uint32_t index; /* Same as 'index' in struct mve_request_set_option */
|
|
+ char message[ MVE_MAX_ERROR_MESSAGE_SIZE ];
|
|
+};
|
|
+
|
|
+/* Decode only: This message is sent from MVE to the host so that it can
|
|
+ * allocate large enough output buffers. Output buffers that are too small
|
|
+ * will be returned to the host marked as 'rejected'.
|
|
+ */
|
|
+struct mve_response_frame_alloc_parameters
|
|
+{
|
|
+ /* Please note that the below information is a hint
|
|
+ * for what buffers to allocate, it does not say
|
|
+ * what actual resolution an output picture has.
|
|
+ */
|
|
+
|
|
+ /* To use if allocating PLANAR YUV output buffers: */
|
|
+ uint16_t planar_alloc_frame_width;
|
|
+ uint16_t planar_alloc_frame_height;
|
|
+
|
|
+ /* To use if allocating AFBC output buffers
|
|
+ * (if interlace, each field needs this size):
|
|
+ */
|
|
+ uint32_t afbc_alloc_bytes;
|
|
+
|
|
+ /* For situations where downscaled AFBC is supported,
|
|
+ * this number of bytes is needed for the downscaled frame.
|
|
+ */
|
|
+ uint32_t afbc_alloc_bytes_downscaled;
|
|
+
|
|
+ /* When the host allocates an AFBC frame buffer, it should normally set
|
|
+ * the afbc_width_in_superblocks to be at least this recommended value.
|
|
+ * Buffers with smaller values are likely to be returned rejected by the MVE.
|
|
+ * See also comments above for afbc_alloc_bytes and
|
|
+ * afbc_alloc_bytes_downscaled, they describe the situations where the
|
|
+ * different values are used.
|
|
+ */
|
|
+ uint16_t afbc_width_in_superblocks;
|
|
+ uint16_t afbc_width_in_superblocks_downscaled;
|
|
+
|
|
+ /* For PLANAR YUV output, every plane's address needs to be adjusted to get
|
|
+ * optimal AXI bursts when the pixel data is written, the values below may
|
|
+ * be used to calculate address offsets.
|
|
+ */
|
|
+ uint16_t cropx;
|
|
+ uint16_t cropy;
|
|
+
|
|
+ uint32_t mbinfo_alloc_bytes; /* Only for debugging */
|
|
+
|
|
+
|
|
+ /* downscaled frame width/height for decode */
|
|
+ /* ARBITRARY_DOWNSCALE */
|
|
+ uint16_t dsl_frame_width;
|
|
+ uint16_t dsl_frame_height;
|
|
+ uint16_t dsl_pos_mode;
|
|
+ uint8_t ctu_size; /* EXPORT_SEQ_INFO */
|
|
+};
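
A minimal sketch of how a host might turn this hint into a planar allocation size. It assumes an 8-bit NV12 layout (full-size luma plane plus a half-height interleaved CbCr plane) and that the definitions from this header are in scope; the helper name is illustrative, not part of the protocol.

    /* Illustrative only: size an NV12 output allocation from the firmware's
     * allocation hint. Other pixel formats need their own arithmetic. */
    static uint32_t example_nv12_alloc_size(const struct mve_response_frame_alloc_parameters *p)
    {
        uint32_t w = p->planar_alloc_frame_width;
        uint32_t h = p->planar_alloc_frame_height;

        return w * h + w * (h / 2); /* luma + interleaved CbCr */
    }
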
|
|
+
|
|
+/* Decode only: This message is sent from MVE to the host so that it can
|
|
+ * allocate suitable output buffers. The needed size of the buffer is sent
|
|
+ * in a separate message (above).
|
|
+ * When MVE sends the message below, it enters a waiting-state and will not
|
|
+ * make any progress until the host sends an output-flush command, upon
|
|
+ * which MVE will return all output buffers, followed by a message saying
|
|
+ * that the output has been flushed. Only then should the host start
|
|
+ * enqueueing new output buffers.
|
|
+ */
|
|
+struct mve_response_sequence_parameters
|
|
+{
|
|
+ /* Other stream parameters affecting buffer allocation,
|
|
+ * any change in these values will trigger a flush.
|
|
+ */
|
|
+ uint8_t interlace; /* 0 or 1 */
|
|
+ uint8_t chroma_format;
|
|
+ #define MVE_CHROMA_FORMAT_MONO (0x0)
|
|
+ #define MVE_CHROMA_FORMAT_420 (0x1)
|
|
+ #define MVE_CHROMA_FORMAT_422 (0x2)
|
|
+ #define MVE_CHROMA_FORMAT_440 (0x3)
|
|
+ #define MVE_CHROMA_FORMAT_ARGB (0x4)
|
|
+ uint8_t bitdepth_luma; /* 8, 9 or 10 */
|
|
+ uint8_t bitdepth_chroma; /* 8, 9 or 10 */
|
|
+ uint8_t num_buffers_planar; /* number of planar buffers needed */
|
|
+ uint8_t num_buffers_afbc; /* number of AFBC buffers needed, for
|
|
+ * AFBC output more buffers are needed
|
|
+ * (for planar output, the firmware
|
|
+ * will allocate extra memory via RPC)
|
|
+ */
|
|
+ uint8_t range_mapping_enabled; /* VC-1 AP specific feature, if enabled
|
|
+ * then AFBC buffers may need special
|
|
+ * filtering before they can be
|
|
+ * displayed correctly. If the host is
|
|
+ * not able to do that, then planar output
|
|
+ * should be used, for which MVE
|
|
+ * automatically performs the filtering.
|
|
+ */
|
|
+ uint8_t reserved0;
|
|
+};
|
|
+
|
|
+struct mve_response_ref_frame_unused
|
|
+{
|
|
+ /* Decode only: If requested by the host with the message
|
|
+ * MVE_REQUEST_CODE_RELEASE_REF_FRAME, the MVE will respond
|
|
+ * with this message when (if ever) the buffer is no longer
|
|
+ * used.
|
|
+ */
|
|
+ uint32_t unused_buffer_address;
|
|
+};
|
|
+
|
|
+
|
|
+/* This message is only for debugging and performance profiling.
|
|
+ * It is sent by the firmware if the corresponding option is enabled.
|
|
+ */
|
|
+struct mve_event_processed
|
|
+{
|
|
+ uint8_t pic_format;
|
|
+ uint8_t qp;
|
|
+ uint8_t pad0;
|
|
+ uint8_t pad1;
|
|
+ uint32_t parse_start_time; /* Timestamp, absolute time */
|
|
+ uint32_t parse_end_time; /* Timestamp, absolute time */
|
|
+ uint32_t parse_idle_time; /* Definition of idle here is waiting for in/out buffers or available RAM */
|
|
+
|
|
+ uint32_t pipe_start_time; /* Timestamp */
|
|
+ uint32_t pipe_end_time; /* Timestamp, end-start = process time. Idle time while in a frame is
|
|
+ * not measured. */
|
|
+ uint32_t pipe_idle_time; /* Always 0 in decode, */
|
|
+
|
|
+ uint32_t parser_coreid; /* Core used to parse this frame */
|
|
+ uint32_t pipe_coreid; /* Core used to pipe this frame */
|
|
+
|
|
+ uint32_t bitstream_bits; /* Number of bitstream bits used for this frame. */
|
|
+
|
|
+ uint32_t intermediate_buffer_size; /* Size of intermediate (mbinfo/residuals) buffer after this frame was
|
|
+ * parsed. */
|
|
+ uint32_t total_memory_allocated; /* after the frame was parsed. Including reference frames. */
|
|
+
|
|
+ uint32_t bus_read_bytes; /* bus read bytes */
|
|
+ uint32_t bus_write_bytes; /* bus written bytes */
|
|
+
|
|
+ uint32_t afbc_bytes; /* afbc data transferred */
|
|
+
|
|
+ uint32_t slice0_end_time; /* Timestamp, absolute time */
|
|
+ uint32_t stream_start_time; /* Timestamp, absolute stream start time */
|
|
+ uint32_t stream_open_time; /* Timestamp, absolute stream open time */
|
|
+};
|
|
+
|
|
+/* This message is sent by the firmware if the option
|
|
+ * MVE_SET_OPT_INDEX_ENC_EXPOSE_REF_FRAMES is enabled
|
|
+ */
|
|
+struct mve_event_ref_frame
|
|
+{
|
|
+ uint32_t ref_addr; /* MVE virtual address of AFBC reference frame */
|
|
+ uint32_t ref_width; /* Width of display area in luma pixels */
|
|
+ uint32_t ref_height; /* Height of display area in luma pixels */
|
|
+ uint32_t ref_mb_width; /* Width in macroblocks */
|
|
+ uint32_t ref_mb_height; /* Height in macroblocks */
|
|
+ uint32_t ref_left_crop; /* Left crop in luma pixels */
|
|
+ uint32_t ref_top_crop; /* Top crop in luma pixels */
|
|
+ uint32_t ref_frame_size; /* Total AFBC frame size in bytes */
|
|
+ uint32_t ref_display_order; /* Display picture order count */
|
|
+ uint16_t bit_width; /* Bit width of the YUV either 8 or 10 */
|
|
+ uint16_t tiled_headers; /* AFBC format is tiled */
|
|
+ uint64_t user_data_tag; /* User data tag of corresponding input buffer */
|
|
+};
|
|
+
|
|
+/* This message is only for debugging; it is sent by the firmware if event tracing
|
|
+ * is enabled.
|
|
+ */
|
|
+struct mve_event_trace_buffers
|
|
+{
|
|
+ uint16_t reserved;
|
|
+ uint8_t num_cores;
|
|
+ uint8_t rasc_mask;
|
|
+ #define MVE_MAX_TRACE_BUFFERS 40
|
|
+ /* this array will contain one buffer per rasc in rasc_mask per num_core */
|
|
+ struct
|
|
+ {
|
|
+ uint32_t rasc_addr; /* rasc address of the buffer */
|
|
+ uint32_t size; /* size of the buffer in bytes */
|
|
+ } buffers[MVE_MAX_TRACE_BUFFERS];
|
|
+};
|
|
+
|
|
+/* 'Events' are informative messages; the host is not required to react in
|
|
+ * any particular way.
|
|
+ */
|
|
+struct mve_response_event
|
|
+{
|
|
+ uint32_t event_code;
|
|
+ #define MVE_EVENT_ERROR_STREAM_CORRUPT (1) /* message, text string */
|
|
+ #define MVE_EVENT_ERROR_STREAM_NOT_SUPPORTED (2) /* message, text string */
|
|
+ #define MVE_EVENT_PROCESSED (3) /* struct mve_event_processed */
|
|
+ #define MVE_EVENT_REF_FRAME (4) /* struct mve_event_ref_frame */
|
|
+ #define MVE_EVENT_TRACE_BUFFERS (5) /* struct mve_event_trace_buffers */
|
|
+ union
|
|
+ {
|
|
+ struct mve_event_processed event_processed;
|
|
+ struct mve_event_ref_frame event_ref_frame;
|
|
+ struct mve_event_trace_buffers event_trace_buffers;
|
|
+ char message[ MVE_MAX_ERROR_MESSAGE_SIZE ];
|
|
+ } event_data;
|
|
+}__attribute__((packed));
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/*********************************************************************
|
|
+ *
|
|
+ * BUFFERs are sent both ways, from host to firmware and back again
|
|
+ *
|
|
+ * Each MVE_BUFFER_CODE_ code is followed by one of the structs
|
|
+ * below.
|
|
+ *
|
|
+ *********************************************************************/
|
|
+
|
|
+/* Flags in mve_buffer_frame::frame_flags:
|
|
+ * Set by whom? Meaning:
|
|
+ * DECODE: ENCODE:
|
|
+ * MVE_BUFFER_FRAME_FLAG_INTERLACE host - Buffer is interlaced (both top and
|
|
+ * bottom fields are allocated)
|
|
+ * MVE_BUFFER_FRAME_FLAG_BOT_FIRST fw - Bottom field should be displayed
|
|
+ * first (only if interlaced)
|
|
+ * MVE_BUFFER_FRAME_FLAG_TOP_PRESENT fw host Top field present (or full frame if
|
|
+ * not interlaced)
|
|
+ * MVE_BUFFER_FRAME_FLAG_BOT_PRESENT fw - Bottom present (only if interlaced)
|
|
+ *
|
|
+ * MVE_BUFFER_FRAME_FLAG_ROTATION_* host host Decode: MVE will rotate the output frame
|
|
+ * according to this setting.
|
|
+ * Encode: MVE will rotate the input frame
|
|
+ * according to this setting before
|
|
+ * encoding them.
|
|
+ * MVE_BUFFER_FRAME_FLAG_SCALING_MASK host - Output pictures should be downscaled
|
|
+ *
|
|
+ * MVE_BUFFER_FRAME_FLAG_MIRROR_* - host Input frame should be mirrored before encoding
|
|
+ *
|
|
+ * MVE_BUFFER_FRAME_FLAG_REJECTED fw - Buffer was too small, host should re-allocate
|
|
+ *
|
|
+ * MVE_BUFFER_FRAME_FLAG_CORRUPT fw - Frame contains visual corruption
|
|
+ *
|
|
+ * MVE_BUFFER_FRAME_FLAG_DECODE_ONLY fw - Frame should not be displayed
|
|
+ *
|
|
+ * MVE_BUFFER_FRAME_FLAG_REF_FRAME fw - Frame is used by MVE as reference, host must
|
|
+ * not change, just re-enqueue when displayed
|
|
+ * MVE_BUFFER_FRAME_FLAG_EOS fw host This is the last frame in the stream.
|
|
+ */
|
|
+
|
|
+/* mve_buffer_frame_planar stores uncompressed YUV pictures.
|
|
+ * ________________________________________
|
|
+ * | ^ | | ^
|
|
+ * |<-:--visible_frame_width---->| | :
|
|
+ * | : | | :
|
|
+ * | : | | :
|
|
+ * | visible_frame_height | | max_frame_height
|
|
+ * | : | | :
|
|
+ * | : | | :
|
|
+ * |__v__________________________| | :
|
|
+ * | | :
|
|
+ * |<-------------max_frame_width---------->| :
|
|
+ * |________________________________________| v
|
|
+ *
|
|
+ */
|
|
+struct mve_buffer_frame_planar
|
|
+{
|
|
+ /* Y,Cb,Cr top field */
|
|
+ uint32_t plane_top[ 3 ];
|
|
+
|
|
+ /* Y,Cb,Cr bottom field (interlace only) */
|
|
+ uint32_t plane_bot[ 3 ];
|
|
+
|
|
+ /* Stride between rows, in bytes */
|
|
+ int32_t stride[ 3 ];
|
|
+
|
|
+ /* Size of largest frame allowed to put in this buffer */
|
|
+ uint16_t max_frame_width;
|
|
+ uint16_t max_frame_height;
|
|
+};
|
|
+
|
|
+/* mve_buffer_frame_afbc stores AFBC compressed content that is also used
|
|
+ * as the reference frame. Out of loop processing (crop, rotation,
|
|
+ * range reduction) must be supported by the user of this buffer and
|
|
+ * the parameters are signaled within the buffer descriptor below.
|
|
+ * ________________________________________
|
|
+ * | ^ |
|
|
+ * | cropy |
|
|
+ * | v_____________________________ |
|
|
+ * |<-cropx->| ^ ||
|
|
+ * | |<-:--visible_frame_width---->||
|
|
+ * | | : ||
|
|
+ * | | : ||
|
|
+ * | | visible_frame_height ||
|
|
+ * | | : ||
|
|
+ * | | : ||
|
|
+ * | |__v__________________________||
|
|
+ * |________________________________________|
|
|
+ *
|
|
+ * <----- superblock_width --------------->
|
|
+ * * afbc_width_in_superblocks
|
|
+ *
|
|
+ * Note that the sizes and cropping values need not be multiples of 16.
|
|
+ *
|
|
+ * For interlaced streams, the values refer to a full frame,
|
|
+ * while the output is actually separated into fields. Thus for fields,
|
|
+ * cropy and visible_frame_height should be divided by two.
|
|
+ *
|
|
+ * For dual-downscaled AFBC output (not supported for interlace),
|
|
+ * then the cropx, cropy, visible_frame_width and visible_frame_height
|
|
+ * should be divided by two for the downscaled plane.
|
|
+ */
|
|
+struct mve_buffer_frame_afbc
|
|
+{
|
|
+ uint32_t plane[ 2 ]; /* Addresses for up to two AFBC planes:
|
|
+ * Top and bottom fields for interlace,
|
|
+ * or standard and optional downscaled output. */
|
|
+ uint32_t alloc_bytes[ 2 ]; /* Size of allocation for each plane */
|
|
+ uint16_t cropx; /* Luma x crop */
|
|
+ uint16_t cropy; /* Luma y crop */
|
|
+ uint16_t afbc_width_in_superblocks[ 2 ]; /* Width of AFBC frame buffer, in units
|
|
+ * of superblock width (32 or 16).
|
|
+ * If dual-downscaled output is chosen,
|
|
+ * this width can be different for the
|
|
+ * two planes.
|
|
+ * For first plane:
|
|
+ * (cropx + frame_width)
|
|
+ * <= superblock_width * afbc_width...
|
|
+ */
|
|
+ uint32_t afbc_params; /* AFBC parameters */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_TILED_BODY (0x00000001) /* Output body blocks should be tiled */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_TILED_HEADER (0x00000002) /* Output headers should be tiled */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_32X8_SUPERBLOCK (0x00000004) /* Super block is 32x8, default is 16x16,
|
|
+ * (only supported as input for encode) */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_DN_FORCE_8BIT (0x00000008) /* For downscaled AFBC plane: It shall
|
|
+ * be 8-bit, even if full-scale is 10-bit */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_DN_FORCE_420 (0x00000010) /* For downscaled AFBC plane: It shall
|
|
+ * be 4:2:0, even if full-scale is 4:2:2 */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_STRIDE_SET_BY_MVE (0x00000020) /* Decode only: By default, the host should
|
|
+ set the afbc_width_in_superblocks. If the
|
|
+ value is zero, or if this bit is set, then
|
|
+ the MVE sets an appropriate value. */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_BLOCK_SPLIT (0x00000040) /* For Superblock layout, block_split mode
|
|
+ should be enabled */
|
|
+};
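
A minimal sketch of validating the width constraint documented above for the first AFBC plane, assuming the superblock width is 32 when the 32x8 superblock flag is set and 16 otherwise; the function name is illustrative.

    /* Illustrative check of the documented constraint for the first plane:
     * (cropx + visible_frame_width) <= superblock_width * afbc_width_in_superblocks[0] */
    static int example_afbc_width_ok(const struct mve_buffer_frame_afbc *afbc,
                                     uint16_t visible_frame_width)
    {
        uint32_t sb_w = (afbc->afbc_params & MVE_BUFFER_FRAME_AFBC_32X8_SUPERBLOCK) ? 32 : 16;

        return (uint32_t)afbc->cropx + visible_frame_width <=
               sb_w * afbc->afbc_width_in_superblocks[0];
    }
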
|
|
+
|
|
+/*
|
|
+ * The FRAME buffer stores the common information for PLANAR and AFBC buffers,
|
|
+ * and a union of PLANAR and AFBC specific information.
|
|
+ */
|
|
+struct mve_buffer_frame
|
|
+{
|
|
+ /* For identification of the buffer, this is not changed by
|
|
+ * the firmware. */
|
|
+ uint64_t host_handle;
|
|
+
|
|
+ /* For matching input buffer with output buffers, the firmware
|
|
+ * copies these values between frame buffers and bitstream buffers. */
|
|
+ uint64_t user_data_tag;
|
|
+
|
|
+ /* Frame buffer flags, see commentary above */
|
|
+ uint32_t frame_flags;
|
|
+ #define MVE_BUFFER_FRAME_FLAG_INTERLACE (0x00000001)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_BOT_FIRST (0x00000002)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_TOP_PRESENT (0x00000004)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_BOT_PRESENT (0x00000008)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_ROTATION_90 (0x00000010)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_ROTATION_180 (0x00000020)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_ROTATION_270 (0x00000030)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_SCALING_MASK (0x000000C0)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_MIRROR_HORI (0x00000100)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_MIRROR_VERT (0x00000200)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_REJECTED (0x00001000)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_CORRUPT (0x00002000)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_DECODE_ONLY (0x00004000)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_REF_FRAME (0x00008000)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_EOS (0x00010000)
|
|
+ /*ARBITRARY_DOWNSCALE*/
|
|
+ #define MVE_BUFFER_FRAME_FLAG_SCALING_MASKX (0xFF000000) //8bit
|
|
+ #define MVE_BUFFER_FRAME_FLAG_SCALING_MASKY (0x00FE0000) //7bit
|
|
+
|
|
+ /* Height (in luma samples) of visible part of frame,
|
|
+ * may be smaller than allocated frame size. */
|
|
+ uint16_t visible_frame_height;
|
|
+
|
|
+ /* Width (in luma samples) of visible part of frame,
|
|
+ * may be smaller than allocated frame size. */
|
|
+ uint16_t visible_frame_width;
|
|
+
|
|
+ /* Color format of buffer */
|
|
+ uint16_t format;
|
|
+ /* format bitfield: */
|
|
+ #define MVE_FORMAT_BF_C (0) /* 3 bits, chroma subsampling */
|
|
+ #define MVE_FORMAT_BF_B (4) /* 4 bits, max bitdepth minus 8 */
|
|
+ #define MVE_FORMAT_BF_N (8) /* 2 bits, number of planes */
|
|
+ #define MVE_FORMAT_BF_V (12) /* 2 bits, format variant */
|
|
+ #define MVE_FORMAT_BF_A (15) /* 1 bit, AFBC bit */
|
|
+ /* formats: */
|
|
+ #define MVE_FORMAT_YUV420_AFBC_8 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_A) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_AFBC_10 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( (10 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_A) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV422_AFBC_8 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_A) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV422_AFBC_10 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
|
|
+ ( (10 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_A) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_I420 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 3 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_NV12 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 2 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_NV21 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 2 << MVE_FORMAT_BF_N) | \
|
|
+ ( 1 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_P010 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( (16 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 2 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_Y0L2 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( (10 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_AQB1 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( (10 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 1 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV422_YUY2 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV422_UYVY ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 1 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV422_Y210 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
|
|
+ ( (16 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_RGBA_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_BGRA_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 1 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_ARGB_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 2 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_ABGR_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 3 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_MBINFO (0x0001) /* only used for debugging */
|
|
+
|
|
+ #define MVE_FORMAT_UNUSED (0x0000)
|
|
+
|
|
+ uint16_t reserved0; /* force 'data' to be 4-byte aligned */
|
|
+
|
|
+ union
|
|
+ {
|
|
+ struct mve_buffer_frame_planar planar;
|
|
+ struct mve_buffer_frame_afbc afbc;
|
|
+ } data;
|
|
+
|
|
+ uint32_t reserved1; /* force size to be multiple of 8 bytes */
|
|
+};
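
A minimal sketch of decoding the 16-bit format value with the MVE_FORMAT_BF_* positions above; the masks follow the bit widths stated in the comments (3, 4, 2, 2 and 1 bits), and the helper names are illustrative.

    /* Illustrative decoding of mve_buffer_frame::format */
    static unsigned int example_format_chroma(uint16_t fmt)   { return (fmt >> MVE_FORMAT_BF_C) & 0x7; }
    static unsigned int example_format_bitdepth(uint16_t fmt) { return ((fmt >> MVE_FORMAT_BF_B) & 0xF) + 8; }
    static unsigned int example_format_nplanes(uint16_t fmt)  { return (fmt >> MVE_FORMAT_BF_N) & 0x3; }
    static unsigned int example_format_is_afbc(uint16_t fmt)  { return (fmt >> MVE_FORMAT_BF_A) & 0x1; }

For example, MVE_FORMAT_YUV420_P010 decodes to 4:2:0 chroma, a 16-bit container, two planes and a clear AFBC bit.
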
|
|
+
|
|
+/* The bitstream buffer stores a number of bitstream bytes */
|
|
+struct mve_buffer_bitstream
|
|
+{
|
|
+ /* For identification of the buffer, this is not changed by
|
|
+ * the firmware. */
|
|
+ uint64_t host_handle;
|
|
+
|
|
+ /* For matching input buffer with output buffers, the firmware
|
|
+ * copies these values between frame buffers and bitstream buffers. */
|
|
+ uint64_t user_data_tag;
|
|
+
|
|
+ /* BufferFlags */
|
|
+ uint32_t bitstream_flags;
|
|
+ #define MVE_BUFFER_BITSTREAM_FLAG_EOS (0x00000001)
|
|
+ #define MVE_BUFFER_BITSTREAM_FLAG_ENDOFFRAME (0x00000010)
|
|
+ #define MVE_BUFFER_BITSTREAM_FLAG_SYNCFRAME (0x00000020)
|
|
+ #define MVE_BUFFER_BITSTREAM_FLAG_CODECCONFIG (0x00000080)
|
|
+ #define MVE_BUFFER_BITSTREAM_FLAG_ENDOFSUBFRAME (0x00000400)
|
|
+
|
|
+ /* Length of allocated buffer */
|
|
+ uint32_t bitstream_alloc_bytes;
|
|
+
|
|
+ /* Byte offset from start to first byte */
|
|
+ uint32_t bitstream_offset;
|
|
+
|
|
+ /* Number of bytes in the buffer */
|
|
+ uint32_t bitstream_filled_len;
|
|
+
|
|
+ /* Pointer to buffer start */
|
|
+ uint32_t bitstream_buf_addr;
|
|
+
|
|
+ /* frame_type. 0:I, 1:P, 2:B, 3:b */
|
|
+ uint8_t frame_type;
|
|
+
|
|
+ /* Pad to force 8-byte alignment */
|
|
+ //uint32_t reserved;
|
|
+ uint8_t reserved[3];
|
|
+};
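
A minimal sketch of filling a bitstream descriptor for a codec-configuration buffer (for example parameter sets); the handle, MVE virtual address and length arguments are placeholders supplied by the caller, and the helper name is illustrative.

    /* Illustrative setup of a bitstream buffer carrying codec configuration data */
    static void example_fill_codec_config(struct mve_buffer_bitstream *b,
                                          uint64_t handle, uint32_t ve_addr,
                                          uint32_t alloc_bytes, uint32_t filled)
    {
        b->host_handle = handle;
        b->user_data_tag = 0;
        b->bitstream_flags = MVE_BUFFER_BITSTREAM_FLAG_CODECCONFIG;
        b->bitstream_alloc_bytes = alloc_bytes;
        b->bitstream_offset = 0;          /* data starts at the beginning of the buffer */
        b->bitstream_filled_len = filled; /* number of valid bytes */
        b->bitstream_buf_addr = ve_addr;  /* MVE virtual address of the data */
        b->frame_type = 0;
    }
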
|
|
+
|
|
+/*
|
|
+ * Define a region in 16x16 units
|
|
+ *
|
|
+ * The region is macroblock positions (x,y) in the range
|
|
+ * mbx_left <= x < mbx_right
|
|
+ * mby_top <= y < mby_bottom
|
|
+ */
|
|
+struct mve_buffer_param_region
|
|
+{
|
|
+ uint16_t mbx_left; /* macroblock x left edge (inclusive) */
|
|
+ uint16_t mbx_right; /* macroblock x right edge (exclusive) */
|
|
+ uint16_t mby_top; /* macroblock y top edge (inclusive) */
|
|
+ uint16_t mby_bottom; /* macroblock y bottom edge (exclusive) */
|
|
+ int16_t qp_delta; /* QP delta value for this region, this
|
|
+ * delta applies to QP values in the ranges:
|
|
+ * H264: 0-51
|
|
+ * HEVC: 0-51
|
|
+ * VP9: 0-255 */
|
|
+ uint16_t reserved;
|
|
+};
|
|
+
|
|
+/* input for encoder,
|
|
+ * the mve_buffer_param_regions buffer accompanies FRAME buffers and carries
|
|
+ * the information for regions of interest.
|
|
+ */
|
|
+struct mve_buffer_param_regions
|
|
+{
|
|
+ uint8_t n_regions; /* Number of regions */
|
|
+ uint8_t reserved[ 3 ];
|
|
+ #define MVE_MAX_FRAME_REGIONS 16
|
|
+ struct mve_buffer_param_region region[ MVE_MAX_FRAME_REGIONS ];
|
|
+};
|
|
+
|
|
+/* the block parameter record specifies the various properties of a quad */
|
|
+struct mve_block_param_record
|
|
+{
|
|
+ uint16_t qp_delta;
|
|
+ /* Bitset of four 4-bit QP delta values for a quad.
|
|
+ * For H.264 and HEVC these are qp delta values in the range -8 to +7.
|
|
+ * For Vp9 these are segment map values in the range 0 to 7.
|
|
+ */
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16 (0)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16_SZ (4)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16 (4)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16_SZ (4)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16 (8)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16_SZ (4)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16 (12)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16_SZ (4)
|
|
+
|
|
+ #define MVE_BLOCK_PARAM_RECORD_VP9_SEGID_TOP_LEFT_16X16 (0)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_VP9_SEGID_TOP_LEFT_16X16_SZ (3)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_VP9_SEGID_TOP_RIGHT_16X16 (4)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_VP9_SEGID_TOP_RIGHT_16X16_SZ (3)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_VP9_SEGID_BOT_LEFT_16X16 (8)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_VP9_SEGID_BOT_LEFT_16X16_SZ (3)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_VP9_SEGID_BOT_RIGHT_16X16 (12)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_VP9_SEGID_BOT_RIGHT_16X16_SZ (3)
|
|
+
|
|
+ uint8_t force;
|
|
+ #define MVE_BLOCK_PARAM_RECORD_FORCE_NONE (0x00)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_FORCE_QP (0x01)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_FORCE_32X32 (0x02)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_FORCE_RB (0x04)
|
|
+
|
|
+ uint8_t reserved;
|
|
+};
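
A minimal sketch of packing the four signed 4-bit QP deltas of a quad using the bit positions above; truncating each value to its two's-complement low nibble, and the helper name itself, are assumptions about how the field is meant to be filled.

    /* Illustrative packing of four signed QP deltas (-8..+7) for one quad */
    static uint16_t example_pack_qp_deltas(int tl, int tr, int bl, int br)
    {
        return (uint16_t)(((tl & 0xF) << MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16)  |
                          ((tr & 0xF) << MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16) |
                          ((bl & 0xF) << MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16)  |
                          ((br & 0xF) << MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16));
    }
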
|
|
+
|
|
+/* block configuration uncompressed rows header. this configures the size of the
|
|
+ * uncompressed body. */
|
|
+struct mve_buffer_general_rows_uncomp_hdr
|
|
+{
|
|
+ uint8_t n_cols_minus1; /* number of quad cols in picture minus 1 */
|
|
+ uint8_t n_rows_minus1; /* number of quad rows in picture minus 1 */
|
|
+ uint8_t reserved[2];
|
|
+};
|
|
+
|
|
+/* block configuration uncompressed rows body. this structure contains an array
|
|
+ * of block parameter records whose length is (n_cols_minus1 + 1) * (n_rows_minus1 + 1)
|
|
+ * elements. therefore the allocation of this structure needs to be dynamic and
|
|
+ * a pointer to the allocated memory should then be assigned to the general
|
|
+ * purpose buffer data pointer
|
|
+ */
|
|
+struct mve_buffer_general_rows_uncomp_body
|
|
+{
|
|
+ /* the size of this array is variable and not necessarily equal to 1.
|
|
+ * therefore the sizeof operator should not be used
|
|
+ */
|
|
+ struct mve_block_param_record bpr[1];
|
|
+};
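
A minimal sketch of sizing the uncompressed-rows body from its header, following the comment above that the record count is (n_cols_minus1 + 1) * (n_rows_minus1 + 1) and that sizeof() of the body struct must not be used; the helper name is illustrative.

    /* Illustrative size calculation for the uncompressed-rows body */
    static uint32_t example_rows_uncomp_body_size(const struct mve_buffer_general_rows_uncomp_hdr *hdr)
    {
        uint32_t n = (uint32_t)(hdr->n_cols_minus1 + 1) * (uint32_t)(hdr->n_rows_minus1 + 1);

        return n * sizeof(struct mve_block_param_record);
    }
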
|
|
+
|
|
+/* input for encoder, block level configurations.
|
|
+ * the row based block configurations can be defined in different formats. they
|
|
+ * are stored in the blk_cfgs union and identified by the blk_cfg_type member.
|
|
+ * these definitions consist of a header and body pair. the header part contains
|
|
+ * configuration information for the body. the body part describes the actual
|
|
+ * layout of the data buffer pointed to by the mve_buffer_general_hdr buffer_ptr.
|
|
+ */
|
|
+struct mve_buffer_general_block_configs
|
|
+{
|
|
+ uint8_t blk_cfg_type;
|
|
+ #define MVE_BLOCK_CONFIGS_TYPE_NONE (0x00)
|
|
+ #define MVE_BLOCK_CONFIGS_TYPE_ROW_UNCOMP (0xff)
|
|
+ uint8_t reserved[3];
|
|
+ union
|
|
+ {
|
|
+ struct mve_buffer_general_rows_uncomp_hdr rows_uncomp;
|
|
+ } blk_cfgs;
|
|
+};
|
|
+
|
|
+/* for the mve_buffer_general_encoder_stats buffer type the body data
|
|
+ * is an array of records of the following format, one record for each
|
|
+ * 32x32 pixel block of the picture. Values that are marked "per CTU"
|
|
+ * are only valid in the first 32x32 block of the 64x64 CTU for HEVC or VP9.
|
|
+ * For H.264 the "per CTU" values are the sum of four 16x16 macroblocks.
|
|
+ * Each record is 12 bytes and must be 32-bit aligned.
|
|
+ */
|
|
+struct mve_block_stats_record_full
|
|
+{
|
|
+ uint8_t intra_count; // number of 8x8 blocks in the CTU that are intra
|
|
+ uint8_t reserved0; // not used
|
|
+ uint16_t bit_estimate; // bit estimate for the CTU
|
|
+ uint16_t luma_mean; // luminance mean
|
|
+ uint16_t luma_cplx; // luma complexity measure (0=flat)
|
|
+ int16_t rmv_x; // rough x motion vector in pixels
|
|
+ int16_t rmv_y; // rough y motion vector in pixels
|
|
+};
|
|
+
|
|
+/* input for encoder, block level statistics
|
|
+ * this buffer is added to the encoder input frame queue, before the
|
|
+ * input frame for which statistics are to be gathered. The buffer is
|
|
+ * filled during frame analysis and returned in the input buffer return
|
|
+ * queue before the input buffer is returned.
|
|
+ */
|
|
+struct mve_buffer_general_encoder_stats
|
|
+{
|
|
+ uint8_t encoder_stats_type;
|
|
+ #define MVE_ENCODER_STATS_TYPE_FULL (0x01)
|
|
+ uint8_t frame_type; // See MVE_FRAME_TYPE_*
|
|
+ #define MVE_FRAME_TYPE_I 0
|
|
+ #define MVE_FRAME_TYPE_P 1
|
|
+ #define MVE_FRAME_TYPE_B 2
|
|
+ uint8_t used_as_reference; // 0=No, 1=Yes
|
|
+ uint8_t qp; // base quantizer used for the frame
|
|
+ // HEVC, H.264: 0-51. VP9: 0-63
|
|
+ uint32_t picture_count; // display order picture count
|
|
+ uint16_t num_cols; // number of columns (each 32 pixels wide)
|
|
+ uint16_t num_rows; // number of rows (each 32 pixels high)
|
|
+ uint32_t ref_pic_count[2]; // display order picture count of references
|
|
+ // unused values are set to zero
|
|
+};
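
A minimal sketch of indexing the per-32x32-block records in the stats body; the row-major ordering and the mapping of buffer_ptr to a host pointer ('body') are assumptions, since the header does not spell them out.

    /* Illustrative accessor for the stats record covering 32x32 block (col, row) */
    static const struct mve_block_stats_record_full *
    example_stats_record(const struct mve_buffer_general_encoder_stats *s,
                         const struct mve_block_stats_record_full *body,
                         uint32_t col, uint32_t row)
    {
        return &body[row * s->num_cols + col]; /* assumes row-major record layout */
    }
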
|
|
+
|
|
+/* input for encoder */
|
|
+struct mve_buffer_param_qp
|
|
+{
|
|
+ /* QP (quantization parameter) for encode.
|
|
+ *
|
|
+ * When used to set fixed QP for encode, with rate control
|
|
+ * disabled, then the valid ranges are:
|
|
+ * H264: 0-51
|
|
+ * HEVC: 0-51
|
|
+ * VP8: 0-63
|
|
+ * VP9: 0-63
|
|
+ * Note: The QP must be set separately for I, P and B frames.
|
|
+ *
|
|
+ * But when this message is used with the regions-feature,
|
|
+ * then the valid ranges are the internal bitstream ranges:
|
|
+ * H264: 0-51
|
|
+ * HEVC: 0-51
|
|
+ * VP8: 0-127
|
|
+ * VP9: 0-255
|
|
+ */
|
|
+ int32_t qp;
|
|
+};
|
|
+
|
|
+/* output from decoder */
|
|
+struct mve_buffer_param_display_size
|
|
+{
|
|
+ uint16_t display_width;
|
|
+ uint16_t display_height;
|
|
+};
|
|
+
|
|
+/* output from decoder, colour information needed for hdr */
|
|
+struct mve_buffer_param_colour_description
|
|
+{
|
|
+ uint32_t flags;
|
|
+ #define MVE_BUFFER_PARAM_COLOUR_FLAG_MASTERING_DISPLAY_DATA_VALID (1)
|
|
+ #define MVE_BUFFER_PARAM_COLOUR_FLAG_CONTENT_LIGHT_DATA_VALID (2)
|
|
+
|
|
+ uint8_t range; /* Unspecified=0, Limited=1, Full=2 */
|
|
+ #define MVE_BUFFER_PARAM_COLOUR_RANGE_UNSPECIFIED (0)
|
|
+ #define MVE_BUFFER_PARAM_COLOUR_RANGE_LIMITED (1)
|
|
+ #define MVE_BUFFER_PARAM_COLOUR_RANGE_FULL (2)
|
|
+
|
|
+ uint8_t colour_primaries; /* see hevc spec. E.3.1 */
|
|
+ uint8_t transfer_characteristics; /* see hevc spec. E.3.1 */
|
|
+ uint8_t matrix_coeff; /* see hevc spec. E.3.1 */
|
|
+
|
|
+ uint16_t mastering_display_primaries_x[3]; /* see hevc spec. D.3.27 */
|
|
+ uint16_t mastering_display_primaries_y[3]; /* see hevc spec. D.3.27 */
|
|
+ uint16_t mastering_white_point_x; /* see hevc spec. D.3.27 */
|
|
+ uint16_t mastering_white_point_y; /* see hevc spec. D.3.27 */
|
|
+ uint32_t max_display_mastering_luminance; /* see hevc spec. D.3.27 */
|
|
+ uint32_t min_display_mastering_luminance; /* see hevc spec. D.3.27 */
|
|
+
|
|
+ uint32_t max_content_light_level; /* see hevc spec. D.3.35 */
|
|
+ uint32_t avg_content_light_level; /* see hevc spec. D.3.35 */
|
|
+
|
|
+ uint8_t video_format_present_flag;
|
|
+ uint8_t video_format;
|
|
+ uint8_t aspect_ratio_info_present_flag;
|
|
+ uint8_t aspect_ratio_idc;
|
|
+ uint8_t timing_flag_info_present_flag;
|
|
+ uint16_t sar_width;
|
|
+ uint16_t sar_height;
|
|
+ uint32_t num_units_in_tick;
|
|
+ uint32_t time_scale;
|
|
+
|
|
+ uint32_t reserved[2];
|
|
+};
|
|
+
|
|
+struct mve_buffer_param_sei_user_data_unregistered
|
|
+{
|
|
+ uint8_t flags;
|
|
+ #define MVE_BUFFER_PARAM_USER_DATA_UNREGISTERED_VALID (1)
|
|
+ uint8_t uuid[16];
|
|
+ char user_data[256 - 35];
|
|
+ uint8_t user_data_len;
|
|
+
|
|
+ uint8_t reserved[5];
|
|
+};
|
|
+
|
|
+/* output from decoder see hevc spec. D.3.3 */
|
|
+struct mve_buffer_param_frame_field_info
|
|
+{
|
|
+ uint8_t pic_struct;
|
|
+ uint8_t source_scan_type;
|
|
+ uint8_t duplicate_flag;
|
|
+ uint8_t reserved;
|
|
+};
|
|
+
|
|
+/* output from decoder, VC-1 specific feature only relevant
|
|
+ * if using AFBC output
|
|
+ */
|
|
+struct mve_buffer_param_range_map
|
|
+{
|
|
+ uint8_t luma_map_enabled;
|
|
+ uint8_t luma_map_value;
|
|
+ uint8_t chroma_map_enabled;
|
|
+ uint8_t chroma_map_value;
|
|
+};
|
|
+
|
|
+/* input for encoder */
|
|
+struct mve_buffer_param_rate_control
|
|
+{
|
|
+ uint32_t rate_control_mode;
|
|
+ #define MVE_OPT_RATE_CONTROL_MODE_OFF (0)
|
|
+ #define MVE_OPT_RATE_CONTROL_MODE_STANDARD (1)
|
|
+ #define MVE_OPT_RATE_CONTROL_MODE_VARIABLE (2)
|
|
+ #define MVE_OPT_RATE_CONTROL_MODE_CONSTANT (3)
|
|
+ #define MVE_OPT_RATE_CONTROL_MODE_C_VARIABLE (4)
|
|
+ uint32_t target_bitrate; /* in bits per second */
|
|
+ uint32_t maximum_bitrate; /* in bits per second */
|
|
+};
|
|
+
|
|
+/* input for encoder */
|
|
+struct mve_buffer_param_rate_control_qp_range
|
|
+{
|
|
+ int32_t qp_min;
|
|
+ int32_t qp_max;
|
|
+};
|
|
+
|
|
+/* input for encoder, see hevc spec. D.3.16 */
|
|
+struct mve_buffer_param_frame_packing
|
|
+{
|
|
+ uint32_t flags;
|
|
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_QUINCUNX_SAMPLING (1)
|
|
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_SPATIAL_FLIPPING (2)
|
|
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_FRAME0_FLIPPED (4)
|
|
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_FIELD_VIEWS (8)
|
|
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_CURRENT_FRAME_IS_FRAME0 (16)
|
|
+
|
|
+ uint8_t frame_packing_arrangement_type;
|
|
+ uint8_t content_interpretation_type;
|
|
+
|
|
+ uint8_t frame0_grid_position_x;
|
|
+ uint8_t frame0_grid_position_y;
|
|
+ uint8_t frame1_grid_position_x;
|
|
+ uint8_t frame1_grid_position_y;
|
|
+
|
|
+ uint8_t reserved[ 2 ];
|
|
+};
|
|
+
|
|
+struct mve_buffer_param_rectangle
|
|
+{
|
|
+ uint16_t x_left; /* pixel x left edge (inclusive) */
|
|
+ uint16_t x_right; /* pixel x right edge (exclusive) */
|
|
+ uint16_t y_top; /* pixel y top edge (inclusive) */
|
|
+ uint16_t y_bottom; /* pixel y bottom edge (exclusive) */
|
|
+};
|
|
+
|
|
+/* input for encoder,
|
|
+ * indicate which parts of the source picture have changed.
|
|
+ * The encoder can (optionally) use this information to
|
|
+ * reduce memory bandwidth.
|
|
+ *
|
|
+ * n_rectangles=0 indicates the source picture is unchanged.
|
|
+ *
|
|
+ * This parameter only applies to the picture that immediately
|
|
+ * follows (and not to subsequent ones).
|
|
+ */
|
|
+struct mve_buffer_param_change_rectangles
|
|
+{
|
|
+ uint8_t n_rectangles; /* Number of rectangles */
|
|
+ uint8_t reserved[3];
|
|
+ #define MVE_MAX_FRAME_CHANGE_RECTANGLES 2
|
|
+ struct mve_buffer_param_rectangle rectangles[MVE_MAX_FRAME_CHANGE_RECTANGLES];
|
|
+};
|
|
+
|
|
+/* input for VP9 encoder,
|
|
+ * specify the qp deltas for each segment map index.
|
|
+ * These are intended to be used with block configs only.
|
|
+ */
|
|
+struct mve_buffer_param_vp9_segmap
|
|
+{
|
|
+ #define VP9SPEC_MAX_SEGMENTS 8
|
|
+ int8_t qp_delta[VP9SPEC_MAX_SEGMENTS]; /* Qp delta to use for each segment map region */
|
|
+ int8_t num_segments; /* Number of active segments (to set coding probability) */
|
|
+};
|
|
+
|
|
+/* Parameters that are sent in the same communication channels
|
|
+ * as the buffers. A parameter applies to all subsequent buffers.
|
|
+ * Some types are only valid for decode, and some only for encode.
|
|
+ */
|
|
+struct mve_buffer_param
|
|
+{
|
|
+ uint32_t type; /* Extra data: */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_QP (2) /* qp */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_REGIONS (3) /* regions */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_DISPLAY_SIZE (5) /* display_size */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_RANGE_MAP (6) /* range_map */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_FRAME_RATE (9) /* arg, in frames per second, as a
|
|
+ * fixed point Q16 value, for example
|
|
+ * 0x001e0000 == 30.0 fps */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL (10) /* rate_control */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_QP_I (12) /* qp for I frames, when no rate control */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_QP_P (13) /* qp for P frames, when no rate control */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_QP_B (14) /* qp for B frames, when no rate control */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_COLOUR_DESCRIPTION (15) /* colour_description */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_FRAME_PACKING (16) /* frame_packing */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_FRAME_FIELD_INFO (17) /* frame_field_info */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_GOP_RESET (18) /* no extra data */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_DPB_HELD_FRAMES (19) /* arg, number of output buffers that are
|
|
+ * complete and held by firmware in the
|
|
+ * DPB for reordering purposes.
|
|
+ * Valid after the next frame is output */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_CHANGE_RECTANGLES (20) /* change rectangles */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_QP_RANGE (21) /* rate_control_qp_range */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_HRD_BUF_SIZE (23) /* arg */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_VP9_SEGMAP (24) /* VP9 segment map settings */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_QP_RANGE_I (25) /* special range for I frames,
|
|
+ * rate_control_qp_range */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_SEI_USER_DATA_UNREGISTERED (26) /* sei user_data_unregistered */
|
|
+
|
|
+ union
|
|
+ {
|
|
+ uint32_t arg; /* some parameters only need a uint32_t as argument */
|
|
+ struct mve_buffer_param_qp qp;
|
|
+ struct mve_buffer_param_regions regions;
|
|
+ struct mve_buffer_param_display_size display_size;
|
|
+ struct mve_buffer_param_range_map range_map;
|
|
+ struct mve_buffer_param_rate_control rate_control;
|
|
+ struct mve_buffer_param_rate_control_qp_range rate_control_qp_range;
|
|
+ struct mve_buffer_param_colour_description colour_description;
|
|
+ struct mve_buffer_param_frame_packing frame_packing;
|
|
+ struct mve_buffer_param_frame_field_info frame_field_info;
|
|
+ struct mve_buffer_param_change_rectangles change_rectangles;
|
|
+ struct mve_buffer_param_vp9_segmap vp9_segmap;
|
|
+ struct mve_buffer_param_sei_user_data_unregistered user_data_unregistered;
|
|
+ } data;
|
|
+};
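
A minimal sketch of building a frame-rate parameter using the Q16 fixed-point encoding noted above (0x001e0000 == 30.0 fps); the helper name is illustrative.

    /* Illustrative construction of a frame-rate buffer parameter */
    static void example_set_frame_rate(struct mve_buffer_param *p, uint32_t fps_q16)
    {
        p->type = MVE_BUFFER_PARAM_TYPE_FRAME_RATE;
        p->data.arg = fps_q16; /* Q16 fixed point, e.g. 30 << 16 for 30.0 fps */
    }

For instance, example_set_frame_rate(&param, 30 << 16) would signal 30.0 fps.
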
|
|
+
|
|
+
|
|
+/* The general purpose buffer header stores the common fields of an
|
|
+ * mve_buffer_general. it contains the pointer to the data buffer that contains
|
|
+ * the general purpose data
|
|
+ */
|
|
+struct mve_buffer_general_hdr
|
|
+{
|
|
+ /* For identification of the buffer, this is not changed by the firmware. */
|
|
+ uint64_t host_handle;
|
|
+
|
|
+ /* this depends upon the type of the general purpose buffer */
|
|
+ uint64_t user_data_tag;
|
|
+
|
|
+ /* pointer to the buffer containing the general purpose data. the format
|
|
+ * of this data is defined by the configuration in the mve_buffer_general */
|
|
+ uint32_t buffer_ptr;
|
|
+
|
|
+ /* size of the buffer pointed to by buffer_ptr */
|
|
+ uint32_t buffer_size;
|
|
+
|
|
+ /* selects the type of semantics to use for the general purpose buffer. it
|
|
+ * tags (or discriminates) the union config member in mve_buffer_general
|
|
+ */
|
|
+ uint16_t type; /* Extra data: */
|
|
+ #define MVE_BUFFER_GENERAL_TYPE_BLOCK_CONFIGS (1) /* block_configs */
|
|
+ #define MVE_BUFFER_GENERAL_TYPE_ENCODER_STATS (4) /* encoder_stats */
|
|
+
|
|
+ /* size of the mve_buffer_general config member */
|
|
+ uint16_t config_size;
|
|
+
|
|
+ /* pad to force 8-byte alignment */
|
|
+ uint32_t reserved;
|
|
+};
|
|
+
|
|
+/* The general purpose buffer consists of a header and a configuration. The
|
|
+ * header contains a pointer to a buffer whose format is described by the
|
|
+ * configuration. The type of configuration is indicated by the type value in
|
|
+ * the header. N.B. In use, the size of the config part of this structure is
|
|
+ * defined in the header and is not necessarily equal to that returned by the
|
|
+ * sizeof() operator. This allows more size-efficient communication between
|
|
+ * the host and firmware.
|
|
+ */
|
|
+struct mve_buffer_general
|
|
+{
|
|
+ struct mve_buffer_general_hdr header;
|
|
+
|
|
+ /* used to describe the configuration of the general purpose buffer data
|
|
+ * pointed to be buffer_ptr
|
|
+ */
|
|
+ union
|
|
+ {
|
|
+ struct mve_buffer_general_block_configs block_configs;
|
|
+ struct mve_buffer_general_encoder_stats encoder_stats;
|
|
+ } config;
|
|
+};
|
|
+
|
|
+#ifdef __cplusplus
|
|
+}
|
|
+#endif
|
|
+
|
|
+#endif /* __FW_INCLUDE__MVE_PROTOCOL_DEF_H__ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/external/fw_v3/mve_protocol_def.h b/drivers/media/platform/spacemit/vpu_k1x/external/fw_v3/mve_protocol_def.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/external/fw_v3/mve_protocol_def.h
|
|
@@ -0,0 +1,1741 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/*
|
|
+ * Copyright:
|
|
+ * ----------------------------------------------------------------------------
|
|
+ * This confidential and proprietary software may be used only as authorized
|
|
+ * by a licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * The entire notice above must be reproduced on all authorized copies and
|
|
+ * copies may only be made to the extent permitted by a licensing agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ * ----------------------------------------------------------------------------
|
|
+ */
|
|
+#ifndef __FW_INCLUDE__MVE_PROTOCOL_DEF_H__
|
|
+#define __FW_INCLUDE__MVE_PROTOCOL_DEF_H__
|
|
+
|
|
+#ifdef __cplusplus
|
|
+extern "C" {
|
|
+#endif
|
|
+
|
|
+#ifdef __KERNEL__
|
|
+#include <linux/types.h>
|
|
+#else
|
|
+#include <stdint.h>
|
|
+#endif
|
|
+
|
|
+/*****************************************************************************
|
|
+ *
|
|
+ * Communication protocol between the host/driver and the MVE firmware,
|
|
+ * the 'host interface'.
|
|
+ *
|
|
+ * MVE == LINLON Video Engine
|
|
+ *
|
|
+ * Protocol version 3.3
|
|
+ *
|
|
+ * Note: Message structs may be expanded in the future; the host should
|
|
+ * use the 'size' of the message to determine how many bytes to
|
|
+ * read from the message queue, rather than a sizeof(struct).
|
|
+ *
|
|
+ ****************************************************************************/
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/*****************************************************************************
|
|
+ *
|
|
+ * Virtual memory regions
|
|
+ *
|
|
+ * ..._ADDR_BEGIN gives the starting virtual address of the region,
|
|
+ * and ..._ADDR_END the (non-inclusive) ending address, such that
|
|
+ * the size of the region is obtained with the subtraction
|
|
+ * (..._ADDR_END - ..._ADDR_BEGIN).
|
|
+ *
|
|
+ ****************************************************************************/
|
|
+
|
|
+/* Memory region for first firmware instance */
|
|
+#define MVE_MEM_REGION_FW_INSTANCE0_ADDR_BEGIN (0x00000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE0_ADDR_END (0x000FFFFFu + 1)
|
|
+
|
|
+/* Memory regions for other firmware instances */
|
|
+#define MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN (0x01000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE1_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+#define MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN (0x02000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE2_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+#define MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN (0x03000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE3_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+#define MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN (0x04000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE4_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+#define MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN (0x05000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE5_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+#define MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN (0x06000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE6_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+#define MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN (0x07000000u)
|
|
+#define MVE_MEM_REGION_FW_INSTANCE7_ADDR_END \
|
|
+ (MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
|
|
+
|
|
+/*
|
|
+ * Areas for communication between host and MVE are placed in the interval
|
|
+ * 0x10079000 - 0x1007FFFF, see special defines further down.
|
|
+ */
|
|
+
|
|
+/* PROTECTED virtual memory region */
|
|
+#define MVE_MEM_REGION_PROTECTED_ADDR_BEGIN (0x20000000u)
|
|
+#define MVE_MEM_REGION_PROTECTED_ADDR_END (0x6FFFFFFFu + 1)
|
|
+
|
|
+/* FRAMEBUF virtual memory region */
|
|
+#define MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN (0x70000000u)
|
|
+#define MVE_MEM_REGION_FRAMEBUF_ADDR_END (0xEFFFFFFFu + 1)
|
|
+
|
|
+/* 0xF0000000 - 0xFFFFFFFF is used internally in MVE */
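
Two small illustrations derived from the defines above, assuming nothing beyond them: firmware instance n starts at n * 0x01000000, and a region's size is its ..._ADDR_END minus its ..._ADDR_BEGIN (END being non-inclusive). The helper and macro names are illustrative.

    /* Illustrative: base address of firmware instance n (n in 0..7) */
    static uint32_t example_fw_instance_base(unsigned int n)
    {
        return (uint32_t)(n * MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN);
    }

    /* Illustrative: size of the FRAMEBUF region */
    #define EXAMPLE_MVE_FRAMEBUF_REGION_SIZE \
        (MVE_MEM_REGION_FRAMEBUF_ADDR_END - MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN)
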
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/*****************************************************************************
|
|
+ *
|
|
+ * Communication queues between HOST/DRIVER and MVE
|
|
+ *
|
|
+ * Address for queue for messages in to MVE,
|
|
+ * one struct mve_comm_area_host located here
|
|
+ *
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define MVE_COMM_MSG_INQ_ADDR (0x10079000u)
|
|
+
|
|
+/* Address for queue for messages out from MVE,
|
|
+ * one struct mve_comm_area_mve located here
|
|
+ */
|
|
+#define MVE_COMM_MSG_OUTQ_ADDR (0x1007A000u)
|
|
+
|
|
+/* Address for queue for input buffers in to MVE,
|
|
+ * one struct mve_comm_area_host located here
|
|
+ */
|
|
+#define MVE_COMM_BUF_INQ_ADDR (0x1007B000u)
|
|
+
|
|
+/* Address for queue for input buffers returned from MVE,
|
|
+ * one struct mve_comm_area_mve located here
|
|
+ */
|
|
+#define MVE_COMM_BUF_INRQ_ADDR (0x1007C000u)
|
|
+
|
|
+/* Address for queue for output buffers in to MVE,
|
|
+ * one struct mve_comm_area_host located here
|
|
+ */
|
|
+#define MVE_COMM_BUF_OUTQ_ADDR (0x1007D000u)
|
|
+
|
|
+/* Address for queue for output buffers returned from MVE,
|
|
+ * one struct mve_comm_area_mve located here
|
|
+ */
|
|
+#define MVE_COMM_BUF_OUTRQ_ADDR (0x1007E000u)
|
|
+
|
|
+/* One struct mve_rpc_communication_area located here */
|
|
+#define MVE_COMM_RPC_ADDR (0x1007F000u)
|
|
+
|
|
+/* Address for ram_print buffer in FW */
|
|
+#define MVE_FW_PRINT_RAM_ADDR (0x10100000u)
|
|
+#define MVE_FW_PRINT_RAM_SIZE (0x80000u)
|
|
+
|
|
+/* One page of memory (4 kB) is used for each queue,
|
|
+ * so maximum 1024 words, but need room for some counters as well,
|
|
+ * see structs mve_comm_area_mve and mve_comm_area_host below.
|
|
+ */
|
|
+#define MVE_COMM_QUEUE_SIZE_IN_WORDS 1020
|
|
+
|
|
+/* This is the part of the message area that is written by host. */
|
|
+struct mve_comm_area_host
|
|
+{
|
|
+ volatile uint16_t out_rpos;
|
|
+ volatile uint16_t in_wpos;
|
|
+ volatile uint32_t reserved[ 3 ];
|
|
+ /*
|
|
+ * Queue of messages to MVE, each block of data prefixed with
|
|
+ * a mve_msg_header
|
|
+ */
|
|
+ volatile uint32_t in_data[ MVE_COMM_QUEUE_SIZE_IN_WORDS ];
|
|
+};
|
|
+
|
|
+/* This is the part of the message area that is written by MVE. */
|
|
+struct mve_comm_area_mve
|
|
+{
|
|
+ volatile uint16_t out_wpos;
|
|
+ volatile uint16_t in_rpos;
|
|
+ volatile uint32_t reserved[ 3 ];
|
|
+ /*
|
|
+ * Queue of messages to host, each block of data prefixed with
|
|
+ * a mve_msg_header
|
|
+ */
|
|
+ volatile uint32_t out_data[ MVE_COMM_QUEUE_SIZE_IN_WORDS ];
|
|
+};
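
A minimal sketch of pushing one message (already serialized into 32-bit words) into the host-to-MVE queue. The word-granular ring semantics, wraparound at MVE_COMM_QUEUE_SIZE_IN_WORDS and the free-space rule are inferred from the field names; a real driver also needs memory barriers and a doorbell write, which are omitted here.

    /* Illustrative enqueue of nwords queue words into the host->MVE message ring */
    static int example_post_msg(struct mve_comm_area_host *host,
                                const struct mve_comm_area_mve *mve,
                                const uint32_t *words, uint16_t nwords)
    {
        uint16_t wpos = host->in_wpos;
        uint16_t rpos = mve->in_rpos;
        uint16_t used = (wpos >= rpos) ? (wpos - rpos)
                                       : (MVE_COMM_QUEUE_SIZE_IN_WORDS - rpos + wpos);
        uint16_t i;

        if (MVE_COMM_QUEUE_SIZE_IN_WORDS - used <= nwords)
            return -1; /* not enough room, keep one word free */

        for (i = 0; i < nwords; i++) {
            host->in_data[wpos] = words[i];
            wpos = (wpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
        }

        host->in_wpos = wpos; /* publish only after the payload is written */
        return 0;
    }
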
|
|
+
|
|
+#define MVE_RPC_AREA_SIZE_IN_WORDS 256
|
|
+#define MVE_RPC_DATA_SIZE_IN_WORDS (MVE_RPC_AREA_SIZE_IN_WORDS - 3)
|
|
+union mve_rpc_params
|
|
+{
|
|
+ volatile uint32_t data[ MVE_RPC_DATA_SIZE_IN_WORDS ];
|
|
+ struct
|
|
+ {
|
|
+ char string[ MVE_RPC_DATA_SIZE_IN_WORDS * 4 ];
|
|
+ } debug_print;
|
|
+ struct
|
|
+ {
|
|
+ uint32_t size;
|
|
+ uint32_t max_size;
|
|
+ uint8_t region; /* Memory region selection */
|
|
+ #define MVE_MEM_REGION_PROTECTED (0)
|
|
+ #define MVE_MEM_REGION_OUTBUF (1)
|
|
+ #define MVE_MEM_REGION_FRAMEBUF (MVE_MEM_REGION_OUTBUF)
|
|
+
|
|
+ /* The newly allocated memory must be placed
|
|
+ * on (at least) a 2^(log2_alignment) boundary
|
|
+ */
|
|
+ uint8_t log2_alignment;
|
|
+ } mem_alloc;
|
|
+ struct
|
|
+ {
|
|
+ uint32_t ve_pointer;
|
|
+ uint32_t new_size;
|
|
+ } mem_resize;
|
|
+ struct
|
|
+ {
|
|
+ uint32_t ve_pointer;
|
|
+ } mem_free;
|
|
+};
|
|
+
|
|
+struct mve_rpc_communication_area
|
|
+{
|
|
+ volatile uint32_t state;
|
|
+ #define MVE_RPC_STATE_FREE (0)
|
|
+ #define MVE_RPC_STATE_PARAM (1)
|
|
+ #define MVE_RPC_STATE_RETURN (2)
|
|
+ volatile uint32_t call_id;
|
|
+ #define MVE_RPC_FUNCTION_DEBUG_PRINTF (1)
|
|
+ #define MVE_RPC_FUNCTION_MEM_ALLOC (2)
|
|
+ #define MVE_RPC_FUNCTION_MEM_RESIZE (3)
|
|
+ #define MVE_RPC_FUNCTION_MEM_FREE (4)
|
|
+ volatile uint32_t size;
|
|
+ union mve_rpc_params params;
|
|
+};
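
A minimal sketch of servicing one RPC request on the host side; the FREE/PARAM/RETURN handshake follows the state defines above, while the notification mechanism and the actual logging of the debug string are left as assumptions.

    /* Illustrative handling of a pending RPC request (debug printf only) */
    static void example_service_rpc(struct mve_rpc_communication_area *rpc)
    {
        if (rpc->state != MVE_RPC_STATE_PARAM)
            return; /* nothing pending */

        if (rpc->call_id == MVE_RPC_FUNCTION_DEBUG_PRINTF) {
            /* rpc->size bytes of text are available in
             * rpc->params.debug_print.string; log them here. */
        }

        rpc->size = 0;                     /* printf has no return data */
        rpc->state = MVE_RPC_STATE_RETURN; /* hand the area back to the firmware */
    }
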
|
|
+
|
|
+struct mve_fw_ram_print_head_aera
|
|
+{
|
|
+ volatile uint32_t rd_cnt;
|
|
+ volatile uint32_t reserved0[15];
|
|
+
|
|
+ volatile uint32_t flag;
|
|
+ volatile uint32_t index;
|
|
+ volatile uint32_t wr_cnt;
|
|
+ volatile uint32_t reserved1[13];
|
|
+};
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/*********************************************************************
|
|
+ *
|
|
+ * Message codes
|
|
+ *
|
|
+ *********************************************************************/
|
|
+
|
|
+/* Messages consist of one struct mve_msg_header, possibly followed
|
|
+ * by extra data.
|
|
+ */
|
|
+struct mve_msg_header
|
|
+{
|
|
+ uint16_t code;
|
|
+ /* REQUESTs are messages from the
|
|
+ * host/driver to the firmware: Code: Extra data in message: */
|
|
+ #define MVE_REQUEST_CODE_GO (1001) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_STOP (1002) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_INPUT_FLUSH (1003) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_OUTPUT_FLUSH (1004) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_SWITCH (1005) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_PING (1006) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_DUMP (1008) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_JOB (1009) /* struct mve_request_job */
|
|
+ #define MVE_REQUEST_CODE_SET_OPTION (1010) /* struct mve_request_set_option (variable size) */
|
|
+ #define MVE_REQUEST_CODE_RELEASE_REF_FRAME (1011) /* struct mve_request_release_ref_frame */
|
|
+ #define MVE_REQUEST_CODE_IDLE_ACK (1012) /* no extra data */
|
|
+ #define MVE_REQUEST_CODE_DEBUG (1013) /* level: 0 for disable, refer to fw_log_level */
|
|
+ /* RESPONSEs are messages from
|
|
+ * the firmware to the host: */
|
|
+ #define MVE_RESPONSE_CODE_SWITCHED_IN (2001) /* struct mve_response_switched_in */
|
|
+ #define MVE_RESPONSE_CODE_SWITCHED_OUT (2002) /* struct mve_response_switched_out */
|
|
+ #define MVE_RESPONSE_CODE_SET_OPTION_CONFIRM (2003) /* no extra data */
|
|
+ #define MVE_RESPONSE_CODE_JOB_DEQUEUED (2004) /* struct mve_response_job_dequeued */
|
|
+ #define MVE_RESPONSE_CODE_INPUT (2005) /* no extra data, but buffer placed in buffer queue */
|
|
+ #define MVE_RESPONSE_CODE_OUTPUT (2006) /* no extra data, but buffer placed in buffer queue */
|
|
+ #define MVE_RESPONSE_CODE_INPUT_FLUSHED (2007) /* no extra data */
|
|
+ #define MVE_RESPONSE_CODE_OUTPUT_FLUSHED (2008) /* no extra data */
|
|
+ #define MVE_RESPONSE_CODE_PONG (2009) /* no extra data */
|
|
+ #define MVE_RESPONSE_CODE_ERROR (2010) /* struct mve_response_error */
|
|
+ #define MVE_RESPONSE_CODE_STATE_CHANGE (2011) /* struct mve_response_state_change */
|
|
+ #define MVE_RESPONSE_CODE_DUMP (2012) /* no extra data */
|
|
+ #define MVE_RESPONSE_CODE_IDLE (2013) /* no extra data */
|
|
+ #define MVE_RESPONSE_CODE_FRAME_ALLOC_PARAM (2014) /* struct mve_response_frame_alloc_parameters */
|
|
+ #define MVE_RESPONSE_CODE_SEQUENCE_PARAMETERS (2015) /* struct mve_response_sequence_parameters */
|
|
+ #define MVE_RESPONSE_CODE_EVENT (2016) /* struct mve_response_event (variable size) */
|
|
+ #define MVE_RESPONSE_CODE_SET_OPTION_FAIL (2017) /* struct mve_response_set_option_failed */
|
|
+ #define MVE_RESPONSE_CODE_REF_FRAME_UNUSED (2018) /* struct mve_response_ref_frame_unused */
|
|
+ #define MVE_RESPONSE_CODE_DEBUG (2019) /* no extra data */
|
|
+ /* BUFFERs are sent from host to firmware,
|
|
+ * and then return at some time: */
|
|
+ #define MVE_BUFFER_CODE_FRAME (3001) /* struct mve_buffer_frame */
|
|
+ #define MVE_BUFFER_CODE_BITSTREAM (3002) /* struct mve_buffer_bitstream */
|
|
+ #define MVE_BUFFER_CODE_PARAM (3003) /* struct mve_buffer_param */
|
|
+ #define MVE_BUFFER_CODE_GENERAL (3004) /* struct mve_buffer_general */
|
|
+
|
|
+ uint16_t size; /* size in bytes of trailing data, 0 if none */
|
|
+};
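
A minimal sketch of draining one message from the MVE-to-host queue. Per the protocol note at the top of this header, the payload length comes from mve_msg_header.size rather than sizeof() of a message struct. The packing of the header into a single queue word (code in the low half on a little-endian host) and the ring handling are assumptions inferred from the comm-area structures above.

    /* Illustrative read of one message from the MVE->host ring */
    static int example_read_msg(struct mve_comm_area_mve *mve,
                                struct mve_comm_area_host *host,
                                struct mve_msg_header *hdr,
                                uint32_t *payload, uint32_t payload_max_words)
    {
        uint16_t rpos = host->out_rpos;
        uint32_t word, nwords, i;

        if (rpos == mve->out_wpos)
            return 0; /* queue empty */

        /* assume the header occupies one queue word: code low half, size high half */
        word = mve->out_data[rpos];
        hdr->code = (uint16_t)(word & 0xffff);
        hdr->size = (uint16_t)(word >> 16);
        rpos = (rpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;

        nwords = ((uint32_t)hdr->size + 3) / 4; /* round the byte count up to words */
        if (nwords > payload_max_words)
            return -1;

        for (i = 0; i < nwords; i++) {
            payload[i] = mve->out_data[rpos];
            rpos = (rpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
        }

        host->out_rpos = rpos; /* hand the consumed words back to the firmware */
        return 1;
    }
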
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+enum fw_log_level{
|
|
+ FW_LOG_ERROR = 1,
|
|
+ FW_LOG_WARNING,
|
|
+ FW_LOG_INFO,
|
|
+ FW_LOG_DEBUG,
|
|
+ FW_LOG_VERBOSE,
|
|
+ FW_LOG_MAX
|
|
+};
|
|
+
|
|
+
|
|
+/*********************************************************************
|
|
+ *
|
|
+ * REQUESTs are messages from the host to the firmware
|
|
+ *
|
|
+ * Some of the MVE_REQUEST_CODE_ codes are followed by one of the
|
|
+ * structs below.
|
|
+ *
|
|
+ *********************************************************************/
|
|
+
|
|
+struct mve_request_job
|
|
+{
|
|
+ uint16_t cores; /* >= 1, number of cores to use, must match request to HW scheduler */
|
|
+ uint16_t frames; /* number of frames to process, zero means infinite */
|
|
+ uint32_t flags; /* can be zero */
|
|
+ #define MVE_JOB_FLAG_DISABLE_BNDMGR (0x01)
|
|
+};
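
A minimal sketch of filling a job request for a single core with no frame limit; the values are illustrative, and 'cores' must still match whatever was requested from the hardware scheduler, as the comment above notes.

    /* Illustrative job request: one core, run until further notice */
    static void example_fill_job(struct mve_request_job *job)
    {
        job->cores = 1;  /* must match the request made to the HW scheduler */
        job->frames = 0; /* zero means an infinite number of frames */
        job->flags = 0;  /* no special flags */
    }
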
|
|
+
|
|
+struct mve_request_set_option
|
|
+{
|
|
+ uint32_t index;
|
|
+ #define MVE_SET_OPT_INDEX_NALU_FORMAT (1) /* see arg, MVE_OPT_NALU_FORMAT_ */
|
|
+ #define MVE_SET_OPT_INDEX_STREAM_ESCAPING (2) /* arg=1 to enable (default), arg=0 to disable */
|
|
+ #define MVE_SET_OPT_INDEX_PROFILE_LEVEL (3) /* data.profile_level */
|
|
+ #define MVE_SET_OPT_INDEX_HOST_PROTOCOL_PRINTS (4) /* arg=1 to enable, arg=0 to disable (default) */
|
|
+ #define MVE_SET_OPT_INDEX_PROFILING (5) /* arg=1 to enable, arg=0 to disable (default) */
|
|
+ #define MVE_SET_OPT_INDEX_DISABLE_FEATURES (6) /* see arg, MVE_OPT_DISABLE_FEATURE_ */
|
|
+ #define MVE_SET_OPT_INDEX_IGNORE_STREAM_HEADERS (7) /* decode, arg=1 to enable,
|
|
+ * arg=0 to disable (default) */
|
|
+ #define MVE_SET_OPT_INDEX_FRAME_REORDERING (8) /* decode, arg=1 to enable (default),
|
|
+ * arg=0 to disable */
|
|
+ #define MVE_SET_OPT_INDEX_INTBUF_SIZE (9) /* decode, arg = suggested limit of intermediate
|
|
+ * buffer allocation */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_P_FRAMES (16) /* encode, arg = nPFrames */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_B_FRAMES (17) /* encode, arg = number of B frames */
|
|
+ #define MVE_SET_OPT_INDEX_GOP_TYPE (18) /* encode, see arg */
|
|
+ #define MVE_SET_OPT_INDEX_INTRA_MB_REFRESH (19) /* encode, arg */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_CONSTR_IPRED (20) /* encode, arg = 0 or 1 */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_ENTROPY_SYNC (21) /* encode, arg = 0 or 1 */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_TEMPORAL_MVP (22) /* encode, arg = 0 or 1 */
|
|
+ #define MVE_SET_OPT_INDEX_TILES (23) /* encode, data.tiles */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_MIN_LUMA_CB_SIZE (24) /* HEVC encode, arg = 8 or 16,
|
|
+ * for sizes 8x8 or 16x16 */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_MB_TYPE_ENABLE (25) /* encode, see arg */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_MB_TYPE_DISABLE (26) /* encode, see arg */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_H264_CABAC (27) /* encode, arg = 0 or 1, enabled by default */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_SLICE_SPACING (28) /* encode, arg = suggested number of
|
|
+ * CTUs/macroblocks in a slice */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_VP9_PROB_UPDATE (30) /* VP9 encode, see arg */
|
|
+ #define MVE_SET_OPT_INDEX_RESYNC_INTERVAL (31) /* JPEG encode, arg = nRestartInterval
|
|
+ * = nResynchMarkerSpacing */
|
|
+ #define MVE_SET_OPT_INDEX_HUFFMAN_TABLE (32) /* JPEG encode, data.huffman_table */
|
|
+ #define MVE_SET_OPT_INDEX_QUANT_TABLE (33) /* JPEG encode, data.quant_table */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_EXPOSE_REF_FRAMES (34) /* encode debug, arg = 0 or 1,
|
|
+ * disabled by default */
|
|
+ #define MVE_SET_OPT_INDEX_MBINFO_OUTPUT (35) /* encode, arg=1 to enable,
|
|
+ * arg=0 to disable (default) */
|
|
+ #define MVE_SET_OPT_INDEX_MV_SEARCH_RANGE (36) /* encode, data.motion_vector_search_range */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_STREAM_BITDEPTH (38) /* encode, data.bitdepth, to set other bitdepth
|
|
+ * of encoded stream than of input frames */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_STREAM_CHROMA_FORMAT (39) /* encode, arg, to set other chroma format of
|
|
+ * encoded stream than of input frames */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_RGB_TO_YUV_MODE (40) /* encode, arg, select which way RGB is converted
|
|
+ * to YUV before encoding */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_BANDWIDTH_LIMIT (41) /* encode, arg, the maximum bandwidth limit defined
|
|
+ * by host */
|
|
+ #define MVE_SET_OPT_INDEX_WATCHDOG_TIMEOUT (42) /* arg=timeout, arg=0 to disable */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_CABAC_INIT_IDC (43) /* encode, arg; 0,1,2 for H264; 0,1 for HEVC */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_ADPTIVE_QUANTISATION (44) /* encode (h264 and hevc) */
|
|
+ #define MVE_SET_OPT_INDEX_QP_DELTA_I_P (45)
|
|
+ #define MVE_SET_OPT_INDEX_QP_DELTA_I_B_REF (46)
|
|
+ #define MVE_SET_OPT_INDEX_QP_DELTA_I_B_NONREF (47)
|
|
+ #define MVE_SET_OPT_INDEX_CB_QP_OFFSET (48)
|
|
+ #define MVE_SET_OPT_INDEX_CR_QP_OFFSET (49)
|
|
+ #define MVE_SET_OPT_INDEX_LAMBDA_SCALE (50) /* encode, data.lambda_scale */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_MAX_NUM_CORES (51) /* maximum number of cores */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_FIXED_QP (56)
|
|
+ /* ARBITRARY_DOWNSCALE */
|
|
+ #define MVE_SET_OPT_INDEX_DEC_DOWNSCALE (57) /* decode, set downscaled width and height */
|
|
+ /* SAO_EVAL */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_SAO_PENALTY (58)
|
|
+ #define MVE_SET_OPT_INDEX_ENC_SAO_EN (59) /* do not use now */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_SAO_LUMA_EN (60)
|
|
+ #define MVE_SET_OPT_INDEX_ENC_SAO_CHROMA_EN (61)
|
|
+ /* MVE_ENCODE_CROP_FEATURE */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_LEFT (62)
|
|
+ #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_RIGHT (63)
|
|
+ #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_TOP (64)
|
|
+ #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_BOTTOM (65)
|
|
+ /* LONG_TERM_REFERENCE */
|
|
+ #define MVE_SET_OPT_INDEX_ENC_LTR_MODE (66)
|
|
+ #define MVE_SET_OPT_INDEX_ENC_LTR_PERIOD (67)
|
|
+ #define MVE_SET_OPT_INDEX_DEC_DOWNSCALE_POS_MODE (69)
|
|
+
|
|
+ union
|
|
+ {
|
|
+ uint32_t arg; /* Most options only need a uint32_t as argument */
|
|
+ /* For option MVE_SET_OPT_INDEX_NALU_FORMAT, arg should
|
|
+ * be one of these: */
|
|
+ #define MVE_OPT_NALU_FORMAT_START_CODES (1)
|
|
+ #define MVE_OPT_NALU_FORMAT_ONE_NALU_PER_BUFFER (2)
|
|
+ #define MVE_OPT_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD (4)
|
|
+ #define MVE_OPT_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD (8)
|
|
+ #define MVE_OPT_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD (16)
|
|
+ #define MVE_OPT_NALU_FORMAT_MBINFO (32) /* only used for debugging */
|
|
+ /* For option MVE_SET_OPT_INDEX_GOP_TYPE, arg should
|
|
+ * be one of these: */
|
|
+ #define MVE_OPT_GOP_TYPE_BIDIRECTIONAL (1)
|
|
+ #define MVE_OPT_GOP_TYPE_LOW_DELAY (2)
|
|
+ #define MVE_OPT_GOP_TYPE_PYRAMID (3)
|
|
+ /* For option MVE_SET_OPT_INDEX_ENC_VP9_PROB_UPDATE,
|
|
+ * arg should be one of these: */
|
|
+ #define MVE_OPT_VP9_PROB_UPDATE_DISABLED (0)
|
|
+ #define MVE_OPT_VP9_PROB_UPDATE_IMPLICIT (1)
|
|
+ #define MVE_OPT_VP9_PROB_UPDATE_EXPLICIT (2)
|
|
+ /* For option MVE_SET_OPT_INDEX_DISABLE_FEATURES, arg
|
|
+ * should be a bitmask with features to disable: */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_AFBC_COMP (0x00000001) /* VDMA AFBC Compression */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_REF_CACHE (0x00000002) /* REF caching */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_DEBLOCK (0x00000004) /* Deblocking */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_SAO (0x00000008) /* SAO */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_PIC_OUTPUT (0x00000020) /* Picture Output Removal */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_PIPE (0x00000040) /* Pipe (i.e. parser-only) */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_SLEEP (0x00000080) /* Clock gating
|
|
+ * (SOC_SYSCTRL.SLEEP bit) */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_AFBC_LEGACY_REF (0x00000100) /* Enables tiled AFBC format in
|
|
+ * reference buffers. Ignored
|
|
+ * for decode AFBC output */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_REF_PICS (0x00000400) /* Forces use of static 16x16
|
|
+ * reference pics */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_CHNG_RECT_WA (0x00000800) /* Disables workaround */
|
|
+ #define MVE_OPT_DISABLE_FEATURE_REFSZ_LIMIT (0x00001000) /* Disable REFSZ bw limit */
|
|
+ /* For options MVE_SET_OPT_INDEX_ENC_MB_TYPE_ENABLE
|
|
+ * and MVE_SET_OPT_INDEX_ENC_MB_TYPE_DISABLE, arg
|
|
+ * should be a bitmask of MVE_MBTYPEs: */
|
|
+ #define MVE_MBTYPE_4x4 (0x00000001) /* 4x4 inter */
|
|
+ #define MVE_MBTYPE_4x8 (0x00000002) /* 4x8 inter */
|
|
+ #define MVE_MBTYPE_8x4 (0x00000004) /* 8x4 inter */
|
|
+ #define MVE_MBTYPE_8x8 (0x00000008) /* 8x8 inter */
|
|
+ #define MVE_MBTYPE_8x16 (0x00000010) /* 8x16 inter */
|
|
+ #define MVE_MBTYPE_16x8 (0x00000020) /* 16x8 inter */
|
|
+ #define MVE_MBTYPE_16x16 (0x00000040) /* 16x16 inter */
|
|
+ #define MVE_MBTYPE_PSKIP (0x00000080) /* P Skip inter */
|
|
+ #define MVE_MBTYPE_I4x4 (0x00000100) /* 4x4 intra */
|
|
+ #define MVE_MBTYPE_I8x8 (0x00000200) /* 8x8 intra */
|
|
+ #define MVE_MBTYPE_I16x16 (0x00000400) /* 16x16 intra */
|
|
+ #define MVE_MBTYPE_I32x32 (0x00000800) /* 32x32 intra */
|
|
+ #define MVE_MBTYPE_16x32 (0x00001000) /* 16x32 inter */
|
|
+ #define MVE_MBTYPE_32x16 (0x00002000) /* 32x16 inter */
|
|
+ #define MVE_MBTYPE_32x32 (0x00004000) /* 32x32 inter */
|
|
+ /* For option MVE_SET_OPT_INDEX_ENC_RGB_TO_YUV_MODE,
|
|
+ * arg should be one of these: */
|
|
+ #define MVE_OPT_RGB_TO_YUV_BT601_STUDIO (0)
|
|
+ #define MVE_OPT_RGB_TO_YUV_BT601_FULL (1)
|
|
+ #define MVE_OPT_RGB_TO_YUV_BT709_STUDIO (2)
|
|
+ #define MVE_OPT_RGB_TO_YUV_BT709_FULL (3)
|
|
+ struct
|
|
+ {
|
|
+ uint16_t profile;
|
|
+ /* AVC/H.264 profiles */
|
|
+ #define MVE_OPT_PROFILE_H264_BASELINE (1)
|
|
+ #define MVE_OPT_PROFILE_H264_MAIN (2)
|
|
+ #define MVE_OPT_PROFILE_H264_HIGH (3)
|
|
+ #define MVE_OPT_PROFILE_H264_HIGH_10 (4)
|
|
+ /* HEVC/H.265 profiles */
|
|
+ #define MVE_OPT_PROFILE_H265_MAIN (1)
|
|
+ #define MVE_OPT_PROFILE_H265_MAIN_STILL (2)
|
|
+ #define MVE_OPT_PROFILE_H265_MAIN_INTRA (3)
|
|
+ #define MVE_OPT_PROFILE_H265_MAIN_10 (4)
|
|
+ /* VC-1 profiles */
|
|
+ #define MVE_OPT_PROFILE_VC1_SIMPLE (1)
|
|
+ #define MVE_OPT_PROFILE_VC1_MAIN (2)
|
|
+ #define MVE_OPT_PROFILE_VC1_ADVANCED (3)
|
|
+ /* VP8 profiles */
|
|
+ #define MVE_OPT_PROFILE_VP8_MAIN (1)
|
|
+ uint16_t level;
|
|
+ /* AVC/H.264 levels */
|
|
+ #define MVE_OPT_LEVEL_H264_1 (1)
|
|
+ #define MVE_OPT_LEVEL_H264_1b (2)
|
|
+ #define MVE_OPT_LEVEL_H264_11 (3)
|
|
+ #define MVE_OPT_LEVEL_H264_12 (4)
|
|
+ #define MVE_OPT_LEVEL_H264_13 (5)
|
|
+ #define MVE_OPT_LEVEL_H264_2 (6)
|
|
+ #define MVE_OPT_LEVEL_H264_21 (7)
|
|
+ #define MVE_OPT_LEVEL_H264_22 (8)
|
|
+ #define MVE_OPT_LEVEL_H264_3 (9)
|
|
+ #define MVE_OPT_LEVEL_H264_31 (10)
|
|
+ #define MVE_OPT_LEVEL_H264_32 (11)
|
|
+ #define MVE_OPT_LEVEL_H264_4 (12)
|
|
+ #define MVE_OPT_LEVEL_H264_41 (13)
|
|
+ #define MVE_OPT_LEVEL_H264_42 (14)
|
|
+ #define MVE_OPT_LEVEL_H264_5 (15)
|
|
+ #define MVE_OPT_LEVEL_H264_51 (16)
|
|
+ #define MVE_OPT_LEVEL_H264_52 (17)
|
|
+ #define MVE_OPT_LEVEL_H264_6 (18)
|
|
+ #define MVE_OPT_LEVEL_H264_61 (19)
|
|
+ #define MVE_OPT_LEVEL_H264_62 (20)
|
|
+ #define MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE (32)
|
|
+ /* The value (MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE + level_idc) encodes a user
|
|
+ * supplied level_idc value in the range 0 to 255 inclusive. If the host supplies a level_idc
|
|
+ * value by this method then the encoder will encode this level_idc value in the bitstream
|
|
+ * without checking the validity of the level_idc value
|
|
+ */
|
|
+ #define MVE_OPT_LEVEL_H264_USER_SUPPLIED_MAX (MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE + 255)
|
|
+ /* HEVC/H.265 levels */
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_1 (1)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_1 (2)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_2 (3)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_2 (4)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_21 (5)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_21 (6)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_3 (7)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_3 (8)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_31 (9)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_31 (10)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_4 (11)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_4 (12)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_41 (13)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_41 (14)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_5 (15)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_5 (16)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_51 (17)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_51 (18)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_52 (19)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_52 (20)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_6 (21)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_6 (22)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_61 (23)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_61 (24)
|
|
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_62 (25)
|
|
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_62 (26)
|
|
+ } profile_level;
|
|
+ struct
|
|
+ {
|
|
+ int32_t mv_search_range_x;
|
|
+ int32_t mv_search_range_y;
|
|
+ } motion_vector_search_range;
|
|
+ struct
|
|
+ {
|
|
+ uint32_t type;
|
|
+ #define MVE_OPT_HUFFMAN_TABLE_DC_LUMA (1)
|
|
+ #define MVE_OPT_HUFFMAN_TABLE_AC_LUMA (2)
|
|
+ #define MVE_OPT_HUFFMAN_TABLE_DC_CHROMA (3)
|
|
+ #define MVE_OPT_HUFFMAN_TABLE_AC_CHROMA (4)
|
|
+ uint8_t number_of_huffman_of_code_length[ 16 ];
|
|
+ uint8_t table[ 162 ]; /* 12 are used for DC, 162 for AC */
|
|
+ } huffman_table;
|
|
+ struct
|
|
+ {
|
|
+ uint32_t type;
|
|
+ #define MVE_OPT_QUANT_TABLE_LUMA (1)
|
|
+ #define MVE_OPT_QUANT_TABLE_CHROMA (2)
|
|
+ uint8_t matrix[ 64 ];
|
|
+ } quant_table;
|
|
+ struct
|
|
+ {
|
|
+ /* For HEVC, tile_cols must be zero. For VP9, tile_rows
|
|
+ * and tile_cols must be powers of 2. */
|
|
+ uint16_t tile_rows;
|
|
+ uint16_t tile_cols;
|
|
+ } tiles;
|
|
+ struct
|
|
+ {
|
|
+ uint16_t luma_bitdepth;
|
|
+ uint16_t chroma_bitdepth;
|
|
+ } bitdepth;
|
|
+ struct
|
|
+ {
|
|
+ /* Scale factors, and their square roots, for the lambda
|
|
+ * coefficients used by the encoder, in unsigned Q8 fixed-point
|
|
+ * format. Default (no scaling) is 1.0 (so 0x0100 in hex).
|
|
+ */
|
|
+ uint16_t lambda_scale_i_q8;
|
|
+ uint16_t lambda_scale_sqrt_i_q8;
|
|
+ uint16_t lambda_scale_p_q8;
|
|
+ uint16_t lambda_scale_sqrt_p_q8;
|
|
+ uint16_t lambda_scale_b_ref_q8;
|
|
+ uint16_t lambda_scale_sqrt_b_ref_q8;
|
|
+ uint16_t lambda_scale_b_nonref_q8;
|
|
+ uint16_t lambda_scale_sqrt_b_nonref_q8;
|
|
+ } lambda_scale;
|
|
+ /* ARBITRARY_DOWNSCALE */
|
|
+ struct
|
|
+ {
|
|
+ uint16_t width;
|
|
+ uint16_t height;
|
|
+ } downscaled_frame;
|
|
+ struct
|
|
+ {
|
|
+ uint32_t mode;
|
|
+ } dsl_pos;
|
|
+ } data;
|
|
+};
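As a minimal host-side sketch (illustrative only, not part of the driver sources), the set-option payload above could be populated as shown below; the include path and the surrounding message-queue plumbing are assumptions and depend on which protocol header version is in use:

#include <string.h>
#include "mve_protocol_def.h"  /* fw_v2 or fw_v3 copy, depending on the build */

/* Select start-code NALU format; only the 'arg' member of the union is used. */
static void build_nalu_format_option(struct mve_request_set_option *opt)
{
    memset(opt, 0, sizeof(*opt));
    opt->index = MVE_SET_OPT_INDEX_NALU_FORMAT;
    opt->data.arg = MVE_OPT_NALU_FORMAT_START_CODES;
}

/* Request H.264 High profile at level 4.1 via the profile_level member. */
static void build_h264_profile_level(struct mve_request_set_option *opt)
{
    memset(opt, 0, sizeof(*opt));
    opt->index = MVE_SET_OPT_INDEX_PROFILE_LEVEL;
    opt->data.profile_level.profile = MVE_OPT_PROFILE_H264_HIGH;
    opt->data.profile_level.level = MVE_OPT_LEVEL_H264_41;
}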
|
|
+
|
|
+struct mve_request_release_ref_frame
|
|
+{
|
|
+ /* Decode only: For a frame buffer that MVE has returned
|
|
+ * marked as _REF_FRAME, the host can send this message
|
|
+ * to ask the MVE to release the buffer as soon as it is
|
|
+ * no longer used as a reference. (Otherwise, in
|
|
+ * normal operation, the host would re-enqueue the buffer
|
|
+ * to the MVE when it has been displayed and can be over-
|
|
+ * written with a new frame.)
|
|
+ *
|
|
+ * Note: The point at which a frame is no longer used as a reference depends
|
|
+ * on the stream being decoded, and there is no way to
|
|
+ * guarantee a short response time; the response may not
|
|
+ * come until the end of the stream.
|
|
+ */
|
|
+ uint32_t buffer_address;
|
|
+};
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/*********************************************************************
|
|
+ *
|
|
+ * RESPONSEs are messages from the firmware to the host
|
|
+ *
|
|
+ * Some of the MVE_RESPONSE_CODE_ codes are followed by one of the
|
|
+ * structs below.
|
|
+ *
|
|
+ *********************************************************************/
|
|
+
|
|
+/* Sent when firmware has booted.
|
|
+ */
|
|
+struct mve_response_switched_in
|
|
+{
|
|
+ uint32_t core;
|
|
+};
|
|
+
|
|
+/* Sent when last core in a session has switched out.
|
|
+ */
|
|
+struct mve_response_switched_out
|
|
+{
|
|
+ uint32_t core;
|
|
+ uint32_t reason;
|
|
+ uint32_t sub_reason;
|
|
+};
|
|
+
|
|
+/* Response confirming state transition after either GO or STOP
|
|
+ * command from host.
|
|
+ */
|
|
+struct mve_response_state_change
|
|
+{
|
|
+ uint32_t new_state;
|
|
+ #define MVE_STATE_STOPPED (0)
|
|
+ #define MVE_STATE_RUNNING (2)
|
|
+};
|
|
+
|
|
+/* Message sent when the all cores in the session have dequeued a
|
|
+ * job from the firmware job queue.
|
|
+ */
|
|
+struct mve_response_job_dequeued
|
|
+{
|
|
+ uint32_t valid_job;
|
|
+};
|
|
+
|
|
+/* Fatal error message from firmware, if sent then no further
|
|
+ * operation is possible.
|
|
+ */
|
|
+struct mve_response_error
|
|
+{
|
|
+ uint32_t error_code;
|
|
+ #define MVE_ERROR_ABORT (1)
|
|
+ #define MVE_ERROR_OUT_OF_MEMORY (2)
|
|
+ #define MVE_ERROR_ASSERT (3)
|
|
+ #define MVE_ERROR_UNSUPPORTED (4)
|
|
+ #define MVE_ERROR_INVALID_BUFFER (6)
|
|
+ #define MVE_ERROR_INVALID_STATE (8)
|
|
+ #define MVE_ERROR_WATCHDOG (9)
|
|
+
|
|
+ #define MVE_MAX_ERROR_MESSAGE_SIZE (128)
|
|
+ char message[ MVE_MAX_ERROR_MESSAGE_SIZE ];
|
|
+};
|
|
+
|
|
+/* When a set-option succeeds, a confirmation message is
|
|
+ * sent, including the index-code for that particular option.
|
|
+ */
|
|
+struct mve_response_set_option_confirm
|
|
+{
|
|
+ uint32_t index; /* Same as 'index' in struct mve_request_set_option */
|
|
+};
|
|
+
|
|
+/* If a set-option request fails, this message is returned.
|
|
+ * This is not a fatal error. The set-option had no effect,
|
|
+ * and the session is still alive.
|
|
+ * For example, trying to set an option with a too large
|
|
+ * or small parameter would result in this message.
|
|
+ * The included text string is meant for development and
|
|
+ * debugging purposes only.
|
|
+ * (When a set-option succeeds the set-option-confirm
|
|
+ * message code is sent instead.)
|
|
+ */
|
|
+struct mve_response_set_option_fail
|
|
+{
|
|
+ uint32_t index; /* Same as 'index' in struct mve_request_set_option */
|
|
+ char message[ MVE_MAX_ERROR_MESSAGE_SIZE ];
|
|
+};
|
|
+
|
|
+/* Decode only: This message is sent from MVE to the host so that it can
|
|
+ * allocate large enough output buffers. Output buffers that are too small
|
|
+ * will be returned to the host marked as 'rejected'.
|
|
+ */
|
|
+struct mve_response_frame_alloc_parameters
|
|
+{
|
|
+ /* Please note that the below information is a hint
|
|
+ * for what buffers to allocate; it does not say
|
|
+ * what actual resolution an output picture has.
|
|
+ */
|
|
+
|
|
+ /* To use if allocating PLANAR YUV output buffers: */
|
|
+ uint16_t planar_alloc_frame_width;
|
|
+ uint16_t planar_alloc_frame_height;
|
|
+
|
|
+ /* To use if allocating AFBC output buffers
|
|
+ * (if interlace, each field needs this size):
|
|
+ */
|
|
+ uint32_t afbc_alloc_bytes;
|
|
+
|
|
+ /* For situations where downscaled AFBC is supported,
|
|
+ * this number of bytes is needed for the downscaled frame.
|
|
+ */
|
|
+ uint32_t afbc_alloc_bytes_downscaled;
|
|
+
|
|
+ /* When the host allocates an AFBC frame buffer, it should normally set
|
|
+ * the afbc_width_in_superblocks to be at least this recommended value.
|
|
+ * Buffers with smaller values are likely to be returned rejected by the MVE.
|
|
+ * See also comments above for afbc_alloc_bytes and
|
|
+ * afbc_alloc_bytes_downscaled, they describe the situations where the
|
|
+ * different values are used.
|
|
+ */
|
|
+ uint16_t afbc_width_in_superblocks;
|
|
+ uint16_t afbc_width_in_superblocks_downscaled;
|
|
+
|
|
+ /* For PLANAR YUV output, every plane's address needs to be adjusted to get
|
|
+ * optimal AXI bursts when the pixel data is written, the values below may
|
|
+ * be used to calculate address offsets.
|
|
+ */
|
|
+ uint16_t cropx;
|
|
+ uint16_t cropy;
|
|
+
|
|
+ uint32_t mbinfo_alloc_bytes; /* Only for debugging */
|
|
+
|
|
+
|
|
+ /* downscaled frame width/height for decode */
|
|
+ /* ARBITRARY_DOWNSCALE */
|
|
+ uint16_t dsl_frame_width;
|
|
+ uint16_t dsl_frame_height;
|
|
+ uint16_t dsl_pos_mode;
|
|
+ uint8_t ctu_size; /* EXPORT_SEQ_INFO */
|
|
+};
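A rough sizing sketch based on these allocation hints; it assumes tightly packed NV12 planes with no extra row padding (real drivers may add alignment on top), and that interlaced AFBC output needs afbc_alloc_bytes per field as stated above:

#include <stdbool.h>
#include <stddef.h>
#include "mve_protocol_def.h"

/* Planar NV12: one full-size Y plane plus a half-size interleaved CbCr plane. */
static size_t nv12_alloc_size(const struct mve_response_frame_alloc_parameters *p)
{
    size_t w = p->planar_alloc_frame_width;
    size_t h = p->planar_alloc_frame_height;

    return w * h + w * h / 2;
}

/* AFBC: afbc_alloc_bytes per frame, or per field when interlaced. */
static size_t afbc_alloc_size(const struct mve_response_frame_alloc_parameters *p,
                              bool interlaced)
{
    return (size_t)p->afbc_alloc_bytes * (interlaced ? 2 : 1);
}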
|
|
+
|
|
+/* Decode only: This message is sent from MVE to the host so that it can
|
|
+ * allocate suitable output buffers. The needed size of the buffer is sent
|
|
+ * in a separate message (above).
|
|
+ * When MVE sends the message below, it enters a waiting-state and will not
|
|
+ * make any progress until the host sends an output-flush command, upon
|
|
+ * which MVE will return all output buffers, followed by a message saying
|
|
+ * that the output has been flushed. Only then should the host start
|
|
+ * enqueueing new output buffers.
|
|
+ */
|
|
+struct mve_response_sequence_parameters
|
|
+{
|
|
+ /* Other stream parameters affecting buffer allocation,
|
|
+ * any change in these values will trigger a flush.
|
|
+ */
|
|
+ uint8_t interlace; /* 0 or 1 */
|
|
+ uint8_t chroma_format;
|
|
+ #define MVE_CHROMA_FORMAT_MONO (0x0)
|
|
+ #define MVE_CHROMA_FORMAT_420 (0x1)
|
|
+ #define MVE_CHROMA_FORMAT_422 (0x2)
|
|
+ #define MVE_CHROMA_FORMAT_440 (0x3)
|
|
+ #define MVE_CHROMA_FORMAT_ARGB (0x4)
|
|
+ uint8_t bitdepth_luma; /* 8, 9 or 10 */
|
|
+ uint8_t bitdepth_chroma; /* 8, 9 or 10 */
|
|
+ uint8_t num_buffers_planar; /* number of planar buffers needed */
|
|
+ uint8_t num_buffers_afbc; /* number of AFBC buffers needed, for
|
|
+ * AFBC output more buffers are needed
|
|
+ * (for planar output, the firmware
|
|
+ * will allocate extra memory via RPC)
|
|
+ */
|
|
+ uint8_t range_mapping_enabled; /* VC-1 AP specific feature, if enabled
|
|
+ * then AFBC buffers may need special
|
|
+ * filtering before they can be
|
|
+ * displayed correctly. If the host is
|
|
+ * not able to do that, then planar output
|
|
+ * should be used, for which MVE
|
|
+ * automatically performs the filtering.
|
|
+ */
|
|
+ uint8_t reserved0;
|
|
+};
|
|
+
|
|
+struct mve_response_ref_frame_unused
|
|
+{
|
|
+ /* Decode only: If requested by the host with the message
|
|
+ * MVE_REQUEST_CODE_RELEASE_REF_FRAME, the MVE will respond
|
|
+ * with this message when (if ever) the buffer is no longer
|
|
+ * used.
|
|
+ */
|
|
+ uint32_t unused_buffer_address;
|
|
+};
|
|
+
|
|
+
|
|
+/* This message is only for debugging and performance profiling.
|
|
+ * It is sent by the firmware if the corresponding option is enabled.
|
|
+ */
|
|
+struct mve_event_processed
|
|
+{
|
|
+ uint8_t pic_format;
|
|
+ uint8_t qp;
|
|
+ uint8_t pad0;
|
|
+ uint8_t pad1;
|
|
+ uint32_t parse_start_time; /* Timestamp, absolute time */
|
|
+ uint32_t parse_end_time; /* Timestamp, absolute time */
|
|
+ uint32_t parse_idle_time; /* Definition of idle here is waiting for in/out buffers or available RAM */
|
|
+
|
|
+ uint32_t pipe_start_time; /* Timestamp */
|
|
+ uint32_t pipe_end_time; /* Timestamp, end-start = process time. Idle time while in a frame is
|
|
+ * not measured. */
|
|
+ uint32_t pipe_idle_time; /* Always 0 in decode, */
|
|
+
|
|
+ uint32_t parser_coreid; /* Core used to parse this frame */
|
|
+ uint32_t pipe_coreid; /* Core used to pipe this frame */
|
|
+
|
|
+ uint32_t bitstream_bits; /* Number of bitstream bits used for this frame. */
|
|
+
|
|
+ uint32_t intermediate_buffer_size; /* Size of intermediate (mbinfo/residuals) buffer after this frame was
|
|
+ * parsed. */
|
|
+ uint32_t total_memory_allocated; /* after the frame was parsed. Including reference frames. */
|
|
+
|
|
+ uint32_t bus_read_bytes; /* bus read bytes */
|
|
+ uint32_t bus_write_bytes; /* bus written bytes */
|
|
+
|
|
+ uint32_t afbc_bytes; /* afbc data transferred */
|
|
+
|
|
+ uint32_t slice0_end_time; /* Timestamp, absolute time */
|
|
+ uint32_t stream_start_time; /* Timestamp, absolute stream start time */
|
|
+ uint32_t stream_open_time; /* Timestamp, absolute stream open time */
|
|
+};
|
|
+
|
|
+/* This message is only for debugging and is sent by the
|
|
+ * firmware if the corresponding option is enabled.
|
|
+ */
|
|
+struct mve_event_ref_frame
|
|
+{
|
|
+ uint32_t ref_addr; /* MVE virtual address of AFBC reference frame */
|
|
+ uint32_t ref_width; /* Width of display area in luma pixels */
|
|
+ uint32_t ref_height; /* Height of display area in luma pixels */
|
|
+ uint32_t ref_mb_width; /* Width in macroblocks */
|
|
+ uint32_t ref_mb_height; /* Height in macroblocks */
|
|
+ uint32_t ref_left_crop; /* Left crop in luma pixels */
|
|
+ uint32_t ref_top_crop; /* Top crop in luma pixels */
|
|
+ uint32_t ref_frame_size; /* Total AFBC frame size in bytes */
|
|
+ uint32_t ref_display_order;
|
|
+ uint16_t bit_width; /* bit width of the YUV either 8 or 10 */
|
|
+ uint16_t tiled_headers; /* AFBC format is tiled */
|
|
+};
|
|
+
|
|
+/* This message is only for debugging and is sent by the firmware if event tracing
|
|
+ * is enabled.
|
|
+ */
|
|
+struct mve_event_trace_buffers
|
|
+{
|
|
+ uint16_t reserved;
|
|
+ uint8_t num_cores;
|
|
+ uint8_t rasc_mask;
|
|
+ #define MVE_MAX_TRACE_BUFFERS 40
|
|
+ /* this array will contain one buffer per rasc in rasc_mask per num_core */
|
|
+ struct
|
|
+ {
|
|
+ uint32_t rasc_addr; /* rasc address of the buffer */
|
|
+ uint32_t size; /* size of the buffer in bytes */
|
|
+ } buffers[MVE_MAX_TRACE_BUFFERS];
|
|
+};
|
|
+
|
|
+/* 'Events' are informative messages, the host is not required to react in
|
|
+ * any particular way.
|
|
+ */
|
|
+struct mve_response_event
|
|
+{
|
|
+ uint32_t event_code;
|
|
+ #define MVE_EVENT_ERROR_STREAM_CORRUPT (1) /* message, text string */
|
|
+ #define MVE_EVENT_ERROR_STREAM_NOT_SUPPORTED (2) /* message, text string */
|
|
+ #define MVE_EVENT_PROCESSED (3) /* struct mve_event_processed */
|
|
+ #define MVE_EVENT_REF_FRAME (4) /* struct mve_event_ref_frame */
|
|
+ #define MVE_EVENT_TRACE_BUFFERS (5) /* struct mve_event_trace_buffers */
|
|
+ union
|
|
+ {
|
|
+ struct mve_event_processed event_processed;
|
|
+ struct mve_event_ref_frame event_ref_frame;
|
|
+ struct mve_event_trace_buffers event_trace_buffers;
|
|
+ char message[ MVE_MAX_ERROR_MESSAGE_SIZE ];
|
|
+ } event_data;
|
|
+}__attribute__((packed));
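A sketch of how a host could dispatch on the event union above; printf stands in for the driver's real logging, and the length cap is a defensive assumption since the protocol does not state that the message text is NUL-terminated:

#include <stdio.h>
#include "mve_protocol_def.h"

static void handle_event(const struct mve_response_event *ev)
{
    switch (ev->event_code) {
    case MVE_EVENT_ERROR_STREAM_CORRUPT:
    case MVE_EVENT_ERROR_STREAM_NOT_SUPPORTED:
        /* Text payload; print with a length cap. */
        printf("stream event %u: %.*s\n", (unsigned)ev->event_code,
               MVE_MAX_ERROR_MESSAGE_SIZE, ev->event_data.message);
        break;
    case MVE_EVENT_PROCESSED:
        printf("frame parsed on core %u, piped on core %u\n",
               (unsigned)ev->event_data.event_processed.parser_coreid,
               (unsigned)ev->event_data.event_processed.pipe_coreid);
        break;
    default:
        /* MVE_EVENT_REF_FRAME, MVE_EVENT_TRACE_BUFFERS: informative only. */
        break;
    }
}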
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/*********************************************************************
|
|
+ *
|
|
+ * BUFFERs are sent both ways, from host to firmware and back again
|
|
+ *
|
|
+ * Each MVE_BUFFER_CODE_ code is followed by one of the structs
|
|
+ * below.
|
|
+ *
|
|
+ *********************************************************************/
|
|
+
|
|
+/* Flags in mve_buffer_frame::frame_flags:
|
|
+ * Set by whom? Meaning:
|
|
+ * DECODE: ENCODE:
|
|
+ * MVE_BUFFER_FRAME_FLAG_INTERLACE host - Buffer is interlaced (both top and
|
|
+ * bottom fields are allocated)
|
|
+ * MVE_BUFFER_FRAME_FLAG_BOT_FIRST fw - Bottom field should be displayed
|
|
+ * first (only if interlaced)
|
|
+ * MVE_BUFFER_FRAME_FLAG_TOP_PRESENT fw host Top field present (or full frame if
|
|
+ * not interlaced)
|
|
+ * MVE_BUFFER_FRAME_FLAG_BOT_PRESENT fw - Bottom present (only if interlaced)
|
|
+ *
|
|
+ * MVE_BUFFER_FRAME_FLAG_ROTATION_* host host Decode: MVE will rotate the output frame
|
|
+ * according to this setting.
|
|
+ * Encode: MVE will rotate the input frame
|
|
+ * according to this setting before
|
|
+ * encoding them.
|
|
+ * MVE_BUFFER_FRAME_FLAG_SCALING_MASK host - Output pictures should be downscaled
|
|
+ *
|
|
+ * MVE_BUFFER_FRAME_FLAG_MIRROR_* - host Input frame should be mirrored before encoding
|
|
+ *
|
|
+ * MVE_BUFFER_FRAME_FLAG_REJECTED fw - Buffer was too small, host should re-allocate
|
|
+ *
|
|
+ * MVE_BUFFER_FRAME_FLAG_CORRUPT fw - Frame contains visual corruption
|
|
+ *
|
|
+ * MVE_BUFFER_FRAME_FLAG_DECODE_ONLY fw - Frame should not be displayed
|
|
+ *
|
|
+ * MVE_BUFFER_FRAME_FLAG_REF_FRAME fw - Frame is used by MVE as reference, host must
|
|
+ * not change, just re-enqueue when displayed
|
|
+ * MVE_BUFFER_FRAME_FLAG_EOS fw host This is the last frame in the stream.
|
|
+ */
|
|
+
|
|
+/* mve_buffer_frame_planar stores uncompressed YUV pictures.
|
|
+ * ________________________________________
|
|
+ * | ^ | | ^
|
|
+ * |<-:--visible_frame_width---->| | :
|
|
+ * | : | | :
|
|
+ * | : | | :
|
|
+ * | visible_frame_height | | max_frame_height
|
|
+ * | : | | :
|
|
+ * | : | | :
|
|
+ * |__v__________________________| | :
|
|
+ * | | :
|
|
+ * |<-------------max_frame_width---------->| :
|
|
+ * |________________________________________| v
|
|
+ *
|
|
+ */
|
|
+struct mve_buffer_frame_planar
|
|
+{
|
|
+ /* Y,Cb,Cr top field */
|
|
+ uint32_t plane_top[ 3 ];
|
|
+
|
|
+ /* Y,Cb,Cr bottom field (interlace only) */
|
|
+ uint32_t plane_bot[ 3 ];
|
|
+
|
|
+ /* Stride between rows, in bytes */
|
|
+ int32_t stride[ 3 ];
|
|
+
|
|
+ /* Size of largest frame allowed to put in this buffer */
|
|
+ uint16_t max_frame_width;
|
|
+ uint16_t max_frame_height;
|
|
+};
|
|
+
|
|
+/* mve_buffer_frame_afbc stores AFBC compressed content that is also used
|
|
+ * as the reference frame. Out of loop processing (crop, rotation,
|
|
+ * range reduction) must be supported by the user of this buffer and
|
|
+ * the parameters are signaled within the buffer descriptor below.
|
|
+ * ________________________________________
|
|
+ * | ^ |
|
|
+ * | cropy |
|
|
+ * | v_____________________________ |
|
|
+ * |<-cropx->| ^ ||
|
|
+ * | |<-:--visible_frame_width---->||
|
|
+ * | | : ||
|
|
+ * | | : ||
|
|
+ * | | visible_frame_height ||
|
|
+ * | | : ||
|
|
+ * | | : ||
|
|
+ * | |__v__________________________||
|
|
+ * |________________________________________|
|
|
+ *
|
|
+ * <----- superblock_width --------------->
|
|
+ * * afbc_width_in_superblocks
|
|
+ *
|
|
+ * Note that the sizes and cropping values need not be multiples of 16.
|
|
+ *
|
|
+ * For interlaced streams, the values refer to a full frame,
|
|
+ * while the output is actually separated into fields. Thus for fields,
|
|
+ * cropy and visible_frame_height should be divided by two.
|
|
+ *
|
|
+ * For dual-downscaled AFBC output (not supported for interlace),
|
|
+ * then the cropx, cropy, visible_frame_width and visible_frame_height
|
|
+ * should be divided by two for the downscaled plane.
|
|
+ */
|
|
+struct mve_buffer_frame_afbc
|
|
+{
|
|
+ uint32_t plane[ 2 ]; /* Addresses for up to two AFBC planes:
|
|
+ * Top and bottom fields for interlace,
|
|
+ * or standard and optional downscaled output. */
|
|
+ uint32_t alloc_bytes[ 2 ]; /* Size of allocation for each plane */
|
|
+ uint16_t cropx; /* Luma x crop */
|
|
+ uint16_t cropy; /* Luma y crop */
|
|
+ uint16_t afbc_width_in_superblocks[ 2 ]; /* Width of AFBC frame buffer, in units
|
|
+ * of superblock width (32 or 16).
|
|
+ * If dual-downscaled output is chosen,
|
|
+ * this width can be different for the
|
|
+ * two planes.
|
|
+ * For first plane:
|
|
+ * (cropx + frame_width)
|
|
+ * <= superblock_width * afbc_width...
|
|
+ */
|
|
+ uint32_t afbc_params; /* AFBC parameters */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_TILED_BODY (0x00000001) /* Output body blocks should be tiled */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_TILED_HEADER (0x00000002) /* Output headers should be tiled */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_32X8_SUPERBLOCK (0x00000004) /* Super block is 32x8, default is 16x16,
|
|
+ * (only supported as input for encode) */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_DN_FORCE_8BIT (0x00000008) /* For downscaled AFBC plane: It shall
|
|
+ * be 8-bit, even if full-scale is 10-bit */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_DN_FORCE_420 (0x00000010) /* For downscaled AFBC plane: It shall
|
|
+ * be 4:2:0, even if full-scale is 4:2:2 */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_STRIDE_SET_BY_MVE (0x00000020) /* Decode only: By default, the host should
|
|
+ set the afbc_width_in_superblocks. If the
|
|
+ value is zero, or if this bit is set, then
|
|
+ the MVE sets an appropriate value. */
|
|
+ #define MVE_BUFFER_FRAME_AFBC_BLOCK_SPLIT (0x00000040) /* For Superblock layout, block_split mode
|
|
+ should be enabled */
|
|
+};
|
|
+
|
|
+/*
|
|
+ * The FRAME buffer stores the common information for PLANAR and AFBC buffers,
|
|
+ * and a union of PLANAR and AFBC specific information.
|
|
+ */
|
|
+struct mve_buffer_frame
|
|
+{
|
|
+ /* For identification of the buffer, this is not changed by
|
|
+ * the firmware. */
|
|
+ uint64_t host_handle;
|
|
+
|
|
+ /* For matching input buffer with output buffers, the firmware
|
|
+ * copies these values between frame buffers and bitstream buffers. */
|
|
+ uint64_t user_data_tag;
|
|
+
|
|
+ /* Frame buffer flags, see commentary above */
|
|
+ uint32_t frame_flags;
|
|
+ #define MVE_BUFFER_FRAME_FLAG_INTERLACE (0x00000001)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_BOT_FIRST (0x00000002)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_TOP_PRESENT (0x00000004)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_BOT_PRESENT (0x00000008)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_ROTATION_90 (0x00000010)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_ROTATION_180 (0x00000020)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_ROTATION_270 (0x00000030)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_SCALING_MASK (0x000000C0)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_MIRROR_HORI (0x00000100)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_MIRROR_VERT (0x00000200)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_REJECTED (0x00001000)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_CORRUPT (0x00002000)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_DECODE_ONLY (0x00004000)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_REF_FRAME (0x00008000)
|
|
+ #define MVE_BUFFER_FRAME_FLAG_EOS (0x00010000)
|
|
+ /*ARBITRARY_DOWNSCALE*/
|
|
+ #define MVE_BUFFER_FRAME_FLAG_SCALING_MASKX (0xFF000000) //8bit
|
|
+ #define MVE_BUFFER_FRAME_FLAG_SCALING_MASKY (0x00FE0000) //7bit
|
|
+
|
|
+ /* Height (in luma samples) of visible part of frame,
|
|
+ * may be smaller than allocated frame size. */
|
|
+ uint16_t visible_frame_height;
|
|
+
|
|
+ /* Width (in luma samples) of visible part of frame,
|
|
+ * may be smaller than allocated frame size. */
|
|
+ uint16_t visible_frame_width;
|
|
+
|
|
+ /* Color format of buffer */
|
|
+ uint16_t format;
|
|
+ /* format bitfield: */
|
|
+ #define MVE_FORMAT_BF_C (0) /* 3 bits, chroma subsampling */
|
|
+ #define MVE_FORMAT_BF_B (4) /* 4 bits, max bitdepth minus 8 */
|
|
+ #define MVE_FORMAT_BF_N (8) /* 2 bits, number of planes */
|
|
+ #define MVE_FORMAT_BF_V (12) /* 2 bits, format variant */
|
|
+ #define MVE_FORMAT_BF_A (15) /* 1 bit, AFBC bit */
|
|
+ /* formats: */
|
|
+ #define MVE_FORMAT_YUV420_AFBC_8 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_A) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_AFBC_10 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( (10 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_A) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV422_AFBC_8 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_A) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV422_AFBC_10 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
|
|
+ ( (10 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_A) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_I420 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 3 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_I420_10 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 10 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 3 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_NV12 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 2 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_NV21 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 2 << MVE_FORMAT_BF_N) | \
|
|
+ ( 1 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_P010 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( (16 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 2 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_Y0L2 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( (10 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV420_AQB1 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
|
|
+ ( (10 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 1 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV422_YUY2 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV422_UYVY ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 1 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_YUV422_Y210 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
|
|
+ ( (16 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_RGBA_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 0 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_BGRA_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 1 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_ARGB_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 2 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_ABGR_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
|
|
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
|
|
+ ( 1 << MVE_FORMAT_BF_N) | \
|
|
+ ( 3 << MVE_FORMAT_BF_V) )
|
|
+
|
|
+ #define MVE_FORMAT_MBINFO (0x0001) /* only used for debugging */
|
|
+
|
|
+ #define MVE_FORMAT_UNUSED (0x0000)
|
|
+
|
|
+ uint16_t reserved0; /* force 'data' to be 4-byte aligned */
|
|
+
|
|
+ union
|
|
+ {
|
|
+ struct mve_buffer_frame_planar planar;
|
|
+ struct mve_buffer_frame_afbc afbc;
|
|
+ } data;
|
|
+
|
|
+ uint32_t reserved1; /* force size to be multiple of 8 bytes */
|
|
+};
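A sketch of filling this descriptor for a progressive NV12 frame; the handle, tag, stride and MVE virtual addresses are placeholders that would in practice come from the driver's buffer bookkeeping and MMU mapping:

#include <stdint.h>
#include <string.h>
#include "mve_protocol_def.h"

static void fill_nv12_frame(struct mve_buffer_frame *f,
                            uint32_t luma_addr, uint32_t chroma_addr,
                            uint16_t width, uint16_t height, int32_t stride)
{
    memset(f, 0, sizeof(*f));
    f->host_handle = 1;                                  /* placeholder id */
    f->user_data_tag = 0;
    f->frame_flags = MVE_BUFFER_FRAME_FLAG_TOP_PRESENT;  /* full frame, no interlace */
    f->visible_frame_width = width;
    f->visible_frame_height = height;
    f->format = MVE_FORMAT_YUV420_NV12;

    f->data.planar.plane_top[0] = luma_addr;             /* Y plane */
    f->data.planar.plane_top[1] = chroma_addr;           /* interleaved CbCr plane */
    f->data.planar.stride[0] = stride;
    f->data.planar.stride[1] = stride;
    f->data.planar.max_frame_width = width;
    f->data.planar.max_frame_height = height;
}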
|
|
+
|
|
+/* The bitstream buffer stores a number of bitstream bytes */
|
|
+struct mve_buffer_bitstream
|
|
+{
|
|
+ /* For identification of the buffer, this is not changed by
|
|
+ * the firmware. */
|
|
+ uint64_t host_handle;
|
|
+
|
|
+ /* For matching input buffer with output buffers, the firmware
|
|
+ * copies these values between frame buffers and bitstream buffers. */
|
|
+ uint64_t user_data_tag;
|
|
+
|
|
+ /* BufferFlags */
|
|
+ uint32_t bitstream_flags;
|
|
+ #define MVE_BUFFER_BITSTREAM_FLAG_EOS (0x00000001)
|
|
+ #define MVE_BUFFER_BITSTREAM_FLAG_ENDOFFRAME (0x00000010)
|
|
+ #define MVE_BUFFER_BITSTREAM_FLAG_SYNCFRAME (0x00000020)
|
|
+ #define MVE_BUFFER_BITSTREAM_FLAG_CODECCONFIG (0x00000080)
|
|
+ #define MVE_BUFFER_BITSTREAM_FLAG_ENDOFSUBFRAME (0x00000400)
|
|
+
|
|
+ /* Length of allocated buffer */
|
|
+ uint32_t bitstream_alloc_bytes;
|
|
+
|
|
+ /* Byte offset from start to first byte */
|
|
+ uint32_t bitstream_offset;
|
|
+
|
|
+ /* Number of bytes in the buffer */
|
|
+ uint32_t bitstream_filled_len;
|
|
+
|
|
+ /* Pointer to buffer start */
|
|
+ uint32_t bitstream_buf_addr;
|
|
+
|
|
+ /* frame_type. 0:I, 1:p, 2:B, 3:b */
|
|
+ uint8_t frame_type;
|
|
+
|
|
+ /* Pad to force 8-byte alignment */
|
|
+ //uint32_t reserved;
|
|
+ uint8_t reserved[3];
|
|
+};
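A sketch of an input bitstream descriptor carrying one complete frame; buf_addr is assumed to be an MVE virtual address obtained from the driver's MMU layer, and the handle value is a placeholder:

#include <stdint.h>
#include <string.h>
#include "mve_protocol_def.h"

static void fill_bitstream_in(struct mve_buffer_bitstream *b, uint32_t buf_addr,
                              uint32_t alloc_bytes, uint32_t filled_bytes)
{
    memset(b, 0, sizeof(*b));
    b->host_handle = 1;                                   /* placeholder id */
    b->user_data_tag = 0;
    b->bitstream_flags = MVE_BUFFER_BITSTREAM_FLAG_ENDOFFRAME;
    b->bitstream_alloc_bytes = alloc_bytes;
    b->bitstream_offset = 0;
    b->bitstream_filled_len = filled_bytes;
    b->bitstream_buf_addr = buf_addr;
}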
|
|
+
|
|
+/*
|
|
+ * Define a region in 16x16 units
|
|
+ *
|
|
+ * The region is macroblock positions (x,y) in the range
|
|
+ * mbx_left <= x < mbx_right
|
|
+ * mby_top <= y < mby_bottom
|
|
+ */
|
|
+struct mve_buffer_param_region
|
|
+{
|
|
+ uint16_t mbx_left; /* macroblock x left edge (inclusive) */
|
|
+ uint16_t mbx_right; /* macroblock x right edge (exclusive) */
|
|
+ uint16_t mby_top; /* macroblock y top edge (inclusive) */
|
|
+ uint16_t mby_bottom; /* macroblock y bottom edge (exclusive) */
|
|
+ int16_t qp_delta; /* QP delta value for this region, this
|
|
+ * delta applies to QP values in the ranges:
|
|
+ * H264: 0-51
|
|
+ * HEVC: 0-51
|
|
+ * VP9: 0-255 */
|
|
+ uint16_t reserved;
|
|
+};
|
|
+
|
|
+/* input for encoder,
|
|
+ * the mve_buffer_param_regions buffer stores the information for FRAME buffers,
|
|
+ * and the information for regions of interest.
|
|
+ */
|
|
+struct mve_buffer_param_regions
|
|
+{
|
|
+ uint8_t n_regions; /* Number of regions */
|
|
+ uint8_t reserved[ 3 ];
|
|
+ #define MVE_MAX_FRAME_REGIONS 16
|
|
+ struct mve_buffer_param_region region[ MVE_MAX_FRAME_REGIONS ];
|
|
+};
|
|
+
|
|
+/* the block parameter record specifies the various properties of a quad */
|
|
+struct mve_block_param_record
|
|
+{
|
|
+ uint16_t qp_delta; /* Bitset of four 4-bit QP delta values for a quad */
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16 (0)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16_SZ (4)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16 (4)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16_SZ (4)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16 (8)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16_SZ (4)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16 (12)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16_SZ (4)
|
|
+
|
|
+ uint8_t force;
|
|
+ #define MVE_BLOCK_PARAM_RECORD_FORCE_NONE (0x00)
|
|
+ #define MVE_BLOCK_PARAM_RECORD_FORCE_QP (0x01)
|
|
+
|
|
+ uint8_t reserved;
|
|
+};
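A packing sketch for the qp_delta bitset above; whether the four 4-bit fields are interpreted as signed values is not stated here, so the masking below only demonstrates the bit layout defined by the offsets:

#include <stdint.h>
#include "mve_protocol_def.h"

/* Pack one QP delta per 16x16 block of the quad into the 16-bit bitset. */
static uint16_t pack_quad_qp_delta(int tl, int tr, int bl, int br)
{
    return (uint16_t)(((tl & 0xf) << MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16) |
                      ((tr & 0xf) << MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16) |
                      ((bl & 0xf) << MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16) |
                      ((br & 0xf) << MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16));
}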
|
|
+
|
|
+/* block configuration uncompressed rows header. this configures the size of the
|
|
+ * uncompressed body. */
|
|
+struct mve_buffer_general_rows_uncomp_hdr
|
|
+{
|
|
+ uint8_t n_cols_minus1; /* number of quad cols in picture minus 1 */
|
|
+ uint8_t n_rows_minus1; /* number of quad rows in picture minus 1 */
|
|
+ uint8_t reserved[2];
|
|
+};
|
|
+
|
|
+/* block configuration uncompressed rows body. this structure contains an array
|
|
+ * of block parameter records whose length is (n_cols_minus1 + 1) * (n_rows_minus1 + 1)
|
|
+ * elements. therefore the allocation of this structure needs to be dynamic and
|
|
+ * a pointer to the allocated memory should then be assigned to the general
|
|
+ * purpose buffer data pointer
|
|
+ */
|
|
+struct mve_buffer_general_rows_uncomp_body
|
|
+{
|
|
+ /* the size of this array is variable and not necessarily equal to 1.
|
|
+ * therefore the sizeof operator should not be used
|
|
+ */
|
|
+ struct mve_block_param_record bpr[1];
|
|
+};
|
|
+
|
|
+/* input for encoder, block level configurations.
|
|
+ * the row based block configurations can be defined in different formats. they
|
|
+ * are stored in the blk_cfgs union and identified by the blk_cfg_type member.
|
|
+ * these definitions consist of a header and body pair. the header part contains
|
|
+ * configuration information for the body. the body part describes the actual
|
|
+ * layout of the data buffer pointed to by the mve_buffer_general_hdr buffer_ptr.
|
|
+ */
|
|
+struct mve_buffer_general_block_configs
|
|
+{
|
|
+ uint8_t blk_cfg_type;
|
|
+ #define MVE_BLOCK_CONFIGS_TYPE_NONE (0x00)
|
|
+ #define MVE_BLOCK_CONFIGS_TYPE_ROW_UNCOMP (0xff)
|
|
+ uint8_t reserved[3];
|
|
+ union
|
|
+ {
|
|
+ struct mve_buffer_general_rows_uncomp_hdr rows_uncomp;
|
|
+ } blk_cfgs;
|
|
+};
|
|
+
|
|
+
|
|
+/* input for encoder */
|
|
+struct mve_buffer_param_qp
|
|
+{
|
|
+ /* QP (quantization parameter) for encode.
|
|
+ *
|
|
+ * When used to set fixed QP for encode, with rate control
|
|
+ * disabled, then the valid ranges are:
|
|
+ * H264: 0-51
|
|
+ * HEVC: 0-51
|
|
+ * VP8: 0-63
|
|
+ * VP9: 0-63
|
|
+ * Note: The QP must be set separately for I, P and B frames.
|
|
+ *
|
|
+ * But when this message is used with the regions-feature,
|
|
+ * then the valid ranges are the internal bitstream ranges:
|
|
+ * H264: 0-51
|
|
+ * HEVC: 0-51
|
|
+ * VP8: 0-127
|
|
+ * VP9: 0-255
|
|
+ */
|
|
+ int32_t qp;
|
|
+};
|
|
+
|
|
+/* output from decoder */
|
|
+struct mve_buffer_param_display_size
|
|
+{
|
|
+ uint16_t display_width;
|
|
+ uint16_t display_height;
|
|
+};
|
|
+
|
|
+/* output from decoder, colour information needed for hdr */
|
|
+struct mve_buffer_param_colour_description
|
|
+{
|
|
+ uint32_t flags;
|
|
+ #define MVE_BUFFER_PARAM_COLOUR_FLAG_MASTERING_DISPLAY_DATA_VALID (1)
|
|
+ #define MVE_BUFFER_PARAM_COLOUR_FLAG_CONTENT_LIGHT_DATA_VALID (2)
|
|
+
|
|
+ uint8_t range; /* Unspecified=0, Limited=1, Full=2 */
|
|
+ #define MVE_BUFFER_PARAM_COLOUR_RANGE_UNSPECIFIED (0)
|
|
+ #define MVE_BUFFER_PARAM_COLOUR_RANGE_LIMITED (1)
|
|
+ #define MVE_BUFFER_PARAM_COLOUR_RANGE_FULL (2)
|
|
+
|
|
+ uint8_t colour_primaries; /* see hevc spec. E.3.1 */
|
|
+ uint8_t transfer_characteristics; /* see hevc spec. E.3.1 */
|
|
+ uint8_t matrix_coeff; /* see hevc spec. E.3.1 */
|
|
+
|
|
+ uint16_t mastering_display_primaries_x[3]; /* see hevc spec. D.3.27 */
|
|
+ uint16_t mastering_display_primaries_y[3]; /* see hevc spec. D.3.27 */
|
|
+ uint16_t mastering_white_point_x; /* see hevc spec. D.3.27 */
|
|
+ uint16_t mastering_white_point_y; /* see hevc spec. D.3.27 */
|
|
+ uint32_t max_display_mastering_luminance; /* see hevc spec. D.3.27 */
|
|
+ uint32_t min_display_mastering_luminance; /* see hevc spec. D.3.27 */
|
|
+
|
|
+ uint32_t max_content_light_level; /* see hevc spec. D.3.35 */
|
|
+ uint32_t avg_content_light_level; /* see hevc spec. D.3.35 */
|
|
+
|
|
+ uint8_t video_format_present_flag;
|
|
+ uint8_t video_format;
|
|
+ uint8_t aspect_ratio_info_present_flag;
|
|
+ uint8_t aspect_ratio_idc;
|
|
+ uint8_t timing_flag_info_present_flag;
|
|
+ uint16_t sar_width;
|
|
+ uint16_t sar_height;
|
|
+ uint32_t num_units_in_tick;
|
|
+ uint32_t time_scale;
|
|
+};
|
|
+
|
|
+struct mve_buffer_param_sei_user_data_unregistered
|
|
+{
|
|
+ uint8_t flags;
|
|
+ #define MVE_BUFFER_PARAM_USER_DATA_UNREGISTERED_VALID (1)
|
|
+ uint8_t uuid[16];
|
|
+ char user_data[256 - 35];
|
|
+ uint8_t user_data_len;
|
|
+
|
|
+ uint8_t reserved[5];
|
|
+};
|
|
+
|
|
+/* output from decoder see hevc spec. D.3.3 */
|
|
+struct mve_buffer_param_frame_field_info
|
|
+{
|
|
+ uint8_t pic_struct;
|
|
+ uint8_t source_scan_type;
|
|
+ uint8_t duplicate_flag;
|
|
+ uint8_t reserved;
|
|
+};
|
|
+
|
|
+/* output from decoder, VC-1 specific feature only relevant
|
|
+ * if using AFBC output
|
|
+ */
|
|
+struct mve_buffer_param_range_map
|
|
+{
|
|
+ uint8_t luma_map_enabled;
|
|
+ uint8_t luma_map_value;
|
|
+ uint8_t chroma_map_enabled;
|
|
+ uint8_t chroma_map_value;
|
|
+};
|
|
+
|
|
+/* input for encoder */
|
|
+struct mve_buffer_param_rate_control
|
|
+{
|
|
+ uint32_t rate_control_mode;
|
|
+ #define MVE_OPT_RATE_CONTROL_MODE_OFF (0)
|
|
+ #define MVE_OPT_RATE_CONTROL_MODE_STANDARD (1)
|
|
+ #define MVE_OPT_RATE_CONTROL_MODE_VARIABLE (2)
|
|
+ #define MVE_OPT_RATE_CONTROL_MODE_CONSTANT (3)
|
|
+ #define MVE_OPT_RATE_CONTROL_MODE_C_VARIABLE (4)
|
|
+ uint32_t target_bitrate; /* in bits per second */
|
|
+ uint32_t maximum_bitrate; /* in bits per second */
|
|
+};
|
|
+
|
|
+/* input for encoder */
|
|
+struct mve_buffer_param_rate_control_qp_range
|
|
+{
|
|
+ int32_t qp_min;
|
|
+ int32_t qp_max;
|
|
+};
|
|
+
|
|
+/* input for encoder, see hevc spec. D.3.16 */
|
|
+struct mve_buffer_param_frame_packing
|
|
+{
|
|
+ uint32_t flags;
|
|
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_QUINCUNX_SAMPLING (1)
|
|
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_SPATIAL_FLIPPING (2)
|
|
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_FRAME0_FLIPPED (4)
|
|
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_FIELD_VIEWS (8)
|
|
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_CURRENT_FRAME_IS_FRAME0 (16)
|
|
+
|
|
+ uint8_t frame_packing_arrangement_type;
|
|
+ uint8_t content_interpretation_type;
|
|
+
|
|
+ uint8_t frame0_grid_position_x;
|
|
+ uint8_t frame0_grid_position_y;
|
|
+ uint8_t frame1_grid_position_x;
|
|
+ uint8_t frame1_grid_position_y;
|
|
+
|
|
+ uint8_t reserved[ 2 ];
|
|
+};
|
|
+
|
|
+struct mve_buffer_param_rectangle
|
|
+{
|
|
+ uint16_t x_left; /* pixel x left edge (inclusive) */
|
|
+ uint16_t x_right; /* pixel x right edge (exclusive) */
|
|
+ uint16_t y_top; /* pixel y top edge (inclusive) */
|
|
+ uint16_t y_bottom; /* pixel y bottom edge (exclusive) */
|
|
+};
|
|
+
|
|
+/* input for encoder,
|
|
+ * indicates which parts of the source picture have changed.
|
|
+ * The encoder can (optionally) use this information to
|
|
+ * reduce memory bandwidth.
|
|
+ *
|
|
+ * n_rectangles=0 indicates the source picture is unchanged.
|
|
+ *
|
|
+ * This parameter only applies to the picture that immediately
|
|
+ * follows (and not to subsequent ones).
|
|
+ */
|
|
+struct mve_buffer_param_change_rectangles
|
|
+{
|
|
+ uint8_t n_rectangles; /* Number of rectangles */
|
|
+ uint8_t reserved[3];
|
|
+ #define MVE_MAX_FRAME_CHANGE_RECTANGLES 2
|
|
+ struct mve_buffer_param_rectangle rectangles[MVE_MAX_FRAME_CHANGE_RECTANGLES];
|
|
+};
|
|
+
|
|
+
|
|
+/* Parameters that are sent in the same communication channels
|
|
+ * as the buffers. A parameter applies to all subsequent buffers.
|
|
+ * Some types are only valid for decode, and some only for encode.
|
|
+ */
|
|
+struct mve_buffer_param
|
|
+{
|
|
+ uint32_t type; /* Extra data: */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_QP (2) /* qp */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_REGIONS (3) /* regions */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_DISPLAY_SIZE (5) /* display_size */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_RANGE_MAP (6) /* range_map */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_FRAME_RATE (9) /* arg, in frames per second, as a
|
|
+ * fixed point Q16 value, for example
|
|
+ * 0x001e0000 == 30.0 fps */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL (10) /* rate_control */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_QP_I (12) /* qp for I frames, when no rate control */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_QP_P (13) /* qp for P frames, when no rate control */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_QP_B (14) /* qp for B frames, when no rate control */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_COLOUR_DESCRIPTION (15) /* colour_description */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_FRAME_PACKING (16) /* frame_packing */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_FRAME_FIELD_INFO (17) /* frame_field_info */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_GOP_RESET (18) /* no extra data */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_DPB_HELD_FRAMES (19) /* arg, number of output buffers that are
|
|
+ * complete and held by firmware in the
|
|
+ * DPB for reordering purposes.
|
|
+ * Valid after the next frame is output */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_CHANGE_RECTANGLES (20) /* change rectangles */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_QP_RANGE (21) /* rate_control_qp_range */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_HRD_BUF_SIZE (23) /* arg */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_QP_RANGE_I (25) /* special range for I frames,
|
|
+ * rate_control_qp_range */
|
|
+ #define MVE_BUFFER_PARAM_TYPE_SEI_USER_DATA_UNREGISTERED (26) /* sei user_data_unregistered */
|
|
+
|
|
+ union
|
|
+ {
|
|
+ uint32_t arg; /* some parameters only need a uint32_t as argument */
|
|
+ struct mve_buffer_param_qp qp;
|
|
+ struct mve_buffer_param_regions regions;
|
|
+ struct mve_buffer_param_display_size display_size;
|
|
+ struct mve_buffer_param_range_map range_map;
|
|
+ struct mve_buffer_param_rate_control rate_control;
|
|
+ struct mve_buffer_param_rate_control_qp_range rate_control_qp_range;
|
|
+ struct mve_buffer_param_colour_description colour_description;
|
|
+ struct mve_buffer_param_frame_packing frame_packing;
|
|
+ struct mve_buffer_param_frame_field_info frame_field_info;
|
|
+ struct mve_buffer_param_change_rectangles change_rectangles;
|
|
+ struct mve_buffer_param_sei_user_data_unregistered user_data_unregistered;
|
|
+ } data;
|
|
+};
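Two sketches of parameters as they could be placed on the buffer channel; how the parameter messages are actually queued to the firmware is driver-specific and not shown:

#include <stdint.h>
#include <string.h>
#include "mve_protocol_def.h"

/* Standard rate control at a given target bitrate (bits per second). */
static void build_rate_control(struct mve_buffer_param *p, uint32_t target_bps)
{
    memset(p, 0, sizeof(*p));
    p->type = MVE_BUFFER_PARAM_TYPE_RATE_CONTROL;
    p->data.rate_control.rate_control_mode = MVE_OPT_RATE_CONTROL_MODE_STANDARD;
    p->data.rate_control.target_bitrate = target_bps;
    p->data.rate_control.maximum_bitrate = target_bps;
}

/* Frame rate is a Q16 fixed-point value: 30.0 fps == 30 << 16 == 0x001e0000. */
static void build_frame_rate(struct mve_buffer_param *p, uint32_t fps)
{
    memset(p, 0, sizeof(*p));
    p->type = MVE_BUFFER_PARAM_TYPE_FRAME_RATE;
    p->data.arg = fps << 16;
}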
|
|
+
|
|
+/* output from decoder, assertive display statistics.
|
|
+ * buffer_ptr points to a buffer of luma quad average values for the picture
|
|
+ * that can be used as a thumbnail. the type of content used to generate the
|
|
+ * assertive display statistics is indicated by MVE_AD_STATS_PIC_FMT_INTERLACED.
|
|
+ * for progressive content; the arrangement is in raster format with dimensions
|
|
+ * thumbnail_width by thumbnail_height. the overall frame average luma and
|
|
+ * chroma values are returned in frame_average.
|
|
+ * for interlaced content; the arrangement is in raster format, top field
|
|
+ * followed by bottom field with each field having dimensions thumbnail_width by
|
|
+ * thumbnail_height. the field averages for luma and chroma values are combined
|
|
+ * and returned in an overall value for the frame (frame_average).
|
|
+ */
|
|
+struct mve_buffer_general_ad_stats
|
|
+{
|
|
+
|
|
+ uint32_t frame_averages;
|
|
+ // bitfields
|
|
+ #define MVE_AD_STATS_PIC_AVGS_Y (0)
|
|
+ #define MVE_AD_STATS_PIC_AVGS_Y_SZ (12)
|
|
+ #define MVE_AD_STATS_PIC_AVGS_CB (12)
|
|
+ #define MVE_AD_STATS_PIC_AVGS_CB_SZ (10)
|
|
+ #define MVE_AD_STATS_PIC_AVGS_CR (22)
|
|
+ #define MVE_AD_STATS_PIC_AVGS_CR_SZ (10)
|
|
+ uint16_t thumbnail_width;
|
|
+ uint16_t thumbnail_height;
|
|
+ uint8_t ad_stats_flags;
|
|
+ #define MVE_AD_STATS_PIC_FMT_PROGRESSIVE (0)
|
|
+ #define MVE_AD_STATS_PIC_FMT_INTERLACED (1)
|
|
+ uint8_t reserved[3];
|
|
+};
|
|
+
|
|
+/* The general purpose buffer header stores the common fields of an
|
|
+ * mve_buffer_general. it contains the pointer to the data buffer that contains
|
|
+ * the general purpose data
|
|
+ */
|
|
+struct mve_buffer_general_hdr
|
|
+{
|
|
+ /* For identification of the buffer, this is not changed by the firmware. */
|
|
+ uint64_t host_handle;
|
|
+
|
|
+ /* this depends upon the type of the general purpose buffer */
|
|
+ uint64_t user_data_tag;
|
|
+
|
|
+ /* pointer to the buffer containing the general purpose data. the format
|
|
+ * of this data is defined by the configuration in the mve_buffer_general */
|
|
+ uint32_t buffer_ptr;
|
|
+
|
|
+ /* size of the buffer pointed to by buffer_ptr */
|
|
+ uint32_t buffer_size;
|
|
+
|
|
+ /* selects the type of semantics to use for the general purpose buffer. it
|
|
+ * tags (or discriminates) the union config member in mve_buffer_general
|
|
+ */
|
|
+ uint16_t type; /* Extra data: */
|
|
+ #define MVE_BUFFER_GENERAL_TYPE_INVALID (0) /* invalid */
|
|
+ #define MVE_BUFFER_GENERAL_TYPE_BLOCK_CONFIGS (1) /* block_configs */
|
|
+ #define MVE_BUFFER_GENERAL_TYPE_AD_STATS (2) /* assertive display statistics */
|
|
+
|
|
+ /* size of the mve_buffer_general config member */
|
|
+ uint16_t config_size;
|
|
+
|
|
+ /* pad to force 8-byte alignment */
|
|
+ uint32_t reserved;
|
|
+};
|
|
+
|
|
+/* The general purpose buffer consists of a header and a configuration. The
|
|
+ * header contains a pointer to a buffer whose format is described by the
|
|
+ * configuration. The type of configuration is indicated by the type value in
|
|
+ * the header. N.B. In use, the size of the config part of this structure is
|
|
+ * defined in the header and is not necessarily equal to that returned by the
|
|
+ * sizeof() operator. This allows a more size efficient communication between
|
|
+ * the host and firmware.
|
|
+ */
|
|
+struct mve_buffer_general
|
|
+{
|
|
+ struct mve_buffer_general_hdr header;
|
|
+
|
|
+ /* used to describe the configuration of the general purpose buffer data
|
|
+ * pointed to by buffer_ptr
|
|
+ */
|
|
+ union
|
|
+ {
|
|
+ struct mve_buffer_general_block_configs block_configs;
|
|
+ struct mve_buffer_general_ad_stats ad_stats;
|
|
+ } config;
|
|
+};
|
|
+
|
|
+#ifdef __cplusplus
|
|
+}
|
|
+#endif
|
|
+
|
|
+#endif /* __FW_INCLUDE__MVE_PROTOCOL_DEF_H__ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_bitops.h b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_bitops.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_bitops.h
|
|
@@ -0,0 +1,91 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_BITOPS_H_
|
|
+#define _MVX_BITOPS_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/bug.h>
|
|
+#include <linux/kernel.h>
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_set_bit() - Set a bit in the bitmask.
|
|
+ * @bit: Bit to be set.
|
|
+ * @addr: Pointer to bitmask.
|
|
+ *
|
|
+ * Works similarly to set_bit but uses no locks, is not atomic and protects
|
|
+ * against overflow.
|
|
+ */
|
|
+static inline void mvx_set_bit(unsigned int bit,
|
|
+ uint64_t *addr)
|
|
+{
|
|
+ BUG_ON(bit >= (sizeof(*addr) * 8));
|
|
+ *addr |= 1ull << bit;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mvx_clear_bit() - Clear a bit in the bitmask.
|
|
+ * @bit: Bit to be cleared.
|
|
+ * @addr: Pointer to bitmask.
|
|
+ *
|
|
+ * Works similarly to clear_bit but uses no locks, is not atomic and protects
|
|
+ * against overflow.
|
|
+ */
|
|
+static inline void mvx_clear_bit(unsigned int bit,
|
|
+ uint64_t *addr)
|
|
+{
|
|
+ BUG_ON(bit >= (sizeof(*addr) * 8));
|
|
+ *addr &= ~(1ull << bit);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mvx_test_bit() - Test a bit in the bitmask.
|
|
+ * @bit: Bit to be tested.
|
|
+ * @addr: Pointer to bitmask.
|
|
+ *
|
|
+ * Works similarly to test_bit but uses no locks, is not atomic and protects
|
|
+ * against overflow.
|
|
+ */
|
|
+static inline bool mvx_test_bit(unsigned int bit,
|
|
+ uint64_t *addr)
|
|
+{
|
|
+ BUG_ON(bit >= (sizeof(*addr) * 8));
|
|
+ return 0 != (*addr & (1ull << bit));
|
|
+}
|
|
+
|
|
+#endif /* _MVX_BITOPS_H_ */
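A usage sketch for the helpers above, claiming the first free bit in a 64-bit mask; the caller is assumed to provide any locking, since these helpers are deliberately non-atomic, and the function name is hypothetical:

static inline int mvx_claim_first_free(uint64_t *mask, unsigned int nbits)
{
    unsigned int i;

    for (i = 0; i < nbits && i < 64; ++i) {
        if (!mvx_test_bit(i, mask)) {
            mvx_set_bit(i, mask);
            return (int)i;
        }
    }

    return -1;   /* no free bit available */
}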
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_buffer.c b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_buffer.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_buffer.c
|
|
@@ -0,0 +1,524 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include "mvx_buffer.h"
|
|
+#include "mvx_seq.h"
|
|
+#include "mvx_log_group.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * Each 2x2 pixel square is subsampled. How many samples are taken depends
|
|
+ * on the color format, but typically the luma channel (Y) gets 4 samples and
|
|
+ * the chroma channels (UV) get 2 or 4 samples.
|
|
+ */
|
|
+#define SUBSAMPLE_PIXELS 2
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * get_stride() - Get the per-plane strides for a 2x2 pixel square.
|
|
+ * @format: MVX frame format.
|
|
+ * @stride: [plane 0, plane 1, plane 2][x, y] stride.
|
|
+ *
|
|
+ * Calculate the stride in bytes for each plane for a subsampled (2x2) pixel
|
|
+ * square.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int get_stride(enum mvx_format format,
|
|
+ uint8_t *nplanes,
|
|
+ unsigned int stride[MVX_BUFFER_NPLANES][2])
|
|
+{
|
|
+ switch (format) {
|
|
+ case MVX_FORMAT_YUV420_I420:
|
|
+ *nplanes = 3;
|
|
+ stride[0][0] = 2;
|
|
+ stride[0][1] = 2;
|
|
+ stride[1][0] = 1;
|
|
+ stride[1][1] = 1;
|
|
+ stride[2][0] = 1;
|
|
+ stride[2][1] = 1;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_NV12:
|
|
+ case MVX_FORMAT_YUV420_NV21:
|
|
+ *nplanes = 2;
|
|
+ stride[0][0] = 2;
|
|
+ stride[0][1] = 2;
|
|
+ stride[1][0] = 2;
|
|
+ stride[1][1] = 1;
|
|
+ stride[2][0] = 0;
|
|
+ stride[2][1] = 0;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_P010:
|
|
+ *nplanes = 2;
|
|
+ stride[0][0] = 4;
|
|
+ stride[0][1] = 2;
|
|
+ stride[1][0] = 4;
|
|
+ stride[1][1] = 1;
|
|
+ stride[2][0] = 0;
|
|
+ stride[2][1] = 0;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_Y0L2:
|
|
+ case MVX_FORMAT_YUV420_AQB1:
|
|
+ *nplanes = 1;
|
|
+ stride[0][0] = 8;
|
|
+ stride[0][1] = 1;
|
|
+ stride[1][0] = 0;
|
|
+ stride[1][1] = 0;
|
|
+ stride[2][0] = 0;
|
|
+ stride[2][1] = 0;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV422_YUY2:
|
|
+ case MVX_FORMAT_YUV422_UYVY:
|
|
+ *nplanes = 1;
|
|
+ stride[0][0] = 4;
|
|
+ stride[0][1] = 2;
|
|
+ stride[1][0] = 0;
|
|
+ stride[1][1] = 0;
|
|
+ stride[2][0] = 0;
|
|
+ stride[2][1] = 0;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV422_Y210:
|
|
+ case MVX_FORMAT_RGBA_8888:
|
|
+ case MVX_FORMAT_BGRA_8888:
|
|
+ case MVX_FORMAT_ARGB_8888:
|
|
+ case MVX_FORMAT_ABGR_8888:
|
|
+ *nplanes = 1;
|
|
+ stride[0][0] = 8;
|
|
+ stride[0][1] = 2;
|
|
+ stride[1][0] = 0;
|
|
+ stride[1][1] = 0;
|
|
+ stride[2][0] = 0;
|
|
+ stride[2][1] = 0;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
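To make the stride table above concrete, a worked note (added commentary, not part of the original patch) on how the [x, y] entries read for two of the formats:

/*
 * NV12 (MVX_FORMAT_YUV420_NV12): per 2x2 pixel square the luma plane
 * stores 2 bytes per row over 2 rows ({2, 2}), and the interleaved CbCr
 * plane stores 2 bytes over 1 row ({2, 1}), i.e. chroma is subsampled
 * in both directions but kept two samples wide.
 *
 * P010 (MVX_FORMAT_YUV420_P010): the same layout with 16-bit samples,
 * so the horizontal entries double to {4, 2} and {4, 1}.
 */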
|
|
+
|
|
+static int map_plane(struct mvx_buffer *buf,
|
|
+ mvx_mmu_va begin,
|
|
+ mvx_mmu_va end,
|
|
+ unsigned int plane)
|
|
+{
|
|
+ while (begin < end) {
|
|
+ struct mvx_buffer_plane *p = &buf->planes[plane];
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_mmu_map_pages(buf->mmu, begin, p->pages,
|
|
+ MVX_ATTR_SHARED_RW,
|
|
+ MVX_ACCESS_READ_WRITE);
|
|
+ if (ret == 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "Memory map buffer. buf=%p, plane=%u, va=0x%x, size=%zu.",
|
|
+ buf, plane, p->pages->va,
|
|
+ mvx_buffer_size(buf, plane));
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (ret != -EAGAIN)
|
|
+ return ret;
|
|
+
|
|
+ begin += 1 * 1024 * 1024; /* 1 MB. */
|
|
+ }
|
|
+
|
|
+ return -ENOMEM;
|
|
+}
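Added commentary on the mapping strategy above (not in the original patch; the meaning of -EAGAIN is inferred from how it is handled here):

/*
 * map_plane() walks the [begin, end) VA window in 1 MB steps: it asks
 * mvx_mmu_map_pages() to place the plane at the current address and only
 * advances when the call returns -EAGAIN (the address range is taken),
 * any other error is propagated as-is, and running past 'end' without a
 * successful mapping yields -ENOMEM.
 */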
|
|
+
|
|
+/****************************************************************************
|
|
+ * External functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+void mvx_buffer_show(struct mvx_buffer *buf,
|
|
+ struct seq_file *s)
|
|
+{
|
|
+ int i;
|
|
+ int ind = 0;
|
|
+
|
|
+ mvx_seq_printf(s, "mvx_buffer", ind, "%p\n", buf);
|
|
+
|
|
+ ind++;
|
|
+ mvx_seq_printf(s, "format", ind, "0x%x\n", buf->format);
|
|
+ mvx_seq_printf(s, "dir", ind, "%u\n", buf->dir);
|
|
+ mvx_seq_printf(s, "flags", ind, "0x%0x\n", buf->flags);
|
|
+ mvx_seq_printf(s, "width", ind, "%u\n", buf->width);
|
|
+ mvx_seq_printf(s, "height", ind, "%u\n", buf->height);
|
|
+ mvx_seq_printf(s, "nplanes", ind, "%u\n", buf->nplanes);
|
|
+ mvx_seq_printf(s, "planes", ind, "\n");
|
|
+ ind++;
|
|
+ for (i = 0; i < buf->nplanes; ++i) {
|
|
+ char tag[10];
|
|
+ struct mvx_buffer_plane *plane = &buf->planes[i];
|
|
+
|
|
+ scnprintf(tag, sizeof(tag), "#%d", i);
|
|
+ mvx_seq_printf(s, tag, ind,
|
|
+ "va: 0x%08x, size: %10zu, stride: %5u, filled: %10u\n",
|
|
+ mvx_buffer_va(buf, i),
|
|
+ mvx_buffer_size(buf, i),
|
|
+ plane->stride,
|
|
+ plane->filled);
|
|
+ }
|
|
+
|
|
+ ind--;
|
|
+}
|
|
+
|
|
+int mvx_buffer_construct(struct mvx_buffer *buf,
|
|
+ struct device *dev,
|
|
+ struct mvx_mmu *mmu,
|
|
+ enum mvx_direction dir,
|
|
+ unsigned int nplanes,
|
|
+ struct sg_table **sgt)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ if (nplanes > MVX_BUFFER_NPLANES) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to construct buffer. Too many planes. nplanes=%u.",
|
|
+ nplanes);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ memset(buf, 0, sizeof(*buf));
|
|
+
|
|
+ buf->dev = dev;
|
|
+ buf->mmu = mmu;
|
|
+ buf->dir = dir;
|
|
+ buf->nplanes = nplanes;
|
|
+
|
|
+ for (i = 0; i < buf->nplanes; ++i) {
|
|
+ struct mvx_buffer_plane *plane = &buf->planes[i];
|
|
+
|
|
+ if (sgt[i] == NULL)
|
|
+ break;
|
|
+
|
|
+ plane->pages = mvx_mmu_alloc_pages_sg(dev, sgt[i], 0);
|
|
+ if (IS_ERR(plane->pages))
|
|
+ goto free_pages;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+free_pages:
|
|
+ while (i--)
|
|
+ mvx_mmu_free_pages(buf->planes[i].pages);
|
|
+
|
|
+ return -ENOMEM;
|
|
+}
|
|
+
|
|
+void mvx_buffer_destruct(struct mvx_buffer *buf)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ mvx_buffer_unmap(buf);
|
|
+
|
|
+ for (i = 0; i < buf->nplanes; i++)
|
|
+ if (buf->planes[i].pages != NULL)
|
|
+ mvx_mmu_free_pages(buf->planes[i].pages);
|
|
+}
|
|
+
|
|
+int mvx_buffer_map(struct mvx_buffer *buf,
|
|
+ mvx_mmu_va begin,
|
|
+ mvx_mmu_va end)
|
|
+{
|
|
+ int i;
|
|
+ int ret = 0;
|
|
+
|
|
+ for (i = 0; i < buf->nplanes; i++) {
|
|
+ struct mvx_buffer_plane *plane = &buf->planes[i];
|
|
+
|
|
+ if (plane->pages != NULL) {
|
|
+ ret = map_plane(buf, begin, end, i);
|
|
+ if (ret != 0) {
|
|
+ mvx_buffer_unmap(buf);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void mvx_buffer_unmap(struct mvx_buffer *buf)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < buf->nplanes; i++) {
|
|
+ struct mvx_buffer_plane *plane = &buf->planes[i];
|
|
+
|
|
+ if ((plane->pages != NULL) && (plane->pages->va != 0))
|
|
+ mvx_mmu_unmap_pages(plane->pages);
|
|
+ }
|
|
+}
|
|
+
|
|
+bool mvx_buffer_is_mapped(struct mvx_buffer *buf)
|
|
+{
|
|
+ return (buf->planes[0].pages != NULL) &&
|
|
+ (buf->planes[0].pages->va != 0);
|
|
+}
|
|
+
|
|
+int mvx_buffer_synch(struct mvx_buffer *buf,
|
|
+ enum dma_data_direction dir)
|
|
+{
|
|
+ int i;
|
|
+ int ret;
|
|
+ int page_count = 0;
|
|
+ for (i = 0; i < buf->nplanes; i++) {
+ struct mvx_buffer_plane *plane = &buf->planes[i];
+ int page_off;
+ int page_off_end;
+
+ if (plane->pages == NULL)
+ continue;
+
+ /* Calculate the page offset of the plane, mirroring mvx_buffer_va(). */
+ page_off = (plane->offset + plane->pages->offset) >> PAGE_SHIFT;
+ /* Calculate the last page of the plane when 'filled' is valid. */
+ page_off_end = plane->pages->count;
+ if (plane->filled) {
+ page_off_end = (plane->offset + plane->pages->offset + plane->filled + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ }
+ page_count = page_off_end - page_off;
+ if (page_count + page_off > plane->pages->count) {
+ page_count = plane->pages->count - page_off;
+ }
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
+ "plane [%d] sync pages: %p, dir: %d, page offset: %d, pages %d",
+ i,
+ plane->pages->pages[0],
+ buf->dir,
+ page_off, page_count);
+ ret = mvx_mmu_synch_pages(plane->pages, dir, page_off, page_count);
+ if (ret != 0)
+ return ret;
+ }
|
|
+
|
|
+ return 0;
|
|
+}
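A worked example of the page-range computation above, with illustrative numbers (assuming PAGE_SIZE is 4096; not from the original patch):

/*
 * plane->offset + plane->pages->offset = 6144  (0x1800)
 * plane->filled                        = 12288 (0x3000)
 *
 *   page_off     = 6144 >> 12                  = 1
 *   page_off_end = (6144 + 12288 + 4095) >> 12 = 5
 *   page_count   = 5 - 1                       = 4
 *
 * Pages 1..4 of the plane are synchronized, covering bytes 4096..20479
 * and therefore the whole filled payload (bytes 6144..18431).
 */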
|
|
+
|
|
+void mvx_buffer_clear(struct mvx_buffer *buf)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ buf->flags = 0;
|
|
+
|
|
+ for (i = 0; i < buf->nplanes; i++)
|
|
+ buf->planes[i].filled = 0;
|
|
+}
|
|
+
|
|
+int mvx_buffer_filled_set(struct mvx_buffer *buf,
|
|
+ unsigned int plane,
|
|
+ unsigned int filled,
|
|
+ unsigned int offset)
|
|
+{
|
|
+ struct mvx_buffer_plane *p = &buf->planes[plane];
|
|
+ size_t size = mvx_buffer_size(buf, plane);
|
|
+
|
|
+ if (plane >= buf->nplanes)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (size < (filled + offset)) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Buffer plane too small. plane=%d, size=%zu, filled=%u, offset=%u.",
|
|
+ plane, size, filled, offset);
|
|
+ buf->flags |= MVX_BUFFER_FRAME_NEED_REALLOC;
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ p->filled = filled;
|
|
+ p->offset = offset;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+size_t mvx_buffer_size(struct mvx_buffer *buf,
|
|
+ unsigned int plane)
|
|
+{
|
|
+ struct mvx_buffer_plane *p = &buf->planes[plane];
|
|
+
|
|
+ if (plane >= buf->nplanes || p->pages == NULL)
|
|
+ return 0;
|
|
+
|
|
+ return mvx_mmu_size_pages(p->pages);
|
|
+}
|
|
+
|
|
+mvx_mmu_va mvx_buffer_va(struct mvx_buffer *buf,
|
|
+ unsigned int plane)
|
|
+{
|
|
+ struct mvx_buffer_plane *p = &buf->planes[plane];
|
|
+
|
|
+ if (plane >= buf->nplanes || p->pages == NULL)
|
|
+ return 0;
|
|
+
|
|
+ return p->pages->va + p->pages->offset + p->offset;
|
|
+}
|
|
+
|
|
+int mvx_buffer_frame_dim(enum mvx_format format,
|
|
+ unsigned int width,
|
|
+ unsigned int height,
|
|
+ uint8_t *nplanes,
|
|
+ unsigned int *stride,
|
|
+ unsigned int *size)
|
|
+{
|
|
+ unsigned int s[MVX_BUFFER_NPLANES][2];
|
|
+ unsigned int __nplanes = *nplanes;
|
|
+ int i;
|
|
+ int ret;
|
|
+
|
|
+ ret = get_stride(format, nplanes, s);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ for (i = 0; i < *nplanes; i++) {
|
|
+ const unsigned int stride_align = 1;
|
|
+ unsigned int tmp = DIV_ROUND_UP(width * s[i][0],
|
|
+ SUBSAMPLE_PIXELS);
|
|
+ /* Use optimal stride if no special stride was requested. */
|
|
+ if (i >= __nplanes || stride[i] == 0)
|
|
+ stride[i] = round_up(tmp, stride_align);
|
|
+ /* Else make sure to round up to minimum stride. */
|
|
+ else
|
|
+ stride[i] = max(stride[i], tmp);
|
|
+
|
|
+ size[i] = DIV_ROUND_UP(height * s[i][1],
|
|
+ SUBSAMPLE_PIXELS ) * stride[i];
|
|
+ }
|
|
+ /* Workaround for NV12/NV21/P010 output with an odd width or height. */
|
|
+ if (*nplanes == 2 && (width % 2 != 0 || height % 2 != 0)) {
|
|
+ unsigned int tmp = DIV_ROUND_UP(width, SUBSAMPLE_PIXELS) * s[1][0];
|
|
+ stride[1] = max(stride[1], tmp);
|
|
+ size[1] = DIV_ROUND_UP(height * s[1][1],
|
|
+ SUBSAMPLE_PIXELS ) * stride[1];
|
|
+ }
|
|
+
|
|
+ for (i = *nplanes; i < MVX_BUFFER_NPLANES; i++) {
|
|
+ size[i] = 0;
|
|
+ }
|
|
+ return 0;
|
|
+}
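For reference, plugging a common case into the formula above (a worked example, not part of the original patch):

/*
 * MVX_FORMAT_YUV420_NV12 at 1920x1080 with no stride requested:
 *
 *   plane 0 (Y):    stride = DIV_ROUND_UP(1920 * 2, 2)        = 1920
 *                   size   = DIV_ROUND_UP(1080 * 2, 2) * 1920 = 2073600
 *   plane 1 (CbCr): stride = DIV_ROUND_UP(1920 * 2, 2)        = 1920
 *                   size   = DIV_ROUND_UP(1080 * 1, 2) * 1920 = 1036800
 *
 * which is the usual NV12 layout: a full-resolution luma plane plus a
 * half-height, full-stride interleaved chroma plane.
 */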
|
|
+
|
|
+int mvx_buffer_frame_set(struct mvx_buffer *buf,
|
|
+ enum mvx_format format,
|
|
+ unsigned int width,
|
|
+ unsigned int height,
|
|
+ unsigned int *stride,
|
|
+ unsigned int *size,
|
|
+ bool interlaced)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ buf->format = format;
|
|
+ buf->width = width;
|
|
+ buf->height = height;
|
|
+
|
|
+ for (i = 0; i < buf->nplanes; i++) {
|
|
+ struct mvx_buffer_plane *plane = &buf->planes[i];
|
|
+
|
|
+ plane->stride = stride[i];
|
|
+
|
|
+ if (buf->dir == MVX_DIR_OUTPUT) {
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_buffer_filled_set(buf, i, size[i], plane->offset);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /* Verify that plane has correct length. */
|
|
+ if (plane->filled > 0 && plane->filled != size[i]) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "Buffer filled length does not match plane size. plane=%i, filled=%zu, size=%u.",
|
|
+ i, plane->filled, size[i]);
|
|
+ //return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ /* Verify that there is no buffer overflow. */
|
|
+ if ((plane->filled + plane->offset) > mvx_buffer_size(buf, i)) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Buffer plane size is too small. plane=%i, size=%zu, size=%u, filled=%u, offset=%u",
|
|
+ i, size[i], mvx_buffer_size(buf, i), plane->filled, plane->offset);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (interlaced != false){
|
|
+ buf->flags |= MVX_BUFFER_INTERLACE;
|
|
+ } else {
|
|
+ buf->flags &= ~MVX_BUFFER_INTERLACE;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_buffer_afbc_set(struct mvx_buffer *buf,
|
|
+ enum mvx_format format,
|
|
+ unsigned int width,
|
|
+ unsigned int height,
|
|
+ unsigned int afbc_width,
|
|
+ unsigned int size,
|
|
+ bool interlaced)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ buf->format = format;
|
|
+ buf->width = width;
|
|
+ buf->height = height;
|
|
+ buf->planes[0].afbc_width = afbc_width;
|
|
+
|
|
+ if (buf->dir == MVX_DIR_INPUT) {
|
|
+ buf->crop_left = 0;
|
|
+ buf->crop_top = 0;
|
|
+ }
|
|
+
|
|
+ if (buf->dir == MVX_DIR_OUTPUT) {
|
|
+ ret = mvx_buffer_filled_set(buf, 0, size, buf->planes[0].offset);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (size > mvx_buffer_size(buf, 0)) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "AFBC buffer too small. buf_size=%zu, size=%u.",
|
|
+ size, mvx_buffer_size(buf, 0));
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ if (interlaced != false)
|
|
+ buf->flags |= MVX_BUFFER_INTERLACE;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_buffer.h b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_buffer.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_buffer.h
|
|
@@ -0,0 +1,413 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_BUFFER_H_
|
|
+#define _MVX_BUFFER_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/seq_file.h>
|
|
+#include <linux/types.h>
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_mmu.h"
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define MVX_BUFFER_NPLANES 3
|
|
+#define MVX_ROI_QP_NUMS 10
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct device;
|
|
+
|
|
+/**
|
|
+ * struct mvx_buffer_plane - Plane information.
|
|
+ * @pages: MMU pages object.
|
|
+ * @stride: Horizontal stride in bytes.
|
|
+ * @filled: Number of bytes written to this plane. For a frame buffer this
|
|
+ * value should always match the size of the plane.
|
|
+ * @offset: Offset in bytes from begin of buffer to first bitstream data.
|
|
+ * @afbc_width: AFBC width in superblocks.
|
|
+ */
|
|
+struct mvx_buffer_plane {
|
|
+ struct mvx_mmu_pages *pages;
|
|
+ unsigned int stride;
|
|
+ unsigned int filled;
|
|
+ unsigned int offset;
|
|
+ unsigned int afbc_width;
|
|
+};
|
|
+
|
|
+struct mvx_buffer_general_encoder_stats
|
|
+{
|
|
+ uint8_t encoder_stats_type;
|
|
+ #define MVX_ENCODER_STATS_TYPE_FULL (0x01)
|
|
+ uint8_t frame_type; // See MVE_FRAME_TYPE_*
|
|
+ #define MVX_FRAME_TYPE_I 0
|
|
+ #define MVX_FRAME_TYPE_P 1
|
|
+ #define MVX_FRAME_TYPE_B 2
|
|
+ uint8_t used_as_reference; // 0=No, 1=Yes
|
|
+ uint8_t qp; // base quantizer used for the frame
|
|
+ // HEVC, H.264: 0-51. VP9: 0-63
|
|
+ uint32_t picture_count; // display order picture count
|
|
+ uint16_t num_cols; // number of columns (each 32 pixels wide)
|
|
+ uint16_t num_rows; // number of rows (each 32 pixels high)
|
|
+ uint32_t ref_pic_count[2]; // display order picture count of references
|
|
+ // unused values are set to zero
|
|
+};
|
|
+
|
|
+struct mvx_buffer_general_rows_uncomp_hdr
|
|
+{
|
|
+ uint8_t n_cols_minus1; /* number of quad cols in picture minus 1 */
|
|
+ uint8_t n_rows_minus1; /* number of quad rows in picture minus 1 */
|
|
+ uint8_t reserved[2];
|
|
+};
|
|
+
|
|
+struct mvx_buffer_general_block_configs
|
|
+{
|
|
+ uint8_t blk_cfg_type;
|
|
+ #define MVX_BLOCK_CONFIGS_TYPE_NONE (0x00)
|
|
+ #define MVX_BLOCK_CONFIGS_TYPE_ROW_UNCOMP (0xff)
|
|
+ uint8_t reserved[3];
|
|
+ union
|
|
+ {
|
|
+ struct mvx_buffer_general_rows_uncomp_hdr rows_uncomp;
|
|
+ } blk_cfgs;
|
|
+};
|
|
+
|
|
+struct mvx_buffer_general_hdr
|
|
+{
|
|
+ /* For identification of the buffer, this is not changed by the firmware. */
|
|
+ uint64_t host_handle;
|
|
+
|
|
+ /* this depends upon the type of the general purpose buffer */
|
|
+ uint64_t user_data_tag;
|
|
+
|
|
+ /* pointer to the buffer containing the general purpose data. the format
|
|
+ * of this data is defined by the configuration in the mve_buffer_general */
|
|
+ uint32_t buffer_ptr;
|
|
+
|
|
+ /* size of the buffer pointed to by buffer_ptr */
|
|
+ uint32_t buffer_size;
|
|
+
|
|
+ /* selects the type of semantics to use for the general purpose buffer. it
|
|
+ * tags (or discriminates) the union config member in mve_buffer_general
|
|
+ */
|
|
+ uint16_t type; /* Extra data: */
|
|
+ #define MVX_BUFFER_GENERAL_TYPE_BLOCK_CONFIGS (1) /* block_configs */
|
|
+ #define MVX_BUFFER_GENERAL_TYPE_ENCODER_STATS (4) /* encoder_stats */
|
|
+
|
|
+ /* size of the mve_buffer_general config member */
|
|
+ uint16_t config_size;
|
|
+
|
|
+ /* pad to force 8-byte alignment */
|
|
+ uint32_t reserved;
|
|
+};
|
|
+
|
|
+struct mvx_buffer_general
|
|
+{
|
|
+ struct mvx_buffer_general_hdr header;
|
|
+
|
|
+ /* used to describe the configuration of the general purpose buffer data
|
|
+ * pointed to by buffer_ptr
|
|
+ */
|
|
+ union
|
|
+ {
|
|
+ struct mvx_buffer_general_block_configs block_configs;
|
|
+ struct mvx_buffer_general_encoder_stats encoder_stats;
|
|
+ } config;
|
|
+};
|
|
+
|
|
+
|
|
+/**
|
|
+ * struct mvx_buffer - Buffer descriptor.
|
|
+ * @dev: Pointer to device.
|
|
+ * @mmu: Pointer to MMU.
|
|
+ * @head: List head used to add buffer to various queues.
|
|
+ * @format: Bitstream or frame format.
|
|
+ * @dir: Direction the buffer was allocated for.
|
|
+ * @user_data: User data copied from input- to output buffer.
|
|
+ * @flags: Buffer flags.
|
|
+ * @width: Frame width in pixels.
|
|
+ * @height: Frame height in pixels.
|
|
+ * @crop_left: Left crop in pixels.
|
|
+ * @crop_top: Top crop in pixels.
|
|
+ * @nplanes: Number of planes.
|
|
+ * @planes: Array of planes.
|
|
+ */
|
|
+struct mvx_buffer {
|
|
+ struct device *dev;
|
|
+ struct mvx_mmu *mmu;
|
|
+ struct list_head head;
|
|
+ enum mvx_format format;
|
|
+ enum mvx_direction dir;
|
|
+ uint64_t user_data;
|
|
+ unsigned int flags;
|
|
+ unsigned int width;
|
|
+ unsigned int height;
|
|
+ unsigned int crop_left;
|
|
+ unsigned int crop_top;
|
|
+ unsigned int nplanes;
|
|
+ struct mvx_buffer_plane planes[MVX_BUFFER_NPLANES];
|
|
+ struct mvx_buffer_general general;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_corrupt_buffer - Corrupt buffer descriptor.
|
|
+ * @head: List head used to add buffer to corrupt buffer queues.
|
|
+ * @user_data: User data copied from input- to output buffer.
|
|
+ */
|
|
+struct mvx_corrupt_buffer {
|
|
+ struct list_head head;
|
|
+ uint64_t user_data;
|
|
+};
|
|
+
|
|
+#define MVX_BUFFER_EOS 0x00000001
|
|
+#define MVX_BUFFER_EOF 0x00000002
|
|
+#define MVX_BUFFER_CORRUPT 0x00000004
|
|
+#define MVX_BUFFER_REJECTED 0x00000008
|
|
+#define MVX_BUFFER_DECODE_ONLY 0x00000010
|
|
+#define MVX_BUFFER_CODEC_CONFIG 0x00000020
|
|
+#define MVX_BUFFER_AFBC_TILED_HEADERS 0x00000040
|
|
+#define MVX_BUFFER_AFBC_TILED_BODY 0x00000080
|
|
+#define MVX_BUFFER_AFBC_32X8_SUPERBLOCK 0x00000100
|
|
+#define MVX_BUFFER_INTERLACE 0x00000200
|
|
+#define MVX_BUFFER_END_OF_SUB_FRAME 0x00000400
|
|
+#define MVX_BUFFER_FRAME_PRESENT 0x00000800
|
|
+
|
|
+
|
|
+#define MVX_BUFFER_FRAME_FLAG_ROTATION_90 0x00001000 /* Frame is rotated 90 degrees */
|
|
+#define MVX_BUFFER_FRAME_FLAG_ROTATION_180 0x00002000 /* Frame is rotated 180 degrees */
|
|
+#define MVX_BUFFER_FRAME_FLAG_ROTATION_270 0x00003000 /* Frame is rotated 270 degrees */
|
|
+#define MVX_BUFFER_FRAME_FLAG_ROTATION_MASK 0x00003000
|
|
+
|
|
+#define MVX_BUFFER_FRAME_FLAG_MIRROR_HORI 0x00010000
|
|
+#define MVX_BUFFER_FRAME_FLAG_MIRROR_VERT 0x00020000
|
|
+#define MVX_BUFFER_FRAME_FLAG_MIRROR_MASK 0x00030000
|
|
+
|
|
+#define MVX_BUFFER_FRAME_FLAG_SCALING_2 0x00004000 /* Frame is scaled by half */
|
|
+#define MVX_BUFFER_FRAME_FLAG_SCALING_4 0x00008000 /* Frame is scaled by quarter */
|
|
+#define MVX_BUFFER_FRAME_FLAG_SCALING_MASK 0x0000C000
|
|
+
|
|
+#define MVX_BUFFER_FRAME_FLAG_GENERAL 0x00040000 /* Frame is a general buffer */
|
|
+#define MVX_BUFFER_FRAME_FLAG_ROI 0x00080000 /* This buffer has a roi region */
|
|
+
|
|
+#define MVX_BUFFER_FRAME_NEED_REALLOC 0x00100000 /* This buffer needs realloc */
|
|
+
|
|
+#define MVX_BUFFER_FRAME_FLAG_IFRAME 0x20000000
|
|
+#define MVX_BUFFER_FRAME_FLAG_PFRAME 0x40000000
|
|
+#define MVX_BUFFER_FRAME_FLAG_BFRAME 0x80000000
|
|
+
|
|
+#define MVX_BUFFER_AFBC_BLOCK_SPLIT 0x10000000
|
|
+
|
|
+#define MVX_BUFFER_FLAG_DISABLE_CACHE_MAINTENANCE 0x01000000 /*disable cache maintenance for buffer */
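The rotation and scaling fields above span several bits, so callers mask before comparing instead of testing single bits. A minimal sketch, with a hypothetical helper name, not part of the patch:

static inline unsigned int example_rotation_degrees(const struct mvx_buffer *buf)
{
        switch (buf->flags & MVX_BUFFER_FRAME_FLAG_ROTATION_MASK) {
        case MVX_BUFFER_FRAME_FLAG_ROTATION_90:
                return 90;
        case MVX_BUFFER_FRAME_FLAG_ROTATION_180:
                return 180;
        case MVX_BUFFER_FRAME_FLAG_ROTATION_270:
                return 270;
        default:
                return 0;
        }
}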
|
|
+/****************************************************************************
|
|
+ * External functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_construct() - Construct the buffer object.
|
|
+ * @buf: Pointer to buffer.
|
|
+ * @dev: Pointer to device.
|
|
+ * @mmu: Pointer to MMU.
|
|
+ * @dir: Which direction the buffer was allocated for.
|
|
+ * @nplanes: Number of planes.
|
|
+ * @sgt: Array with SG tables. Each table contains a list of memory
|
|
+ * pages for corresponding plane.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_buffer_construct(struct mvx_buffer *buf,
|
|
+ struct device *dev,
|
|
+ struct mvx_mmu *mmu,
|
|
+ enum mvx_direction dir,
|
|
+ unsigned int nplanes,
|
|
+ struct sg_table **sgt);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_destruct() - Destruct the buffer object.
|
|
+ * @buf: Pointer to buffer.
|
|
+ */
|
|
+void mvx_buffer_destruct(struct mvx_buffer *buf);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_map() - Map the buffer to the MVE virtual address space.
|
|
+ * @buf: Pointer to buffer.
|
|
+ * @begin: MVE virtual begin address.
|
|
+ * @end: MVE virtual end address.
|
|
+ *
|
|
+ * Try to MMU map the buffer anywhere between the begin and end addresses.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_buffer_map(struct mvx_buffer *buf,
|
|
+ mvx_mmu_va begin,
|
|
+ mvx_mmu_va end);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_unmap() - Unmap the buffer from the MVE virtual address space.
|
|
+ * @buf: Pointer to buffer.
|
|
+ */
|
|
+void mvx_buffer_unmap(struct mvx_buffer *buf);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_is_mapped() - Return if buffer has been mapped.
|
|
+ * @buf: Pointer to buffer.
|
|
+ *
|
|
+ * Return: True if mapped, else false.
|
|
+ */
|
|
+bool mvx_buffer_is_mapped(struct mvx_buffer *buf);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_synch() - Synch the data caches.
|
|
+ * @buf: Pointer to buffer.
|
|
+ * @dir: Data direction.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_buffer_synch(struct mvx_buffer *buf,
|
|
+ enum dma_data_direction dir);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_clear() - Clear and empty the buffer.
|
|
+ * @buf: Pointer to buffer.
|
|
+ */
|
|
+void mvx_buffer_clear(struct mvx_buffer *buf);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_filled_set() - Set filled bytes and offset for a plane.
|
|
+ * @buf: Pointer to buffer.
|
|
+ * @plane: Plane index.
|
|
+ * @filled: Number of bytes filled.
|
|
+ * @offset: Number of bytes offset.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_buffer_filled_set(struct mvx_buffer *buf,
|
|
+ unsigned int plane,
|
|
+ unsigned int filled,
|
|
+ unsigned int offset);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_size() - Get size in bytes for a plane.
|
|
+ * @buf: Pointer to buffer.
|
|
+ * @plane: Which plane to get size for.
|
|
+ *
|
|
+ * Return: Size of plane.
|
|
+ */
|
|
+size_t mvx_buffer_size(struct mvx_buffer *buf,
|
|
+ unsigned int plane);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_va() - Get VA for a plane.
|
|
+ * @buf: Pointer to buffer.
|
|
+ * @plane: Plane index.
|
|
+ *
|
|
+ * Return: VA address of plane, 0 if unmapped.
|
|
+ */
|
|
+mvx_mmu_va mvx_buffer_va(struct mvx_buffer *buf,
|
|
+ unsigned int plane);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_frame_dim() - Get frame buffer dimensions.
|
|
+ * @format: Bitstream or frame format.
|
|
+ * @width: Width in pixels.
|
|
+ * @height: Height in pixels.
|
|
+ * @nplanes: Number of planes for this format.
|
|
+ * @stride: Horizontal stride in bytes.
|
|
+ * @size: Size in bytes for each plane.
|
|
+ *
|
|
+ * If *nplanes is larger than 0 then the stride is used as input to tell this
|
|
+ * function which stride is desired, but it might be modified if the
|
|
+ * stride is too short or not optimal for the MVE hardware.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_buffer_frame_dim(enum mvx_format format,
|
|
+ unsigned int width,
|
|
+ unsigned int height,
|
|
+ uint8_t *nplanes,
|
|
+ unsigned int *stride,
|
|
+ unsigned int *size);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_frame_set() - Set frame dimensions.
|
|
+ * @buf: Pointer to buffer.
|
|
+ * @format: Bitstream or frame format.
|
|
+ * @width: Width in pixels.
|
|
+ * @height: Height in pixels.
|
|
+ * @stride: Horizontal stride in bytes.
|
|
+ * @size: Size in bytes for each plane.
|
|
+ * @interlaced: Defines if the buffer is interlaced.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_buffer_frame_set(struct mvx_buffer *buf,
|
|
+ enum mvx_format format,
|
|
+ unsigned int width,
|
|
+ unsigned int height,
|
|
+ unsigned int *stride,
|
|
+ unsigned int *size,
|
|
+ bool interlaced);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_afbc_set() - Set AFBC dimensions.
|
|
+ * @buf: Pointer to buffer.
|
|
+ * @format: Bitstream or frame format.
|
|
+ * @width: Width in pixels.
|
|
+ * @height: Height in pixels.
|
|
+ * @afbc_width: AFBC width in superblocks.
|
|
+ * @size: Size in bytes for AFBC plane.
|
|
+ * @interlaced: Defines if the buffer is interlaced.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_buffer_afbc_set(struct mvx_buffer *buf,
|
|
+ enum mvx_format format,
|
|
+ unsigned int width,
|
|
+ unsigned int height,
|
|
+ unsigned int afbc_width,
|
|
+ unsigned int size,
|
|
+ bool interlaced);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_show() - Print debug information into seq-file.
|
|
+ * @buf: Pointer to buffer.
|
|
+ * @s: Seq-file to print to.
|
|
+ */
|
|
+void mvx_buffer_show(struct mvx_buffer *buf,
|
|
+ struct seq_file *s);
|
|
+
|
|
+#endif /* _MVX_BUFFER_H_ */
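A sketch of a typical call order for this interface, as suggested by the declarations above; illustrative only, error paths trimmed, and the function name is hypothetical:

static int example_prepare_input(struct mvx_buffer *buf,
                                 mvx_mmu_va begin, mvx_mmu_va end)
{
        int ret;

        /* Bind the buffer to the firmware address space once. */
        if (!mvx_buffer_is_mapped(buf)) {
                ret = mvx_buffer_map(buf, begin, end);
                if (ret != 0)
                        return ret;
        }

        /* Make CPU-written data visible to the device before queuing. */
        return mvx_buffer_synch(buf, DMA_TO_DEVICE);
}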
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware.c b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware.c
|
|
@@ -0,0 +1,600 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/atomic.h>
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/dma-buf.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/gfp.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/mm.h>
|
|
+#include <linux/firmware.h>
|
|
+#include <linux/kobject.h>
|
|
+#include <linux/kthread.h>
|
|
+#include <linux/timer.h>
|
|
+#include <linux/version.h>
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx_firmware_cache.h"
|
|
+#include "mvx_firmware_priv.h"
|
|
+#include "mvx_mmu.h"
|
|
+#include "mvx_secure.h"
|
|
+#include "mvx_seq.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define FW_TEXT_BASE_ADDR 0x1000u
|
|
+
|
|
+/****************************************************************************
|
|
+ * Private functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * test_bit_32() - 32-bit version of the Linux test_bit().
|
|
+ *
|
|
+ * Test if bit is set in bitmap array.
|
|
+ */
|
|
+static bool test_bit_32(int bit,
|
|
+ const uint32_t *addr)
|
|
+{
|
|
+ return 0 != (addr[bit >> 5] & (1 << (bit & 0x1f)));
|
|
+}
|
|
+
|
|
+/**
|
|
+ * get_major_version() - Get firmware major version.
|
|
+ *
|
|
+ * Return: Major version.
|
|
+ */
|
|
+static unsigned int get_major_version(const struct mvx_fw_bin *fw_bin)
|
|
+{
|
|
+ if (fw_bin->securevideo != false)
|
|
+ return fw_bin->secure.securefw->protocol.major;
|
|
+ else
|
|
+ return fw_bin->nonsecure.header->protocol_major;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * get_minor_version() - Get firmware minor version.
|
|
+ *
|
|
+ * Return: Minor version.
|
|
+ */
|
|
+static unsigned int get_minor_version(const struct mvx_fw_bin *fw_bin)
|
|
+{
|
|
+ if (fw_bin->securevideo != false)
|
|
+ return fw_bin->secure.securefw->protocol.minor;
|
|
+ else
|
|
+ return fw_bin->nonsecure.header->protocol_minor;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * fw_unmap() - Remove MMU mappings and release allocated memory.
|
|
+ */
|
|
+static void fw_unmap(struct mvx_fw *fw)
|
|
+{
|
|
+ unsigned int i;
|
|
+ uint32_t begin;
|
|
+ uint32_t end;
|
|
+ int ret;
|
|
+
|
|
+ if (fw->fw_bin->securevideo == false) {
|
|
+ /* Unmap a region of 4 MB for each core. */
|
|
+ for (i = 0; i < fw->ncores; i++) {
|
|
+ ret = fw->ops.get_region(MVX_FW_REGION_CORE_0 + i,
|
|
+ &begin, &end);
|
|
+ if (ret == 0)
|
|
+ mvx_mmu_unmap_va(fw->mmu, begin,
|
|
+ 4 * 1024 * 1024);
|
|
+ }
|
|
+
|
|
+ if (!IS_ERR_OR_NULL(fw->text))
|
|
+ mvx_mmu_free_pages(fw->text);
|
|
+
|
|
+ if (!IS_ERR_OR_NULL(fw->bss))
|
|
+ mvx_mmu_free_pages(fw->bss);
|
|
+
|
|
+ if (!IS_ERR_OR_NULL(fw->bss_shared))
|
|
+ mvx_mmu_free_pages(fw->bss_shared);
|
|
+ }
|
|
+
|
|
+ fw->ops.unmap_protocol(fw);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * fw_map_core() - Map pages for the text and BSS segments for one core.
|
|
+ *
|
|
+ * This function assumes that the fw instance has been correctly allocated
|
|
+ * and instansiated and will therefor not make any NULL pointer checks. It
|
|
+ * assumes that all pointers - for example to the mmu or firmware binary - have
|
|
+ * been correctly set up.
|
|
+ */
|
|
+static int fw_map_core(struct mvx_fw *fw,
|
|
+ unsigned int core)
|
|
+{
|
|
+ int ret;
|
|
+ const struct mvx_fw_header *header = fw->fw_bin->nonsecure.header;
|
|
+ mvx_mmu_va fw_base;
|
|
+ mvx_mmu_va end;
|
|
+ mvx_mmu_va va;
|
|
+ unsigned int i;
|
|
+ unsigned int bss_cnt = core * fw->fw_bin->nonsecure.bss_cnt;
|
|
+ unsigned int bss_scnt = 0;
|
|
+
|
|
+ /*
|
|
+ * Get the base address where the pages for this core should be
|
|
+ * mapped.
|
|
+ */
|
|
+ ret = fw->ops.get_region(MVX_FW_REGION_CORE_0 + core, &fw_base, &end);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ /* Map text segment. */
|
|
+ ret = mvx_mmu_map_pages(fw->mmu,
|
|
+ fw_base + FW_TEXT_BASE_ADDR,
|
|
+ fw->text,
|
|
+ MVX_ATTR_PRIVATE,
|
|
+ MVX_ACCESS_EXECUTABLE);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ /* Map bss shared and private pages. */
|
|
+ va = header->bss_start_address;
|
|
+ for (i = 0; i < header->bss_bitmap_size; i++) {
|
|
+ if (va >= header->master_rw_start_address &&
|
|
+ va < (header->master_rw_start_address +
|
|
+ header->master_rw_size))
|
|
+ ret = mvx_mmu_map_pa(
|
|
+ fw->mmu,
|
|
+ fw_base + va,
|
|
+ fw->bss_shared->pages[bss_scnt++],
|
|
+ MVE_PAGE_SIZE,
|
|
+ MVX_ATTR_PRIVATE,
|
|
+ MVX_ACCESS_READ_WRITE);
|
|
+ else if (test_bit_32(i, header->bss_bitmap))
|
|
+ ret = mvx_mmu_map_pa(fw->mmu,
|
|
+ fw_base + va,
|
|
+ fw->bss->pages[bss_cnt++],
|
|
+ MVE_PAGE_SIZE,
|
|
+ MVX_ATTR_PRIVATE,
|
|
+ MVX_ACCESS_READ_WRITE);
|
|
+
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ va += MVE_PAGE_SIZE;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
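Added commentary, not in the original patch, summarizing the per-core address layout implied by fw_map_core() and fw_unmap():

/*
 * Each core owns the region returned by ops.get_region(MVX_FW_REGION_CORE_0 + i)
 * (unmapped as 4 MB in fw_unmap()). Within that region:
 *   - the shared text segment is mapped executable at fw_base + 0x1000
 *     (FW_TEXT_BASE_ADDR);
 *   - BSS pages are placed one MVE page at a time from
 *     header->bss_start_address, following header->bss_bitmap;
 *   - pages that fall inside the master_rw window are taken from the
 *     shared bss_shared pool, so every core maps the same physical pages
 *     there, while the remaining BSS pages are private to the core.
 */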
|
|
+
|
|
+/**
|
|
+ * fw_map() - Map up MMU tables.
|
|
+ */
|
|
+static int fw_map(struct mvx_fw *fw)
|
|
+{
|
|
+ int ret;
|
|
+ unsigned int i;
|
|
+
|
|
+ if (fw->fw_bin->securevideo != false) {
|
|
+ /* Map MMU tables for each core. */
|
|
+ for (i = 0; i < fw->ncores; i++) {
|
|
+ mvx_mmu_va fw_base;
|
|
+ mvx_mmu_va end;
|
|
+ phys_addr_t l2 = fw->fw_bin->secure.securefw->l2pages +
|
|
+ i * MVE_PAGE_SIZE;
|
|
+
|
|
+ ret = fw->ops.get_region(MVX_FW_REGION_CORE_0 + i,
|
|
+ &fw_base, &end);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = mvx_mmu_map_l2(fw->mmu, fw_base, l2);
|
|
+ if (ret != 0)
|
|
+ goto unmap_fw;
|
|
+ }
|
|
+ } else {
|
|
+ const struct mvx_fw_bin *fw_bin = fw->fw_bin;
|
|
+ const struct mvx_fw_header *header = fw_bin->nonsecure.header;
|
|
+
|
|
+ /* Allocate memory for text segment. */
|
|
+ fw->text = mvx_mmu_alloc_pages(fw->dev,
|
|
+ fw_bin->nonsecure.text_cnt,
|
|
+ 0);
|
|
+ if (IS_ERR(fw->text))
|
|
+ return PTR_ERR(fw->text);
|
|
+
|
|
+ /* Allocate memory for BSS segment. */
|
|
+ fw->bss = mvx_mmu_alloc_pages(
|
|
+ fw->dev, fw_bin->nonsecure.bss_cnt * fw->ncores, 0);
|
|
+ if (IS_ERR(fw->bss)) {
|
|
+ ret = PTR_ERR(fw->bss);
|
|
+ goto unmap_fw;
|
|
+ }
|
|
+
|
|
+ /* Allocate memory for BSS shared segment. */
|
|
+ fw->bss_shared = mvx_mmu_alloc_pages(
|
|
+ fw->dev, fw_bin->nonsecure.sbss_cnt, 0);
|
|
+ if (IS_ERR(fw->bss_shared)) {
|
|
+ ret = PTR_ERR(fw->bss_shared);
|
|
+ goto unmap_fw;
|
|
+ }
|
|
+
|
|
+ /* Map MMU tables for each core. */
|
|
+ for (i = 0; i < fw->ncores; i++) {
|
|
+ ret = fw_map_core(fw, i);
|
|
+ if (ret != 0)
|
|
+ goto unmap_fw;
|
|
+ }
|
|
+
|
|
+ /* Copy firmware binary. */
|
|
+ ret = mvx_mmu_write(fw->mmu, FW_TEXT_BASE_ADDR,
|
|
+ fw_bin->nonsecure.fw->data,
|
|
+ header->text_length);
|
|
+ if (ret != 0)
|
|
+ goto unmap_fw;
|
|
+ }
|
|
+
|
|
+ /* Map MMU tables for the message queues. */
|
|
+ ret = fw->ops.map_protocol(fw);
|
|
+ if (ret != 0)
|
|
+ goto unmap_fw;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+unmap_fw:
|
|
+ fw_unmap(fw);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Callbacks and handlers for FW stats.
|
|
+ */
|
|
+static int fw_stat_show(struct seq_file *s,
|
|
+ void *v)
|
|
+{
|
|
+ struct mvx_fw *fw = (struct mvx_fw *)s->private;
|
|
+ const struct mvx_fw_bin *fw_bin = fw->fw_bin;
|
|
+
|
|
+ mvx_seq_printf(s, "mvx_fw", 0, "%p\n", fw);
|
|
+ seq_puts(s, "\n");
|
|
+
|
|
+ mvx_seq_printf(s, "mmu", 0, "%p\n", fw->mmu);
|
|
+
|
|
+ if (fw_bin->securevideo == false) {
|
|
+ mvx_seq_printf(s, "text", 0, "%p\n", fw->text);
|
|
+ mvx_seq_printf(s, "bss", 0, "%p\n", fw->bss);
|
|
+ mvx_seq_printf(s, "bss_shared", 0, "%p\n", fw->bss_shared);
|
|
+ }
|
|
+
|
|
+ seq_puts(s, "\n");
|
|
+
|
|
+ mvx_seq_printf(s, "msg_host", 0, "%p\n", fw->msg_host);
|
|
+ mvx_seq_printf(s, "msg_mve", 0, "%p\n", fw->msg_mve);
|
|
+ mvx_seq_printf(s, "buf_in_host", 0, "%p\n", fw->buf_in_host);
|
|
+ mvx_seq_printf(s, "buf_in_mve", 0, "%p\n", fw->buf_in_mve);
|
|
+ mvx_seq_printf(s, "buf_out_host", 0, "%p\n", fw->buf_out_host);
|
|
+ mvx_seq_printf(s, "buf_out_mve", 0, "%p\n", fw->buf_out_mve);
|
|
+ seq_puts(s, "\n");
|
|
+
|
|
+ fw->ops.print_stat(fw, 0, s);
|
|
+ seq_puts(s, "\n");
|
|
+
|
|
+ mvx_seq_printf(s, "rpc", 0, "%p\n", fw->rpc);
|
|
+ mvx_seq_printf(s, "ncores", 0, "%u\n", fw->ncores);
|
|
+ mvx_seq_printf(s, "msg_pending", 0, "%u\n", fw->msg_pending);
|
|
+ seq_puts(s, "\n");
|
|
+
|
|
+ mvx_seq_printf(s, "ops.map_protocol", 0, "%ps\n",
|
|
+ fw->ops.map_protocol);
|
|
+ mvx_seq_printf(s, "ops.unmap_protocol", 0, "%ps\n",
|
|
+ fw->ops.unmap_protocol);
|
|
+ mvx_seq_printf(s, "ops.get_region", 0, "%ps\n",
|
|
+ fw->ops.get_region);
|
|
+ mvx_seq_printf(s, "ops.get_message", 0, "%ps\n",
|
|
+ fw->ops.get_message);
|
|
+ mvx_seq_printf(s, "ops.put_message", 0, "%ps\n",
|
|
+ fw->ops.put_message);
|
|
+ mvx_seq_printf(s, "ops.handle_rpc", 0, "%ps\n",
|
|
+ fw->ops.handle_rpc);
|
|
+ seq_puts(s, "\n");
|
|
+
|
|
+ mvx_seq_printf(s, "fw_bin", 0, "%p\n", fw_bin);
|
|
+ mvx_seq_printf(s, "fw_bin.cache", 0, "%p\n", fw_bin->cache);
|
|
+ mvx_seq_printf(s, "fw_bin.filename", 0, "%s\n", fw_bin->filename);
|
|
+ mvx_seq_printf(s, "fw_bin.format", 0, "%u\n", fw_bin->format);
|
|
+ mvx_seq_printf(s, "fw_bin.dir", 0, "%s\n",
|
|
+ (fw_bin->dir == MVX_DIR_INPUT) ? "in" :
|
|
+ (fw_bin->dir == MVX_DIR_OUTPUT) ? "out" :
|
|
+ "invalid");
|
|
+
|
|
+ if (fw_bin->securevideo == false) {
|
|
+ mvx_seq_printf(s, "fw_bin.text_cnt", 0, "%u\n",
|
|
+ fw_bin->nonsecure.text_cnt);
|
|
+ mvx_seq_printf(s, "fw_bin.bss_cnt", 0, "%u\n",
|
|
+ fw_bin->nonsecure.bss_cnt);
|
|
+ mvx_seq_printf(s, "fw_bin.sbss_cnt", 0, "%u\n",
|
|
+ fw_bin->nonsecure.sbss_cnt);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fw_stat_open(struct inode *inode,
|
|
+ struct file *file)
|
|
+{
|
|
+ return single_open(file, fw_stat_show, inode->i_private);
|
|
+}
|
|
+
|
|
+static const struct file_operations fw_stat_fops = {
|
|
+ .open = fw_stat_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+static void *rpcmem_seq_start(struct seq_file *s,
|
|
+ loff_t *pos)
|
|
+{
|
|
+ struct mvx_fw *fw = s->private;
|
|
+ int ret;
|
|
+
|
|
+ ret = mutex_lock_interruptible(&fw->rpcmem_mutex);
|
|
+ if (ret != 0)
|
|
+ return ERR_PTR(-EINVAL);
|
|
+
|
|
+ return mvx_seq_hash_start(fw->dev, fw->rpc_mem, HASH_SIZE(
|
|
+ fw->rpc_mem), *pos);
|
|
+}
|
|
+
|
|
+static void *rpcmem_seq_next(struct seq_file *s,
|
|
+ void *v,
|
|
+ loff_t *pos)
|
|
+{
|
|
+ struct mvx_fw *fw = s->private;
|
|
+
|
|
+ return mvx_seq_hash_next(v, fw->rpc_mem, HASH_SIZE(fw->rpc_mem), pos);
|
|
+}
|
|
+
|
|
+static void rpcmem_seq_stop(struct seq_file *s,
|
|
+ void *v)
|
|
+{
|
|
+ struct mvx_fw *fw = s->private;
|
|
+
|
|
+ mutex_unlock(&fw->rpcmem_mutex);
|
|
+ mvx_seq_hash_stop(v);
|
|
+}
|
|
+
|
|
+static int rpcmem_seq_show(struct seq_file *s,
|
|
+ void *v)
|
|
+{
|
|
+ struct mvx_seq_hash_it *it = v;
|
|
+ struct mvx_mmu_pages *pages = hlist_entry(it->node,
|
|
+ struct mvx_mmu_pages, node);
|
|
+
|
|
+ if (pages == NULL)
|
|
+ return 0;
|
|
+
|
|
+ seq_printf(s, "va = %08x, cap = %08zu, count = %08zu\n",
|
|
+ pages->va, pages->capacity, pages->count);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct seq_operations rpcmem_seq_ops = {
|
|
+ .start = rpcmem_seq_start,
|
|
+ .next = rpcmem_seq_next,
|
|
+ .stop = rpcmem_seq_stop,
|
|
+ .show = rpcmem_seq_show
|
|
+};
|
|
+
|
|
+static int rpcmem_open(struct inode *inode,
|
|
+ struct file *file)
|
|
+{
|
|
+ int ret;
|
|
+ struct seq_file *s;
|
|
+ struct mvx_fw *fw = inode->i_private;
|
|
+
|
|
+ ret = seq_open(file, &rpcmem_seq_ops);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ s = file->private_data;
|
|
+ s->private = fw;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct file_operations rpcmem_fops = {
|
|
+ .open = rpcmem_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = seq_release
|
|
+};
|
|
+
|
|
+/**
|
|
+ * fw_debugfs_init() - Create debugfs entries for mvx_fw.
|
|
+ */
|
|
+static int fw_debugfs_init(struct mvx_fw *fw,
|
|
+ struct dentry *parent)
|
|
+{
|
|
+ int ret;
|
|
+ struct dentry *dentry;
|
|
+
|
|
+ fw->dentry = debugfs_create_dir("fw", parent);
|
|
+ if (IS_ERR_OR_NULL(fw->dentry))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ dentry = debugfs_create_file("stat", 0400, fw->dentry, fw,
|
|
+ &fw_stat_fops);
|
|
+ if (IS_ERR_OR_NULL(dentry)) {
|
|
+ ret = -ENOMEM;
|
|
+ goto remove_dentry;
|
|
+ }
|
|
+
|
|
+ if (fw->fw_bin->securevideo == false) {
|
|
+ ret = mvx_mmu_pages_debugfs_init(fw->text, "text", fw->dentry);
|
|
+ if (ret != 0)
|
|
+ goto remove_dentry;
|
|
+
|
|
+ ret = mvx_mmu_pages_debugfs_init(fw->bss, "bss", fw->dentry);
|
|
+ if (ret != 0)
|
|
+ goto remove_dentry;
|
|
+
|
|
+ ret = mvx_mmu_pages_debugfs_init(fw->bss_shared, "bss_shared",
|
|
+ fw->dentry);
|
|
+ if (ret != 0)
|
|
+ goto remove_dentry;
|
|
+
|
|
+ dentry = debugfs_create_file("rpc_mem", 0400, fw->dentry, fw,
|
|
+ &rpcmem_fops);
|
|
+ if (IS_ERR_OR_NULL(dentry)) {
|
|
+ ret = -ENOMEM;
|
|
+ goto remove_dentry;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+remove_dentry:
|
|
+ debugfs_remove_recursive(fw->dentry);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_fw_factory(struct mvx_fw *fw,
|
|
+ struct mvx_fw_bin *fw_bin,
|
|
+ struct mvx_mmu *mmu,
|
|
+ struct mvx_session *session,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ struct mvx_client_session *csession,
|
|
+ unsigned int ncores,
|
|
+ struct dentry *parent)
|
|
+{
|
|
+ unsigned int major;
|
|
+ unsigned int minor;
|
|
+ int ret;
|
|
+
|
|
+ /* Verify that firmware loading was successful. */
|
|
+ if ((fw_bin->securevideo == false &&
|
|
+ IS_ERR_OR_NULL(fw_bin->nonsecure.fw)) ||
|
|
+ (fw_bin->securevideo != false &&
|
|
+ IS_ERR_OR_NULL(fw_bin->secure.securefw))) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Firmware binary was loaded with error.");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (fw_bin->securevideo != false &&
|
|
+ ncores > fw_bin->secure.securefw->ncores) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Too few secure cores setup. max_ncores=%u, ncores=%u.",
|
|
+ fw_bin->secure.securefw->ncores, ncores);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ major = get_major_version(fw_bin);
|
|
+ minor = get_minor_version(fw_bin);
|
|
+
|
|
+ /* Call constructor for derived class based on protocol version. */
|
|
+ switch (major) {
|
|
+ case 2:
|
|
+ ret = mvx_fw_construct_v2(fw, fw_bin, mmu, session, client_ops,
|
|
+ csession, ncores, major, minor);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ break;
|
|
+ case 3:
|
|
+ ret = mvx_fw_construct_v3(fw, fw_bin, mmu, session, client_ops,
|
|
+ csession, ncores, major, minor);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ break;
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Unsupported firmware interface revision. major=%u, minor=%u.",
|
|
+ major, minor);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* Map up the MMU tables. */
|
|
+ ret = fw_map(fw);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ ret = fw_debugfs_init(fw, parent);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_fw_construct(struct mvx_fw *fw,
|
|
+ struct mvx_fw_bin *fw_bin,
|
|
+ struct mvx_mmu *mmu,
|
|
+ struct mvx_session *session,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ struct mvx_client_session *csession,
|
|
+ unsigned int ncores)
|
|
+{
|
|
+ memset(fw, 0, sizeof(*fw));
|
|
+
|
|
+ fw->dev = fw_bin->dev;
|
|
+ fw->mmu = mmu;
|
|
+ fw->session = session;
|
|
+ fw->client_ops = client_ops;
|
|
+ fw->csession = csession;
|
|
+ fw->ncores = ncores;
|
|
+ fw->fw_bin = fw_bin;
|
|
+ mutex_init(&fw->rpcmem_mutex);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mvx_fw_destruct(struct mvx_fw *fw)
|
|
+{
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ debugfs_remove_recursive(fw->dentry);
|
|
+
|
|
+ /* Release and unmap allocated pages. */
|
|
+ fw_unmap(fw);
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware.h b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware.h
|
|
@@ -0,0 +1,921 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_FIRMWARE_H_
|
|
+#define _MVX_FIRMWARE_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/hashtable.h>
|
|
+#include <linux/kref.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/seq_file.h>
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_buffer.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define MVX_FW_HTABLE_BITS 3
|
|
+#define MVX_FW_QUANT_LEN 64
|
|
+
|
|
+/****************************************************************************
|
|
+ * Firmware communication types
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * enum mvx_fw_buffer_attr
|
|
+ */
|
|
+enum mvx_fw_buffer_attr {
|
|
+ MVX_FW_BUF_CACHEABLE,
|
|
+ MVX_FW_BUF_UNCACHEABLE,
|
|
+ MVX_FW_BUF_ATTR_NUM,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_fw_state - Firmware state.
|
|
+ */
|
|
+enum mvx_fw_state {
|
|
+ MVX_FW_STATE_STOPPED,
|
|
+ MVX_FW_STATE_RUNNING
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_job - Job request.
|
|
+ * @cores: Number of cores to use.
|
|
+ * @frames: Number of frames to process before job is switched out.
|
|
+ */
|
|
+struct mvx_fw_job {
|
|
+ unsigned int cores;
|
|
+ unsigned int frames;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_qp_range - QP range.
|
|
+ */
|
|
+struct mvx_fw_qp_range {
|
|
+ int min;
|
|
+ int max;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_profile_level - Profile and level.
|
|
+ */
|
|
+struct mvx_fw_profile_level {
|
|
+ unsigned int profile;
|
|
+ unsigned int level;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_tile - Tile size.
|
|
+ */
|
|
+struct mvx_fw_tile {
|
|
+ unsigned int rows;
|
|
+ unsigned int cols;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_mv - Motion vector search range.
|
|
+ */
|
|
+struct mvx_fw_mv {
|
|
+ unsigned int x;
|
|
+ unsigned int y;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_bitdepth - Bit depth.
|
|
+ */
|
|
+struct mvx_fw_bitdepth {
|
|
+ unsigned int chroma;
|
|
+ unsigned int luma;
|
|
+};
|
|
+
|
|
+struct mvx_buffer_param_region
|
|
+{
|
|
+ uint16_t mbx_left; /**< X coordinate of the left most macroblock */
|
|
+ uint16_t mbx_right; /**< X coordinate of the right most macroblock */
|
|
+ uint16_t mby_top; /**< Y coordinate of the top most macroblock */
|
|
+ uint16_t mby_bottom; /**< Y coordinate of the bottom most macroblock */
|
|
+ int16_t qp_delta; /**< QP delta value. This region will be encoded
|
|
+ * with qp = qp_default + qp_delta. */
|
|
+};
|
|
+
|
|
+struct mvx_roi_config
|
|
+{
|
|
+ unsigned int pic_index;
|
|
+ unsigned char qp_present;
|
|
+ unsigned char qp;
|
|
+ unsigned char roi_present;
|
|
+ unsigned char num_roi;
|
|
+ #define MVX_MAX_FRAME_REGIONS 16
|
|
+ struct mvx_buffer_param_region roi[MVX_MAX_FRAME_REGIONS];
|
|
+};
|
|
+
|
|
+struct mvx_buffer_param_rate_control
|
|
+{
|
|
+ uint32_t rate_control_mode;
|
|
+ #define MVX_OPT_RATE_CONTROL_MODE_OFF (0)
|
|
+ #define MVX_OPT_RATE_CONTROL_MODE_STANDARD (1)
|
|
+ #define MVX_OPT_RATE_CONTROL_MODE_VARIABLE (2)
|
|
+ #define MVX_OPT_RATE_CONTROL_MODE_CONSTANT (3)
|
|
+ #define MVX_OPT_RATE_CONTROL_MODE_C_VARIABLE (4)
|
|
+ uint32_t target_bitrate; /* in bits per second */
|
|
+ uint32_t maximum_bitrate; /* in bits per second */
|
|
+};
|
|
+
|
|
+struct mvx_dsl_frame{
|
|
+ uint32_t width;
|
|
+ uint32_t height;
|
|
+};
|
|
+
|
|
+struct mvx_dsl_ratio{
|
|
+ uint32_t hor;
|
|
+ uint32_t ver;
|
|
+};
|
|
+
|
|
+struct mvx_long_term_ref{
|
|
+ uint32_t mode;
|
|
+ uint32_t period;
|
|
+};
|
|
+/**
|
|
+ * struct mvx_fw_error - Firmware error message.
|
|
+ * @error_code: What kind of error that was reported.
|
|
+ * @message: Error message string.
|
|
+ */
|
|
+struct mvx_fw_error {
|
|
+ enum {
|
|
+ MVX_FW_ERROR_ABORT,
|
|
+ MVX_FW_ERROR_OUT_OF_MEMORY,
|
|
+ MVX_FW_ERROR_ASSERT,
|
|
+ MVX_FW_ERROR_UNSUPPORTED,
|
|
+ MVX_FW_ERROR_INVALID_BUFFER,
|
|
+ MVX_FW_ERROR_INVALID_STATE,
|
|
+ MVX_FW_ERROR_WATCHDOG
|
|
+ } error_code;
|
|
+ char message[128];
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_flush - Flush firmware buffers.
|
|
+ * @dir: Which port to flush.
|
|
+ */
|
|
+struct mvx_fw_flush {
|
|
+ enum mvx_direction dir;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_alloc_param - Allocation parameters.
|
|
+ * @width: Width in pixels.
|
|
+ * @height: Height in pixels.
|
|
+ * @afbc_alloc_bytes: AFBC buffer size.
|
|
+ * @afbc_width: AFBC width in superblocks.
|
|
+ *
|
|
+ * Dimensions of a decoded frame buffer.
|
|
+ */
|
|
+struct mvx_fw_alloc_param {
|
|
+ unsigned int width;
|
|
+ unsigned int height;
|
|
+ unsigned int afbc_alloc_bytes;
|
|
+ unsigned int afbc_width;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_seq_param - Sequence parameters.
|
|
+ * @planar.buffers_min: Minimum number of planar buffers required.
|
|
+ * @afbc.buffers_min: Minimum number of AFBC buffers required.
|
|
+ */
|
|
+struct mvx_fw_seq_param {
|
|
+ struct {
|
|
+ unsigned int buffers_min;
|
|
+ } planar;
|
|
+ struct {
|
|
+ unsigned int buffers_min;
|
|
+ } afbc;
|
|
+};
|
|
+
|
|
+enum mvx_fw_range {
|
|
+ MVX_FW_RANGE_UNSPECIFIED,
|
|
+ MVX_FW_RANGE_FULL,
|
|
+ MVX_FW_RANGE_LIMITED
|
|
+};
|
|
+
|
|
+enum mvx_fw_primaries {
|
|
+ MVX_FW_PRIMARIES_UNSPECIFIED,
|
|
+ MVX_FW_PRIMARIES_BT709, /* Rec.ITU-R BT.709 */
|
|
+ MVX_FW_PRIMARIES_BT470M, /* Rec.ITU-R BT.470 System M */
|
|
+ MVX_FW_PRIMARIES_BT601_625, /* Rec.ITU-R BT.601 625 */
|
|
+ MVX_FW_PRIMARIES_BT601_525, /* Rec.ITU-R BT.601 525 */
|
|
+ MVX_FW_PRIMARIES_GENERIC_FILM, /* Generic Film */
|
|
+ MVX_FW_PRIMARIES_BT2020 /* Rec.ITU-R BT.2020 */
|
|
+};
|
|
+
|
|
+enum mvx_fw_transfer {
|
|
+ MVX_FW_TRANSFER_UNSPECIFIED,
|
|
+ MVX_FW_TRANSFER_LINEAR, /* Linear transfer characteristics */
|
|
+ MVX_FW_TRANSFER_SRGB, /* sRGB or equivalent */
|
|
+ MVX_FW_TRANSFER_SMPTE170M, /* SMPTE 170M */
|
|
+ MVX_FW_TRANSFER_GAMMA22, /* Assumed display gamma 2.2 */
|
|
+ MVX_FW_TRANSFER_GAMMA28, /* Assumed display gamma 2.8 */
|
|
+ MVX_FW_TRANSFER_ST2084, /* SMPTE ST 2084 */
|
|
+ MVX_FW_TRANSFER_HLG, /* ARIB STD-B67 hybrid-log-gamma */
|
|
+ MVX_FW_TRANSFER_SMPTE240M, /* SMPTE 240M */
|
|
+ MVX_FW_TRANSFER_XVYCC, /* IEC 61966-2-4 */
|
|
+ MVX_FW_TRANSFER_BT1361, /* Rec.ITU-R BT.1361 extended gamut */
|
|
+ MVX_FW_TRANSFER_ST428 /* SMPTE ST 428-1 */
|
|
+};
|
|
+
|
|
+enum mvx_fw_matrix {
|
|
+ MVX_FW_MATRIX_UNSPECIFIED,
|
|
+ MVX_FW_MATRIX_BT709, /* Rec.ITU-R BT.709 */
|
|
+ MVX_FW_MATRIX_BT470M, /* KR=0.30, KB=0.11 */
|
|
+ MVX_FW_MATRIX_BT601, /* Rec.ITU-R BT.601 625 */
|
|
+ MVX_FW_MATRIX_SMPTE240M, /* SMPTE 240M or equivalent */
|
|
+ MVX_FW_MATRIX_BT2020, /* Rec.ITU-R BT.2020 non-const lum */
|
|
+ MVX_FW_MATRIX_BT2020Constant /* Rec.ITU-R BT.2020 const lum */
|
|
+};
|
|
+
|
|
+struct mvx_fw_primary {
|
|
+ unsigned int x;
|
|
+ unsigned int y;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_color_desc - HDR color description.
|
|
+ */
|
|
+struct mvx_fw_color_desc {
|
|
+ unsigned int flags;
|
|
+ enum mvx_fw_range range;
|
|
+ enum mvx_fw_primaries primaries;
|
|
+ enum mvx_fw_transfer transfer;
|
|
+ enum mvx_fw_matrix matrix;
|
|
+ struct {
|
|
+ struct mvx_fw_primary r;
|
|
+ struct mvx_fw_primary g;
|
|
+ struct mvx_fw_primary b;
|
|
+ struct mvx_fw_primary w;
|
|
+ unsigned int luminance_min;
|
|
+ unsigned int luminance_max;
|
|
+ } display;
|
|
+ struct {
|
|
+ unsigned int luminance_max;
|
|
+ unsigned int luminance_average;
|
|
+ } content;
|
|
+
|
|
+ uint8_t video_format;
|
|
+ uint8_t aspect_ratio_idc;
|
|
+ uint16_t sar_width;
|
|
+ uint16_t sar_height;
|
|
+ uint32_t num_units_in_tick;
|
|
+ uint32_t time_scale;
|
|
+};
|
|
+
|
|
+struct mvx_sei_userdata {
|
|
+ uint8_t flags;
|
|
+ #define MVX_BUFFER_PARAM_USER_DATA_UNREGISTERED_VALID (1)
|
|
+ uint8_t uuid[16];
|
|
+ char user_data[256 - 35];
|
|
+ uint8_t user_data_len;
|
|
+};
|
|
+/**
|
|
+ * struct mvx_fw_set_option - Set firmware options.
|
|
+ */
|
|
+struct mvx_fw_set_option {
|
|
+ enum {
|
|
+ /**
|
|
+ * Frame rate.
|
|
+ * Extra data: frame_rate.
|
|
+ */
|
|
+ MVX_FW_SET_FRAME_RATE,
|
|
+
|
|
+ /**
|
|
+ * Bitrate.
|
|
+ * Extra data: target_bitrate.
|
|
+ *
|
|
+ * When target_bitrate is non-zero, hardware rate control is
+ * enabled, otherwise rate control is disabled.
|
|
+ */
|
|
+ MVX_FW_SET_TARGET_BITRATE,
|
|
+
|
|
+ /**
|
|
+ * QP range.
|
|
+ * Extra data: qp_range.
|
|
+ *
|
|
+ * QP range when rate controller is enabled.
|
|
+ */
|
|
+ MVX_FW_SET_QP_RANGE,
|
|
+
|
|
+ /**
|
|
+ * NALU format.
|
|
+ * Extra data: nalu_format.
|
|
+ */
|
|
+ MVX_FW_SET_NALU_FORMAT,
|
|
+
|
|
+ /**
|
|
+ * Defines whether stream escaping is enabled.
|
|
+ * Extra data: stream_escaping.
|
|
+ */
|
|
+ MVX_FW_SET_STREAM_ESCAPING,
|
|
+
|
|
+ /**
|
|
+ * Defines profile and level for encoder.
|
|
+ * Extra data: profile_level.
|
|
+ */
|
|
+ MVX_FW_SET_PROFILE_LEVEL,
|
|
+
|
|
+ /**
|
|
+ * Ignore stream headers.
|
|
+ * Extra data: ignore_stream_headers.
|
|
+ */
|
|
+ MVX_FW_SET_IGNORE_STREAM_HEADERS,
|
|
+
|
|
+ /**
|
|
+ * Enable frame reordering for decoder.
|
|
+ * Extra data: frame_reordering.
|
|
+ */
|
|
+ MVX_FW_SET_FRAME_REORDERING,
|
|
+
|
|
+ /**
|
|
+ * Suggested internal buffer size.
|
|
+ * Extra data: intbuf_size.
|
|
+ */
|
|
+ MVX_FW_SET_INTBUF_SIZE,
|
|
+
|
|
+ /**
|
|
+ * Number of P frames for encoder.
|
|
+ * Extra data: pb_frames.
|
|
+ */
|
|
+ MVX_FW_SET_P_FRAMES,
|
|
+
|
|
+ /**
|
|
+ * Number of B frames for encoder.
|
|
+ * Extra data: pb_frames.
|
|
+ */
|
|
+ MVX_FW_SET_B_FRAMES,
|
|
+
|
|
+ /**
|
|
+ * GOP type for encoder.
|
|
+ * Extra data: gop_type.
|
|
+ */
|
|
+ MVX_FW_SET_GOP_TYPE,
|
|
+
|
|
+ /**
|
|
+ * Intra MB refresh.
|
|
+ * Extra data: intra_mb_refresh.
|
|
+ */
|
|
+ MVX_FW_SET_INTRA_MB_REFRESH,
|
|
+
|
|
+ /**
|
|
+ * Constrained intra prediction.
|
|
+ * Extra data: constr_ipred.
|
|
+ */
|
|
+ MVX_FW_SET_CONSTR_IPRED,
|
|
+
|
|
+ /**
|
|
+ * Enable entropy synchronization.
|
|
+ * Extra data: entropy_sync.
|
|
+ */
|
|
+ MVX_FW_SET_ENTROPY_SYNC,
|
|
+
|
|
+ /**
|
|
+ * Enable temporal motion vector prediction.
|
|
+ * Extra data: temporal_mvp.
|
|
+ */
|
|
+ MVX_FW_SET_TEMPORAL_MVP,
|
|
+
|
|
+ /**
|
|
+ * Tile size.
|
|
+ * Extra data: tile.
|
|
+ */
|
|
+ MVX_FW_SET_TILES,
|
|
+
|
|
+ /**
|
|
+ * Minimum luma coding block size.
|
|
+ * Extra data: min_luma_cb_size.
|
|
+ */
|
|
+ MVX_FW_SET_MIN_LUMA_CB_SIZE,
|
|
+
|
|
+ /**
|
|
+ * Entropy mode.
|
|
+ * Extra data: entropy_mode.
|
|
+ */
|
|
+ MVX_FW_SET_ENTROPY_MODE,
|
|
+
|
|
+ /**
|
|
+ * Suggested number of CTUs in a slice.
|
|
+ * Extra data: slice_spacing_mb.
|
|
+ */
|
|
+ MVX_FW_SET_SLICE_SPACING_MB,
|
|
+
|
|
+ /**
|
|
+ * Probability update method.
|
|
+ * Extra data: vp9_prob_update.
|
|
+ */
|
|
+ MVX_FW_SET_VP9_PROB_UPDATE,
|
|
+
|
|
+ /**
|
|
+ * Search range for motion vectors.
|
|
+ * Extra data: mv.
|
|
+ */
|
|
+ MVX_FW_SET_MV_SEARCH_RANGE,
|
|
+
|
|
+ /**
|
|
+ * Bitdepth.
|
|
+ * Extra data: bitdepth.
|
|
+ */
|
|
+ MVX_FW_SET_BITDEPTH,
|
|
+
|
|
+ /**
|
|
+ * Chroma format.
|
|
+ * Extra data: chroma_format.
|
|
+ */
|
|
+ MVX_FW_SET_CHROMA_FORMAT,
|
|
+
|
|
+ /**
|
|
+ * RGB to YUV conversion mode.
|
|
+ * Extra data: rgb_to_yuv_mode.
|
|
+ */
|
|
+ MVX_FW_SET_RGB_TO_YUV_MODE,
|
|
+
|
|
+ /**
|
|
+ * Maximum bandwidth limit.
|
|
+ * Extra data: band_limit.
|
|
+ */
|
|
+ MVX_FW_SET_BAND_LIMIT,
|
|
+
|
|
+ /**
|
|
+ * CABAC initialization table.
|
|
+ * Extra data: cabac_init_idc.
|
|
+ */
|
|
+ MVX_FW_SET_CABAC_INIT_IDC,
|
|
+
|
|
+ /**
|
|
+ * QP for I frames when rate control is disabled.
|
|
+ * Extra data: qp
|
|
+ */
|
|
+ MVX_FW_SET_QP_I,
|
|
+
|
|
+ /**
|
|
+ * QP for P frames when rate control is disabled.
|
|
+ * Extra data: qp
|
|
+ */
|
|
+ MVX_FW_SET_QP_P,
|
|
+
|
|
+ /**
|
|
+ * QP for B frames when rate control is disabled.
|
|
+ * Extra data: qp
|
|
+ */
|
|
+ MVX_FW_SET_QP_B,
|
|
+
|
|
+ /**
|
|
+ * JPEG resync interval.
|
|
+ * Extra data: resync_interval
|
|
+ */
|
|
+ MVX_FW_SET_RESYNC_INTERVAL,
|
|
+
|
|
+ /**
|
|
+ * JPEG quantization table.
|
|
+ * Extra data: quant_tbl.
|
|
+ */
|
|
+ MVX_FW_SET_QUANT_TABLE,
|
|
+
|
|
+ /**
|
|
+ * Set watchdog timeout. 0 to disable.
|
|
+ */
|
|
+ MVX_FW_SET_WATCHDOG_TIMEOUT,
|
|
+
|
|
+ /**
|
|
+ * QP for encode frame.
|
|
+ * Extra data: qp
|
|
+ */
|
|
+ MVX_FW_SET_QP_REGION,
|
|
+
|
|
+ /**
|
|
+ * ROI for encode frame.
|
|
+ * Extra data: ROI
|
|
+ */
|
|
+ MVX_FW_SET_ROI_REGIONS,
|
|
+
|
|
+ /**
|
|
+ * Rate Control for encode frame.
|
|
+ * Extra data: rate control
|
|
+ */
|
|
+ MVX_FW_SET_RATE_CONTROL,
|
|
+ /**
|
|
+ * Crop left for encode frame.
|
|
+ * Extra data: crop left
|
|
+ */
|
|
+ MVX_FW_SET_CROP_LEFT,
|
|
+ /**
|
|
+ * Crop right for encode frame.
|
|
+ * Extra data: crop right
|
|
+ */
|
|
+ MVX_FW_SET_CROP_RIGHT,
|
|
+ /**
|
|
+ * Crop top for encode frame.
|
|
+ * Extra data: crop top
|
|
+ */
|
|
+ MVX_FW_SET_CROP_TOP,
|
|
+ /**
|
|
+ * Crop bottom for encode frame.
|
|
+ * Extra data: crop bottom
|
|
+ */
|
|
+ MVX_FW_SET_CROP_BOTTOM,
|
|
+
|
|
+ MVX_FW_SET_COLOUR_DESC,
|
|
+
|
|
+ MVX_FW_SET_SEI_USERDATA,
|
|
+
|
|
+ MVX_FW_SET_HRD_BUF_SIZE,
|
|
+
|
|
+ MVX_FW_SET_DSL_FRAME,
|
|
+
|
|
+ MVX_FW_SET_LONG_TERM_REF,
|
|
+
|
|
+ MVX_FW_SET_DSL_MODE,
|
|
+
|
|
+ MVX_FW_SET_GOP_RESET,
|
|
+
|
|
+ MVX_FW_SET_INDEX_PROFILING
|
|
+ } code;
|
|
+
|
|
+ /**
|
|
+ * Extra data for an option.
|
|
+ */
|
|
+ union {
|
|
+ unsigned int frame_rate;
|
|
+ unsigned int target_bitrate;
|
|
+ struct mvx_fw_qp_range qp_range;
|
|
+ enum mvx_nalu_format nalu_format;
|
|
+ bool stream_escaping;
|
|
+ struct mvx_fw_profile_level profile_level;
|
|
+ bool ignore_stream_headers;
|
|
+ bool frame_reordering;
|
|
+ unsigned int intbuf_size;
|
|
+ unsigned int pb_frames;
|
|
+ enum mvx_gop_type gop_type;
|
|
+ unsigned int intra_mb_refresh;
|
|
+ bool constr_ipred;
|
|
+ bool entropy_sync;
|
|
+ bool temporal_mvp;
|
|
+ struct mvx_fw_tile tile;
|
|
+ unsigned int min_luma_cb_size;
|
|
+ enum mvx_entropy_mode entropy_mode;
|
|
+ unsigned int slice_spacing_mb;
|
|
+ enum mvx_vp9_prob_update vp9_prob_update;
|
|
+ struct mvx_fw_mv mv;
|
|
+ struct mvx_fw_bitdepth bitdepth;
|
|
+ unsigned int chroma_format;
|
|
+ enum mvx_rgb_to_yuv_mode rgb_to_yuv_mode;
|
|
+ unsigned int band_limit;
|
|
+ unsigned int cabac_init_idc;
|
|
+ int qp;
|
|
+ int resync_interval;
|
|
+ struct {
|
|
+ uint8_t *chroma;
|
|
+ uint8_t *luma;
|
|
+ } quant_tbl;
|
|
+ int watchdog_timeout;
|
|
+ struct mvx_roi_config roi_config;
|
|
+ struct mvx_buffer_param_rate_control rate_control;
|
|
+ unsigned int crop_left;
|
|
+ unsigned int crop_right;
|
|
+ unsigned int crop_top;
|
|
+ unsigned int crop_bottom;
|
|
+ struct mvx_fw_color_desc colour_desc;
|
|
+ struct mvx_sei_userdata userdata;
|
|
+ unsigned int nHRDBufsize;
|
|
+ struct mvx_dsl_frame dsl_frame;
|
|
+ struct mvx_long_term_ref ltr;
|
|
+ int dsl_pos_mode;
|
|
+ int index_profiling;
|
|
+ };
|
|
+};
|
|
+#define MVX_FW_COLOR_DESC_DISPLAY_VALID 0x1
|
|
+#define MVX_FW_COLOR_DESC_CONTENT_VALID 0x2
|
|
+
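+/*
+ * Illustrative sketch, not part of the original patch: a caller could
+ * request a new frame rate by filling in a set-option like this (the
+ * exact unit of frame_rate is whatever the firmware expects and is not
+ * specified here):
+ *
+ *	struct mvx_fw_set_option opt = {
+ *		.code = MVX_FW_SET_FRAME_RATE,
+ *		.frame_rate = 30,
+ *	};
+ *
+ * The option is then carried in a struct mvx_fw_msg with code
+ * MVX_FW_CODE_SET_OPTION and handed to ops.put_message().
+ */
+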
|
|
+/**
|
|
+ * enum mvx_fw_code - Codes for messages sent between driver and firmware.
|
|
+ */
|
|
+enum mvx_fw_code {
|
|
+ MVX_FW_CODE_ALLOC_PARAM, /* Driver <- Firmware. */
|
|
+ MVX_FW_CODE_BUFFER, /* Driver <-> Firmware. */
|
|
+ MVX_FW_CODE_ERROR, /* Driver <- Firmware. */
|
|
+ MVX_FW_CODE_IDLE, /* Driver <- Firmware. */
|
|
+ MVX_FW_CODE_FLUSH, /* Driver <-> Firmware. */
|
|
+ MVX_FW_CODE_JOB, /* Driver -> Firmware. */
|
|
+ MVX_FW_CODE_PING, /* Driver -> Firmware. */
|
|
+ MVX_FW_CODE_PONG, /* Driver <- Firmware. */
|
|
+ MVX_FW_CODE_SEQ_PARAM, /* Driver <- Firmware. */
|
|
+ MVX_FW_CODE_SET_OPTION, /* Driver <-> Firmware. */
|
|
+ MVX_FW_CODE_STATE_CHANGE, /* Driver <-> Firmware. */
|
|
+ MVX_FW_CODE_SWITCH_IN, /* Driver <- Firmware. */
|
|
+ MVX_FW_CODE_SWITCH_OUT, /* Driver <-> Firmware. */
|
|
+ MVX_FW_CODE_IDLE_ACK, /* Driver -> Firmware. */
|
|
+ MVX_FW_CODE_EOS, /* Driver <-> Firmware. */
|
|
+ MVX_FW_CODE_COLOR_DESC, /* Driver <- Firmware. */
|
|
+ MVX_FW_CODE_DUMP, /* Driver <-> Firmware. */
|
|
+ MVX_FW_CODE_DEBUG, /* Driver <-> Firmware. */
|
|
+ MVX_FW_CODE_BUFFER_GENERAL, /* Driver <-> Firmware. */
|
|
+ MVX_FW_CODE_UNKNOWN, /* Driver <- Firmware. */
|
|
+ MVX_FW_CODE_DPB_HELD_FRAMES, /* Driver <- Firmware. */
|
|
+ MVX_FW_CODE_MAX
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_msg - Union of all message types.
|
|
+ */
|
|
+struct mvx_fw_msg {
|
|
+ enum mvx_fw_code code;
|
|
+ union {
|
|
+ enum mvx_fw_state state;
|
|
+ struct mvx_fw_job job;
|
|
+ struct mvx_fw_error error;
|
|
+ struct mvx_fw_set_option set_option;
|
|
+ struct mvx_fw_flush flush;
|
|
+ struct mvx_fw_alloc_param alloc_param;
|
|
+ struct mvx_fw_seq_param seq_param;
|
|
+ struct mvx_fw_color_desc color_desc;
|
|
+ struct mvx_buffer *buf;
|
|
+ uint32_t arg;
|
|
+ bool eos_is_frame;
|
|
+ };
|
|
+};
|
|
+
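+/*
+ * Illustrative sketch, not part of the original patch: a flush of the
+ * input port could be requested with a message of the following shape
+ * (error handling omitted):
+ *
+ *	struct mvx_fw_msg msg = {
+ *		.code = MVX_FW_CODE_FLUSH,
+ *		.flush = { .dir = MVX_DIR_INPUT },
+ *	};
+ *	ret = fw->ops.put_message(fw, &msg);
+ */
+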
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct device;
|
|
+struct mvx_fw_bin;
|
|
+struct mvx_mmu;
|
|
+struct mvx_mmu_pages;
|
|
+struct mvx_session;
|
|
+
|
|
+/**
|
|
+ * enum mvx_fw_region - Firmware memory regions.
|
|
+ */
|
|
+enum mvx_fw_region {
|
|
+ MVX_FW_REGION_CORE_0,
|
|
+ MVX_FW_REGION_CORE_1,
|
|
+ MVX_FW_REGION_CORE_2,
|
|
+ MVX_FW_REGION_CORE_3,
|
|
+ MVX_FW_REGION_CORE_4,
|
|
+ MVX_FW_REGION_CORE_5,
|
|
+ MVX_FW_REGION_CORE_6,
|
|
+ MVX_FW_REGION_CORE_7,
|
|
+ MVX_FW_REGION_PROTECTED,
|
|
+ MVX_FW_REGION_FRAMEBUF,
|
|
+ MVX_FW_REGION_MSG_HOST,
|
|
+ MVX_FW_REGION_MSG_MVE,
|
|
+ MVX_FW_REGION_BUF_IN_HOST,
|
|
+ MVX_FW_REGION_BUF_IN_MVE,
|
|
+ MVX_FW_REGION_BUF_OUT_HOST,
|
|
+ MVX_FW_REGION_BUF_OUT_MVE,
|
|
+ MVX_FW_REGION_RPC,
|
|
+ MVX_FW_REGION_PRINT_RAM
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw - Firmware class.
|
|
+ * @dev: Pointer to device.
|
|
+ * @fw_bin: Pointer to firmware binary.
|
|
+ * @mmu: Pointer to MMU object.
|
|
+ * @session: Pointer to session.
|
|
+ * @client_ops: Client operations.
|
|
+ * @csession: Client session this firmware instance is connected to.
|
|
+ * @text: Pages allocated for the text segment.
|
|
+ * @bss: Pages allocated for the bss segment.
|
|
+ * @bss_shared: Pages allocated for the shared bss segment.
|
|
+ * @dentry: Debugfs entry for the "fw" directory.
|
|
+ * @msg_host: Host message queue.
|
|
+ * @msg_mve: MVE message queue.
|
|
+ * @buf_in_host: Input buffer queue. The host enqueues filled buffers.
+ * @buf_in_mve: Input buffer queue. The MVE returns empty buffers.
+ * @buf_out_host: Output buffer queue. The host enqueues empty buffers.
+ * @buf_out_mve: Output buffer queue. The MVE returns filled buffers.
|
|
+ * @rpc: RPC communication area.
|
|
+ * @ncores: Number of cores the firmware has been mapped for.
|
|
+ * @rpc_mem: Keeps track of RPC allocated memory. Maps MVE virtual
|
|
+ * address to 'struct mvx_mmu_pages' object.
|
|
+ * @msg_pending: A subset of the messages that we are waiting for a
|
|
+ * response to.
|
|
+ * @ops: Public firmware interface.
|
|
+ * @ops_priv: Private firmware interface.
|
|
+ *
|
|
+ * There is one firmware instance per active session. The function pointers
|
|
+ * below are not reentrant and should be protected by the session mutex.
|
|
+ */
|
|
+struct mvx_fw {
|
|
+ struct device *dev;
|
|
+ const struct mvx_fw_bin *fw_bin;
|
|
+ struct mvx_mmu *mmu;
|
|
+ struct mvx_session *session;
|
|
+ struct mvx_client_ops *client_ops;
|
|
+ struct mvx_client_session *csession;
|
|
+ struct mvx_mmu_pages *text;
|
|
+ struct mvx_mmu_pages *bss;
|
|
+ struct mvx_mmu_pages *bss_shared;
|
|
+ struct dentry *dentry;
|
|
+ void *msg_host;
|
|
+ void *msg_mve;
|
|
+ void *buf_in_host;
|
|
+ void *buf_in_mve;
|
|
+ void *buf_out_host;
|
|
+ void *buf_out_mve;
|
|
+ void *rpc;
|
|
+ void *fw_print_ram;
|
|
+ unsigned int ncores;
|
|
+ DECLARE_HASHTABLE(rpc_mem, MVX_FW_HTABLE_BITS);
|
|
+ struct mutex rpcmem_mutex;
|
|
+ unsigned int msg_pending;
|
|
+ uint32_t latest_used_region_protected_pages;
|
|
+ uint32_t latest_used_region_outbuf_pages;
|
|
+ phys_addr_t buf_pa_addr[MVX_FW_REGION_PRINT_RAM+1];
|
|
+ enum mvx_fw_buffer_attr buf_attr[MVX_FW_REGION_PRINT_RAM+1];
|
|
+
|
|
+ int (*map_op[MVX_FW_BUF_ATTR_NUM])(struct mvx_fw *fw, void **data, enum mvx_fw_region region);
|
|
+ void (*unmap_op[MVX_FW_BUF_ATTR_NUM])(struct mvx_fw *fw, void **data, enum mvx_fw_region region);
|
|
+
|
|
+ struct {
|
|
+ /**
|
|
+ * map_protocol() - MMU map firmware.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ */
|
|
+ int (*map_protocol)(struct mvx_fw *fw);
|
|
+
|
|
+ /**
|
|
+ * unmap_protocol() - MMU unmap firmware.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ */
|
|
+ void (*unmap_protocol)(struct mvx_fw *fw);
|
|
+
|
|
+ /**
|
|
+ * get_region() - Get begin and end address for memory region.
|
|
+ * @region: Which memory region to get addresses for.
|
|
+ * @begin: MVE virtual begin address.
|
|
+ * @end: MVE virtual end address.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+ int (*get_region)(enum mvx_fw_region region,
|
|
+ uint32_t *begin,
|
|
+ uint32_t *end);
|
|
+
|
|
+ /**
|
|
+ * get_message() - Read message from firmware message queue.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ * @msg: Firmware message.
|
|
+ *
|
|
+ * Return: 1 if message was received, 0 if no message was
|
|
+ * received, else error code.
|
|
+ */
|
|
+ int (*get_message)(struct mvx_fw *fw,
|
|
+ struct mvx_fw_msg *msg);
|
|
+
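+ /*
+ * Illustrative sketch, not part of the original patch: because
+ * get_message() returns 1 per received message and 0 when the
+ * queue is empty, a caller can drain the queue with a loop of
+ * the following shape:
+ *
+ *	struct mvx_fw_msg msg;
+ *	while ((ret = fw->ops.get_message(fw, &msg)) > 0)
+ *		handle_msg(session, &msg); // hypothetical handler
+ *	// ret < 0 indicates an error
+ */
+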
|
|
+ /**
|
|
+ * put_message() - Write message to firmware message queue.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ * @msg: Firmware message.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+ int (*put_message)(struct mvx_fw *fw,
|
|
+ struct mvx_fw_msg *msg);
|
|
+
|
|
+ /**
|
|
+ * handle_rpc() - Handle RPC message.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ *
|
|
+ * Return: 1 RPC message handled, 0 no RPC message,
|
|
+ * else error code.
|
|
+ */
|
|
+ int (*handle_rpc)(struct mvx_fw *fw);
|
|
+
|
|
+ /**
|
|
+ * handle_fw_ram_print() - Print firmware log from shared RAM.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ *
|
|
+ * Return: 1 FW ram log printed, 0 no FW ram log printed,
|
|
+ * else error code.
|
|
+ */
|
|
+ int (*handle_fw_ram_print)(struct mvx_fw *fw);
|
|
+
|
|
+ /**
|
|
+ * print_stat() - Print debug stats to seq-file.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ * @ind: Indentation level.
|
|
+ * @s: Pointer to seq-file.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+ int (*print_stat)(struct mvx_fw *fw,
|
|
+ int ind,
|
|
+ struct seq_file *s);
|
|
+
|
|
+ /**
|
|
+ * print_debug() - Print debug information.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ */
|
|
+ void (*print_debug)(struct mvx_fw *fw);
|
|
+ } ops;
|
|
+
|
|
+ struct {
|
|
+ /**
|
|
+ * send_idle_ack() - Send IDLE ACK message.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ *
|
|
+ * The IDLE ACK message will be sent to the firmware if it is
+ * supported by the host protocol, otherwise the call will be
|
|
+ * ignored.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+ int (*send_idle_ack)(struct mvx_fw *fw);
|
|
+
|
|
+ /**
|
|
+ * to_mve_profile() - Convert MVX profile to MVE value.
|
|
+ * @mvx_profile: MVX profile.
|
|
+ * @mve_profile: MVE profile.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+ int (*to_mve_profile)(unsigned int mvx_profile,
|
|
+ uint16_t *mve_profile);
|
|
+
|
|
+ /**
|
|
+ * to_mve_level() - Convert MVX level to MVE value.
|
|
+ * @mvx_level: MVX level.
|
|
+ * @mve_level: MVE level.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+ int (*to_mve_level)(unsigned int mvx_level,
|
|
+ uint16_t *mve_level);
|
|
+ } ops_priv;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_fw_factory() - Construct a firmware object.
|
|
+ * @fw: Pointer to fw.
|
|
+ * @fw_bin: Pointer to firmware binary.
|
|
+ * @mmu: Pointer to MMU instance.
|
|
+ * @session: Pointer to session.
|
|
+ * @client_ops: Pointer to client operations.
|
|
+ * @csession: Client session this firmware instance is registered to.
|
|
+ * @ncores: Number of cores to configure.
|
|
+ * @parent: Parent debugfs directory entry.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_fw_factory(struct mvx_fw *fw,
|
|
+ struct mvx_fw_bin *fw_bin,
|
|
+ struct mvx_mmu *mmu,
|
|
+ struct mvx_session *session,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ struct mvx_client_session *csession,
|
|
+ unsigned int ncores,
|
|
+ struct dentry *parent);
|
|
+
|
|
+/**
|
|
+ * mvx_fw_destruct() - Destroy firmware interface instance.
|
|
+ * @fw: Pointer to fw.
|
|
+ */
|
|
+void mvx_fw_destruct(struct mvx_fw *fw);
|
|
+
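+/*
+ * Illustrative sketch, not part of the original patch: the expected
+ * lifecycle is construct-use-destruct, roughly as below (all arguments
+ * are placeholders owned by the caller, error handling omitted):
+ *
+ *	ret = mvx_fw_factory(&fw, fw_bin, mmu, session,
+ *			     client_ops, csession, ncores, parent);
+ *	...exchange messages via fw.ops...
+ *	mvx_fw_destruct(&fw);
+ */
+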
|
|
+#endif /* _MVX_FIRMWARE_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_cache.c b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_cache.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_cache.c
|
|
@@ -0,0 +1,800 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/dma-buf.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/gfp.h>
|
|
+#include <linux/mm.h>
|
|
+#include <linux/firmware.h>
|
|
+#include <linux/kthread.h>
|
|
+#include <linux/version.h>
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx_firmware_cache.h"
|
|
+#include "mvx_log_ram.h"
|
|
+#include "mvx_mmu.h"
|
|
+#include "mvx_secure.h"
|
|
+#include "mvx_seq.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define CACHE_CLEANUP_INTERVAL_MS 5000
|
|
+
|
|
+#define MVX_SECURE_NUMCORES 8
|
|
+
|
|
+/****************************************************************************
|
|
+ * Private functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/*
|
|
+ * Backwards compatibility with older kernels.
|
|
+ */
|
|
+#if (KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE)
|
|
+static unsigned int kref_read(const struct kref *kref)
|
|
+{
|
|
+ return atomic_read(&kref->refcount);
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+/**
|
|
+ * test_bit_32() - 32 bit version of the Linux test_bit().
|
|
+ *
|
|
+ * Test if bit is set in bitmap array.
|
|
+ */
|
|
+static bool test_bit_32(int bit,
|
|
+ uint32_t *addr)
|
|
+{
|
|
+ return 0 != (addr[bit >> 5] & (1 << (bit & 0x1f)));
|
|
+}
|
|
+
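+/*
+ * Illustrative note, not part of the original patch: the index arithmetic
+ * above means that, for example, test_bit_32(37, bitmap) tests bit 5 of
+ * bitmap[1], since 37 >> 5 = 1 selects the word and 37 & 0x1f = 5 selects
+ * the bit within it.
+ */
+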
|
|
+/**
|
|
+ * hw_id_to_string() - Convert HW id to string.
|
|
+ */
|
|
+static const char *hw_id_to_string(enum mvx_hw_id id)
|
|
+{
|
|
+ switch (id) {
|
|
+ case MVE_v500:
|
|
+ return "v500";
|
|
+ case MVE_v550:
|
|
+ return "v550";
|
|
+ case MVE_v61:
|
|
+ return "v61";
|
|
+ case MVE_v52_v76:
|
|
+ return "v52_v76";
|
|
+ default:
|
|
+ return "unknown";
|
|
+ }
|
|
+}
|
|
+
|
|
+/**
|
|
+ * get_fw_name() - Return the file name for the requested format and direction.
|
|
+ *
|
|
+ * This function checks neither whether there is hardware support nor whether
+ * the firmware binary is available on the file system.
|
|
+ */
|
|
+static int get_fw_name(char *filename,
|
|
+ size_t size,
|
|
+ enum mvx_format format,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_hw_ver *hw_ver)
|
|
+{
|
|
+ const char *codec = NULL;
|
|
+ const char *enc_dec = (dir == MVX_DIR_INPUT) ? "dec" : "enc";
|
|
+ size_t n;
|
|
+
|
|
+ switch (format) {
|
|
+ case MVX_FORMAT_H263:
|
|
+ codec = "mpeg4";
|
|
+ break;
|
|
+ case MVX_FORMAT_H264:
|
|
+ codec = "h264";
|
|
+ break;
|
|
+ case MVX_FORMAT_HEVC:
|
|
+ codec = "hevc";
|
|
+ break;
|
|
+ case MVX_FORMAT_JPEG:
|
|
+ codec = "jpeg";
|
|
+ break;
|
|
+ case MVX_FORMAT_MPEG2:
|
|
+ codec = "mpeg2";
|
|
+ break;
|
|
+ case MVX_FORMAT_MPEG4:
|
|
+ codec = "mpeg4";
|
|
+ break;
|
|
+ case MVX_FORMAT_RV:
|
|
+ codec = "rv";
|
|
+ break;
|
|
+ case MVX_FORMAT_VC1:
|
|
+ codec = "vc1";
|
|
+ break;
|
|
+ case MVX_FORMAT_VP8:
|
|
+ codec = "vp8";
|
|
+ break;
|
|
+ case MVX_FORMAT_VP9:
|
|
+ codec = "vp9";
|
|
+ break;
|
|
+ case MVX_FORMAT_AVS2:
|
|
+ codec = "avs2";
|
|
+ break;
|
|
+ case MVX_FORMAT_AVS:
|
|
+ codec = "avs";
|
|
+ break;
|
|
+ default:
|
|
+ return -ENOENT;
|
|
+ }
|
|
+
|
|
+ n = snprintf(filename, size, "linlon-%s-%u-%u/%s%s.fwb",
|
|
+ hw_id_to_string(hw_ver->id),
|
|
+ hw_ver->revision,
|
|
+ hw_ver->patch,
|
|
+ codec, enc_dec);
|
|
+ if (n >= size)
|
|
+ return -ENOENT;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
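+/*
+ * Illustrative note, not part of the original patch: with the format string
+ * above, an H.264 decoder firmware for a v52_v76 based design with (for
+ * example) revision 1 and patch 0 resolves to
+ *
+ *	linlon-v52_v76-1-0/h264dec.fwb
+ *
+ * The revision and patch numbers here are only examples; the real values
+ * come from the probed hardware version.
+ */
+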
|
|
+static struct mvx_fw_bin *kobj_to_fw_bin(struct kobject *kobj)
|
|
+{
|
|
+ return container_of(kobj, struct mvx_fw_bin, kobj);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * fw_bin_destroy() - Destroy instance of firmware binary.
|
|
+ */
|
|
+static void fw_bin_destroy(struct kobject *kobj)
|
|
+{
|
|
+ struct mvx_fw_bin *fw_bin = kobj_to_fw_bin(kobj);
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "Releasing firmware binary. bin=0x%p.", fw_bin);
|
|
+
|
|
+ if (fw_bin->securevideo == false &&
|
|
+ IS_ERR_OR_NULL(fw_bin->nonsecure.fw) == false)
|
|
+ release_firmware(fw_bin->nonsecure.fw);
|
|
+
|
|
+ if (fw_bin->securevideo != false &&
|
|
+ IS_ERR_OR_NULL(fw_bin->secure.securefw) == false)
|
|
+ mvx_secure_release_firmware(fw_bin->secure.securefw);
|
|
+
|
|
+ list_del(&fw_bin->cache_head);
|
|
+ devm_kfree(fw_bin->dev, fw_bin);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * fw_bin_validate() - Verify that the loaded firmware is a valid binary.
|
|
+ */
|
|
+static int fw_bin_validate(const struct firmware *fw,
|
|
+ struct device *dev)
|
|
+{
|
|
+ struct mvx_fw_header *header = (struct mvx_fw_header *)fw->data;
|
|
+
|
|
+ if (fw->size < sizeof(*header)) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Firmware binary size smaller than firmware header. size=%zu.",
|
|
+ fw->size);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ if (header->text_length > fw->size) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Firmware text length larger than firmware binary size. text_length=%u, size=%zu.",
|
|
+ header->text_length,
|
|
+ fw->size);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * fw_bin_callback() - Call firmware ready callback.
|
|
+ */
|
|
+static void fw_bin_callback(struct mvx_fw_bin *fw_bin)
|
|
+{
|
|
+ struct mvx_fw_event *event;
|
|
+ struct mvx_fw_event *tmp;
|
|
+ int ret;
|
|
+
|
|
+ /*
|
|
+ * Continue even if lock fails, or else any waiting session will
|
|
+ * be blocked forever.
|
|
+ */
|
|
+ ret = mutex_lock_interruptible(&fw_bin->mutex);
|
|
+
|
|
+ /*
|
|
+ * Inform all clients that the firmware has been loaded. This must be
|
|
+ * done even if the firmware load fails, or else the clients will hang
+ * waiting for a firmware load that will never happen.
|
|
+ */
|
|
+ list_for_each_entry_safe(event, tmp, &fw_bin->event_list, head) {
|
|
+ list_del(&event->head);
|
|
+ event->fw_bin_ready(fw_bin, event->arg, false);
|
|
+ }
|
|
+
|
|
+ if (ret == 0)
|
|
+ mutex_unlock(&fw_bin->mutex);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * secure_request_firmware_done() - Firmware load callback routine.
|
|
+ */
|
|
+static void secure_request_firmware_done(struct mvx_secure_firmware *securefw,
|
|
+ void *arg)
|
|
+{
|
|
+ struct mvx_fw_bin *fw_bin = arg;
|
|
+
|
|
+ if (securefw == NULL) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to load secure firmware binary. filename=%s.",
|
|
+ fw_bin->filename);
|
|
+ securefw = ERR_PTR(-EINVAL);
|
|
+ goto fw_bin_callback;
|
|
+ }
|
|
+
|
|
+fw_bin_callback:
|
|
+ fw_bin->secure.securefw = securefw;
|
|
+
|
|
+ fw_bin_callback(fw_bin);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * request_firmware_done() - Callback routine after firmware has been loaded.
|
|
+ */
|
|
+static void request_firmware_done(const struct firmware *fw,
|
|
+ void *arg)
|
|
+{
|
|
+ struct mvx_fw_bin *fw_bin = arg;
|
|
+ struct mvx_fw_header *header;
|
|
+ mvx_mmu_va va;
|
|
+ int ret;
|
|
+ uint32_t i;
|
|
+
|
|
+ BUG_ON(!arg);
|
|
+
|
|
+ if (fw == NULL) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to load firmware binary. filename=%s.",
|
|
+ fw_bin->filename);
|
|
+ fw = ERR_PTR(-EINVAL);
|
|
+ goto fw_ready_callback;
|
|
+ }
|
|
+
|
|
+ ret = fw_bin_validate(fw, fw_bin->dev);
|
|
+ if (ret != 0) {
|
|
+ release_firmware(fw);
|
|
+ fw = ERR_PTR(ret);
|
|
+ goto fw_ready_callback;
|
|
+ }
|
|
+
|
|
+ header = (struct mvx_fw_header *)fw->data;
|
|
+ fw_bin->nonsecure.header = header;
|
|
+
|
|
+ /* Calculate number of pages needed for the text segment. */
|
|
+ fw_bin->nonsecure.text_cnt =
|
|
+ (header->text_length + MVE_PAGE_SIZE - 1) >> MVE_PAGE_SHIFT;
|
|
+
|
|
+ /* Calculate number of pages needed for the BSS segments. */
|
|
+ va = header->bss_start_address;
|
|
+ for (i = 0; i < header->bss_bitmap_size; i++) {
|
|
+ if (va >= header->master_rw_start_address &&
|
|
+ va < (header->master_rw_start_address +
|
|
+ header->master_rw_size))
|
|
+ fw_bin->nonsecure.sbss_cnt++;
|
|
+ else if (test_bit_32(i, header->bss_bitmap))
|
|
+ fw_bin->nonsecure.bss_cnt++;
|
|
+
|
|
+ va += MVE_PAGE_SIZE;
|
|
+ }
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "Loaded firmware binary. bin=0x%p, major=%u, minor=%u, info=\"%s\", jump=0x%x, pages={text=%u, bss=%u, shared=%u}, text_length=%u, bss=0x%x.",
|
|
+ fw_bin,
|
|
+ header->protocol_major,
|
|
+ header->protocol_minor,
|
|
+ header->info_string,
|
|
+ header->rasc_jmp,
|
|
+ fw_bin->nonsecure.text_cnt,
|
|
+ fw_bin->nonsecure.bss_cnt,
|
|
+ fw_bin->nonsecure.sbss_cnt,
|
|
+ header->text_length,
|
|
+ header->bss_start_address);
|
|
+
|
|
+fw_ready_callback:
|
|
+ fw_bin->nonsecure.fw = fw;
|
|
+
|
|
+ fw_bin_callback(fw_bin);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * hwvercmp() - Compare two hardware versions.
|
|
+ *
|
|
+ * The semantics of this function are equivalent to strcmp().
|
|
+ */
|
|
+static int hwvercmp(struct mvx_hw_ver *v1,
|
|
+ struct mvx_hw_ver *v2)
|
|
+{
|
|
+ if (v1->id != v2->id)
|
|
+ return v1->id - v2->id;
|
|
+
|
|
+ if (v1->revision != v2->revision)
|
|
+ return v1->revision - v2->revision;
|
|
+
|
|
+ if (v1->patch != v2->patch)
|
|
+ return v1->patch - v2->patch;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
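+/*
+ * Illustrative note, not part of the original patch: as with strcmp(), only
+ * the sign of the return value carries meaning, so an exact version match
+ * is tested with
+ *
+ *	if (hwvercmp(&a, &b) == 0)
+ *		... same id, revision and patch ...
+ *
+ * which is how fw_bin_get() below matches cached firmware binaries.
+ */
+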
|
|
+static ssize_t path_show(struct kobject *kobj,
|
|
+ struct kobj_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ struct mvx_fw_bin *fw_bin = kobj_to_fw_bin(kobj);
|
|
+
|
|
+ return scnprintf(buf, PAGE_SIZE, "%s\n", fw_bin->filename);
|
|
+}
|
|
+
|
|
+static ssize_t hw_ver_show(struct kobject *kobj,
|
|
+ struct kobj_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ struct mvx_fw_bin *fw_bin = kobj_to_fw_bin(kobj);
|
|
+ struct mvx_hw_ver *hw_ver = &fw_bin->hw_ver;
|
|
+
|
|
+ return scnprintf(buf, PAGE_SIZE, "%s-%u-%u\n",
|
|
+ hw_id_to_string(hw_ver->id),
|
|
+ hw_ver->revision,
|
|
+ hw_ver->patch);
|
|
+}
|
|
+
|
|
+static ssize_t count_show(struct kobject *kobj,
|
|
+ struct kobj_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
|
|
+ kref_read(&kobj->kref) - 1);
|
|
+}
|
|
+
|
|
+static ssize_t dirty_show(struct kobject *kobj,
|
|
+ struct kobj_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ struct mvx_fw_bin *fw_bin = kobj_to_fw_bin(kobj);
|
|
+ int dirty = 0;
|
|
+
|
|
+ if (atomic_read(&fw_bin->flush_cnt) !=
|
|
+ atomic_read(&fw_bin->cache->flush_cnt))
|
|
+ dirty = 1;
|
|
+
|
|
+ return scnprintf(buf, PAGE_SIZE, "%d\n", dirty);
|
|
+}
|
|
+
|
|
+static struct kobj_attribute path_attr = __ATTR_RO(path);
|
|
+static struct kobj_attribute count_attr = __ATTR_RO(count);
|
|
+static struct kobj_attribute hw_ver_attr = __ATTR_RO(hw_ver);
|
|
+static struct kobj_attribute dirty_attr = __ATTR_RO(dirty);
|
|
+
|
|
+static struct attribute *fw_bin_attrs[] = {
|
|
+ &path_attr.attr,
|
|
+ &count_attr.attr,
|
|
+ &hw_ver_attr.attr,
|
|
+ &dirty_attr.attr,
|
|
+ NULL
|
|
+};
|
|
+
|
|
+static const struct attribute_group fw_bin_group = {
|
|
+ .name = "",
|
|
+ .attrs = fw_bin_attrs
|
|
+};
|
|
+
|
|
+static const struct attribute_group *fw_bin_groups[] = {
|
|
+ &fw_bin_group,
|
|
+ NULL
|
|
+};
|
|
+
|
|
+static struct kobj_type fw_bin_ktype = {
|
|
+ .release = fw_bin_destroy,
|
|
+ .sysfs_ops = &kobj_sysfs_ops,
|
|
+ .default_groups = fw_bin_groups
|
|
+};
|
|
+
|
|
+/**
|
|
+ * fw_bin_create() - Create a new firmware binary instance.
|
|
+ */
|
|
+static struct mvx_fw_bin *fw_bin_create(struct mvx_fw_cache *cache,
|
|
+ enum mvx_format format,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_hw_ver *hw_ver,
|
|
+ bool securevideo)
|
|
+{
|
|
+ struct mvx_fw_bin *fw_bin;
|
|
+ int ret;
|
|
+
|
|
+ /* Allocate object and initialize members. */
|
|
+ fw_bin = devm_kzalloc(cache->dev, sizeof(*fw_bin), GFP_KERNEL);
|
|
+ if (fw_bin == NULL)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ fw_bin->dev = cache->dev;
|
|
+ fw_bin->cache = cache;
|
|
+ fw_bin->format = format;
|
|
+ fw_bin->dir = dir;
|
|
+ fw_bin->hw_ver = *hw_ver;
|
|
+ atomic_set(&fw_bin->flush_cnt, atomic_read(&cache->flush_cnt));
|
|
+ mutex_init(&fw_bin->mutex);
|
|
+ INIT_LIST_HEAD(&fw_bin->cache_head);
|
|
+ INIT_LIST_HEAD(&fw_bin->event_list);
|
|
+
|
|
+ fw_bin->securevideo = securevideo;
|
|
+ if (securevideo != false)
|
|
+ fw_bin->secure.secure = cache->secure;
|
|
+
|
|
+ ret = kobject_init_and_add(&fw_bin->kobj, &fw_bin_ktype, &cache->kobj,
|
|
+ "%lx", (unsigned long)fw_bin);
|
|
+ if (ret != 0)
|
|
+ goto free_fw_bin;
|
|
+
|
|
+ ret = get_fw_name(fw_bin->filename, sizeof(fw_bin->filename), format,
|
|
+ dir, &fw_bin->hw_ver);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR,
|
|
+ "No firmware available. format=%d, direction=%d.",
|
|
+ format, dir);
|
|
+ goto free_fw_bin;
|
|
+ }
|
|
+
|
|
+ kobject_get(&fw_bin->kobj);
|
|
+
|
|
+ if (securevideo != false)
|
|
+ ret = mvx_secure_request_firmware_nowait(
|
|
+ cache->secure, fw_bin->filename, MVX_SECURE_NUMCORES,
|
|
+ fw_bin,
|
|
+ secure_request_firmware_done);
|
|
+ else
|
|
+ ret = request_firmware_nowait(THIS_MODULE, true,
|
|
+ fw_bin->filename,
|
|
+ fw_bin->dev, GFP_KERNEL, fw_bin,
|
|
+ request_firmware_done);
|
|
+
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR,
|
|
+ "Failed to request firmware. filename=%s, securevideo=%d.",
|
|
+ fw_bin->filename, securevideo);
|
|
+ kobject_put(&fw_bin->kobj);
|
|
+ goto free_fw_bin;
|
|
+ }
|
|
+
|
|
+ return fw_bin;
|
|
+
|
|
+free_fw_bin:
|
|
+ kobject_put(&fw_bin->kobj);
|
|
+
|
|
+ return ERR_PTR(ret);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * fw_bin_get() - Get reference to firmware binary.
|
|
+ *
|
|
+ * If the firmware binary has already been loaded, the reference count is increased,
+ * otherwise the function tries to create a new descriptor and load the firmware
|
|
+ * into memory.
|
|
+ */
|
|
+static struct mvx_fw_bin *fw_bin_get(struct mvx_fw_cache *cache,
|
|
+ enum mvx_format format,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_hw_ver *hw_ver,
|
|
+ bool securevideo)
|
|
+{
|
|
+ struct mvx_fw_bin *fw_bin = NULL;
|
|
+ struct mvx_fw_bin *tmp;
|
|
+ int ret;
|
|
+
|
|
+ ret = mutex_lock_interruptible(&cache->mutex);
|
|
+ if (ret != 0)
|
|
+ return ERR_PTR(ret);
|
|
+
|
|
+ /* Search if firmware binary has already been loaded. */
|
|
+ list_for_each_entry(tmp, &cache->fw_bin_list, cache_head) {
|
|
+ if (tmp->format == format && tmp->dir == dir &&
|
|
+ hwvercmp(&tmp->hw_ver, hw_ver) == 0 &&
|
|
+ tmp->securevideo == securevideo &&
|
|
+ atomic_read(&tmp->flush_cnt) ==
|
|
+ atomic_read(&cache->flush_cnt)) {
|
|
+ fw_bin = tmp;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* If firmware was not found, then try to request firmware. */
|
|
+ if (fw_bin == NULL) {
|
|
+ fw_bin = fw_bin_create(cache, format, dir, hw_ver, securevideo);
|
|
+ if (!IS_ERR(fw_bin))
|
|
+ list_add(&fw_bin->cache_head, &cache->fw_bin_list);
|
|
+ } else {
|
|
+ kobject_get(&fw_bin->kobj);
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&cache->mutex);
|
|
+
|
|
+ return fw_bin;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Private functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+static struct mvx_fw_cache *kobj_to_fw_cache(struct kobject *kobj)
|
|
+{
|
|
+ return container_of(kobj, struct mvx_fw_cache, kobj);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * cache_flush_show() - FW cache flush status is always 0.
|
|
+ */
|
|
+static ssize_t cache_flush_show(struct kobject *kobj,
|
|
+ struct kobj_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ return scnprintf(buf, PAGE_SIZE, "0\n");
|
|
+}
|
|
+
|
|
+/**
|
|
+ * cache_flush_store() - Trigger FW cache flush.
|
|
+ */
|
|
+static ssize_t cache_flush_store(struct kobject *kobj,
|
|
+ struct kobj_attribute *attr,
|
|
+ const char *buf,
|
|
+ size_t size)
|
|
+{
|
|
+ struct mvx_fw_cache *cache = kobj_to_fw_cache(kobj);
|
|
+
|
|
+ atomic_inc(&cache->flush_cnt);
|
|
+ return size;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Sysfs attribute which triggers FW cache flush.
|
|
+ */
|
|
+static struct kobj_attribute cache_flush =
|
|
+ __ATTR(flush, 0600, cache_flush_show, cache_flush_store);
|
|
+
|
|
+
|
|
+static struct attribute *cache_attrs[] = {
|
|
+ &cache_flush.attr,
|
|
+ NULL
|
|
+};
|
|
+
|
|
+static const struct attribute_group cache_group = {
|
|
+ .name = "",
|
|
+ .attrs = cache_attrs
|
|
+};
|
|
+
|
|
+static const struct attribute_group *cache_groups[] = {
|
|
+ &cache_group,
|
|
+ NULL
|
|
+};
|
|
+
|
|
+static void cache_release(struct kobject *kobj)
|
|
+{
|
|
+ struct mvx_fw_cache *cache = kobj_to_fw_cache(kobj);
|
|
+
|
|
+ kthread_stop(cache->cache_thread);
|
|
+ kobject_put(cache->kobj_parent);
|
|
+}
|
|
+
|
|
+static struct kobj_type cache_ktype = {
|
|
+ .release = cache_release,
|
|
+ .sysfs_ops = &kobj_sysfs_ops,
|
|
+ .default_groups = cache_groups
|
|
+};
|
|
+
|
|
+static void cache_update(struct mvx_fw_cache *cache)
|
|
+{
|
|
+ struct mvx_fw_bin *fw_bin;
|
|
+ struct mvx_fw_bin *tmp;
|
|
+ int ret;
|
|
+
|
|
+ ret = mutex_lock_interruptible(&cache->mutex);
|
|
+ if (ret != 0)
|
|
+ return;
|
|
+
|
|
+ list_for_each_entry_safe(fw_bin, tmp, &cache->fw_bin_list, cache_head) {
|
|
+ int ref;
|
|
+
|
|
+ ref = kref_read(&fw_bin->kobj.kref);
|
|
+ if (ref == 1)
|
|
+ kobject_put(&fw_bin->kobj);
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&cache->mutex);
|
|
+}
|
|
+
|
|
+static int cache_thread(void *v)
|
|
+{
|
|
+ struct mvx_fw_cache *cache = (struct mvx_fw_cache *)v;
|
|
+
|
|
+ while (!kthread_should_stop()) {
|
|
+ cache_update(cache);
|
|
+ msleep_interruptible(CACHE_CLEANUP_INTERVAL_MS);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_fw_cache_construct(struct mvx_fw_cache *cache,
|
|
+ struct device *dev,
|
|
+ struct mvx_secure *secure,
|
|
+ struct kobject *kobj_parent)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ cache->dev = dev;
|
|
+ cache->secure = secure;
|
|
+ cache->kobj_parent = kobject_get(kobj_parent);
|
|
+ atomic_set(&cache->flush_cnt, 0);
|
|
+ mutex_init(&cache->mutex);
|
|
+ INIT_LIST_HEAD(&cache->fw_bin_list);
|
|
+
|
|
+ ret = kobject_init_and_add(&cache->kobj, &cache_ktype,
|
|
+ kobj_parent, "fw_cache");
|
|
+ if (ret != 0)
|
|
+ goto kobj_put;
|
|
+
|
|
+ cache->cache_thread = kthread_run(cache_thread, cache, "fw_cache");
|
|
+ if (IS_ERR(cache->cache_thread))
|
|
+ goto kobj_put;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+kobj_put:
|
|
+ kobject_put(&cache->kobj);
|
|
+ kobject_put(cache->kobj_parent);
|
|
+ return -EFAULT;
|
|
+}
|
|
+
|
|
+void mvx_fw_cache_destruct(struct mvx_fw_cache *cache)
|
|
+{
|
|
+ kobject_put(&cache->kobj);
|
|
+}
|
|
+
|
|
+int mvx_fw_cache_get(struct mvx_fw_cache *cache,
|
|
+ enum mvx_format format,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_fw_event *event,
|
|
+ struct mvx_hw_ver *hw_ver,
|
|
+ bool securevideo)
|
|
+{
|
|
+ int ret;
|
|
+ struct mvx_fw_bin *fw_bin;
|
|
+
|
|
+ /* Allocate a new firmware binary or get handle to existing object. */
|
|
+ fw_bin = fw_bin_get(cache, format, dir, hw_ver, securevideo);
|
|
+ if (IS_ERR(fw_bin))
|
|
+ return PTR_ERR(fw_bin);
|
|
+
|
|
+ ret = mutex_lock_interruptible(&fw_bin->mutex);
|
|
+ if (ret != 0) {
|
|
+ mvx_fw_cache_put(cache, fw_bin);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If the firmware binary has already been loaded, then the callback
|
|
+ * routine can be called right away.
|
|
+ * Otherwise the callback and argument are enqueued to the firmware
|
|
+ * notification list.
|
|
+ */
|
|
+ if ((fw_bin->securevideo != false &&
|
|
+ IS_ERR_OR_NULL(fw_bin->secure.securefw) == false)) {
|
|
+ mutex_unlock(&fw_bin->mutex);
|
|
+ event->fw_bin_ready(fw_bin, event->arg, true);
|
|
+ } else if (fw_bin->securevideo == false &&
|
|
+ IS_ERR_OR_NULL(fw_bin->nonsecure.fw) == false) {
|
|
+ mutex_unlock(&fw_bin->mutex);
|
|
+ event->fw_bin_ready(fw_bin, event->arg, true);
|
|
+ } else {
|
|
+ list_add(&event->head, &fw_bin->event_list);
|
|
+ mutex_unlock(&fw_bin->mutex);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mvx_fw_cache_put(struct mvx_fw_cache *cache,
|
|
+ struct mvx_fw_bin *fw_bin)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = mutex_lock_interruptible(&cache->mutex);
|
|
+
|
|
+ kobject_put(&fw_bin->kobj);
|
|
+
|
|
+ if (ret == 0)
|
|
+ mutex_unlock(&cache->mutex);
|
|
+}
|
|
+
|
|
+void mvx_fw_cache_log(struct mvx_fw_bin *fw_bin,
|
|
+ struct mvx_client_session *csession)
|
|
+{
|
|
+ struct mvx_log_header header;
|
|
+ struct mvx_log_fw_binary fw_binary;
|
|
+ struct timespec64 timespec;
|
|
+ struct iovec vec[3];
|
|
+
|
|
+ if (fw_bin->securevideo != false)
|
|
+ return;
|
|
+
|
|
+ ktime_get_real_ts64(×pec);
|
|
+
|
|
+ header.magic = MVX_LOG_MAGIC;
|
|
+ header.length = sizeof(fw_binary) + sizeof(*fw_bin->nonsecure.header);
|
|
+ header.type = MVX_LOG_TYPE_FW_BINARY;
|
|
+ header.severity = MVX_LOG_INFO;
|
|
+ header.timestamp.sec = timespec.tv_sec;
|
|
+ header.timestamp.nsec = timespec.tv_nsec;
|
|
+
|
|
+ fw_binary.session = (uintptr_t)csession;
|
|
+
|
|
+ vec[0].iov_base = &header;
|
|
+ vec[0].iov_len = sizeof(header);
|
|
+
|
|
+ vec[1].iov_base = &fw_binary;
|
|
+ vec[1].iov_len = sizeof(fw_binary);
|
|
+
|
|
+ vec[2].iov_base = (void *)fw_bin->nonsecure.header;
|
|
+ vec[2].iov_len = sizeof(*fw_bin->nonsecure.header);
|
|
+
|
|
+ MVX_LOG_DATA(&mvx_log_fwif_if, MVX_LOG_INFO, vec, 3);
|
|
+}
|
|
+
|
|
+void mvx_fw_cache_get_formats(struct mvx_fw_cache *cache,
|
|
+ enum mvx_direction direction,
|
|
+ uint64_t *formats)
|
|
+{
|
|
+ /* Support all formats by default. */
|
|
+ *formats = (1ull << MVX_FORMAT_MAX) - 1ull;
|
|
+
|
|
+ /* TODO remove formats we can't find any firmware for. */
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_cache.h b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_cache.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_cache.h
|
|
@@ -0,0 +1,246 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_FIRMWARE_CACHE_H_
|
|
+#define _MVX_FIRMWARE_CACHE_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/atomic.h>
|
|
+#include <linux/kobject.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/mutex.h>
|
|
+#include "mvx_if.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct device;
|
|
+struct firmware;
|
|
+struct mvx_client_session;
|
|
+struct mvx_secure;
|
|
+struct mvx_secure_firmware;
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_cache - Firmware cache.
|
|
+ *
|
|
+ * There is exactly one firmware context per device. It keeps track of the
|
|
+ * firmware binaries.
|
|
+ */
|
|
+struct mvx_fw_cache {
|
|
+ struct device *dev;
|
|
+ struct mvx_secure *secure;
|
|
+ struct mutex mutex;
|
|
+ struct list_head fw_bin_list;
|
|
+ struct kobject kobj;
|
|
+ struct kobject *kobj_parent;
|
|
+ atomic_t flush_cnt;
|
|
+ struct task_struct *cache_thread;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_header - Firmware binary header.
|
|
+ * @rasc_jmp: Start address.
|
|
+ * @protocol_minor: Host interface protocol minor version.
+ * @protocol_major: Host interface protocol major version.
|
|
+ * @reserved: Reserved for future use. Always 0.
|
|
+ * @info_string: Human readable codec information.
|
|
+ * @part_number: Part number.
|
|
+ * @svn_revision: SVN revision.
|
|
+ * @version_string: Firmware version.
|
|
+ * @text_length: Length in bytes of the read-only part of the firmware.
|
|
+ * @bss_start_address: Start address for BSS segment. This is always
|
|
+ * page-aligned.
|
|
+ * @bss_bitmap_size: The number of bits used in 'bss_bitmap'.
|
|
+ * @bss_bitmap: Bitmap which pages that shall be allocated and MMU
|
|
+ * mapped. If bit N is set, then a page shall be allocated
|
|
+ * and MMU mapped to VA address
|
|
+ * FW_BASE + bss_start_address + N * MVE_PAGE_SIZE.
|
|
+ * @master_rw_start_address: Defines a region of shared pages.
|
|
+ * @master_rw_size: Defines a region of shared pages.
|
|
+ */
|
|
+struct mvx_fw_header {
|
|
+ uint32_t rasc_jmp;
|
|
+ uint8_t protocol_minor;
|
|
+ uint8_t protocol_major;
|
|
+ uint8_t reserved[2];
|
|
+ uint8_t info_string[56];
|
|
+ uint8_t part_number[8];
|
|
+ uint8_t svn_revision[8];
|
|
+ uint8_t version_string[16];
|
|
+ uint32_t text_length;
|
|
+ uint32_t bss_start_address;
|
|
+ uint32_t bss_bitmap_size;
|
|
+ uint32_t bss_bitmap[16];
|
|
+ uint32_t master_rw_start_address;
|
|
+ uint32_t master_rw_size;
|
|
+};
|
|
+
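+/*
+ * Illustrative sketch, not part of the original patch: the number of BSS
+ * pages to allocate can be derived from the bitmap with a loop of the
+ * following shape, mirroring request_firmware_done() in
+ * mvx_firmware_cache.c (pages inside the master_rw region are counted as
+ * shared pages there), using a bit-test helper such as the driver's
+ * test_bit_32():
+ *
+ *	uint32_t i;
+ *	unsigned int bss_pages = 0;
+ *
+ *	for (i = 0; i < header->bss_bitmap_size; i++)
+ *		if (test_bit_32(i, header->bss_bitmap))
+ *			bss_pages++; // page mapped at FW_BASE + bss_start_address + i * MVE_PAGE_SIZE
+ */
+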
|
|
+/**
|
|
+ * struct mvx_fw_bin - Structure describing a loaded firmware binary.
|
|
+ *
|
|
+ * Multiple sessions may share the same firmware binary.
|
|
+ */
|
|
+struct mvx_fw_bin {
|
|
+ struct device *dev;
|
|
+ struct mvx_fw_cache *cache;
|
|
+ struct mutex mutex;
|
|
+ struct kobject kobj;
|
|
+ struct list_head cache_head;
|
|
+ struct list_head event_list;
|
|
+ char filename[128];
|
|
+ enum mvx_format format;
|
|
+ enum mvx_direction dir;
|
|
+ struct mvx_hw_ver hw_ver;
|
|
+ atomic_t flush_cnt;
|
|
+ bool securevideo;
|
|
+ struct {
|
|
+ const struct firmware *fw;
|
|
+ const struct mvx_fw_header *header;
|
|
+ unsigned int text_cnt;
|
|
+ unsigned int bss_cnt;
|
|
+ unsigned int sbss_cnt;
|
|
+ } nonsecure;
|
|
+ struct {
|
|
+ struct mvx_secure *secure;
|
|
+ struct mvx_secure_firmware *securefw;
|
|
+ } secure;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_fw_event - Firmware load event notification.
|
|
+ * @head: Used by the firmware loader. Should not be used
|
|
+ * by the client.
|
|
+ * @fw_bin_ready: Callback routine invoked after the firmware binary has
|
|
+ * finished loading. Will be called both on success and
|
|
+ * failure.
|
|
+ * @arg: Argument passed to fw_bin_ready. Client may set this
|
|
+ * pointer to any value.
|
|
+ *
|
|
+ * Structure used to keep track of clients that have subscribed to event
|
|
+ * notification after the firmware binary has been loaded.
|
|
+ */
|
|
+struct mvx_fw_event {
|
|
+ struct list_head head;
|
|
+ void (*fw_bin_ready)(struct mvx_fw_bin *fw_bin,
|
|
+ void *arg,
|
|
+ bool same_thread);
|
|
+ void *arg;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_fw_cache_construct() - Construct the firmware object.
|
|
+ * @cache: Pointer to firmware cache.
|
|
+ * @dev: Pointer to device.
|
|
+ * @secure: Pointer to secure video.
|
|
+ * @kobj: Pointer to parent kobj.
|
|
+ *
|
|
+ * When the FW cache is constructed, a corresponding sysfs entry will be created
+ * and attached as a child of kobj.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_fw_cache_construct(struct mvx_fw_cache *cache,
|
|
+ struct device *dev,
|
|
+ struct mvx_secure *secure,
|
|
+ struct kobject *kobj);
|
|
+
|
|
+/**
|
|
+ * mvx_fw_cache_destruct() - Destroy the firmware object.
|
|
+ * @cache: Pointer to firmware cache.
|
|
+ */
|
|
+void mvx_fw_cache_destruct(struct mvx_fw_cache *cache);
|
|
+
|
|
+/**
|
|
+ * mvx_fw_cache_get() - Get a reference to a firmware binary.
|
|
+ * @cache: Pointer for firmware cache.
|
|
+ * @format: Format used on the bitstream port.
|
|
+ * @dir: Which port is configured as the bitstream port.
|
|
+ * @event: Callback routine and argument that will be invoked after
|
|
+ * the firmware binary has been loaded.
|
|
+ * @hw_ver: MVE hardware version.
|
|
+ * @securevideo: Secure video enabled.
|
|
+ *
|
|
+ * Loading a firmware binary is an asynchronous operation. The client will be
|
|
+ * informed through a callback routine when the binary is ready.
|
|
+ *
|
|
+ * If the firmware binary is already in the cache, then the callback routine
|
|
+ * will be called directly from mvx_fw_cache_get(). The client must take care
|
|
+ * not to reacquire any mutexes already held.
|
|
+ *
|
|
+ * If the firmware binary was not found in the cache, then the callback routine
|
|
+ * will be called from a separate thread context. The client must make sure
|
|
+ * its data is protected by a mutex.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_fw_cache_get(struct mvx_fw_cache *cache,
|
|
+ enum mvx_format format,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_fw_event *event,
|
|
+ struct mvx_hw_ver *hw_ver,
|
|
+ bool securevideo);
|
|
+
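+/*
+ * Illustrative sketch, not part of the original patch: a client could
+ * subscribe to the firmware load roughly like this, where the callback
+ * and its argument are hypothetical and supplied by the caller:
+ *
+ *	struct mvx_fw_event event = {
+ *		.fw_bin_ready = fw_bin_ready_cb,
+ *		.arg = my_session,
+ *	};
+ *
+ *	ret = mvx_fw_cache_get(cache, MVX_FORMAT_H264, MVX_DIR_INPUT,
+ *			       &event, &hw_ver, false);
+ *
+ * If the binary is not yet loaded, the event structure is linked into its
+ * event list, so it must remain valid until the callback has been invoked.
+ */
+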
|
|
+/**
|
|
+ * mvx_fw_cache_put() - Return firmware binary to cache and decrement the
|
|
+ * reference count.
|
|
+ * @cache: Pointer to firmware cache.
|
|
+ * @fw_bin: Pointer to firmware binary.
|
|
+ */
|
|
+void mvx_fw_cache_put(struct mvx_fw_cache *cache,
|
|
+ struct mvx_fw_bin *fw_bin);
|
|
+
|
|
+/**
|
|
+ * mvx_fw_cache_log() - Log firmware binary to ram log.
|
|
+ * @fw_bin: Pointer to firmware binary.
|
|
+ * @csession: Pointer to client session.
|
|
+ */
|
|
+void mvx_fw_cache_log(struct mvx_fw_bin *fw_bin,
|
|
+ struct mvx_client_session *csession);
|
|
+
|
|
+/**
|
|
+ * mvx_fw_cache_get_formats() - Get supported formats.
|
|
+ * @cache: Pointer to firmware cache.
|
|
+ * @direction: Input or output port.
|
|
+ * @formats: Pointer to bitmask listing supported formats.
|
|
+ */
|
|
+void mvx_fw_cache_get_formats(struct mvx_fw_cache *cache,
|
|
+ enum mvx_direction direction,
|
|
+ uint64_t *formats);
|
|
+
|
|
+#endif /* _MVX_FIRMWARE_CACHE_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_priv.h b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_priv.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_priv.h
|
|
@@ -0,0 +1,163 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_FIRMWARE_PRIV_H_
|
|
+#define _MVX_FIRMWARE_PRIV_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include "mvx_firmware.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+#if !defined(MVE_REQUEST_CODE_IDLE_ACK)
|
|
+#define MVE_REQUEST_CODE_IDLE_ACK (1012)
|
|
+#endif
|
|
+
|
|
+/****************************************************************************
|
|
+ * Firmware
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct mvx_client_ops;
|
|
+struct mvx_client_session;
|
|
+struct mvx_fw_bin;
|
|
+struct mvx_mmu;
|
|
+struct mvx_session;
|
|
+
|
|
+/**
|
|
+ * mvx_firmware_construct() - Firmware constructor.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ * @fw_bin: Pointer to firmware binary.
|
|
+ * @mmu: Pointer to MMU.
|
|
+ * @session: Pointer to session.
|
|
+ * @client_ops: Pointer to client operations.
|
|
+ * @csession: Pointer to client session.
|
|
+ * @ncores: Number of cores.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_fw_construct(struct mvx_fw *fw,
|
|
+ struct mvx_fw_bin *fw_bin,
|
|
+ struct mvx_mmu *mmu,
|
|
+ struct mvx_session *session,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ struct mvx_client_session *csession,
|
|
+ unsigned int ncores);
|
|
+
|
|
+/****************************************************************************
|
|
+ * Firmware v2
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_fw_construct_v2() - Construct the object for the firmware v2 interface.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ * @fw_bin: Pointer to firmware binary.
|
|
+ * @mmu: Pointer to MMU.
|
|
+ * @session: Pointer to session.
|
|
+ * @client_ops: Pointer to client operations.
|
|
+ * @csession: Pointer to client session.
|
|
+ * @ncores: Number of cores.
|
|
+ * @major: Major firmware version.
|
|
+ * @minor: Minor firmware version.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_fw_construct_v2(struct mvx_fw *fw,
|
|
+ struct mvx_fw_bin *fw_bin,
|
|
+ struct mvx_mmu *mmu,
|
|
+ struct mvx_session *session,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ struct mvx_client_session *csession,
|
|
+ unsigned int ncores,
|
|
+ unsigned char major,
|
|
+ unsigned char minor);
|
|
+
|
|
+/**
|
|
+ * mvx_fw_send_idle_ack_v2() - Send idle ack.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_fw_send_idle_ack_v2(struct mvx_fw *fw);
|
|
+
|
|
+/**
|
|
+ * mvx_fw_to_mve_profile_v2() - Convert MVX to MVE profile.
|
|
+ * @mvx_profile: Input profile.
|
|
+ * @mve_profile: Output profile.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_fw_to_mve_profile_v2(unsigned int mvx_profile,
|
|
+ uint16_t *mve_profile);
|
|
+
|
|
+/**
|
|
+ * mvx_fw_to_mve_level_v2() - Convert MVX to MVE level.
|
|
+ * @mvx_level: Input level.
|
|
+ * @mve_level: Output level.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_fw_to_mve_level_v2(unsigned int mvx_level,
|
|
+ uint16_t *mve_level);
|
|
+
|
|
+/****************************************************************************
|
|
+ * Firmware v3
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_fw_construct_v3() - Construct the object for the firmware v3 interface.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ * @fw_bin: Pointer to firmware binary.
|
|
+ * @mmu: Pointer to MMU.
|
|
+ * @session: Pointer to session.
|
|
+ * @client_ops: Pointer to client operations.
|
|
+ * @csession: Pointer to client session.
|
|
+ * @ncores: Number of cores.
|
|
+ * @major: Major firmware version.
|
|
+ * @minor: Minor firmware version.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_fw_construct_v3(struct mvx_fw *fw,
|
|
+ struct mvx_fw_bin *fw_bin,
|
|
+ struct mvx_mmu *mmu,
|
|
+ struct mvx_session *session,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ struct mvx_client_session *csession,
|
|
+ unsigned int ncores,
|
|
+ unsigned char major,
|
|
+ unsigned char minor);
|
|
+
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_v2.c b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_v2.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_v2.c
|
|
@@ -0,0 +1,3410 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+#include <linux/dma-buf.h>
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <linux/string.h>
|
|
+#include "fw_v2/mve_protocol_def.h"
|
|
+#include "mvx_firmware_cache.h"
|
|
+#include "mvx_firmware_priv.h"
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx_log_ram.h"
|
|
+#include "mvx_mmu.h"
|
|
+#include "mvx_secure.h"
|
|
+#include "mvx_seq.h"
|
|
+#include "mvx_session.h"
|
|
+#include "mvx_v4l2_session.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * is_afbc() - Detect if format is AFBC.
|
|
+ * @format: Color format.
|
|
+ *
|
|
+ * Return: True if AFBC, else false.
|
|
+ */
|
|
+static bool is_afbc(unsigned int format)
|
|
+{
|
|
+ return (format & (1 << MVE_FORMAT_BF_A)) != 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * log_message() - Log a message.
|
|
+ * @session: Pointer to session.
|
|
+ * @channel: The type of the firmware interface message;
|
|
+ * message, input buffer, output buffer or RPC
|
|
+ * @direction: The direction of the firmware interface message;
|
|
+ *             host->firmware or firmware->host.
|
|
+ * @msg_header: The header of the message.
|
|
+ * @data: Pointer to the message data.
|
|
+ */
|
|
+static void log_message(struct mvx_session *session,
|
|
+ enum mvx_log_fwif_channel channel,
|
|
+ enum mvx_log_fwif_direction direction,
|
|
+ struct mve_msg_header *msg_header,
|
|
+ void *data)
|
|
+{
|
|
+ struct mvx_log_header header;
|
|
+ struct mvx_log_fwif fwif;
|
|
+ struct iovec vec[4];
|
|
+ struct timespec64 timespec;
|
|
+
|
|
+ ktime_get_real_ts64(&timespec);
|
|
+
|
|
+ header.magic = MVX_LOG_MAGIC;
|
|
+ header.length = sizeof(fwif) + sizeof(*msg_header) + msg_header->size;
|
|
+ header.type = MVX_LOG_TYPE_FWIF;
|
|
+ header.severity = MVX_LOG_INFO;
|
|
+ header.timestamp.sec = timespec.tv_sec;
|
|
+ header.timestamp.nsec = timespec.tv_nsec;
|
|
+
|
|
+ fwif.version_major = 2;
|
|
+ fwif.version_minor = 0;
|
|
+ fwif.channel = channel;
|
|
+ fwif.direction = direction;
|
|
+ fwif.session = (uintptr_t)session;
|
|
+
|
|
+ vec[0].iov_base = &header;
|
|
+ vec[0].iov_len = sizeof(header);
|
|
+
|
|
+ vec[1].iov_base = &fwif;
|
|
+ vec[1].iov_len = sizeof(fwif);
|
|
+
|
|
+ vec[2].iov_base = msg_header;
|
|
+ vec[2].iov_len = sizeof(*msg_header);
|
|
+
|
|
+ vec[3].iov_base = data;
|
|
+ vec[3].iov_len = msg_header->size;
|
|
+
|
|
+ MVX_LOG_DATA(&mvx_log_fwif_if, MVX_LOG_INFO, vec, 4);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * log_rpc() - Log an RPC message.
|
|
+ * @session: Pointer to session.
|
|
+ * @direction: The direction of the firmware interface message;
|
|
+ *             host->firmware or firmware->host.
|
|
+ * @rpc: RPC message.
|
|
+ */
|
|
+static void log_rpc(struct mvx_session *session,
|
|
+ enum mvx_log_fwif_direction direction,
|
|
+ struct mve_rpc_communication_area *rpc)
|
|
+{
|
|
+ struct mvx_log_header header;
|
|
+ struct mvx_log_fwif fwif;
|
|
+ size_t rpc_size;
|
|
+ struct iovec vec[3];
|
|
+ struct timespec64 timespec;
|
|
+
|
|
+ rpc_size = offsetof(typeof(*rpc), params) + rpc->size;
|
|
+
|
|
+ if (rpc_size > sizeof(*rpc))
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "RPC message size is too large. size=%u.",
|
|
+ rpc->size);
|
|
+
|
|
+ ktime_get_real_ts64(&timespec);
|
|
+
|
|
+ header.magic = MVX_LOG_MAGIC;
|
|
+ header.length = sizeof(fwif) + rpc_size;
|
|
+ header.type = MVX_LOG_TYPE_FWIF;
|
|
+ header.severity = MVX_LOG_INFO;
|
|
+ header.timestamp.sec = timespec.tv_sec;
|
|
+ header.timestamp.nsec = timespec.tv_nsec;
|
|
+
|
|
+ fwif.version_major = 2;
|
|
+ fwif.version_minor = 0;
|
|
+ fwif.channel = MVX_LOG_FWIF_CHANNEL_RPC;
|
|
+ fwif.direction = direction;
|
|
+ fwif.session = (uintptr_t)session;
|
|
+
|
|
+ vec[0].iov_base = &header;
|
|
+ vec[0].iov_len = sizeof(header);
|
|
+
|
|
+ vec[1].iov_base = &fwif;
|
|
+ vec[1].iov_len = sizeof(fwif);
|
|
+
|
|
+ vec[2].iov_base = rpc;
|
|
+ vec[2].iov_len = rpc_size;
|
|
+
|
|
+ MVX_LOG_DATA(&mvx_log_fwif_if, MVX_LOG_INFO, vec, 3);
|
|
+}
|
|
+
|
|
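+/**
+ * get_stride90() - Look up per-plane stride factors for a color format.
+ * @format: Color format.
+ * @nplanes: Output: number of planes for the format.
+ * @stride: Output: per-plane factors, read as [plane][0] = bytes per two
+ *          pixels and [plane][1] = lines per two rows (interpretation
+ *          inferred from the 90/270 degree rotation path below).
+ *
+ * Example based on the rotation path in put_buffer_frame(): for
+ * MVX_FORMAT_YUV420_NV12 the rotated luma stride of a WxH frame becomes
+ * DIV_ROUND_UP(H * 2, 2) = H bytes.
+ *
+ * Return: 0 on success, else error code.
+ */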
+static int get_stride90(enum mvx_format format,
|
|
+ uint8_t *nplanes,
|
|
+ unsigned int stride[MVX_BUFFER_NPLANES][2])
|
|
+{
|
|
+ switch (format) {
|
|
+ case MVX_FORMAT_YUV420_I420:
|
|
+ *nplanes = 3;
|
|
+ stride[0][0] = 2;
|
|
+ stride[0][1] = 2;
|
|
+ stride[1][0] = 1;
|
|
+ stride[1][1] = 1;
|
|
+ stride[2][0] = 1;
|
|
+ stride[2][1] = 1;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_NV12:
|
|
+ case MVX_FORMAT_YUV420_NV21:
|
|
+ *nplanes = 2;
|
|
+ stride[0][0] = 2;
|
|
+ stride[0][1] = 2;
|
|
+ stride[1][0] = 2;
|
|
+ stride[1][1] = 1;
|
|
+ stride[2][0] = 0;
|
|
+ stride[2][1] = 0;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_P010:
|
|
+ *nplanes = 2;
|
|
+ stride[0][0] = 4;
|
|
+ stride[0][1] = 2;
|
|
+ stride[1][0] = 4;
|
|
+ stride[1][1] = 1;
|
|
+ stride[2][0] = 0;
|
|
+ stride[2][1] = 0;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_Y0L2:
|
|
+ case MVX_FORMAT_YUV420_AQB1:
|
|
+ *nplanes = 1;
|
|
+ stride[0][0] = 8;
|
|
+ stride[0][1] = 1;
|
|
+ stride[1][0] = 0;
|
|
+ stride[1][1] = 0;
|
|
+ stride[2][0] = 0;
|
|
+ stride[2][1] = 0;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV422_YUY2:
|
|
+ case MVX_FORMAT_YUV422_UYVY:
|
|
+ *nplanes = 1;
|
|
+ stride[0][0] = 4;
|
|
+ stride[0][1] = 2;
|
|
+ stride[1][0] = 0;
|
|
+ stride[1][1] = 0;
|
|
+ stride[2][0] = 0;
|
|
+ stride[2][1] = 0;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV422_Y210:
|
|
+ case MVX_FORMAT_RGBA_8888:
|
|
+ case MVX_FORMAT_BGRA_8888:
|
|
+ case MVX_FORMAT_ARGB_8888:
|
|
+ case MVX_FORMAT_ABGR_8888:
|
|
+ *nplanes = 1;
|
|
+ stride[0][0] = 8;
|
|
+ stride[0][1] = 2;
|
|
+ stride[1][0] = 0;
|
|
+ stride[1][1] = 0;
|
|
+ stride[2][0] = 0;
|
|
+ stride[2][1] = 0;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * read32n() - Read a number of bytes from 'src' to 'dst'.
|
|
+ * @src: Pointer to circular buffer of source data.
|
|
+ * @offset: Current offset in the circular buffer.
|
|
+ * @dst: Pointer to destination buffer.
|
|
+ * @size: Size in bytes.
|
|
+ *
|
|
+ * Return: New offset in the circular buffer.
|
|
+ */
|
|
+static unsigned int read32n(volatile uint32_t *src,
|
|
+ unsigned int offset,
|
|
+ uint32_t *dst,
|
|
+ size_t size)
|
|
+{
|
|
+ for (; size >= sizeof(uint32_t); size -= sizeof(uint32_t)) {
|
|
+ *dst++ = src[offset];
|
|
+ offset = (offset + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
|
|
+ }
|
|
+
|
|
+ if (size != 0) {
|
|
+ memcpy(dst, (void *)&src[offset], size);
|
|
+ offset = (offset + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
|
|
+ }
|
|
+
|
|
+ return offset;
|
|
+}
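+/*
+ * Illustrative example of the wrap-around (queue size assumed for the
+ * example): with MVE_COMM_QUEUE_SIZE_IN_WORDS = 1024, reading 12 bytes
+ * starting at offset 1022 copies words 1022, 1023 and 0 and returns the
+ * new offset 1.
+ */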
|
|
+
|
|
+/**
|
|
+ * read_message() - Read message from firmware message queue.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ * @host: Host communication area.
|
|
+ * @mve: MVE communication area.
|
|
+ * @code: Pointer to where the message code shall be placed.
|
|
+ * @data: Pointer to where message data shall be placed.
|
|
+ * @size: Input: size of the 'data' buffer. Output: size of the message.
|
|
+ * @channel: Firmware interface message type to log.
+ * @mve_buf_attr: Buffer attribute of the MVE communication area.
+ * @host_buf_attr: Buffer attribute of the host communication area.
|
|
+ *
|
|
+ * Return: 1 if a message was read, 0 if no message was read, else error code.
|
|
+ */
|
|
+static int read_message(struct mvx_fw *fw,
|
|
+ struct mve_comm_area_host *host,
|
|
+ struct mve_comm_area_mve *mve,
|
|
+ unsigned int *code,
|
|
+ void *data,
|
|
+ size_t *size,
|
|
+ enum mvx_log_fwif_channel channel,
|
|
+ enum mvx_fw_buffer_attr mve_buf_attr,
|
|
+ enum mvx_fw_buffer_attr host_buf_attr)
|
|
+{
|
|
+ struct mve_msg_header header;
|
|
+ unsigned int rpos;
|
|
+ ssize_t capacity;
|
|
+
|
|
+ if (mve_buf_attr == MVX_FW_BUF_CACHEABLE) {
|
|
+ dma_sync_single_for_cpu(fw->dev,
|
|
+ phys_cpu2vpu(virt_to_phys(mve)),
|
|
+ MVE_PAGE_SIZE, DMA_FROM_DEVICE);
|
|
+ }
|
|
+
|
|
+ rpos = host->out_rpos;
|
|
+
|
|
+ /* Calculate how much data is available in the buffer. */
|
|
+ capacity = mve->out_wpos - rpos;
|
|
+ if (capacity < 0)
|
|
+ capacity += MVE_COMM_QUEUE_SIZE_IN_WORDS;
|
|
+
|
|
+ if (capacity <= 0)
|
|
+ return 0;
|
|
+ /*
+ * The following sequence can occur because of CPU out-of-order execution:
+ * 1. the msg_header is read early and host_area->out_rpos is advanced;
+ * 2. the firmware writes the msg_header and updates mve_area->out_wpos;
+ * 3. host_area->out_rpos is compared with mve_area->out_wpos; since the
+ *    latter was updated in step 2, the available size is not 0;
+ * 4. the msg_header read in step 1 is parsed, but it is invalid because it
+ *    should have been read after the update in step 2.
+ * Add a read barrier here to ensure the msg_header is always read after the
+ * firmware update.
+ */
|
|
+ smp_rmb();
|
|
+
|
|
+ /* Read the header. */
|
|
+ rpos = read32n(mve->out_data, rpos, (uint32_t *)&header,
|
|
+ sizeof(header));
|
|
+
|
|
+ if (!((header.code >= 1001 && header.code <= 1013) || (header.code >= 2001 && header.code <= 2019) || (header.code >= 3001 && header.code <= 3004))) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, "read_message header. header.size=%d, rpos=%d, code=%d, capacity=%zu", header.size, host->out_rpos, header.code, capacity);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* Make sure there is enough space for both header and message. */
|
|
+ capacity -= DIV_ROUND_UP(sizeof(header) + header.size,
|
|
+ sizeof(uint32_t));
|
|
+ if (capacity < 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Firmware v2 msg larger than capacity. code=%u, size=%u, wpos=%u, rpos=%u.",
|
|
+ header.code, header.size, mve->out_wpos,
|
|
+ host->out_rpos);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ if (header.size > *size) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Firmware v2 message does not fit in buffer. code=%u, msg_size=%u, size=%zu.",
|
|
+ header.code, header.size, *size);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ /* Read message body. */
|
|
+ rpos = read32n(mve->out_data, rpos, data, header.size);
|
|
+ host->out_rpos = rpos;
|
|
+
|
|
+ /*
|
|
+ * Make sure the read pointer has been written before the cache is
|
|
+ * flushed.
|
|
+ */
|
|
+ if (host_buf_attr == MVX_FW_BUF_CACHEABLE) {
|
|
+ wmb();
|
|
+ dma_sync_single_for_device(fw->dev,
|
|
+ phys_cpu2vpu(virt_to_phys(&host->out_rpos)),
|
|
+ sizeof(host->out_rpos), DMA_TO_DEVICE);
|
|
+ }
|
|
+
|
|
+ *code = header.code;
|
|
+ *size = header.size;
|
|
+
|
|
+ /* Log firmware message. */
|
|
+ MVX_LOG_EXECUTE(&mvx_log_fwif_if, MVX_LOG_INFO,
|
|
+ log_message(fw->session, channel,
|
|
+ MVX_LOG_FWIF_DIRECTION_FIRMWARE_TO_HOST,
|
|
+ &header, data));
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * write32n() - Write a number of bytes to 'dst' from 'src'.
|
|
+ * @dst: Pointer to circular buffer of destination data.
|
|
+ * @offset: Current offset in the circular buffer.
|
|
+ * @src: Pointer to source buffer.
|
|
+ * @size: Size in bytes.
|
|
+ *
|
|
+ * Return: New offset in the circular buffer.
|
|
+ */
|
|
+static unsigned int write32n(volatile uint32_t *dst,
|
|
+ unsigned int offset,
|
|
+ uint32_t *src,
|
|
+ size_t size)
|
|
+{
|
|
+ for (; size >= sizeof(uint32_t); size -= sizeof(uint32_t)) {
|
|
+ dst[offset] = *src++;
|
|
+ offset = (offset + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
|
|
+ }
|
|
+
|
|
+ if (size != 0) {
|
|
+ memcpy((void *)&dst[offset], src, size);
|
|
+ offset = (offset + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
|
|
+ }
|
|
+
|
|
+ return offset;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * write_message() - Write message to firmware message queue.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ * @host: Host communication area.
|
|
+ * @mve: MVE communication area.
|
|
+ * @code: Message code.
|
|
+ * @data: Pointer to message data. May be NULL if size is 0.
|
|
+ * @size: Size in bytes of data.
|
|
+ * @channel: Firmware interface message type to log.
+ * @mve_buf_attr: Buffer attribute of the MVE communication area.
+ * @host_buf_attr: Buffer attribute of the host communication area.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int write_message(struct mvx_fw *fw,
|
|
+ struct mve_comm_area_host *host,
|
|
+ struct mve_comm_area_mve *mve,
|
|
+ unsigned int code,
|
|
+ void *data,
|
|
+ size_t size,
|
|
+ enum mvx_log_fwif_channel channel,
|
|
+ enum mvx_fw_buffer_attr mve_buf_attr,
|
|
+ enum mvx_fw_buffer_attr host_buf_attr)
|
|
+{
|
|
+ struct mve_msg_header header = { .code = code, .size = size };
|
|
+ ssize_t capacity;
|
|
+ unsigned int wpos;
|
|
+
|
|
+ if (mve_buf_attr == MVX_FW_BUF_CACHEABLE) {
|
|
+ dma_sync_single_for_cpu(fw->dev,
|
|
+ phys_cpu2vpu(virt_to_phys(&mve->in_rpos)),
|
|
+ sizeof(mve->in_rpos), DMA_FROM_DEVICE);
|
|
+ }
|
|
+
|
|
+ wpos = host->in_wpos;
|
|
+
|
|
+ /* Calculate how much space is available in the buffer. */
|
|
+ capacity = mve->in_rpos - wpos;
|
|
+ if (capacity <= 0)
|
|
+ capacity += MVE_COMM_QUEUE_SIZE_IN_WORDS;
|
|
+
|
|
+ /* Make sure there is enough space for both header and message. */
|
|
+ capacity -= DIV_ROUND_UP(sizeof(header) + size, sizeof(uint32_t));
|
|
+ if (capacity <= 0)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ /* Write header. */
|
|
+ wpos = write32n(host->in_data, wpos, (uint32_t *)&header,
|
|
+ sizeof(header));
|
|
+
|
|
+ /* Write message. */
|
|
+ wpos = write32n(host->in_data, wpos, data, size);
|
|
+
|
|
+ /*
|
|
+ * Make sure all message data has been written before the cache is
|
|
+ * flushed.
|
|
+ */
|
|
+ if (host_buf_attr == MVX_FW_BUF_CACHEABLE) {
|
|
+ wmb();
|
|
+ dma_sync_single_for_device(fw->dev,
|
|
+ phys_cpu2vpu(virt_to_phys(host)),
|
|
+ MVE_PAGE_SIZE, DMA_TO_DEVICE);
|
|
+ }
|
|
+
|
|
+ host->in_wpos = wpos;
|
|
+
|
|
+ /*
|
|
+ * Make sure the write pointer has been written before the cache is
|
|
+ * flushed.
|
|
+ */
|
|
+ if (host_buf_attr == MVX_FW_BUF_CACHEABLE) {
|
|
+ wmb();
|
|
+ dma_sync_single_for_device(fw->dev,
|
|
+ phys_cpu2vpu(virt_to_phys(&host->in_wpos)),
|
|
+ sizeof(host->in_wpos), DMA_TO_DEVICE);
|
|
+ }
|
|
+
|
|
+ /* Log firmware message. */
|
|
+ MVX_LOG_EXECUTE(&mvx_log_fwif_if, MVX_LOG_INFO,
|
|
+ log_message(fw->session, channel,
|
|
+ MVX_LOG_FWIF_DIRECTION_HOST_TO_FIRMWARE,
|
|
+ &header, data));
|
|
+
|
|
+ return 0;
|
|
+}
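+/*
+ * Illustrative example of the capacity check in write_message() (values
+ * assumed): with MVE_COMM_QUEUE_SIZE_IN_WORDS = 1024, in_rpos = 10 and
+ * in_wpos = 1000, the capacity starts at 10 - 1000 = -990 and wraps to 34
+ * free words; the message is only written if header plus payload fit.
+ */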
|
|
+
|
|
+static int get_region_v2(enum mvx_fw_region region,
|
|
+ uint32_t *begin,
|
|
+ uint32_t *end)
|
|
+{
|
|
+ switch (region) {
|
|
+ case MVX_FW_REGION_CORE_0:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE0_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE0_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_1:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE1_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_2:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE2_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_3:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE3_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_4:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE4_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_5:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE5_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_6:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE6_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_7:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE7_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_PROTECTED:
|
|
+ *begin = MVE_MEM_REGION_PROTECTED_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_PROTECTED_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_FRAMEBUF:
|
|
+ *begin = MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FRAMEBUF_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_MSG_HOST:
|
|
+ *begin = MVE_COMM_MSG_INQ_ADDR;
|
|
+ *end = MVE_COMM_MSG_INQ_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_MSG_MVE:
|
|
+ *begin = MVE_COMM_MSG_OUTQ_ADDR;
|
|
+ *end = MVE_COMM_MSG_OUTQ_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_BUF_IN_HOST:
|
|
+ *begin = MVE_COMM_BUF_INQ_ADDR;
|
|
+ *end = MVE_COMM_BUF_INQ_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_BUF_IN_MVE:
|
|
+ *begin = MVE_COMM_BUF_INRQ_ADDR;
|
|
+ *end = MVE_COMM_BUF_INRQ_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_BUF_OUT_HOST:
|
|
+ *begin = MVE_COMM_BUF_OUTQ_ADDR;
|
|
+ *end = MVE_COMM_BUF_OUTQ_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_BUF_OUT_MVE:
|
|
+ *begin = MVE_COMM_BUF_OUTRQ_ADDR;
|
|
+ *end = MVE_COMM_BUF_OUTRQ_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_RPC:
|
|
+ *begin = MVE_COMM_RPC_ADDR;
|
|
+ *end = MVE_COMM_RPC_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_PRINT_RAM:
|
|
+ *begin = MVE_FW_PRINT_RAM_ADDR;
|
|
+ *end = MVE_FW_PRINT_RAM_ADDR + MVE_FW_PRINT_RAM_SIZE;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
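+/**
+ * convert_buffer_general() - Convert a returned general buffer to a host message.
+ * @fw: Pointer to firmware object.
+ * @dir: Expected port direction of the buffer.
+ * @msg: Message to fill in.
+ * @g: General buffer descriptor returned by the firmware.
+ */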
+static void convert_buffer_general(struct mvx_fw *fw,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_fw_msg *msg,
|
|
+ struct mve_buffer_general *g)
+{
|
|
+ struct mvx_buffer *buf = (struct mvx_buffer *)g->header.host_handle;
|
|
+
|
|
+ if (g->header.host_handle == MVX_FW_CODE_EOS)
|
|
+ return;
|
|
+
|
|
+ WARN_ON(buf->dir != dir);
|
|
+
|
|
+ msg->code = MVX_FW_CODE_BUFFER_GENERAL;
|
|
+ msg->buf = buf;
|
|
+}
|
|
+
|
|
+
|
|
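+/**
+ * convert_buffer_frame() - Convert a returned frame buffer to a host message.
+ * @fw: Pointer to firmware object.
+ * @dir: Expected port direction of the buffer.
+ * @msg: Message to fill in.
+ * @f: Frame buffer descriptor returned by the firmware.
+ *
+ * For output buffers the visible resolution and AFBC crop are taken from the
+ * descriptor; frame and AFBC flags are translated to MVX_BUFFER_* flags.
+ */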
+static void convert_buffer_frame(struct mvx_fw *fw,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_fw_msg *msg,
|
|
+ struct mve_buffer_frame *f)
|
|
+{
|
|
+ struct mvx_buffer *buf = (struct mvx_buffer *)f->host_handle;
|
|
+
|
|
+ if (f->host_handle == MVX_FW_CODE_EOS)
|
|
+ return;
|
|
+
|
|
+ WARN_ON(buf->dir != dir);
|
|
+
|
|
+ msg->code = MVX_FW_CODE_BUFFER;
|
|
+ msg->buf = buf;
|
|
+
|
|
+ if (dir == MVX_DIR_OUTPUT) {
|
|
+ unsigned int i;
|
|
+
|
|
+ buf->width = f->visible_frame_width;
|
|
+ buf->height = f->visible_frame_height;
|
|
+ if (buf->width == 0 || buf->height == 0 ||
|
|
+ (f->frame_flags & (MVE_BUFFER_FRAME_FLAG_TOP_PRESENT | MVE_BUFFER_FRAME_FLAG_BOT_PRESENT)) == 0)
|
|
+ for (i = 0; i < buf->nplanes; i++)
|
|
+ (void)mvx_buffer_filled_set(buf, i, 0, 0);
|
|
+
|
|
+ if (is_afbc(f->format) != false) {
|
|
+ struct mve_buffer_frame_afbc *afbc = &f->data.afbc;
|
|
+
|
|
+ buf->crop_left = afbc->cropx;
|
|
+ buf->crop_top = afbc->cropy;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ buf->user_data = f->user_data_tag;
|
|
+ buf->flags = 0;
|
|
+ if (f->frame_flags & MVE_BUFFER_FRAME_FLAG_EOS)
|
|
+ buf->flags |= MVX_BUFFER_EOS;
|
|
+
|
|
+ if (f->frame_flags & MVE_BUFFER_FRAME_FLAG_REJECTED)
|
|
+ buf->flags |= MVX_BUFFER_REJECTED;
|
|
+
|
|
+ if (f->frame_flags & MVE_BUFFER_FRAME_FLAG_CORRUPT)
|
|
+ buf->flags |= MVX_BUFFER_CORRUPT;
|
|
+
|
|
+ if (f->frame_flags & MVE_BUFFER_FRAME_FLAG_DECODE_ONLY)
|
|
+ buf->flags |= MVX_BUFFER_DECODE_ONLY;
|
|
+
|
|
+ if (f->frame_flags & (MVE_BUFFER_FRAME_FLAG_TOP_PRESENT | MVE_BUFFER_FRAME_FLAG_BOT_PRESENT)) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_PRESENT;
|
|
+ }
|
|
+
|
|
+ if (is_afbc(f->format) != false) {
|
|
+ struct mve_buffer_frame_afbc *afbc = &f->data.afbc;
|
|
+
|
|
+ if (afbc->afbc_params & MVE_BUFFER_FRAME_AFBC_TILED_HEADER)
|
|
+ buf->flags |= MVX_BUFFER_AFBC_TILED_HEADERS;
|
|
+
|
|
+ if (afbc->afbc_params & MVE_BUFFER_FRAME_AFBC_TILED_BODY)
|
|
+ buf->flags |= MVX_BUFFER_AFBC_TILED_BODY;
|
|
+
|
|
+ if (afbc->afbc_params & MVE_BUFFER_FRAME_AFBC_32X8_SUPERBLOCK)
|
|
+ buf->flags |= MVX_BUFFER_AFBC_32X8_SUPERBLOCK;
|
|
+ }
|
|
+}
|
|
+
|
|
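+/**
+ * convert_buffer_bitstream() - Convert a returned bitstream buffer to a host message.
+ * @fw: Pointer to firmware object.
+ * @dir: Expected port direction of the buffer.
+ * @msg: Message to fill in.
+ * @b: Bitstream buffer descriptor returned by the firmware.
+ *
+ * For output buffers the filled length and offset are taken from the
+ * descriptor; input buffers found in the session corrupt queue are flagged
+ * as MVX_BUFFER_CORRUPT.
+ */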
+static void convert_buffer_bitstream(struct mvx_fw *fw,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_fw_msg *msg,
|
|
+ struct mve_buffer_bitstream *b)
|
|
+{
|
|
+ struct mvx_buffer *buf = (struct mvx_buffer *)b->host_handle;
|
|
+
|
|
+ if (b->host_handle == MVX_FW_CODE_EOS)
|
|
+ return;
|
|
+
|
|
+ WARN_ON(buf->dir != dir);
|
|
+
|
|
+ msg->code = MVX_FW_CODE_BUFFER;
|
|
+ msg->buf = buf;
|
|
+
|
|
+ if (dir == MVX_DIR_OUTPUT)
|
|
+ mvx_buffer_filled_set(buf, 0, b->bitstream_filled_len,
|
|
+ b->bitstream_offset);
|
|
+
|
|
+ buf->user_data = b->user_data_tag;
|
|
+ buf->flags = 0;
|
|
+ if (dir == MVX_DIR_INPUT) {
|
|
+ struct mvx_corrupt_buffer *corrupt_buf;
|
|
+ struct mvx_corrupt_buffer *tmp;
|
|
+ list_for_each_entry_safe(corrupt_buf, tmp, &fw->session->buffer_corrupt_queue, head) {
|
|
+ if (corrupt_buf->user_data == buf->user_data) {
|
|
+ list_del(&corrupt_buf->head);
|
|
+ buf->flags |= MVX_BUFFER_CORRUPT;
|
|
+ vfree(corrupt_buf);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (b->bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_EOS)
|
|
+ buf->flags |= MVX_BUFFER_EOS;
|
|
+
|
|
+ if (b->bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_ENDOFFRAME)
|
|
+ buf->flags |= MVX_BUFFER_EOF;
|
|
+
|
|
+ if (b->bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_CODECCONFIG)
|
|
+ buf->flags |= MVX_BUFFER_CODEC_CONFIG;
|
|
+
|
|
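+ /* frame_type reported by the firmware: 0 = I-frame, 1 = P-frame, 2 = B-frame. */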
+ if (b->frame_type == 0) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_IFRAME;
|
|
+ } else if (b->frame_type == 1) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_PFRAME;
|
|
+ } else if (b->frame_type == 2) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_BFRAME;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int convert_buffer_param(struct mvx_fw *fw,
|
|
+ struct mvx_fw_msg *msg,
|
|
+ struct mve_buffer_param *p)
|
|
+{
|
|
+ switch (p->type) {
|
|
+ case MVE_BUFFER_PARAM_TYPE_COLOUR_DESCRIPTION: {
|
|
+ struct mve_buffer_param_colour_description *c =
|
|
+ &p->data.colour_description;
|
|
+ struct mvx_fw_color_desc *d = &msg->color_desc;
|
|
+
|
|
+ d->flags = 0;
|
|
+
|
|
+ switch (c->range) {
|
|
+ case MVE_BUFFER_PARAM_COLOUR_RANGE_UNSPECIFIED:
|
|
+ d->range = MVX_FW_RANGE_UNSPECIFIED;
|
|
+ break;
|
|
+ case MVE_BUFFER_PARAM_COLOUR_RANGE_LIMITED:
|
|
+ d->range = MVX_FW_RANGE_LIMITED;
|
|
+ break;
|
|
+ case MVE_BUFFER_PARAM_COLOUR_RANGE_FULL:
|
|
+ d->range = MVX_FW_RANGE_FULL;
|
|
+ break;
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Unknown fw buffer param color desc range. range=%u.",
|
|
+ c->range);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* Color primaries according to HEVC E.3.1. */
|
|
+ switch (c->colour_primaries) {
|
|
+ case 1:
|
|
+ d->primaries = MVX_FW_PRIMARIES_BT709;
|
|
+ break;
|
|
+ case 4:
|
|
+ d->primaries = MVX_FW_PRIMARIES_BT470M;
|
|
+ break;
|
|
+ case 5:
|
|
+ d->primaries = MVX_FW_PRIMARIES_BT601_625;
|
|
+ break;
|
|
+ case 6:
|
|
+ case 7:
|
|
+ d->primaries = MVX_FW_PRIMARIES_BT601_525;
|
|
+ break;
|
|
+ case 8:
|
|
+ d->primaries = MVX_FW_PRIMARIES_GENERIC_FILM;
|
|
+ break;
|
|
+ case 9:
|
|
+ d->primaries = MVX_FW_PRIMARIES_BT2020;
|
|
+ break;
|
|
+ default:
|
|
+ d->primaries = MVX_FW_PRIMARIES_UNSPECIFIED;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* Transfer characteristics according to HEVC E.3.1. */
|
|
+ switch (c->transfer_characteristics) {
|
|
+ case 12:
|
|
+ d->transfer = MVX_FW_TRANSFER_BT1361;
|
|
+ break;
|
|
+ case 1:
|
|
+ d->transfer = MVX_FW_TRANSFER_SMPTE170M;
|
|
+ break;
|
|
+ case 4:
|
|
+ d->transfer = MVX_FW_TRANSFER_GAMMA22;
|
|
+ break;
|
|
+ case 5:
|
|
+ d->transfer = MVX_FW_TRANSFER_GAMMA28;
|
|
+ break;
|
|
+ case 6:
|
|
+ case 14:
|
|
+ case 15:
|
|
+ d->transfer = MVX_FW_TRANSFER_SMPTE170M;
|
|
+ break;
|
|
+ case 7:
|
|
+ d->transfer = MVX_FW_TRANSFER_SMPTE240M;
|
|
+ break;
|
|
+ case 8:
|
|
+ d->transfer = MVX_FW_TRANSFER_LINEAR;
|
|
+ break;
|
|
+ case 9:
|
|
+ case 10:
|
|
+ d->transfer = MVX_FW_TRANSFER_UNSPECIFIED;
|
|
+ break;
|
|
+ case 18:
|
|
+ d->transfer = MVX_FW_TRANSFER_HLG;
|
|
+ break;
|
|
+ case 11:
|
|
+ d->transfer = MVX_FW_TRANSFER_XVYCC;
|
|
+ break;
|
|
+ case 13:
|
|
+ d->transfer = MVX_FW_TRANSFER_SRGB;
|
|
+ break;
|
|
+ case 16:
|
|
+ d->transfer = MVX_FW_TRANSFER_ST2084;
|
|
+ break;
|
|
+ case 17:
|
|
+ d->transfer = MVX_FW_TRANSFER_ST428;
|
|
+ break;
|
|
+ default:
|
|
+ d->transfer = MVX_FW_TRANSFER_UNSPECIFIED;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* Matrix coefficient according to HEVC E.3.1. */
|
|
+ switch (c->matrix_coeff) {
|
|
+ case 1:
|
|
+ d->matrix = MVX_FW_MATRIX_BT709;
|
|
+ break;
|
|
+ case 4:
|
|
+ d->matrix = MVX_FW_MATRIX_BT470M;
|
|
+ break;
|
|
+ case 5:
|
|
+ case 6:
|
|
+ d->matrix = MVX_FW_MATRIX_BT601;
|
|
+ break;
|
|
+ case 7:
|
|
+ d->matrix = MVX_FW_MATRIX_SMPTE240M;
|
|
+ break;
|
|
+ case 9:
|
|
+ d->matrix = MVX_FW_MATRIX_BT2020;
|
|
+ break;
|
|
+ case 10:
|
|
+ d->matrix = MVX_FW_MATRIX_BT2020Constant;
|
|
+ break;
|
|
+ default:
|
|
+ d->matrix = MVX_FW_MATRIX_UNSPECIFIED;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (c->flags &
|
|
+ MVE_BUFFER_PARAM_COLOUR_FLAG_MASTERING_DISPLAY_DATA_VALID) {
|
|
+ d->flags |= MVX_FW_COLOR_DESC_DISPLAY_VALID;
|
|
+
|
|
+ d->display.r.x = c->mastering_display_primaries_x[0];
|
|
+ d->display.r.y = c->mastering_display_primaries_y[0];
|
|
+ d->display.g.x = c->mastering_display_primaries_x[1];
|
|
+ d->display.g.y = c->mastering_display_primaries_y[1];
|
|
+ d->display.b.x = c->mastering_display_primaries_x[2];
|
|
+ d->display.b.y = c->mastering_display_primaries_y[2];
|
|
+ d->display.w.x = c->mastering_white_point_x;
|
|
+ d->display.w.y = c->mastering_white_point_y;
|
|
+
|
|
+ d->display.luminance_min =
|
|
+ c->min_display_mastering_luminance;
|
|
+ d->display.luminance_max =
|
|
+ c->max_display_mastering_luminance;
|
|
+ }
|
|
+
|
|
+ if (c->flags &
|
|
+ MVE_BUFFER_PARAM_COLOUR_FLAG_CONTENT_LIGHT_DATA_VALID) {
|
|
+ d->flags |= MVX_FW_COLOR_DESC_CONTENT_VALID;
|
|
+
|
|
+ d->content.luminance_max = c->max_content_light_level;
|
|
+ d->content.luminance_average =
|
|
+ c->avg_content_light_level;
|
|
+ }
|
|
+
|
|
+ msg->code = MVX_FW_CODE_COLOR_DESC;
|
|
+ break;
|
|
+ }
|
|
+ case MVE_BUFFER_PARAM_TYPE_DPB_HELD_FRAMES: {
|
|
+ msg->arg = p->data.arg;
|
|
+ msg->code = MVX_FW_CODE_DPB_HELD_FRAMES;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "Default buffer param. type=%d", p->type);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
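+/**
+ * get_buffer() - Read and convert one buffer message from a buffer queue.
+ * @fw: Pointer to firmware object.
+ * @host: Host communication area.
+ * @mve: MVE communication area.
+ * @dir: Port direction of the queue.
+ * @msg: Message to fill in.
+ * @channel: Firmware interface message type to log.
+ * @mve_buf_attr: Buffer attribute of the MVE communication area.
+ * @host_buf_attr: Buffer attribute of the host communication area.
+ *
+ * Return: 1 if a buffer message was read, 0 if no message was read or the
+ *         vb2 queue is released, else error code.
+ */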
+static int get_buffer(struct mvx_fw *fw,
|
|
+ struct mve_comm_area_host *host,
|
|
+ struct mve_comm_area_mve *mve,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_fw_msg *msg,
|
|
+ enum mvx_log_fwif_channel channel,
|
|
+ enum mvx_fw_buffer_attr mve_buf_attr,
|
|
+ enum mvx_fw_buffer_attr host_buf_attr)
|
|
+{
|
|
+ unsigned int code;
|
|
+ union {
|
|
+ struct mve_buffer_frame frame;
|
|
+ struct mve_buffer_bitstream bitstream;
|
|
+ struct mve_buffer_param param;
|
|
+ struct mve_buffer_general general;
|
|
+ } fw_msg;
|
|
+ size_t size = sizeof(fw_msg);
|
|
+ int ret;
|
|
+ struct mvx_session *session = fw->session;
|
|
+ struct mvx_v4l2_session *vsession =
|
|
+ container_of(session, struct mvx_v4l2_session, session);
|
|
+
|
|
+ ret = read_message(fw, host, mve, &code, &fw_msg, &size, channel, mve_buf_attr, host_buf_attr);
|
|
+ if (ret <= 0)
|
|
+ return ret;
|
|
+
|
|
+ if (vsession->port[dir].q_set == false) {
|
|
+ MVX_SESSION_WARN(session, "vb2 queue is released. dir=%d, code=%d.", dir, code);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ switch (code) {
|
|
+ case MVE_BUFFER_CODE_FRAME:
|
|
+ convert_buffer_frame(fw, dir, msg, &fw_msg.frame);
|
|
+ break;
|
|
+ case MVE_BUFFER_CODE_BITSTREAM:
|
|
+ convert_buffer_bitstream(fw, dir, msg, &fw_msg.bitstream);
|
|
+ break;
|
|
+ case MVE_BUFFER_CODE_PARAM:
|
|
+ convert_buffer_param(fw, msg, &fw_msg.param);
|
|
+ break;
|
|
+ case MVE_BUFFER_CODE_GENERAL:
|
|
+ convert_buffer_general(fw, dir, msg, &fw_msg.general);
|
|
+ break;
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Unknown fw buffer code. code=%u.", code);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static int get_message_v2(struct mvx_fw *fw,
|
|
+ struct mvx_fw_msg *msg)
|
|
+{
|
|
+ unsigned int code;
|
|
+ union {
|
|
+ struct mve_request_job job;
|
|
+ struct mve_response_state_change state_change;
|
|
+ struct mve_response_error error;
|
|
+ struct mve_response_frame_alloc_parameters alloc_param;
|
|
+ struct mve_response_sequence_parameters seq_param;
|
|
+ struct mve_response_set_option_fail set_option_fail;
|
|
+ struct mve_buffer_param buffer_param;
|
|
+ struct mve_response_event event;
|
|
+ } fw_msg;
|
|
+ size_t size = sizeof(fw_msg);
|
|
+ int ret;
|
|
+ struct mvx_session *session = fw->session;
|
|
+
|
|
+ ret = read_message(fw, fw->msg_host, fw->msg_mve, &code, &fw_msg,
|
|
+ &size, MVX_LOG_FWIF_CHANNEL_MESSAGE, fw->buf_attr[MVX_FW_REGION_MSG_MVE], fw->buf_attr[MVX_FW_REGION_MSG_HOST]);
|
|
+ if (ret <= 0)
|
|
+ return ret;
|
|
+
|
|
+ msg->code = MVX_FW_CODE_MAX;
|
|
+
|
|
+ switch (code) {
|
|
+ case MVE_RESPONSE_CODE_SWITCHED_IN:
|
|
+ msg->code = MVX_FW_CODE_SWITCH_IN;
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_SWITCHED_OUT:
|
|
+ msg->code = MVX_FW_CODE_SWITCH_OUT;
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_SET_OPTION_CONFIRM:
|
|
+ msg->code = MVX_FW_CODE_SET_OPTION;
|
|
+ fw->msg_pending--;
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_SET_OPTION_FAIL: {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Firmware set option failed. index=%u, msg=%s.",
|
|
+ fw_msg.set_option_fail.index,
|
|
+ fw_msg.set_option_fail.message);
|
|
+ msg->code = MVX_FW_CODE_SET_OPTION;
|
|
+ fw->msg_pending--;
|
|
+ break;
|
|
+ }
|
|
+ case MVE_RESPONSE_CODE_JOB_DEQUEUED:
|
|
+ msg->code = MVX_FW_CODE_JOB;
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_INPUT:
|
|
+ ret = get_buffer(fw, fw->buf_in_host, fw->buf_in_mve,
|
|
+ MVX_DIR_INPUT, msg,
|
|
+ MVX_LOG_FWIF_CHANNEL_INPUT_BUFFER, fw->buf_attr[MVX_FW_REGION_BUF_IN_MVE], fw->buf_attr[MVX_FW_REGION_BUF_IN_HOST]);
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_OUTPUT:
|
|
+ ret = get_buffer(fw, fw->buf_out_host, fw->buf_out_mve,
|
|
+ MVX_DIR_OUTPUT, msg,
|
|
+ MVX_LOG_FWIF_CHANNEL_OUTPUT_BUFFER, fw->buf_attr[MVX_FW_REGION_BUF_OUT_MVE], fw->buf_attr[MVX_FW_REGION_BUF_OUT_HOST]);
|
|
+ break;
|
|
+ case MVE_BUFFER_CODE_PARAM:
|
|
+ ret = convert_buffer_param(fw, msg, &fw_msg.buffer_param);
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_INPUT_FLUSHED:
|
|
+ msg->code = MVX_FW_CODE_FLUSH;
|
|
+ msg->flush.dir = MVX_DIR_INPUT;
|
|
+ fw->msg_pending--;
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_OUTPUT_FLUSHED:
|
|
+ msg->code = MVX_FW_CODE_FLUSH;
|
|
+ msg->flush.dir = MVX_DIR_OUTPUT;
|
|
+ fw->msg_pending--;
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_PONG:
|
|
+ msg->code = MVX_FW_CODE_PONG;
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_ERROR: {
|
|
+ msg->code = MVX_FW_CODE_ERROR;
|
|
+
|
|
+ switch (fw_msg.error.error_code) {
|
|
+ case MVE_ERROR_ABORT:
|
|
+ msg->error.error_code = MVX_FW_ERROR_ABORT;
|
|
+ break;
|
|
+ case MVE_ERROR_OUT_OF_MEMORY:
|
|
+ msg->error.error_code = MVX_FW_ERROR_OUT_OF_MEMORY;
|
|
+ break;
|
|
+ case MVE_ERROR_ASSERT:
|
|
+ msg->error.error_code = MVX_FW_ERROR_ASSERT;
|
|
+ break;
|
|
+ case MVE_ERROR_UNSUPPORTED:
|
|
+ msg->error.error_code = MVX_FW_ERROR_UNSUPPORTED;
|
|
+ break;
|
|
+ case MVE_ERROR_INVALID_BUFFER:
|
|
+ msg->error.error_code = MVX_FW_ERROR_INVALID_BUFFER;
|
|
+ break;
|
|
+ case MVE_ERROR_INVALID_STATE:
|
|
+ msg->error.error_code = MVX_FW_ERROR_INVALID_STATE;
|
|
+ break;
|
|
+ case MVE_ERROR_WATCHDOG:
|
|
+ msg->error.error_code = MVX_FW_ERROR_WATCHDOG;
|
|
+ break;
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Unsupported fw error code. code=%u.",
|
|
+ fw_msg.error.error_code);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ strlcpy(msg->error.message, fw_msg.error.message,
|
|
+ min(sizeof(msg->error.message),
|
|
+ sizeof(fw_msg.error.message)));
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVE_RESPONSE_CODE_STATE_CHANGE: {
|
|
+ msg->code = MVX_FW_CODE_STATE_CHANGE;
|
|
+
|
|
+ if (fw_msg.state_change.new_state == MVE_STATE_STOPPED)
|
|
+ msg->state = MVX_FW_STATE_STOPPED;
|
|
+ else
|
|
+ msg->state = MVX_FW_STATE_RUNNING;
|
|
+
|
|
+ fw->msg_pending--;
|
|
+ break;
|
|
+ }
|
|
+ case MVE_RESPONSE_CODE_DUMP:
|
|
+ msg->code = MVX_FW_CODE_DUMP;
|
|
+ fw->msg_pending--;
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_DEBUG:
|
|
+ msg->code = MVX_FW_CODE_DEBUG;
|
|
+ fw->msg_pending--;
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_IDLE:
|
|
+ msg->code = MVX_FW_CODE_IDLE;
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_FRAME_ALLOC_PARAM:
|
|
+ msg->code = MVX_FW_CODE_ALLOC_PARAM;
|
|
+ msg->alloc_param.width =
|
|
+ fw_msg.alloc_param.planar_alloc_frame_width;
|
|
+ msg->alloc_param.height =
|
|
+ fw_msg.alloc_param.planar_alloc_frame_height;
|
|
+ msg->alloc_param.afbc_alloc_bytes =
|
|
+ fw_msg.alloc_param.afbc_alloc_bytes;
|
|
+ msg->alloc_param.afbc_width =
|
|
+ fw_msg.alloc_param.afbc_width_in_superblocks;
|
|
+
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_SEQUENCE_PARAMETERS:
|
|
+ msg->code = MVX_FW_CODE_SEQ_PARAM;
|
|
+ msg->seq_param.planar.buffers_min =
|
|
+ fw_msg.seq_param.num_buffers_planar;
|
|
+ msg->seq_param.afbc.buffers_min =
|
|
+ fw_msg.seq_param.num_buffers_afbc;
|
|
+ session->port[MVX_DIR_OUTPUT].interlaced = fw_msg.seq_param.interlace;
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_EVENT:
|
|
+ if (MVE_EVENT_ERROR_STREAM_CORRUPT == fw_msg.event.event_code) {
|
|
+ int ret = 0;
|
|
+ bool is_find = false;
|
|
+ struct mvx_corrupt_buffer *buf_corrupt;
|
|
+ uint64_t user_data = 0;
|
|
+ char* tmp;
+ char* err_msg = fw_msg.event.event_data.message;
+ char* err_msg_sep = vmalloc(sizeof(char)*MVE_MAX_ERROR_MESSAGE_SIZE);
+ /* strsep() advances its cursor, so keep the original pointer for vfree(). */
+ char* cursor = err_msg_sep;
+ if (err_msg_sep != NULL) {
+ memcpy(err_msg_sep, err_msg, MVE_MAX_ERROR_MESSAGE_SIZE);
+ tmp = strsep(&cursor, ",");
+ while (tmp != NULL) {
+ ret = sscanf(tmp, "UDT[%llx]", &user_data);
+ if (ret == 1) {
+ is_find = true;
+ break;
+ }
+ tmp = strsep(&cursor, ",");
+ }
+ vfree(err_msg_sep);
+ }
|
|
+ if (!is_find) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, "EVENT code=%d %s.but not found UDT ret:%d",fw_msg.event.event_code,fw_msg.event.event_data.message,ret);
|
|
+ } else {
|
|
+ buf_corrupt = vmalloc(sizeof(struct mvx_corrupt_buffer));
+ if (buf_corrupt != NULL) {
+ buf_corrupt->user_data = user_data;
+ list_add(&buf_corrupt->head, &fw->session->buffer_corrupt_queue);
+ }
|
|
+ }
|
|
+ }
|
|
+ if (MVE_EVENT_ERROR_STREAM_NOT_SUPPORTED == fw_msg.event.event_code) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, "event STREAM_NOT_SUPPORTED. code=%d. %s.",fw_msg.event.event_code,fw_msg.event.event_data.message);
|
|
+ wake_up(&session->waitq);
|
|
+ session->event(session, MVX_SESSION_EVENT_ERROR, (void *)(-EINVAL));
|
|
+ }
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "EVENT code=%d. %s",
|
|
+ fw_msg.event.event_code,
|
|
+ (MVE_EVENT_ERROR_STREAM_CORRUPT == fw_msg.event.event_code
|
|
+ || MVE_EVENT_ERROR_STREAM_NOT_SUPPORTED == fw_msg.event.event_code) ? fw_msg.event.event_data.message : "");
|
|
+ if (fw_msg.event.event_code == MVE_EVENT_PROCESSED) {
|
|
+ session->bus_read_bytes_total += fw_msg.event.event_data.event_processed.bus_read_bytes;
|
|
+ session->bus_write_bytes_total += fw_msg.event.event_data.event_processed.bus_write_bytes;
|
|
+ if (fw_msg.event.event_data.event_processed.pic_format == 0) {
|
|
+ session->frame_id++;
|
|
+ }
|
|
+ }
|
|
+ break;
|
|
+ case MVE_RESPONSE_CODE_REF_FRAME_UNUSED:
|
|
+ break;
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Unknown fw message code. code=%u, size=%u.",
|
|
+ code, size);
|
|
+ msg->code = MVX_FW_CODE_UNKNOWN;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
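+/**
+ * put_buffer_general() - Queue a general buffer to the firmware.
+ * @fw: Pointer to firmware object.
+ * @host: Host communication area.
+ * @mve: MVE communication area.
+ * @msg: Message carrying the buffer to queue.
+ * @channel: Firmware interface message type to log.
+ * @mve_buf_attr: Buffer attribute of the MVE communication area.
+ * @host_buf_attr: Buffer attribute of the host communication area.
+ *
+ * Return: 0 on success, else error code.
+ */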
+static int put_buffer_general(struct mvx_fw *fw,
|
|
+ struct mve_comm_area_host *host,
|
|
+ struct mve_comm_area_mve *mve,
|
|
+ struct mvx_fw_msg *msg,
|
|
+ enum mvx_log_fwif_channel channel,
|
|
+ enum mvx_fw_buffer_attr mve_buf_attr,
|
|
+ enum mvx_fw_buffer_attr host_buf_attr)
|
|
+{
|
|
+ int ret;
|
|
+ struct mve_buffer_general g = { 0 };
|
|
+ struct mvx_buffer *buf = msg->buf;
|
|
+ g.header.host_handle = (ptrdiff_t)buf;
|
|
+ g.header.user_data_tag = buf->user_data;
|
|
+ g.header.buffer_ptr = mvx_buffer_va(buf, 0);
|
|
+ g.header.buffer_size = buf->general.header.buffer_size;
|
|
+ g.header.config_size = buf->general.header.config_size;
|
|
+ g.header.type = buf->general.header.type;
|
|
+
|
|
+ memcpy(&g.config, &buf->general.config, sizeof(buf->general.config));
|
|
+ ret = write_message(fw, host, mve, MVE_BUFFER_CODE_GENERAL, &g,
|
|
+ sizeof(g), channel, mve_buf_attr, host_buf_attr);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
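+/**
+ * put_buffer_frame() - Queue a frame buffer to the firmware.
+ * @fw: Pointer to firmware object.
+ * @host: Host communication area.
+ * @mve: MVE communication area.
+ * @msg: Message carrying the buffer to queue.
+ * @channel: Firmware interface message type to log.
+ * @mve_buf_attr: Buffer attribute of the MVE communication area.
+ * @host_buf_attr: Buffer attribute of the host communication area.
+ *
+ * The mvx buffer flags, format, strides and rotation/scaling fields are
+ * translated into a struct mve_buffer_frame before it is written to the
+ * given buffer queue.
+ *
+ * Return: 0 on success, else error code.
+ */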
+static int put_buffer_frame(struct mvx_fw *fw,
|
|
+ struct mve_comm_area_host *host,
|
|
+ struct mve_comm_area_mve *mve,
|
|
+ struct mvx_fw_msg *msg,
|
|
+ enum mvx_log_fwif_channel channel,
|
|
+ enum mvx_fw_buffer_attr mve_buf_attr,
|
|
+ enum mvx_fw_buffer_attr host_buf_attr)
|
|
+{
|
|
+ struct mve_buffer_frame f = { 0 };
|
|
+ struct mvx_buffer *buf = msg->buf;
|
|
+ struct mvx_session *session = fw->session;
|
|
+ int ret;
|
|
+ int stride_shift = 0, stride = 0;
|
|
+ unsigned int strideRot[MVX_BUFFER_NPLANES];
|
|
+ int max_height;
|
|
+ uint32_t scaling_shift = 0;
|
|
+ uint32_t rotation = (buf->flags & MVX_BUFFER_FRAME_FLAG_ROTATION_MASK) >> 12;
|
|
+ scaling_shift = (buf->flags & MVX_BUFFER_FRAME_FLAG_SCALING_MASK) >> 14;
|
|
+ f.host_handle = (ptrdiff_t)buf;
|
|
+ f.user_data_tag = buf->user_data;
|
|
+
|
|
+ if (buf->dir == MVX_DIR_INPUT) {
|
|
+ f.visible_frame_width = buf->width;
|
|
+ f.visible_frame_height = buf->height;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_EOS)
|
|
+ f.frame_flags |= MVE_BUFFER_FRAME_FLAG_EOS;
|
|
+
|
|
+ if (buf->planes[0].filled != 0)
|
|
+ f.frame_flags |= MVE_BUFFER_FRAME_FLAG_TOP_PRESENT;
|
|
+ }
|
|
+
|
|
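+ /*
+ * Pack the downscale ratios into frame_flags: (hor - 1) is stored from bit
+ * 24 and (ver - 1) from bit 17, e.g. a 2:1/2:1 downscale stores 1 in both
+ * fields.
+ */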
+ if (buf->dir == MVX_DIR_OUTPUT && (session->dsl_ratio.hor != 1 || session->dsl_ratio.ver != 1)) {
|
|
+ f.frame_flags |= ((session->dsl_ratio.hor - 1) << 24 | (session->dsl_ratio.ver - 1) << 17);
|
|
+ }
|
|
+ if (buf->flags & MVX_BUFFER_INTERLACE)
|
|
+ f.frame_flags |= MVE_BUFFER_FRAME_FLAG_INTERLACE;
|
|
+
|
|
+ f.frame_flags |= (buf->flags & MVX_BUFFER_FRAME_FLAG_ROTATION_MASK) >> 8;
|
|
+ f.frame_flags |= (buf->flags & MVX_BUFFER_FRAME_FLAG_MIRROR_MASK) >> 8;
|
|
+ f.frame_flags |= (buf->flags & MVX_BUFFER_FRAME_FLAG_SCALING_MASK) >> 8;
|
|
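+ /*
+ * For rotated output (rotation field values 1 and 3, assumed to mean 90
+ * and 270 degrees) the per-plane stride is recomputed from the frame
+ * height using the get_stride90() factors.
+ */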
+ if (buf->dir == MVX_DIR_OUTPUT && (rotation == 1 || rotation == 3)) {
|
|
+ uint8_t nplanes = 0;
|
|
+ unsigned int stride90[MVX_BUFFER_NPLANES][2];
|
|
+ int i;
|
|
+ get_stride90(buf->format, &nplanes, stride90);
|
|
+ for (i = 0; i < buf->nplanes; i++) {
|
|
+ const unsigned int stride_align = 1;
|
|
+ unsigned int tmp = DIV_ROUND_UP(buf->height * stride90[i][0], 2);
|
|
+ strideRot[i] = round_up(tmp, stride_align);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ switch (buf->format) {
|
|
+ case MVX_FORMAT_YUV420_AFBC_8:
|
|
+ f.format = MVE_FORMAT_YUV420_AFBC_8;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_AFBC_10:
|
|
+ f.format = MVE_FORMAT_YUV420_AFBC_10;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV422_AFBC_8:
|
|
+ f.format = MVE_FORMAT_YUV422_AFBC_8;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV422_AFBC_10:
|
|
+ f.format = MVE_FORMAT_YUV422_AFBC_10;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_I420:
|
|
+ f.format = MVE_FORMAT_YUV420_I420;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_NV12:
|
|
+ f.format = MVE_FORMAT_YUV420_NV12;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_NV21:
|
|
+ f.format = MVE_FORMAT_YUV420_NV21;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_P010:
|
|
+ f.format = MVE_FORMAT_YUV420_P010;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_Y0L2:
|
|
+ f.format = MVE_FORMAT_YUV420_Y0L2;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_AQB1:
|
|
+ f.format = MVE_FORMAT_YUV420_AQB1;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV422_YUY2:
|
|
+ f.format = MVE_FORMAT_YUV422_YUY2;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV422_UYVY:
|
|
+ f.format = MVE_FORMAT_YUV422_UYVY;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV422_Y210:
|
|
+ f.format = MVE_FORMAT_YUV422_Y210;
|
|
+ break;
|
|
+ case MVX_FORMAT_RGBA_8888:
|
|
+ f.format = MVE_FORMAT_RGBA_8888;
|
|
+ break;
|
|
+ case MVX_FORMAT_BGRA_8888:
|
|
+ f.format = MVE_FORMAT_BGRA_8888;
|
|
+ break;
|
|
+ case MVX_FORMAT_ARGB_8888:
|
|
+ f.format = MVE_FORMAT_ARGB_8888;
|
|
+ break;
|
|
+ case MVX_FORMAT_ABGR_8888:
|
|
+ f.format = MVE_FORMAT_ABGR_8888;
|
|
+ break;
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Unsupported frame format. format=%u.",
|
|
+ buf->format);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (is_afbc(f.format) == false) {
|
|
+ struct mve_buffer_frame_planar *planar = &f.data.planar;
|
|
+ int i;
|
|
+
|
|
+ if (f.frame_flags & MVE_BUFFER_FRAME_FLAG_INTERLACE){
|
|
+ max_height = buf->width;
|
|
+ stride_shift = 1;
|
|
+ max_height >>= 1;
|
|
+ }
|
|
+#if 0
|
|
+ for (i = 0; i < buf->nplanes; i++) {
|
|
+ struct mvx_buffer_plane *plane = &buf->planes[i];
|
|
+
|
|
+ if (plane->stride > 0)
|
|
+ planar->plane_top[i] = mvx_buffer_va(buf, i);
|
|
+
|
|
+ planar->stride[i] = plane->stride;
|
|
+ planar->plane_bot[i] = 0;
|
|
+
|
|
+ if (f.frame_flags & MVE_BUFFER_FRAME_FLAG_INTERLACE)
|
|
+
|
|
+ planar->plane_bot[i] = planar->plane_top[i] +
|
|
+ DIV_ROUND_UP(
|
|
+ plane->filled, 2);
|
|
+ }
|
|
+#else
|
|
+ for (i = 0; i < buf->nplanes; i++) {
|
|
+ struct mvx_buffer_plane *plane = &buf->planes[i];
|
|
+
|
|
+ if (plane->stride > 0) {
|
|
+ planar->plane_top[i] = mvx_buffer_va(buf, i);
|
|
+ }
|
|
+
|
|
+ if (f.frame_flags & MVE_BUFFER_FRAME_FLAG_INTERLACE) {
|
|
+ // interlace mode
|
|
+ stride = plane->stride;
|
|
+ //stride_shift = 1;
|
|
+ if (stride_shift) {
|
|
+ stride = round_up(stride, 2) << stride_shift;
|
|
+ }
|
|
+ planar->stride[i] = stride;
|
|
+ planar->plane_bot[i] = planar->plane_top[i] +
|
|
+ (round_up(stride, 2) >> stride_shift);
|
|
+ if (buf->dir == MVX_DIR_OUTPUT && (rotation == 1 || rotation == 3)) {
|
|
+ planar->stride[i] = strideRot[i];
|
|
+ }
|
|
+ } else {
|
|
+ // frame mode
|
|
+ if (buf->dir == MVX_DIR_OUTPUT && (rotation == 1 || rotation == 3)) {
|
|
+ planar->stride[i] = strideRot[i];
|
|
+ } else {
|
|
+ planar->stride[i] = plane->stride;
|
|
+ }
|
|
+ planar->plane_bot[i] = 0;
|
|
+
|
|
+ }
|
|
+ }
|
|
+
|
|
+#endif
|
|
+ if (buf->dir == MVX_DIR_OUTPUT && (rotation == 1 || rotation == 3)) {
|
|
+ planar->max_frame_width = buf->height;
|
|
+ planar->max_frame_height = buf->width;
|
|
+ } else {
|
|
+ planar->max_frame_width = buf->width;
|
|
+ planar->max_frame_height = buf->height;
|
|
+ }
|
|
+ } else {
|
|
+ struct mve_buffer_frame_afbc *afbc = &f.data.afbc;
|
|
+
|
|
+ afbc->afbc_width_in_superblocks[0] = buf->planes[0].afbc_width;
|
|
+ afbc->plane[0] = mvx_buffer_va(buf, 0);
|
|
+
|
|
+ if (f.frame_flags & MVE_BUFFER_FRAME_FLAG_INTERLACE) {
|
|
+ afbc->alloc_bytes[0] =
|
|
+ ALIGN((buf->planes[0].filled / 2), 32);
|
|
+ afbc->alloc_bytes[1] =
|
|
+ buf->planes[0].filled - afbc->alloc_bytes[0];
|
|
+ afbc->plane[1] =
|
|
+ afbc->plane[0] + afbc->alloc_bytes[0];
|
|
+ afbc->afbc_width_in_superblocks[1] =
|
|
+ afbc->afbc_width_in_superblocks[0];
|
|
+ } else {
|
|
+ afbc->alloc_bytes[0] = buf->planes[0].filled;
|
|
+ }
|
|
+
|
|
+ afbc->afbc_params = 0;
|
|
+ if (buf->flags & MVX_BUFFER_AFBC_TILED_HEADERS)
|
|
+ afbc->afbc_params |= MVE_BUFFER_FRAME_AFBC_TILED_HEADER;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_AFBC_TILED_BODY)
|
|
+ afbc->afbc_params |= MVE_BUFFER_FRAME_AFBC_TILED_BODY;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_AFBC_32X8_SUPERBLOCK)
|
|
+ afbc->afbc_params |=
|
|
+ MVE_BUFFER_FRAME_AFBC_32X8_SUPERBLOCK;
|
|
+ if (buf->flags & MVX_BUFFER_AFBC_BLOCK_SPLIT)
|
|
+ afbc->afbc_params |=
|
|
+ MVE_BUFFER_FRAME_AFBC_BLOCK_SPLIT;
|
|
+ }
|
|
+
|
|
+ ret = write_message(fw, host, mve, MVE_BUFFER_CODE_FRAME,
|
|
+ &f, sizeof(f), channel, mve_buf_attr, host_buf_attr);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
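+/**
+ * put_buffer_bitstream() - Queue a bitstream buffer to the firmware.
+ * @fw: Pointer to firmware object.
+ * @host: Host communication area.
+ * @mve: MVE communication area.
+ * @msg: Message carrying the buffer to queue.
+ * @channel: Firmware interface message type to log.
+ * @mve_buf_attr: Buffer attribute of the MVE communication area.
+ * @host_buf_attr: Buffer attribute of the host communication area.
+ *
+ * Return: 0 on success, else error code.
+ */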
+static int put_buffer_bitstream(struct mvx_fw *fw,
|
|
+ struct mve_comm_area_host *host,
|
|
+ struct mve_comm_area_mve *mve,
|
|
+ struct mvx_fw_msg *msg,
|
|
+ enum mvx_log_fwif_channel channel,
|
|
+ enum mvx_fw_buffer_attr mve_buf_attr,
|
|
+ enum mvx_fw_buffer_attr host_buf_attr)
|
|
+{
|
|
+ struct mve_buffer_bitstream b = { 0 };
|
|
+ struct mvx_buffer *buf = msg->buf;
|
|
+ int ret;
|
|
+
|
|
+ if (buf->dir == MVX_DIR_INPUT)
|
|
+ b.bitstream_filled_len = buf->planes[0].filled;
|
|
+
|
|
+ b.host_handle = (ptrdiff_t)buf;
|
|
+ b.user_data_tag = buf->user_data;
|
|
+ b.bitstream_alloc_bytes = mvx_buffer_size(buf, 0);
|
|
+ b.bitstream_buf_addr = mvx_buffer_va(buf, 0);
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_EOS)
|
|
+ b.bitstream_flags |= MVE_BUFFER_BITSTREAM_FLAG_EOS;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_EOF)
|
|
+ b.bitstream_flags |= MVE_BUFFER_BITSTREAM_FLAG_ENDOFFRAME;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_END_OF_SUB_FRAME) {
|
|
+ b.bitstream_flags |= MVE_BUFFER_BITSTREAM_FLAG_ENDOFSUBFRAME;
|
|
+ }
|
|
+ if (buf->flags & MVX_BUFFER_CODEC_CONFIG) {
|
|
+ b.bitstream_flags |= MVE_BUFFER_BITSTREAM_FLAG_CODECCONFIG;
|
|
+ b.bitstream_flags |= MVE_BUFFER_BITSTREAM_FLAG_ENDOFSUBFRAME;
|
|
+ }
|
|
+
|
|
+ ret = write_message(fw, host, mve, MVE_BUFFER_CODE_BITSTREAM, &b,
|
|
+ sizeof(b), channel, mve_buf_attr, host_buf_attr);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int to_mve_nalu_format(enum mvx_nalu_format fmt,
|
|
+ int *mve_val)
|
|
+{
|
|
+ switch (fmt) {
|
|
+ case MVX_NALU_FORMAT_START_CODES:
|
|
+ *mve_val = MVE_OPT_NALU_FORMAT_START_CODES;
|
|
+ break;
|
|
+ case MVX_NALU_FORMAT_ONE_NALU_PER_BUFFER:
|
|
+ *mve_val = MVE_OPT_NALU_FORMAT_ONE_NALU_PER_BUFFER;
|
|
+ break;
|
|
+ case MVX_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD:
|
|
+ *mve_val = MVE_OPT_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD;
|
|
+ break;
|
|
+ case MVX_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD:
|
|
+ *mve_val = MVE_OPT_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD;
|
|
+ break;
|
|
+ case MVX_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD:
|
|
+ *mve_val = MVE_OPT_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_fw_to_mve_profile_v2(unsigned int mvx_profile,
|
|
+ uint16_t *mve_profile)
|
|
+{
|
|
+ switch (mvx_profile) {
|
|
+ case MVX_PROFILE_H264_BASELINE:
|
|
+ *mve_profile = MVE_OPT_PROFILE_H264_BASELINE;
|
|
+ break;
|
|
+ case MVX_PROFILE_H264_MAIN:
|
|
+ *mve_profile = MVE_OPT_PROFILE_H264_MAIN;
|
|
+ break;
|
|
+ case MVX_PROFILE_H264_HIGH:
|
|
+ *mve_profile = MVE_OPT_PROFILE_H264_HIGH;
|
|
+ break;
|
|
+ case MVX_PROFILE_H265_MAIN:
|
|
+ *mve_profile = MVE_OPT_PROFILE_H265_MAIN;
|
|
+ break;
|
|
+ case MVX_PROFILE_H265_MAIN_STILL:
|
|
+ *mve_profile = MVE_OPT_PROFILE_H265_MAIN_STILL;
|
|
+ break;
|
|
+ case MVX_PROFILE_H265_MAIN_INTRA:
|
|
+ *mve_profile = MVE_OPT_PROFILE_H265_MAIN_INTRA;
|
|
+ break;
|
|
+ case MVX_PROFILE_VC1_SIMPLE:
|
|
+ *mve_profile = MVE_OPT_PROFILE_VC1_SIMPLE;
|
|
+ break;
|
|
+ case MVX_PROFILE_VC1_MAIN:
|
|
+ *mve_profile = MVE_OPT_PROFILE_VC1_MAIN;
|
|
+ break;
|
|
+ case MVX_PROFILE_VC1_ADVANCED:
|
|
+ *mve_profile = MVE_OPT_PROFILE_VC1_ADVANCED;
|
|
+ break;
|
|
+ case MVX_PROFILE_VP8_MAIN:
|
|
+ *mve_profile = MVE_OPT_PROFILE_VP8_MAIN;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_fw_to_mve_level_v2(unsigned int mvx_level,
|
|
+ uint16_t *mve_level)
|
|
+{
|
|
+ switch (mvx_level) {
|
|
+ case MVX_LEVEL_NONE:
|
|
+ *mve_level = 0;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_1:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_1;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_1b:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_1b;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_11:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_11;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_12:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_12;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_13:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_13;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_2:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_2;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_21:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_21;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_22:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_22;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_3:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_3;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_31:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_31;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_32:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_32;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_4:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_4;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_41:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_41;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_42:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_42;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_5:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_5;
|
|
+ break;
|
|
+ case MVX_LEVEL_H264_51:
|
|
+ *mve_level = MVE_OPT_LEVEL_H264_51;
|
|
+ break;
|
|
+
|
|
+ /**
|
|
+ * Levels supported by the HW but not by V4L2 controls API.
|
|
+ *
|
|
+ * case MVX_LEVEL_H264_52:
|
|
+ * mve_level = MVE_OPT_LEVEL_H264_52;
|
|
+ * break;
|
|
+ * case MVX_LEVEL_H264_6:
|
|
+ * mve_level = MVE_OPT_LEVEL_H264_6;
|
|
+ * break;
|
|
+ * case MVX_LEVEL_H264_61:
|
|
+ * mve_level = MVE_OPT_LEVEL_H264_61;
|
|
+ * break;
|
|
+ * case MVX_LEVEL_H264_62:
|
|
+ * mve_level = MVE_OPT_LEVEL_H264_62;
|
|
+ * break;
|
|
+ */
|
|
+ case MVX_LEVEL_H265_MAIN_1:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_1;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_1:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_1;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_MAIN_2:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_2;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_2:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_2;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_MAIN_21:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_21;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_21:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_21;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_MAIN_3:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_3;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_3:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_3;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_MAIN_31:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_31;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_31:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_31;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_MAIN_4:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_4;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_4:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_4;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_MAIN_41:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_41;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_41:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_41;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_MAIN_5:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_5;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_5:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_5;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_MAIN_51:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_51;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_51:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_51;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_MAIN_52:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_52;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_52:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_52;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_MAIN_6:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_6;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_6:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_6;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_MAIN_61:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_61;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_61:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_61;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_MAIN_62:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_62;
|
|
+ break;
|
|
+ case MVX_LEVEL_H265_HIGH_62:
|
|
+ *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_62;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int to_mve_gop_type(enum mvx_gop_type gop,
|
|
+ unsigned int *mve_arg)
|
|
+{
|
|
+ switch (gop) {
|
|
+ case MVX_GOP_TYPE_BIDIRECTIONAL:
|
|
+ *mve_arg = MVE_OPT_GOP_TYPE_BIDIRECTIONAL;
|
|
+ break;
|
|
+ case MVX_GOP_TYPE_LOW_DELAY:
|
|
+ *mve_arg = MVE_OPT_GOP_TYPE_LOW_DELAY;
|
|
+ break;
|
|
+ case MVX_GOP_TYPE_PYRAMID:
|
|
+ *mve_arg = MVE_OPT_GOP_TYPE_PYRAMID;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int to_mve_h264_cabac(enum mvx_entropy_mode entropy_mode,
|
|
+ unsigned int *mve_arg)
|
|
+{
|
|
+ switch (entropy_mode) {
|
|
+ case MVX_ENTROPY_MODE_CABAC:
|
|
+ *mve_arg = 1;
|
|
+ break;
|
|
+ case MVX_ENTROPY_MODE_CAVLC:
|
|
+ *mve_arg = 0;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int to_mve_vp9_prob_update(enum mvx_vp9_prob_update prob_update,
|
|
+ unsigned int *mve_arg)
|
|
+{
|
|
+ switch (prob_update) {
|
|
+ case MVX_VP9_PROB_UPDATE_DISABLED:
|
|
+ *mve_arg = MVE_OPT_VP9_PROB_UPDATE_DISABLED;
|
|
+ break;
|
|
+ case MVX_VP9_PROB_UPDATE_IMPLICIT:
|
|
+ *mve_arg = MVE_OPT_VP9_PROB_UPDATE_IMPLICIT;
|
|
+ break;
|
|
+ case MVX_VP9_PROB_UPDATE_EXPLICIT:
|
|
+ *mve_arg = MVE_OPT_VP9_PROB_UPDATE_EXPLICIT;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int to_mve_rgb_to_yuv(enum mvx_rgb_to_yuv_mode mode,
|
|
+ unsigned int *mve_arg)
|
|
+{
|
|
+ switch (mode) {
|
|
+ case MVX_RGB_TO_YUV_MODE_BT601_STUDIO:
|
|
+ *mve_arg = MVE_OPT_RGB_TO_YUV_BT601_STUDIO;
|
|
+ break;
|
|
+ case MVX_RGB_TO_YUV_MODE_BT601_FULL:
|
|
+ *mve_arg = MVE_OPT_RGB_TO_YUV_BT601_FULL;
|
|
+ break;
|
|
+ case MVX_RGB_TO_YUV_MODE_BT709_STUDIO:
|
|
+ *mve_arg = MVE_OPT_RGB_TO_YUV_BT709_STUDIO;
|
|
+ break;
|
|
+ case MVX_RGB_TO_YUV_MODE_BT709_FULL:
|
|
+ *mve_arg = MVE_OPT_RGB_TO_YUV_BT709_FULL;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int put_fw_opt(struct mvx_fw *fw,
|
|
+ struct mve_request_set_option *opt,
|
|
+ size_t size)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = write_message(fw, fw->msg_host, fw->msg_mve,
|
|
+ MVE_REQUEST_CODE_SET_OPTION,
|
|
+ opt, offsetof(typeof(*opt), data) + size,
|
|
+ MVX_LOG_FWIF_CHANNEL_MESSAGE, fw->buf_attr[MVX_FW_REGION_MSG_MVE], fw->buf_attr[MVX_FW_REGION_MSG_HOST]);
|
|
+
|
|
+ if (ret == 0)
|
|
+ fw->msg_pending++;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int put_fw_buf_param(struct mvx_fw *fw,
|
|
+ struct mve_buffer_param *param,
|
|
+ size_t size)
|
|
+{
|
|
+ return write_message(fw, fw->buf_in_host, fw->buf_in_mve,
|
|
+ MVE_BUFFER_CODE_PARAM,
|
|
+ param, offsetof(typeof(*param), data) + size,
|
|
+ MVX_LOG_FWIF_CHANNEL_MESSAGE, fw->buf_attr[MVX_FW_REGION_BUF_IN_MVE], fw->buf_attr[MVX_FW_REGION_BUF_IN_HOST]);
|
|
+}
|
|
+
|
|
+static int put_message_v2(struct mvx_fw *fw,
|
|
+ struct mvx_fw_msg *msg)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ switch (msg->code) {
|
|
+ case MVX_FW_CODE_STATE_CHANGE: {
|
|
+ unsigned int code = msg->state == MVX_FW_STATE_STOPPED ?
|
|
+ MVE_REQUEST_CODE_STOP :
|
|
+ MVE_REQUEST_CODE_GO;
|
|
+
|
|
+ ret = write_message(fw, fw->msg_host, fw->msg_mve,
|
|
+ code, NULL, 0,
|
|
+ MVX_LOG_FWIF_CHANNEL_MESSAGE, fw->buf_attr[MVX_FW_REGION_MSG_MVE], fw->buf_attr[MVX_FW_REGION_MSG_HOST]);
|
|
+ if (ret == 0)
|
|
+ fw->msg_pending++;
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_JOB: {
|
|
+ struct mve_request_job job;
|
|
+
|
|
+ job.cores = msg->job.cores;
|
|
+ job.frames = msg->job.frames;
|
|
+ job.flags = 0;
|
|
+
|
|
+ ret = write_message(fw, fw->msg_host, fw->msg_mve,
|
|
+ MVE_REQUEST_CODE_JOB, &job, sizeof(job),
|
|
+ MVX_LOG_FWIF_CHANNEL_MESSAGE, fw->buf_attr[MVX_FW_REGION_MSG_MVE], fw->buf_attr[MVX_FW_REGION_MSG_HOST]);
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_SWITCH_OUT: {
|
|
+ ret = write_message(fw, fw->msg_host, fw->msg_mve,
|
|
+ MVE_REQUEST_CODE_SWITCH, NULL, 0,
|
|
+ MVX_LOG_FWIF_CHANNEL_MESSAGE, fw->buf_attr[MVX_FW_REGION_MSG_MVE], fw->buf_attr[MVX_FW_REGION_MSG_HOST]);
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_PING: {
|
|
+ ret = write_message(fw, fw->msg_host, fw->msg_mve,
|
|
+ MVE_REQUEST_CODE_PING, NULL, 0,
|
|
+ MVX_LOG_FWIF_CHANNEL_MESSAGE, fw->buf_attr[MVX_FW_REGION_MSG_MVE], fw->buf_attr[MVX_FW_REGION_MSG_HOST]);
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_SET_OPTION: {
|
|
+ switch (msg->set_option.code) {
|
|
+ case MVX_FW_SET_FRAME_RATE: {
|
|
+ struct mve_buffer_param param;
|
|
+
|
|
+ param.type = MVE_BUFFER_PARAM_TYPE_FRAME_RATE;
|
|
+ param.data.arg = msg->set_option.frame_rate;
|
|
+ ret = put_fw_buf_param(fw, ¶m,
|
|
+ sizeof(param.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_TARGET_BITRATE: {
|
|
+ struct mve_buffer_param param;
|
|
+
|
|
+ param.type = MVE_BUFFER_PARAM_TYPE_RATE_CONTROL;
|
|
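+ /* A target bitrate of zero disables rate control. */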
+ if (msg->set_option.target_bitrate == 0) {
|
|
+ param.data.rate_control.rate_control_mode =
|
|
+ MVE_OPT_RATE_CONTROL_MODE_OFF;
|
|
+ param.data.rate_control.target_bitrate = 0;
|
|
+ } else {
|
|
+ param.data.rate_control.rate_control_mode =
|
|
+ MVE_OPT_RATE_CONTROL_MODE_STANDARD;
|
|
+ param.data.rate_control.target_bitrate =
|
|
+ msg->set_option.target_bitrate;
|
|
+ }
|
|
+
|
|
+ ret = put_fw_buf_param(fw, ¶m,
|
|
+ sizeof(param.data.rate_control));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_RATE_CONTROL: {
|
|
+ struct mve_buffer_param param;
|
|
+
|
|
+ param.type = MVE_BUFFER_PARAM_TYPE_RATE_CONTROL;
|
|
+ if (msg->set_option.rate_control.target_bitrate == 0) {
|
|
+ param.data.rate_control.rate_control_mode =
|
|
+ MVE_OPT_RATE_CONTROL_MODE_OFF;
|
|
+ param.data.rate_control.target_bitrate = 0;
|
|
+ } else {
|
|
+ param.data.rate_control.rate_control_mode =
|
|
+ msg->set_option.rate_control.rate_control_mode;
|
|
+ param.data.rate_control.target_bitrate =
|
|
+ msg->set_option.rate_control.target_bitrate;
|
|
+ if (msg->set_option.rate_control.rate_control_mode == MVX_OPT_RATE_CONTROL_MODE_C_VARIABLE) {
|
|
+ param.data.rate_control.maximum_bitrate =
|
|
+ msg->set_option.rate_control.maximum_bitrate;
|
|
+ }
|
|
+ }
|
|
+ ret = put_fw_buf_param(fw, ¶m,
|
|
+ sizeof(param.data.rate_control));
|
|
+ break;
|
|
+
|
|
+ }
|
|
+ case MVX_FW_SET_CROP_LEFT: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_CROP_RARAM_LEFT;
|
|
+ opt.data.arg = msg->set_option.crop_left;
|
|
+
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_CROP_RIGHT: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_CROP_RARAM_RIGHT;
|
|
+ opt.data.arg = msg->set_option.crop_right;
|
|
+
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_CROP_TOP: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_CROP_RARAM_TOP;
|
|
+ opt.data.arg = msg->set_option.crop_top;
|
|
+
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_CROP_BOTTOM: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_CROP_RARAM_BOTTOM;
|
|
+ opt.data.arg = msg->set_option.crop_bottom;
|
|
+
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_INDEX_PROFILING: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_PROFILING;
|
|
+ opt.data.arg = msg->set_option.index_profiling;
|
|
+
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_HRD_BUF_SIZE: {
|
|
+ struct mve_buffer_param param;
|
|
+
|
|
+ param.type = MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_HRD_BUF_SIZE;
|
|
+ param.data.arg = msg->set_option.nHRDBufsize;
|
|
+ ret = put_fw_buf_param(fw, ¶m,
|
|
+ sizeof(param.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_COLOUR_DESC: {
|
|
+ struct mve_buffer_param param;
|
|
+
|
|
+ param.type = MVE_BUFFER_PARAM_TYPE_COLOUR_DESCRIPTION;
|
|
+ param.data.colour_description.flags = msg->set_option.colour_desc.flags;
|
|
+ switch (msg->set_option.colour_desc.range)
|
|
+ {
|
|
+ case MVX_FW_RANGE_UNSPECIFIED:
|
|
+ param.data.colour_description.range = MVE_BUFFER_PARAM_COLOUR_RANGE_UNSPECIFIED;
|
|
+ break;
|
|
+ case MVX_FW_RANGE_LIMITED:
|
|
+ param.data.colour_description.range = MVE_BUFFER_PARAM_COLOUR_RANGE_LIMITED;
|
|
+ break;
|
|
+ case MVX_FW_RANGE_FULL:
|
|
+ param.data.colour_description.range = MVE_BUFFER_PARAM_COLOUR_RANGE_FULL;
|
|
+ break;
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Unknown fw buffer param color desc range. range=%u.",
|
|
+ msg->set_option.colour_desc.range);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ /* Color primaries according to HEVC E.3.1. */
|
|
+ switch (msg->set_option.colour_desc.primaries) {
|
|
+ case MVX_FW_PRIMARIES_BT709:
|
|
+ param.data.colour_description.colour_primaries = 1;
|
|
+ break;
|
|
+ case MVX_FW_PRIMARIES_BT470M:
|
|
+ param.data.colour_description.colour_primaries = 4;
|
|
+ break;
|
|
+ case MVX_FW_PRIMARIES_BT601_625:
|
|
+ param.data.colour_description.colour_primaries = 5;
|
|
+ break;
|
|
+ case MVX_FW_PRIMARIES_BT601_525:
|
|
+ param.data.colour_description.colour_primaries = 6;
|
|
+ break;
|
|
+ case MVX_FW_PRIMARIES_GENERIC_FILM:
|
|
+ param.data.colour_description.colour_primaries = 8;
|
|
+ break;
|
|
+ case MVX_FW_PRIMARIES_BT2020:
|
|
+ param.data.colour_description.colour_primaries = 9;
|
|
+ break;
|
|
+ default:
|
|
+ param.data.colour_description.colour_primaries = 2;
|
|
+ break;
|
|
+ }
|
|
+ /* Transfer characteristics according to HEVC E.3.1. */
|
|
+ switch (msg->set_option.colour_desc.transfer) {
|
|
+ case MVX_FW_TRANSFER_BT1361:
|
|
+ param.data.colour_description.transfer_characteristics = 12;
|
|
+ break;
|
|
+ case MVX_FW_TRANSFER_GAMMA22:
|
|
+ param.data.colour_description.transfer_characteristics = 4;
|
|
+ break;
|
|
+ case MVX_FW_TRANSFER_GAMMA28:
|
|
+ param.data.colour_description.transfer_characteristics = 5;
|
|
+ break;
|
|
+ case MVX_FW_TRANSFER_SMPTE170M:
|
|
+ param.data.colour_description.transfer_characteristics = 6;
|
|
+ break;
|
|
+ case MVX_FW_TRANSFER_SMPTE240M:
|
|
+ param.data.colour_description.transfer_characteristics = 7;
|
|
+ break;
|
|
+ case MVX_FW_TRANSFER_LINEAR:
|
|
+ param.data.colour_description.transfer_characteristics = 8;
|
|
+ break;
|
|
+ case MVX_FW_TRANSFER_HLG:
|
|
+ param.data.colour_description.transfer_characteristics = 18;
|
|
+ break;
|
|
+ case MVX_FW_TRANSFER_XVYCC:
|
|
+ param.data.colour_description.transfer_characteristics = 11;
|
|
+ break;
|
|
+ case MVX_FW_TRANSFER_SRGB:
|
|
+ param.data.colour_description.transfer_characteristics = 13;
|
|
+ break;
|
|
+ case MVX_FW_TRANSFER_ST2084:
|
|
+ param.data.colour_description.transfer_characteristics = 16;
|
|
+ break;
|
|
+ case MVX_FW_TRANSFER_ST428:
|
|
+ param.data.colour_description.transfer_characteristics = 17;
|
|
+ break;
|
|
+ default:
|
|
+ param.data.colour_description.transfer_characteristics = 2;
|
|
+ break;
|
|
+ }
|
|
+ /* Matrix coefficient according to HEVC E.3.1. */
|
|
+ switch (msg->set_option.colour_desc.matrix) {
|
|
+ case MVX_FW_MATRIX_BT709:
|
|
+ param.data.colour_description.matrix_coeff = 1;
|
|
+ break;
|
|
+ case MVX_FW_MATRIX_BT470M:
|
|
+ param.data.colour_description.matrix_coeff = 4;
|
|
+ break;
|
|
+ case MVX_FW_MATRIX_BT601:
|
|
+ param.data.colour_description.matrix_coeff = 6;
|
|
+ break;
|
|
+ case MVX_FW_MATRIX_SMPTE240M:
|
|
+ param.data.colour_description.matrix_coeff = 7;
|
|
+ break;
|
|
+ case MVX_FW_MATRIX_BT2020:
|
|
+ param.data.colour_description.matrix_coeff = 9;
|
|
+ break;
|
|
+ case MVX_FW_MATRIX_BT2020Constant:
|
|
+ param.data.colour_description.matrix_coeff = 10;
|
|
+ break;
|
|
+ default:
|
|
+ param.data.colour_description.matrix_coeff = 2;
|
|
+ break;
|
|
+ }
|
|
+ param.data.colour_description.sar_height = msg->set_option.colour_desc.sar_height;
|
|
+ param.data.colour_description.sar_width = msg->set_option.colour_desc.sar_width;
|
|
+ if (msg->set_option.colour_desc.aspect_ratio_idc != 0) {
|
|
+ param.data.colour_description.aspect_ratio_idc = msg->set_option.colour_desc.aspect_ratio_idc;
|
|
+ param.data.colour_description.aspect_ratio_info_present_flag = 1;
|
|
+ }
|
|
+ if (msg->set_option.colour_desc.video_format != 0) {
|
|
+ param.data.colour_description.video_format = msg->set_option.colour_desc.video_format;
|
|
+ param.data.colour_description.video_format_present_flag = 1;
|
|
+ }
|
|
+ if (msg->set_option.colour_desc.time_scale != 0 || msg->set_option.colour_desc.num_units_in_tick != 0) {
|
|
+ param.data.colour_description.time_scale = msg->set_option.colour_desc.time_scale;
|
|
+ param.data.colour_description.num_units_in_tick = msg->set_option.colour_desc.num_units_in_tick;
|
|
+ param.data.colour_description.timing_flag_info_present_flag = 1;
|
|
+ }
|
|
+ if (msg->set_option.colour_desc.flags & MVX_FW_COLOR_DESC_CONTENT_VALID) {
|
|
+ param.data.colour_description.avg_content_light_level =
|
|
+ msg->set_option.colour_desc.content.luminance_average;
|
|
+ param.data.colour_description.max_content_light_level =
|
|
+ msg->set_option.colour_desc.content.luminance_max;
|
|
+ }
|
|
+ if (msg->set_option.colour_desc.flags & MVX_FW_COLOR_DESC_DISPLAY_VALID) {
|
|
+ param.data.colour_description.mastering_display_primaries_x[0] =
|
|
+ msg->set_option.colour_desc.display.r.x;
|
|
+ param.data.colour_description.mastering_display_primaries_x[1] =
|
|
+ msg->set_option.colour_desc.display.g.x;
|
|
+ param.data.colour_description.mastering_display_primaries_x[2] =
|
|
+ msg->set_option.colour_desc.display.b.x;
|
|
+ param.data.colour_description.mastering_display_primaries_y[0] =
|
|
+ msg->set_option.colour_desc.display.r.y;
|
|
+ param.data.colour_description.mastering_display_primaries_y[1] =
|
|
+ msg->set_option.colour_desc.display.g.y;
|
|
+ param.data.colour_description.mastering_display_primaries_y[2] =
|
|
+ msg->set_option.colour_desc.display.b.y;
|
|
+ param.data.colour_description.mastering_white_point_x =
|
|
+ msg->set_option.colour_desc.display.w.x;
|
|
+ param.data.colour_description.mastering_white_point_y =
|
|
+ msg->set_option.colour_desc.display.w.y;
|
|
+ param.data.colour_description.max_display_mastering_luminance =
|
|
+ msg->set_option.colour_desc.display.luminance_min;
|
|
+ param.data.colour_description.min_display_mastering_luminance =
|
|
+ msg->set_option.colour_desc.display.luminance_max;
|
|
+ }
|
|
+
|
|
+ ret = put_fw_buf_param(fw, ¶m,
|
|
+ sizeof(param.data.colour_description));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_SEI_USERDATA: {
|
|
+ struct mve_buffer_param param;
|
|
+ param.type = MVE_BUFFER_PARAM_TYPE_SEI_USER_DATA_UNREGISTERED;
|
|
+ param.data.user_data_unregistered.user_data_len = msg->set_option.userdata.user_data_len;
|
|
+ param.data.user_data_unregistered.flags = msg->set_option.userdata.flags;
|
|
+ memcpy(¶m.data.user_data_unregistered.uuid, &msg->set_option.userdata.uuid,
|
|
+ sizeof(param.data.user_data_unregistered.uuid));
|
|
+ memcpy(¶m.data.user_data_unregistered.user_data, &msg->set_option.userdata.user_data,
|
|
+ sizeof(param.data.user_data_unregistered.user_data));
|
|
+ ret = put_fw_buf_param(fw, ¶m,
|
|
+ sizeof(param.data.user_data_unregistered));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_NALU_FORMAT: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_NALU_FORMAT;
|
|
+ ret = to_mve_nalu_format(msg->set_option.nalu_format,
|
|
+ &opt.data.arg);
|
|
+
|
|
+ if (ret == 0)
|
|
+ ret = put_fw_opt(fw, &opt,
|
|
+ sizeof(opt.data.arg));
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_STREAM_ESCAPING: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_STREAM_ESCAPING;
|
|
+ opt.data.arg = msg->set_option.stream_escaping ? 1 : 0;
|
|
+
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_PROFILE_LEVEL: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_PROFILE_LEVEL;
|
|
+ ret = fw->ops_priv.to_mve_profile(
|
|
+ msg->set_option.profile_level.profile,
|
|
+ &opt.data.profile_level.profile);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = fw->ops_priv.to_mve_level(
|
|
+ msg->set_option.profile_level.level,
|
|
+ &opt.data.profile_level.level);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = put_fw_opt(
|
|
+ fw, &opt,
|
|
+ sizeof(opt.data.profile_level));
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_IGNORE_STREAM_HEADERS: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_IGNORE_STREAM_HEADERS;
|
|
+ opt.data.arg =
|
|
+ msg->set_option.ignore_stream_headers ? 1 : 0;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_FRAME_REORDERING: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_FRAME_REORDERING;
|
|
+ opt.data.arg = msg->set_option.frame_reordering ? 1 : 0;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_INTBUF_SIZE: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_INTBUF_SIZE;
|
|
+ opt.data.arg = msg->set_option.intbuf_size;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_P_FRAMES: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_P_FRAMES;
|
|
+ opt.data.arg = msg->set_option.pb_frames;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_B_FRAMES: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_B_FRAMES;
|
|
+ opt.data.arg = msg->set_option.pb_frames;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_GOP_TYPE: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_GOP_TYPE;
|
|
+ ret = to_mve_gop_type(msg->set_option.gop_type,
|
|
+ &opt.data.arg);
|
|
+ if (ret == 0)
|
|
+ ret = put_fw_opt(fw, &opt,
|
|
+ sizeof(opt.data.arg));
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_INTRA_MB_REFRESH: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_INTRA_MB_REFRESH;
|
|
+ opt.data.arg = msg->set_option.intra_mb_refresh;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_CONSTR_IPRED: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_CONSTR_IPRED;
|
|
+ opt.data.arg = msg->set_option.constr_ipred ? 1 : 0;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_ENTROPY_SYNC: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_ENTROPY_SYNC;
|
|
+ opt.data.arg = msg->set_option.entropy_sync ? 1 : 0;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_TEMPORAL_MVP: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_TEMPORAL_MVP;
|
|
+ opt.data.arg = msg->set_option.temporal_mvp ? 1 : 0;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_TILES: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_TILES;
|
|
+ opt.data.tiles.tile_rows = msg->set_option.tile.rows;
|
|
+ opt.data.tiles.tile_cols = msg->set_option.tile.cols;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.tiles));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_MIN_LUMA_CB_SIZE: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_MIN_LUMA_CB_SIZE;
|
|
+ opt.data.arg = msg->set_option.min_luma_cb_size;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_QP_RANGE: {
|
|
+ struct mve_buffer_param param;
|
|
+
|
|
+ param.type =
|
|
+ MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_QP_RANGE;
|
|
+ param.data.rate_control_qp_range.qp_min =
|
|
+ msg->set_option.qp_range.min;
|
|
+ param.data.rate_control_qp_range.qp_max =
|
|
+ msg->set_option.qp_range.max;
|
|
+ ret = put_fw_buf_param(
|
|
+ fw, ¶m,
|
|
+ sizeof(param.data.rate_control_qp_range));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_ENTROPY_MODE: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_H264_CABAC;
|
|
+ ret = to_mve_h264_cabac(msg->set_option.entropy_mode,
|
|
+ &opt.data.arg);
|
|
+ if (ret == 0)
|
|
+ ret = put_fw_opt(fw, &opt,
|
|
+ sizeof(opt.data.arg));
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_SLICE_SPACING_MB: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_SLICE_SPACING;
|
|
+ opt.data.arg = msg->set_option.slice_spacing_mb;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_VP9_PROB_UPDATE: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_VP9_PROB_UPDATE;
|
|
+ ret = to_mve_vp9_prob_update(
|
|
+ msg->set_option.vp9_prob_update,
|
|
+ &opt.data.arg);
|
|
+ if (ret == 0)
|
|
+ ret = put_fw_opt(fw, &opt,
|
|
+ sizeof(opt.data.arg));
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_MV_SEARCH_RANGE: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_MV_SEARCH_RANGE;
|
|
+ opt.data.motion_vector_search_range.mv_search_range_x =
|
|
+ msg->set_option.mv.x;
|
|
+ opt.data.motion_vector_search_range.mv_search_range_y =
|
|
+ msg->set_option.mv.y;
|
|
+ ret = put_fw_opt(
|
|
+ fw, &opt,
|
|
+ sizeof(opt.data.motion_vector_search_range));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_BITDEPTH: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_STREAM_BITDEPTH;
|
|
+ opt.data.bitdepth.luma_bitdepth =
|
|
+ msg->set_option.bitdepth.luma;
|
|
+ opt.data.bitdepth.chroma_bitdepth =
|
|
+ msg->set_option.bitdepth.chroma;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.bitdepth));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_CHROMA_FORMAT: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_STREAM_CHROMA_FORMAT;
|
|
+ opt.data.arg = msg->set_option.chroma_format;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_RGB_TO_YUV_MODE: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_RGB_TO_YUV_MODE;
|
|
+ ret = to_mve_rgb_to_yuv(msg->set_option.rgb_to_yuv_mode,
|
|
+ &opt.data.arg);
|
|
+ if (ret == 0)
|
|
+ ret = put_fw_opt(fw, &opt,
|
|
+ sizeof(opt.data.arg));
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_BAND_LIMIT: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_BANDWIDTH_LIMIT;
|
|
+ opt.data.arg = msg->set_option.band_limit;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_CABAC_INIT_IDC: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_CABAC_INIT_IDC;
|
|
+ opt.data.arg = msg->set_option.cabac_init_idc;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_QP_I: {
|
|
+ struct mve_buffer_param param;
|
|
+
|
|
+ param.type = MVE_BUFFER_PARAM_TYPE_QP_I;
|
|
+ param.data.qp.qp = msg->set_option.qp;
|
|
+ ret = put_fw_buf_param(fw, ¶m,
|
|
+ sizeof(param.data.qp));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_QP_P: {
|
|
+ struct mve_buffer_param param;
|
|
+
|
|
+ param.type = MVE_BUFFER_PARAM_TYPE_QP_P;
|
|
+ param.data.qp.qp = msg->set_option.qp;
|
|
+ ret = put_fw_buf_param(fw, ¶m,
|
|
+ sizeof(param.data.qp));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_QP_B: {
|
|
+ struct mve_buffer_param param;
|
|
+
|
|
+ param.type = MVE_BUFFER_PARAM_TYPE_QP_B;
|
|
+ param.data.qp.qp = msg->set_option.qp;
|
|
+ ret = put_fw_buf_param(fw, ¶m,
|
|
+ sizeof(param.data.qp));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_GOP_RESET:{
|
|
+ struct mve_buffer_param param;
|
|
+ param.type = MVE_BUFFER_PARAM_TYPE_GOP_RESET;
|
|
+ ret = put_fw_buf_param(fw, ¶m, sizeof(param.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_RESYNC_INTERVAL: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_RESYNC_INTERVAL;
|
|
+ opt.data.arg = msg->set_option.resync_interval;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_QUANT_TABLE: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_QUANT_TABLE;
|
|
+
|
|
+ opt.data.quant_table.type = MVE_OPT_QUANT_TABLE_LUMA;
|
|
+ memcpy(opt.data.quant_table.matrix,
|
|
+ msg->set_option.quant_tbl.luma,
|
|
+ sizeof(opt.data.quant_table.matrix));
|
|
+ ret = put_fw_opt(fw, &opt,
|
|
+ sizeof(opt.data.quant_table));
|
|
+ if (ret != 0)
|
|
+ break;
|
|
+
|
|
+ opt.data.quant_table.type = MVE_OPT_QUANT_TABLE_CHROMA;
|
|
+ memcpy(opt.data.quant_table.matrix,
|
|
+ msg->set_option.quant_tbl.chroma,
|
|
+ sizeof(opt.data.quant_table.matrix));
|
|
+ ret = put_fw_opt(fw, &opt,
|
|
+ sizeof(opt.data.quant_table));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_WATCHDOG_TIMEOUT: {
|
|
+ struct mve_request_set_option opt;
|
|
+
|
|
+ opt.index = MVE_SET_OPT_INDEX_WATCHDOG_TIMEOUT;
|
|
+ opt.data.arg = msg->set_option.watchdog_timeout;
|
|
+
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_ROI_REGIONS: {
|
|
+ struct mve_buffer_param param;
|
|
+ int i = 0;
|
|
+ param.type = MVE_BUFFER_PARAM_TYPE_REGIONS;
|
|
+ param.data.regions.n_regions = msg->set_option.roi_config.num_roi;
|
|
+ for (;i < msg->set_option.roi_config.num_roi; i++) {
|
|
+ param.data.regions.region[i].mbx_left = msg->set_option.roi_config.roi[i].mbx_left;
|
|
+ param.data.regions.region[i].mbx_right = msg->set_option.roi_config.roi[i].mbx_right;
|
|
+ param.data.regions.region[i].mby_top = msg->set_option.roi_config.roi[i].mby_top;
|
|
+ param.data.regions.region[i].mby_bottom = msg->set_option.roi_config.roi[i].mby_bottom;
|
|
+ param.data.regions.region[i].qp_delta = msg->set_option.roi_config.roi[i].qp_delta;
|
|
+ }
|
|
+ ret = put_fw_buf_param(fw, ¶m,
|
|
+ sizeof(param.data.regions));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_QP_REGION: {
|
|
+ struct mve_buffer_param param;
|
|
+
|
|
+ param.type = MVE_BUFFER_PARAM_TYPE_QP;
|
|
+ param.data.qp.qp = msg->set_option.qp;
|
|
+ ret = put_fw_buf_param(fw, ¶m,
|
|
+ sizeof(param.data.qp));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_DSL_FRAME: {
|
|
+ struct mve_request_set_option opt;
|
|
+ opt.index = MVE_SET_OPT_INDEX_DEC_DOWNSCALE;
|
|
+ opt.data.downscaled_frame.width = msg->set_option.dsl_frame.width;
|
|
+ opt.data.downscaled_frame.height = msg->set_option.dsl_frame.height;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.index) + sizeof(opt.data.downscaled_frame));
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_LONG_TERM_REF: {
|
|
+ struct mve_request_set_option opt;
|
|
+ if (msg->set_option.ltr.mode >= 1 && msg->set_option.ltr.mode <= 8) {
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_LTR_MODE;
|
|
+ opt.data.arg = msg->set_option.ltr.mode;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ }
|
|
+ if (msg->set_option.ltr.period >= 2 && msg->set_option.ltr.period <= 254) {
|
|
+ opt.index = MVE_SET_OPT_INDEX_ENC_LTR_PERIOD;
|
|
+ opt.data.arg = msg->set_option.ltr.period;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg));
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_SET_DSL_MODE: {
|
|
+ struct mve_request_set_option opt;
|
|
+ opt.index = MVE_SET_OPT_INDEX_DEC_DOWNSCALE_POS_MODE;
|
|
+ opt.data.dsl_pos.mode = msg->set_option.dsl_pos_mode;
|
|
+ ret = put_fw_opt(fw, &opt, sizeof(opt.index) + sizeof(opt.data.dsl_pos));
|
|
+ break;
|
|
+ }
|
|
+ default:
|
|
+ ret = -EINVAL;
|
|
+ }
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_FLUSH: {
|
|
+ switch (msg->flush.dir) {
|
|
+ case MVX_DIR_INPUT:
|
|
+ ret = write_message(fw, fw->msg_host, fw->msg_mve,
|
|
+ MVE_REQUEST_CODE_INPUT_FLUSH, NULL,
|
|
+ 0, MVX_LOG_FWIF_CHANNEL_MESSAGE, fw->buf_attr[MVX_FW_REGION_MSG_MVE], fw->buf_attr[MVX_FW_REGION_MSG_HOST]);
|
|
+ break;
|
|
+ case MVX_DIR_OUTPUT:
|
|
+ ret = write_message(fw, fw->msg_host, fw->msg_mve,
|
|
+ MVE_REQUEST_CODE_OUTPUT_FLUSH, NULL,
|
|
+ 0, MVX_LOG_FWIF_CHANNEL_MESSAGE, fw->buf_attr[MVX_FW_REGION_MSG_MVE], fw->buf_attr[MVX_FW_REGION_MSG_HOST]);
|
|
+ break;
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Invalid flush direction. dir=%d.",
|
|
+ msg->flush.dir);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ret == 0)
|
|
+ fw->msg_pending++;
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_BUFFER: {
|
|
+ struct mve_comm_area_host *host;
|
|
+ struct mve_comm_area_mve *mve;
|
|
+ enum mvx_log_fwif_channel channel;
|
|
+ enum mvx_fw_buffer_attr mve_buf_attr;
|
|
+ enum mvx_fw_buffer_attr host_buf_attr;
|
|
+
|
|
+ if (msg->buf->dir == MVX_DIR_INPUT) {
|
|
+ host = fw->buf_in_host;
|
|
+ mve = fw->buf_in_mve;
|
|
+ mve_buf_attr = fw->buf_attr[MVX_FW_REGION_BUF_IN_MVE];
|
|
+ host_buf_attr = fw->buf_attr[MVX_FW_REGION_BUF_IN_HOST];
|
|
+ channel = MVX_LOG_FWIF_CHANNEL_INPUT_BUFFER;
|
|
+ } else {
|
|
+ host = fw->buf_out_host;
|
|
+ mve = fw->buf_out_mve;
|
|
+ mve_buf_attr = fw->buf_attr[MVX_FW_REGION_BUF_OUT_MVE];
|
|
+ host_buf_attr = fw->buf_attr[MVX_FW_REGION_BUF_OUT_HOST];
|
|
+ channel = MVX_LOG_FWIF_CHANNEL_OUTPUT_BUFFER;
|
|
+ }
|
|
+
|
|
+ if (mvx_is_frame(msg->buf->format))
|
|
+ if ((msg->buf->flags & MVX_BUFFER_FRAME_FLAG_GENERAL) == MVX_BUFFER_FRAME_FLAG_GENERAL) {
|
|
+ ret = put_buffer_general(fw, host, mve, msg, channel, mve_buf_attr, host_buf_attr);
|
|
+ } else {
|
|
+ ret = put_buffer_frame(fw, host, mve, msg, channel, mve_buf_attr, host_buf_attr);
|
|
+ }
|
|
+ else
|
|
+ ret = put_buffer_bitstream(fw, host, mve, msg, channel, mve_buf_attr, host_buf_attr);
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_IDLE_ACK: {
|
|
+ if (fw->ops_priv.send_idle_ack != NULL)
|
|
+ ret = fw->ops_priv.send_idle_ack(fw);
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_EOS: {
|
|
+ struct mve_comm_area_host *host;
|
|
+ struct mve_comm_area_mve *mve;
|
|
+ enum mvx_log_fwif_channel channel;
|
|
+
|
|
+ /* The message is on the MVX_DIR_INPUT side. */
|
|
+ host = fw->buf_in_host;
|
|
+ mve = fw->buf_in_mve;
|
|
+ channel = MVX_LOG_FWIF_CHANNEL_INPUT_BUFFER;
|
|
+
|
|
+ if (msg->eos_is_frame != false) {
|
|
+ struct mve_buffer_frame f = {
|
|
+ .host_handle = MVX_FW_CODE_EOS,
|
|
+ .frame_flags = MVE_BUFFER_FRAME_FLAG_EOS,
|
|
+ .format = MVE_FORMAT_YUV420_NV12
|
|
+ };
|
|
+
|
|
+ ret = write_message(fw, host, mve,
|
|
+ MVE_BUFFER_CODE_FRAME,
|
|
+ &f, sizeof(f), channel, fw->buf_attr[MVX_FW_REGION_BUF_IN_MVE], fw->buf_attr[MVX_FW_REGION_BUF_IN_HOST]);
|
|
+ } else {
|
|
+ struct mve_buffer_bitstream b = {
|
|
+ .host_handle = MVX_FW_CODE_EOS,
|
|
+ .bitstream_buf_addr =
|
|
+ MVE_MEM_REGION_PROTECTED_ADDR_BEGIN,
|
|
+ .bitstream_flags =
|
|
+ MVE_BUFFER_BITSTREAM_FLAG_EOS
|
|
+ };
|
|
+
|
|
+ ret = write_message(fw, host, mve,
|
|
+ MVE_BUFFER_CODE_BITSTREAM, &b,
|
|
+ sizeof(b), channel, fw->buf_attr[MVX_FW_REGION_BUF_IN_MVE], fw->buf_attr[MVX_FW_REGION_BUF_IN_HOST]);
|
|
+ }
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_DUMP: {
|
|
+ ret = write_message(fw, fw->msg_host, fw->msg_mve,
|
|
+ MVE_REQUEST_CODE_DUMP, NULL,
|
|
+ 0, MVX_LOG_FWIF_CHANNEL_MESSAGE, fw->buf_attr[MVX_FW_REGION_MSG_MVE], fw->buf_attr[MVX_FW_REGION_MSG_HOST]);
|
|
+ fw->msg_pending++;
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_DEBUG: {
|
|
+ ret = write_message(fw, fw->msg_host, fw->msg_mve,
|
|
+ MVE_REQUEST_CODE_DEBUG, &msg->arg,
|
|
+ sizeof(msg->arg), MVX_LOG_FWIF_CHANNEL_MESSAGE, fw->buf_attr[MVX_FW_REGION_MSG_MVE], fw->buf_attr[MVX_FW_REGION_MSG_HOST]);
|
|
+ fw->msg_pending++;
|
|
+ break;
|
|
+ }
|
|
+ default: {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ret != 0)
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Firmware put message failed. ret=%d.", ret);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * find_pages() - Find a pages allocation in the RPC memory map.
|
|
+ * @fw: Pointer to firmware object.
|
|
+ * @va: MVE virtual address.
|
|
+ *
|
|
+ * Return: Pointer to pages, NULL if not found.
|
|
+ */
|
|
+static struct mvx_mmu_pages *find_pages(struct mvx_fw *fw,
|
|
+ mvx_mmu_va va)
|
|
+{
|
|
+ struct mvx_mmu_pages *pages;
|
|
+
|
|
+ hash_for_each_possible(fw->rpc_mem, pages, node, va) {
|
|
+ if (pages->va == va)
|
|
+ return pages;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
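+/*
+ * rpc_mem_alloc() - Handle an RPC memory allocation request. Pages are
+ * allocated, mapped into the requested firmware region and the resulting
+ * MVE virtual address (or 0 on failure) is returned in data[0].
+ */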
+static void rpc_mem_alloc(struct mvx_fw *fw,
|
|
+ struct mve_rpc_communication_area *rpc_area)
|
|
+{
|
|
+ union mve_rpc_params *p = &rpc_area->params;
|
|
+ enum mvx_fw_region region;
|
|
+ struct mvx_mmu_pages *pages;
|
|
+ size_t npages;
|
|
+ size_t max_pages;
|
|
+ mvx_mmu_va va = 0;
|
|
+ mvx_mmu_va va0,va_next;
|
|
+ mvx_mmu_va end;
|
|
+ int ret;
|
|
+ uint8_t log2_alignment;
|
|
+ uint32_t alignment_pages;
|
|
+ uint32_t alignment_bytes;
|
|
+ uint32_t total_used_pages = 0;
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ ret = mutex_lock_interruptible(&fw->rpcmem_mutex);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR,
|
|
+ "Cannot protect RPC alloc list.");
|
|
+ goto out;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ switch (p->mem_alloc.region) {
|
|
+ case MVE_MEM_REGION_PROTECTED:
|
|
+ region = MVX_FW_REGION_PROTECTED;
|
|
+ total_used_pages = fw->latest_used_region_protected_pages;
|
|
+ break;
|
|
+ case MVE_MEM_REGION_OUTBUF:
|
|
+ region = MVX_FW_REGION_FRAMEBUF;
|
|
+ total_used_pages = fw->latest_used_region_outbuf_pages;
|
|
+ break;
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Unsupported RPC mem alloc region. region=%u.",
|
|
+ p->mem_alloc.region);
|
|
+ goto unlock_mutex;
|
|
+ }
|
|
+
|
|
+ ret = fw->ops.get_region(region, &va, &end);
|
|
+ if (ret != 0)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ va0 = va;
|
|
+
|
|
+ npages = DIV_ROUND_UP(p->mem_alloc.size, MVE_PAGE_SIZE);
|
|
+ max_pages = DIV_ROUND_UP(p->mem_alloc.max_size, MVE_PAGE_SIZE);
|
|
+
|
|
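+ /* Secure sessions back the allocation with a secure DMA buffer,
+ * non-secure sessions with ordinary MMU pages. */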
+ if (fw->fw_bin->securevideo != false) {
|
|
+ struct dma_buf *dmabuf;
|
|
+
|
|
+ dmabuf = mvx_secure_mem_alloc(fw->fw_bin->secure.secure,
|
|
+ p->mem_alloc.size);
|
|
+ if (IS_ERR(dmabuf))
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ pages = mvx_mmu_alloc_pages_dma_buf(fw->dev, dmabuf, max_pages);
|
|
+ if (IS_ERR(pages)) {
|
|
+ dma_buf_put(dmabuf);
|
|
+ goto unlock_mutex;
|
|
+ }
|
|
+ } else {
|
|
+ pages = mvx_mmu_alloc_pages(fw->dev, npages, max_pages);
|
|
+ if (IS_ERR(pages))
|
|
+ goto unlock_mutex;
|
|
+ }
|
|
+
|
|
+ va += (total_used_pages << MVE_PAGE_SHIFT);
|
|
+ log2_alignment = p->mem_alloc.log2_alignment <= MVE_PAGE_SHIFT ? MVE_PAGE_SHIFT : p->mem_alloc.log2_alignment;
|
|
+ alignment_bytes = 1 << log2_alignment;
|
|
+ alignment_pages = alignment_bytes >> MVE_PAGE_SHIFT;
|
|
+ ret = -EINVAL;
|
|
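+ /*
+ * Search the region upwards from the pages already in use. The candidate
+ * address is rounded up to the requested alignment and advanced by one
+ * alignment step after every failed mapping attempt.
+ */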
+ while (va < end) {
|
|
+ va = (va + alignment_bytes - 1) & ~(alignment_bytes - 1);
|
|
+ ret = mvx_mmu_map_pages(fw->mmu, va, pages, MVX_ATTR_SHARED_RW,
|
|
+ MVX_ACCESS_READ_WRITE);
|
|
+ if (ret == 0){
|
|
+ va_next = va + MVE_PAGE_SIZE * pages->capacity;
|
|
+ total_used_pages = (va_next - va0) >> MVE_PAGE_SHIFT;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ //va += 16 * 1024 * 1024; /* 16MB */
|
|
+ va += alignment_bytes;
|
|
+ }
|
|
+
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to find memory region for RPC alloc.");
|
|
+ mvx_mmu_free_pages(pages);
|
|
+ va = 0;
|
|
+ goto unlock_mutex;
|
|
+ }
|
|
+
|
|
+ switch (p->mem_alloc.region) {
|
|
+ case MVE_MEM_REGION_PROTECTED:
|
|
+ fw->latest_used_region_protected_pages = total_used_pages;
|
|
+ break;
|
|
+ case MVE_MEM_REGION_OUTBUF:
|
|
+ fw->latest_used_region_outbuf_pages = total_used_pages;
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ hash_add(fw->rpc_mem, &pages->node, pages->va);
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "RPC alloc memory. size=%u, max_size=%u, region=%u, npages=%zu, va=0x%x.",
|
|
+ p->mem_alloc.size, p->mem_alloc.max_size,
|
|
+ p->mem_alloc.region, npages, va);
|
|
+
|
|
+unlock_mutex:
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ mutex_unlock(&fw->rpcmem_mutex);
|
|
+
|
|
+out:
|
|
+ rpc_area->size = sizeof(uint32_t);
|
|
+ p->data[0] = va;
|
|
+}
|
|
+
|
|
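+/*
+ * rpc_mem_resize() - Handle an RPC request to grow a previous allocation.
+ * The virtual address of the region (or 0 on failure) is returned in data[0].
+ */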
+static void rpc_mem_resize(struct mvx_fw *fw,
|
|
+ struct mve_rpc_communication_area *rpc_area)
|
|
+{
|
|
+ union mve_rpc_params *p = &rpc_area->params;
|
|
+ struct mvx_mmu_pages *pages;
|
|
+ mvx_mmu_va va = 0;
|
|
+ int ret;
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ ret = mutex_lock_interruptible(&fw->rpcmem_mutex);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR,
|
|
+ "Cannot protect RPC alloc list.");
|
|
+ goto out;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ pages = find_pages(fw, p->mem_resize.ve_pointer);
|
|
+ if (pages != 0) {
|
|
+ size_t size;
|
|
+ size_t npages;
|
|
+ int ret;
|
|
+
|
|
+ if (fw->fw_bin->securevideo != false) {
|
|
+ size = mvx_mmu_size_pages(pages);
|
|
+
|
|
+ /* The size of RPC memory is only ever increased; a smaller
+ * new size leaves the allocation unchanged. */
|
|
+ if (size < p->mem_resize.new_size) {
|
|
+ struct dma_buf *dmabuf;
|
|
+
|
|
+ size = p->mem_resize.new_size - size;
|
|
+
|
|
+ /* Allocate a new secure DMA buffer. */
|
|
+ dmabuf = mvx_secure_mem_alloc(
|
|
+ fw->fw_bin->secure.secure, size);
|
|
+ if (IS_ERR(dmabuf))
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ ret = mvx_mmu_pages_append_dma_buf(
|
|
+ pages, dmabuf);
|
|
+ if (ret != 0) {
|
|
+ dma_buf_put(dmabuf);
|
|
+ goto unlock_mutex;
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ /* Resize the allocated pages. */
|
|
+ npages = DIV_ROUND_UP(p->mem_resize.new_size,
|
|
+ MVE_PAGE_SIZE);
|
|
+ ret = mvx_mmu_resize_pages(pages, npages);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to resize RPC mapped pages. ret=%d.",
|
|
+ ret);
|
|
+ goto unlock_mutex;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ va = pages->va;
|
|
+ } else {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Could not find pages for RPC resize. va=0x%x.",
|
|
+ p->mem_resize.ve_pointer);
|
|
+ }
|
|
+
|
|
+ fw->client_ops->flush_mmu(fw->csession);
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "RPC resize memory. va=0x%x, new_size=%u.",
|
|
+ p->mem_resize.ve_pointer, p->mem_resize.new_size);
|
|
+
|
|
+unlock_mutex:
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ mutex_unlock(&fw->rpcmem_mutex);
|
|
+
|
|
+out:
|
|
+ rpc_area->size = sizeof(uint32_t);
|
|
+ p->data[0] = va;
|
|
+}
|
|
+
|
|
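+/*
+ * rpc_mem_free() - Handle an RPC request to release an allocation. The pages
+ * are removed from the lookup table, the per-region usage counters are
+ * reduced and the pages are freed.
+ */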
+static void rpc_mem_free(struct mvx_fw *fw,
|
|
+ struct mve_rpc_communication_area *rpc_area)
|
|
+{
|
|
+ union mve_rpc_params *p = &rpc_area->params;
|
|
+ struct mvx_mmu_pages *pages;
|
|
+ int ret;
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ ret = mutex_lock_interruptible(&fw->rpcmem_mutex);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR,
|
|
+ "Cannot protect RPC alloc list.");
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ pages = find_pages(fw, p->mem_free.ve_pointer);
|
|
+ if (pages != NULL) {
|
|
+ hash_del(&pages->node);
|
|
+ if(MVE_MEM_REGION_PROTECTED_ADDR_BEGIN <= p->mem_free.ve_pointer && p->mem_free.ve_pointer < MVE_MEM_REGION_PROTECTED_ADDR_END){
|
|
+ fw->latest_used_region_protected_pages = fw->latest_used_region_protected_pages > pages->capacity ? fw->latest_used_region_protected_pages - pages->capacity : 0;
|
|
+ } else if(MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN <= p->mem_free.ve_pointer && p->mem_free.ve_pointer < MVE_MEM_REGION_FRAMEBUF_ADDR_END){
|
|
+ fw->latest_used_region_outbuf_pages = fw->latest_used_region_outbuf_pages > pages->capacity ? fw->latest_used_region_outbuf_pages - pages->capacity : 0;
|
|
+ }
|
|
+ mvx_mmu_free_pages(pages);
|
|
+ } else {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Could not find pages for RPC free. va=0x%x.",
|
|
+ p->mem_free.ve_pointer);
|
|
+ }
|
|
+
|
|
+ fw->client_ops->flush_mmu(fw->csession);
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "RPC free memory. va=0x%x.", p->mem_free.ve_pointer);
|
|
+
|
|
+ rpc_area->size = 0;
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ mutex_unlock(&fw->rpcmem_mutex);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * rstrip() - Remove trailing chars from string.
|
|
+ * @str: String to be stripped.
|
|
+ * @trim: String containing the characters to strip.
|
|
+ *
|
|
+ * Return: Pointer to stripped string.
|
|
+ */
|
|
+static char *rstrip(char *str,
|
|
+ char *trim)
|
|
+{
|
|
+ size_t l = strlen(str);
|
|
+
|
|
+ while (l-- > 0) {
|
|
+ char *t;
|
|
+
|
|
+ for (t = trim; *t != '\0'; t++)
|
|
+ if (str[l] == *t) {
|
|
+ str[l] = '\0';
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (*t == '\0')
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return str;
|
|
+}
|
|
+
|
|
+static int handle_rpc_v2(struct mvx_fw *fw)
|
|
+{
|
|
+ struct mve_rpc_communication_area *rpc_area = fw->rpc;
|
|
+ int ret = 0;
|
|
+
|
|
+ if (fw->buf_attr[MVX_FW_REGION_RPC] == MVX_FW_BUF_CACHEABLE) {
|
|
+ dma_sync_single_for_cpu(fw->dev,
|
|
+ phys_cpu2vpu((virt_to_phys(rpc_area))), sizeof(*rpc_area),
|
|
+ DMA_FROM_DEVICE);
|
|
+ }
|
|
+
|
|
+ if (rpc_area->state == MVE_RPC_STATE_PARAM) {
|
|
+ ret = 1;
|
|
+
|
|
+ /* Log RPC request. */
|
|
+ MVX_LOG_EXECUTE(&mvx_log_fwif_if, MVX_LOG_INFO,
|
|
+ log_rpc(fw->session,
|
|
+ MVX_LOG_FWIF_DIRECTION_FIRMWARE_TO_HOST,
|
|
+ rpc_area));
|
|
+
|
|
+ switch (rpc_area->call_id) {
|
|
+ case MVE_RPC_FUNCTION_DEBUG_PRINTF: {
|
|
+ MVX_LOG_PRINT(
|
|
+ &mvx_log_if, MVX_LOG_INFO,
|
|
+ "RPC_PRINT=%s",
|
|
+ rstrip(rpc_area->params.debug_print.string,
|
|
+ "\n\r"));
|
|
+ break;
|
|
+ }
|
|
+ case MVE_RPC_FUNCTION_MEM_ALLOC: {
|
|
+ rpc_mem_alloc(fw, rpc_area);
|
|
+ break;
|
|
+ }
|
|
+ case MVE_RPC_FUNCTION_MEM_RESIZE: {
|
|
+ rpc_mem_resize(fw, rpc_area);
|
|
+ break;
|
|
+ }
|
|
+ case MVE_RPC_FUNCTION_MEM_FREE: {
|
|
+ rpc_mem_free(fw, rpc_area);
|
|
+ break;
|
|
+ }
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "Unsupported RPC request. call_id=%u.",
|
|
+ rpc_area->call_id);
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Make sure the whole RPC message body has been written before
|
|
+ * the RPC message area is returned to the firmware.
|
|
+ */
|
|
+ wmb();
|
|
+ rpc_area->state = MVE_RPC_STATE_RETURN;
|
|
+
|
|
+ /* Make sure state is written before memory is flushed. */
|
|
+ if (fw->buf_attr[MVX_FW_REGION_RPC] == MVX_FW_BUF_CACHEABLE) {
|
|
+ wmb();
|
|
+ dma_sync_single_for_device(
|
|
+ fw->dev,
|
|
+ phys_cpu2vpu(virt_to_phys(rpc_area)), sizeof(*rpc_area),
|
|
+ DMA_TO_DEVICE);
|
|
+ }
|
|
+
|
|
+ /* Log RPC response. */
|
|
+ MVX_LOG_EXECUTE(&mvx_log_fwif_if, MVX_LOG_INFO,
|
|
+ log_rpc(fw->session,
|
|
+ MVX_LOG_FWIF_DIRECTION_HOST_TO_FIRMWARE,
|
|
+ rpc_area));
|
|
+
|
|
+ fw->client_ops->send_irq(fw->csession);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+#ifdef MVX_FW_DEBUG_ENABLE
|
|
+#define RAM_PRINTBUF_SIZE MVE_FW_PRINT_RAM_SIZE
|
|
+#define RAM_PRINT_MAX_LEN (128)
|
|
+#define RAM_PRINT_BUF_CNT ((RAM_PRINTBUF_SIZE / RAM_PRINT_MAX_LEN) - 1)
|
|
+#define RAM_PRINT_FLAG (0x11223356)
|
|
+static int handle_fw_ram_print_v2(struct mvx_fw *fw)
|
|
+{
|
|
+ struct mve_fw_ram_print_head_aera *rpt_area = fw->fw_print_ram;
|
|
+ int ret = 0;
|
|
+ uint32_t wr_cnt;
|
|
+ uint32_t rd_cnt = 0;
|
|
+ uint32_t cnt;
|
|
+ uint32_t rd_idx;
|
|
+ char *print_buf = NULL;
|
|
+
|
|
+ dma_sync_single_for_cpu(fw->dev,
|
|
+ phys_cpu2vpu(virt_to_phys(rpt_area)), sizeof(*rpt_area),
|
|
+ DMA_FROM_DEVICE);
|
|
+
|
|
+ wr_cnt = rpt_area->wr_cnt;
|
|
+ rd_cnt = rpt_area->rd_cnt;
|
|
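+ /* Entries written by the firmware but not yet read, allowing for
+ * wrap-around of the 32-bit counters. */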
+ cnt = (rd_cnt <= wr_cnt) ? wr_cnt - rd_cnt : wr_cnt - rd_cnt + (uint32_t)~0u;
|
|
+
|
|
+ if(RAM_PRINT_FLAG == rpt_area->flag && RAM_PRINT_BUF_CNT > rpt_area->index && cnt){
|
|
+ //printk("RPT:flag=%x, idx=%u, wr_cnt=%u, rd_cnt=%u.\n", rpt_area->flag, rpt_area->index, wr_cnt, rd_cnt);
|
|
+
|
|
+ while(cnt--){
|
|
+ rd_idx = rd_cnt % RAM_PRINT_BUF_CNT;
|
|
+ print_buf = (fw->fw_print_ram + RAM_PRINT_MAX_LEN ) + rd_idx * RAM_PRINT_MAX_LEN;
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, "FW-%u: %s\n", rd_cnt, print_buf);
|
|
+ rd_cnt++;
|
|
+ }
|
|
+
|
|
+ rpt_area->rd_cnt = rd_cnt;
|
|
+ /* Make sure rpt_area->rd_cnt is written before memory is flushed. */
|
|
+ wmb();
|
|
+ dma_sync_single_for_device(
|
|
+ fw->dev,
|
|
+ phys_cpu2vpu(virt_to_phys(&rpt_area->rd_cnt)), sizeof(rpt_area->rd_cnt),
|
|
+ DMA_TO_DEVICE);
|
|
+
|
|
+ ret = 1;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static void unmap_msq(struct mvx_fw *fw,
|
|
+ void **data,
|
|
+ enum mvx_fw_region region)
|
|
+{
|
|
+ int ret;
|
|
+ mvx_mmu_va begin;
|
|
+ mvx_mmu_va end;
|
|
+
|
|
+ if (*data == NULL)
|
|
+ return;
|
|
+
|
|
+ ret = fw->ops.get_region(region, &begin, &end);
|
|
+ if (ret == 0)
|
|
+ mvx_mmu_unmap_va(fw->mmu, begin, MVE_PAGE_SIZE);
|
|
+
|
|
+ mvx_mmu_free_page(fw->dev, phys_cpu2vpu(virt_to_phys(*data)));
|
|
+
|
|
+ *data = NULL;
|
|
+}
|
|
+
|
|
+static int map_msq(struct mvx_fw *fw,
|
|
+ void **data,
|
|
+ enum mvx_fw_region region)
|
|
+{
|
|
+ phys_addr_t page;
|
|
+ mvx_mmu_va begin;
|
|
+ mvx_mmu_va end;
|
|
+ int ret;
|
|
+
|
|
+ /* Get virtual address where the message queue is to be mapped. */
|
|
+ ret = fw->ops.get_region(region, &begin, &end);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ /* Allocate page and store Linux logical address in 'data'. */
|
|
+ page = mvx_mmu_alloc_page(fw->dev);
|
|
+ if (page == 0)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ /* Memory map region. */
|
|
+ ret = mvx_mmu_map_pa(fw->mmu, begin, page, MVE_PAGE_SIZE,
|
|
+ MVX_ATTR_SHARED_RW, MVX_ACCESS_READ_WRITE);
|
|
+ if (ret != 0) {
|
|
+ mvx_mmu_free_page(fw->dev, page);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ *data = phys_to_virt(phys_vpu2cpu(page));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
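+/*
+ * map_msq_uncache() - Same as map_msq(), but backed by coherent (uncached)
+ * memory so the region needs no explicit cache maintenance.
+ */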
+static int map_msq_uncache(struct mvx_fw *fw,
|
|
+ void **data,
|
|
+ enum mvx_fw_region region)
|
|
+{
|
|
+ phys_addr_t page;
|
|
+ mvx_mmu_va begin;
|
|
+ mvx_mmu_va end;
|
|
+ void* vir_addr = NULL;
|
|
+ int ret;
|
|
+
|
|
+ /* Get virtual address where the message queue is to be mapped. */
|
|
+ ret = fw->ops.get_region(region, &begin, &end);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ /* Allocate page and store Linux logical address in 'data'. */
|
|
+ page = mvx_mmu_dma_alloc_coherent(fw->dev, &vir_addr);
|
|
+ if (page == 0) {
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ /* Memory map region. */
|
|
+ ret = mvx_mmu_map_pa(fw->mmu, begin, page, MVE_PAGE_SIZE,
|
|
+ MVX_ATTR_SHARED_RW, MVX_ACCESS_READ_WRITE);
|
|
+ if (ret != 0) {
|
|
+ mvx_mmu_dma_free_coherent(fw->dev, page, vir_addr);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ *data = vir_addr;
|
|
+ fw->buf_pa_addr[region] = page;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void unmap_msq_uncache(struct mvx_fw *fw,
|
|
+ void **data,
|
|
+ enum mvx_fw_region region)
|
|
+{
|
|
+ int ret;
|
|
+ mvx_mmu_va begin;
|
|
+ mvx_mmu_va end;
|
|
+
|
|
+ if (*data == NULL)
|
|
+ return;
|
|
+
|
|
+ ret = fw->ops.get_region(region, &begin, &end);
|
|
+ if (ret == 0) {
|
|
+ mvx_mmu_unmap_va(fw->mmu, begin, MVE_PAGE_SIZE);
|
|
+ }
|
|
+
|
|
+ mvx_mmu_dma_free_coherent(fw->dev, fw->buf_pa_addr[region], *data);
|
|
+
|
|
+ *data = NULL;
|
|
+ fw->buf_pa_addr[region] = 0;
|
|
+}
|
|
+
|
|
+#ifdef MVX_FW_DEBUG_ENABLE
|
|
+static void unmap_fw_print_ram(struct mvx_fw *fw,
|
|
+ void **data,
|
|
+ enum mvx_fw_region region)
|
|
+{
|
|
+ int ret;
|
|
+ mvx_mmu_va begin;
|
|
+ mvx_mmu_va end;
|
|
+
|
|
+ if (*data == NULL)
|
|
+ return;
|
|
+
|
|
+ ret = fw->ops.get_region(region, &begin, &end);
|
|
+ if (ret == 0)
|
|
+ mvx_mmu_unmap_va(fw->mmu, begin, MVE_FW_PRINT_RAM_SIZE);
|
|
+
|
|
+ mvx_mmu_free_contiguous_pages(fw->dev, phys_cpu2vpu(virt_to_phys(*data)), MVE_FW_PRINT_RAM_SIZE >> PAGE_SHIFT);
|
|
+
|
|
+ *data = NULL;
|
|
+}
|
|
+
|
|
+static int map_fw_print_ram(struct mvx_fw *fw,
|
|
+ void **data,
|
|
+ enum mvx_fw_region region)
|
|
+{
|
|
+ phys_addr_t page;
|
|
+ mvx_mmu_va begin;
|
|
+ mvx_mmu_va end;
|
|
+ int ret;
|
|
+
|
|
+ /* Get virtual address where the firmware print RAM is to be mapped. */
|
|
+ ret = fw->ops.get_region(region, &begin, &end);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ /* Allocate pages and store Linux logical address in 'data'. */
|
|
+ page = mvx_mmu_alloc_contiguous_pages(fw->dev, MVE_FW_PRINT_RAM_SIZE >> PAGE_SHIFT);
|
|
+ if (page == 0)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ /* Memory map region. */
|
|
+ ret = mvx_mmu_map_pa(fw->mmu, begin, page, MVE_FW_PRINT_RAM_SIZE,
|
|
+ MVX_ATTR_SHARED_RW, MVX_ACCESS_READ_WRITE);
|
|
+ if (ret != 0) {
|
|
+ mvx_mmu_free_contiguous_pages(fw->dev, page, MVE_FW_PRINT_RAM_SIZE >> PAGE_SHIFT);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ *data = phys_to_virt(phys_vpu2cpu(page));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static void unmap_protocol_v2(struct mvx_fw *fw)
|
|
+{
|
|
+ struct mvx_mmu_pages *pages;
|
|
+ struct hlist_node *tmp;
|
|
+ int bkt;
|
|
+
|
|
+ fw->unmap_op[fw->buf_attr[MVX_FW_REGION_MSG_HOST]](fw, &fw->msg_host, MVX_FW_REGION_MSG_HOST);
|
|
+ fw->unmap_op[fw->buf_attr[MVX_FW_REGION_MSG_MVE]](fw, &fw->msg_mve, MVX_FW_REGION_MSG_MVE);
|
|
+ fw->unmap_op[fw->buf_attr[MVX_FW_REGION_BUF_IN_HOST]](fw, &fw->buf_in_host, MVX_FW_REGION_BUF_IN_HOST);
|
|
+ fw->unmap_op[fw->buf_attr[MVX_FW_REGION_BUF_IN_MVE]](fw, &fw->buf_in_mve, MVX_FW_REGION_BUF_IN_MVE);
|
|
+ fw->unmap_op[fw->buf_attr[MVX_FW_REGION_BUF_OUT_HOST]](fw, &fw->buf_out_host, MVX_FW_REGION_BUF_OUT_HOST);
|
|
+ fw->unmap_op[fw->buf_attr[MVX_FW_REGION_BUF_OUT_MVE]](fw, &fw->buf_out_mve, MVX_FW_REGION_BUF_OUT_MVE);
|
|
+ fw->unmap_op[fw->buf_attr[MVX_FW_REGION_RPC]](fw, &fw->rpc, MVX_FW_REGION_RPC);
|
|
+
|
|
+#ifdef MVX_FW_DEBUG_ENABLE
|
|
+ unmap_fw_print_ram(fw, &fw->fw_print_ram, MVX_FW_REGION_PRINT_RAM);
|
|
+#endif
|
|
+ fw->latest_used_region_protected_pages = 0;
|
|
+ fw->latest_used_region_outbuf_pages = 0;
|
|
+
|
|
+ hash_for_each_safe(fw->rpc_mem, bkt, tmp, pages, node) {
|
|
+ hash_del(&pages->node);
|
|
+ mvx_mmu_free_pages(pages);
|
|
+ }
|
|
+}
|
|
+
|
|
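+/*
+ * map_protocol_v2() - Map every host interface region using the map operation
+ * selected by its cacheability attribute. On failure all regions are unmapped.
+ */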
+static int map_protocol_v2(struct mvx_fw *fw)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = fw->map_op[fw->buf_attr[MVX_FW_REGION_MSG_HOST]](fw, &fw->msg_host, MVX_FW_REGION_MSG_HOST);
|
|
+ if (ret != 0)
|
|
+ goto unmap_fw;
|
|
+
|
|
+ ret = fw->map_op[fw->buf_attr[MVX_FW_REGION_MSG_MVE]](fw, &fw->msg_mve, MVX_FW_REGION_MSG_MVE);
|
|
+ if (ret != 0)
|
|
+ goto unmap_fw;
|
|
+
|
|
+ ret = fw->map_op[fw->buf_attr[MVX_FW_REGION_BUF_IN_HOST]](fw, &fw->buf_in_host, MVX_FW_REGION_BUF_IN_HOST);
|
|
+ if (ret != 0)
|
|
+ goto unmap_fw;
|
|
+
|
|
+ ret = fw->map_op[fw->buf_attr[MVX_FW_REGION_BUF_IN_MVE]](fw, &fw->buf_in_mve, MVX_FW_REGION_BUF_IN_MVE);
|
|
+ if (ret != 0)
|
|
+ goto unmap_fw;
|
|
+
|
|
+ ret = fw->map_op[fw->buf_attr[MVX_FW_REGION_BUF_OUT_HOST]](fw, &fw->buf_out_host, MVX_FW_REGION_BUF_OUT_HOST);
|
|
+ if (ret != 0)
|
|
+ goto unmap_fw;
|
|
+
|
|
+ ret = fw->map_op[fw->buf_attr[MVX_FW_REGION_BUF_OUT_MVE]](fw, &fw->buf_out_mve, MVX_FW_REGION_BUF_OUT_MVE);
|
|
+ if (ret != 0)
|
|
+ goto unmap_fw;
|
|
+
|
|
+ ret = fw->map_op[fw->buf_attr[MVX_FW_REGION_RPC]](fw, &fw->rpc, MVX_FW_REGION_RPC);
|
|
+ if (ret != 0)
|
|
+ goto unmap_fw;
|
|
+
|
|
+#ifdef MVX_FW_DEBUG_ENABLE
|
|
+ ret = map_fw_print_ram(fw, &fw->fw_print_ram, MVX_FW_REGION_PRINT_RAM);
|
|
+ if (ret != 0)
|
|
+ goto unmap_fw;
|
|
+#endif
|
|
+
|
|
+ return 0;
|
|
+
|
|
+unmap_fw:
|
|
+ unmap_protocol_v2(fw);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void print_pair(char *name_in,
|
|
+ char *name_out,
|
|
+ struct device *device,
|
|
+ struct mve_comm_area_host *host,
|
|
+ struct mve_comm_area_mve *mve,
|
|
+ int ind,
|
|
+ struct seq_file *s,
|
|
+ enum mvx_fw_buffer_attr mve_buf_attr)
|
|
+{
|
|
+ if (mve_buf_attr == MVX_FW_BUF_CACHEABLE) {
|
|
+ dma_sync_single_for_cpu(device, phys_cpu2vpu(virt_to_phys(mve)),
|
|
+ MVE_PAGE_SIZE, DMA_FROM_DEVICE);
|
|
+ }
|
|
+ mvx_seq_printf(s, name_in, ind, "wr=%10d, rd=%10d, avail=%10d\n",
|
|
+ host->in_wpos, mve->in_rpos,
|
|
+ (uint16_t)(host->in_wpos - mve->in_rpos));
|
|
+ mvx_seq_printf(s, name_out, ind, "wr=%10d, rd=%10d, avail=%10d\n",
|
|
+ mve->out_wpos, host->out_rpos,
|
|
+ (uint16_t)(mve->out_wpos - host->out_rpos));
|
|
+}
|
|
+
|
|
+static int print_stat_v2(struct mvx_fw *fw,
|
|
+ int ind,
|
|
+ struct seq_file *s)
|
|
+{
|
|
+ print_pair("Msg host->mve", "Msg host<-mve",
|
|
+ fw->dev, fw->msg_host, fw->msg_mve,
|
|
+ ind, s, fw->buf_attr[MVX_FW_REGION_MSG_MVE]);
|
|
+ print_pair("Inbuf host->mve", "Inbuf host<-mve",
|
|
+ fw->dev, fw->buf_in_host, fw->buf_in_mve,
|
|
+ ind, s, fw->buf_attr[MVX_FW_REGION_BUF_IN_MVE]);
|
|
+ print_pair("Outbuf host->mve", "Outbuf host<-mve",
|
|
+ fw->dev, fw->buf_out_host, fw->buf_out_mve,
|
|
+ ind, s, fw->buf_attr[MVX_FW_REGION_BUF_OUT_MVE]);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
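+/*
+ * get_capacity() - Number of bytes available between a read and a write
+ * position in a circular communication queue.
+ */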
+static ssize_t get_capacity(int rpos,
|
|
+ int wpos)
|
|
+{
|
|
+ ssize_t capacity;
|
|
+
|
|
+ capacity = wpos - rpos;
|
|
+ if (capacity < 0)
|
|
+ capacity += MVE_COMM_QUEUE_SIZE_IN_WORDS;
|
|
+
|
|
+ return capacity * sizeof(uint32_t);
|
|
+}
|
|
+
|
|
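+/*
+ * print_debug_v2() - Dump the message queue positions, the RPC area and any
+ * messages still pending in the firmware-to-host queue.
+ */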
+static void print_debug_v2(struct mvx_fw *fw)
|
|
+{
|
|
+ struct mve_comm_area_host *msg_host = fw->msg_host;
|
|
+ struct mve_comm_area_mve *msg_mve = fw->msg_mve;
|
|
+ unsigned int rpos, wpos;
|
|
+ ssize_t capacity;
|
|
+ struct mve_msg_header header;
|
|
+ struct mve_rpc_communication_area *rpc_area = fw->rpc;
|
|
+ union mve_rpc_params *p = &rpc_area->params;
|
|
+
|
|
+ if (fw->buf_attr[MVX_FW_REGION_MSG_MVE] == MVX_FW_BUF_CACHEABLE) {
|
|
+ dma_sync_single_for_cpu(fw->dev, phys_cpu2vpu(virt_to_phys(msg_mve)),
|
|
+ MVE_PAGE_SIZE, DMA_FROM_DEVICE);
|
|
+ }
|
|
+
|
|
+ MVX_LOG_PRINT_SESSION(&mvx_log_session_if, MVX_LOG_WARNING, fw->session,
|
|
+ "Dump message queue. msg={host={out_rpos=%u, in_wpos=%u}, mve={out_wpos=%u, in_rpos=%u}}",
|
|
+ msg_host->out_rpos, msg_host->in_wpos,
|
|
+ msg_mve->out_wpos, msg_mve->in_rpos);
|
|
+
|
|
+ MVX_LOG_PRINT_SESSION(&mvx_log_session_if, MVX_LOG_WARNING, fw->session,
|
|
+ "Dump fw rpc. state=%u, call_id=%u, size=%u, data=%u, debug_print=%s",
|
|
+ rpc_area->state, rpc_area->call_id, rpc_area->size, p->data[0], p->debug_print.string);
|
|
+
|
|
+ rpos = msg_host->out_rpos;
|
|
+ wpos = msg_mve->out_wpos;
|
|
+
|
|
+ while ((capacity = get_capacity(rpos, wpos)) >= sizeof(header)) {
|
|
+ unsigned int pos;
|
|
+
|
|
+ pos = read32n(msg_mve->out_data, rpos, (uint32_t *)&header,
|
|
+ sizeof(header));
|
|
+
|
|
+ MVX_LOG_PRINT_SESSION(&mvx_log_session_if, MVX_LOG_WARNING,
|
|
+ fw->session,
|
|
+ "queue={rpos=%u, wpos=%u, capacity=%u}, msg={code=%u, size=%u}",
|
|
+ rpos, wpos, capacity,
|
|
+ header.code, header.size);
|
|
+
|
|
+ capacity = get_capacity(pos, wpos);
|
|
+ if (header.size > capacity) {
|
|
+ MVX_LOG_PRINT_SESSION(
|
|
+ &mvx_log_session_if, MVX_LOG_WARNING,
|
|
+ fw->session,
|
|
+ "Size is larger than capacity. capacity=%zd, size=%u.",
|
|
+ capacity, header.size);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ rpos = (pos + DIV_ROUND_UP(header.size, sizeof(uint32_t))) %
|
|
+ MVE_COMM_QUEUE_SIZE_IN_WORDS;
|
|
+ }
|
|
+}
|
|
+
|
|
+int mvx_fw_send_idle_ack_v2(struct mvx_fw *fw)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ ret = write_message(fw, fw->msg_host, fw->msg_mve,
|
|
+ MVE_REQUEST_CODE_IDLE_ACK,
|
|
+ NULL, 0,
|
|
+ MVX_LOG_FWIF_CHANNEL_MESSAGE, fw->buf_attr[MVX_FW_REGION_MSG_MVE], fw->buf_attr[MVX_FW_REGION_MSG_HOST]);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_fw_construct_v2(struct mvx_fw *fw,
|
|
+ struct mvx_fw_bin *fw_bin,
|
|
+ struct mvx_mmu *mmu,
|
|
+ struct mvx_session *session,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ struct mvx_client_session *csession,
|
|
+ unsigned int ncores,
|
|
+ unsigned char major,
|
|
+ unsigned char minor)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_fw_construct(fw, fw_bin, mmu, session, client_ops, csession,
|
|
+ ncores);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ fw->ops.map_protocol = map_protocol_v2;
|
|
+ fw->ops.unmap_protocol = unmap_protocol_v2;
|
|
+ fw->ops.get_region = get_region_v2;
|
|
+ fw->ops.get_message = get_message_v2;
|
|
+ fw->ops.put_message = put_message_v2;
|
|
+ fw->ops.handle_rpc = handle_rpc_v2;
|
|
+#ifdef MVX_FW_DEBUG_ENABLE
|
|
+ fw->ops.handle_fw_ram_print = handle_fw_ram_print_v2;
|
|
+#endif
|
|
+ fw->ops.print_stat = print_stat_v2;
|
|
+ fw->ops.print_debug = print_debug_v2;
|
|
+ fw->ops_priv.send_idle_ack = NULL;
|
|
+ fw->ops_priv.to_mve_profile = mvx_fw_to_mve_profile_v2;
|
|
+ fw->ops_priv.to_mve_level = mvx_fw_to_mve_level_v2;
|
|
+
|
|
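+ /* Cacheability per shared region; cacheable regions are synchronized
+ * explicitly with the DMA streaming API before and after access. */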
+ fw->buf_attr[MVX_FW_REGION_MSG_HOST] = MVX_FW_BUF_CACHEABLE;
|
|
+ fw->buf_attr[MVX_FW_REGION_MSG_MVE] = MVX_FW_BUF_UNCACHEABLE;
|
|
+ fw->buf_attr[MVX_FW_REGION_BUF_IN_HOST] = MVX_FW_BUF_CACHEABLE;
|
|
+ fw->buf_attr[MVX_FW_REGION_BUF_IN_MVE] = MVX_FW_BUF_CACHEABLE;
|
|
+ fw->buf_attr[MVX_FW_REGION_BUF_OUT_HOST] = MVX_FW_BUF_CACHEABLE;
|
|
+ fw->buf_attr[MVX_FW_REGION_BUF_OUT_MVE] = MVX_FW_BUF_CACHEABLE;
|
|
+ fw->buf_attr[MVX_FW_REGION_RPC] = MVX_FW_BUF_CACHEABLE;
|
|
+
|
|
+ fw->map_op[MVX_FW_BUF_CACHEABLE] = map_msq;
|
|
+ fw->map_op[MVX_FW_BUF_UNCACHEABLE] = map_msq_uncache;
|
|
+ fw->unmap_op[MVX_FW_BUF_CACHEABLE] = unmap_msq;
|
|
+ fw->unmap_op[MVX_FW_BUF_UNCACHEABLE] = unmap_msq_uncache;
|
|
+
|
|
+ if (major == 2 && minor >= 4)
|
|
+ fw->ops_priv.send_idle_ack = mvx_fw_send_idle_ack_v2;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_v3.c b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_v3.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_firmware_v3.c
|
|
@@ -0,0 +1,171 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include "mvx_firmware_priv.h"
|
|
+#include "fw_v3/mve_protocol_def.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+static int get_region_v3(enum mvx_fw_region region,
|
|
+ uint32_t *begin,
|
|
+ uint32_t *end)
|
|
+{
|
|
+ switch (region) {
|
|
+ case MVX_FW_REGION_CORE_0:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE0_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE0_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_1:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE1_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_2:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE2_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_3:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE3_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_4:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE4_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_5:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE5_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_6:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE6_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_CORE_7:
|
|
+ *begin = MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FW_INSTANCE7_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_PROTECTED:
|
|
+ *begin = MVE_MEM_REGION_PROTECTED_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_PROTECTED_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_FRAMEBUF:
|
|
+ *begin = MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN;
|
|
+ *end = MVE_MEM_REGION_FRAMEBUF_ADDR_END;
|
|
+ break;
|
|
+ case MVX_FW_REGION_MSG_HOST:
|
|
+ *begin = MVE_COMM_MSG_INQ_ADDR;
|
|
+ *end = MVE_COMM_MSG_INQ_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_MSG_MVE:
|
|
+ *begin = MVE_COMM_MSG_OUTQ_ADDR;
|
|
+ *end = MVE_COMM_MSG_OUTQ_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_BUF_IN_HOST:
|
|
+ *begin = MVE_COMM_BUF_INQ_ADDR;
|
|
+ *end = MVE_COMM_BUF_INQ_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_BUF_IN_MVE:
|
|
+ *begin = MVE_COMM_BUF_INRQ_ADDR;
|
|
+ *end = MVE_COMM_BUF_INRQ_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_BUF_OUT_HOST:
|
|
+ *begin = MVE_COMM_BUF_OUTQ_ADDR;
|
|
+ *end = MVE_COMM_BUF_OUTQ_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_BUF_OUT_MVE:
|
|
+ *begin = MVE_COMM_BUF_OUTRQ_ADDR;
|
|
+ *end = MVE_COMM_BUF_OUTRQ_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_RPC:
|
|
+ *begin = MVE_COMM_RPC_ADDR;
|
|
+ *end = MVE_COMM_RPC_ADDR + MVE_PAGE_SIZE;
|
|
+ break;
|
|
+ case MVX_FW_REGION_PRINT_RAM:
|
|
+ *begin = MVE_FW_PRINT_RAM_ADDR;
|
|
+ *end = MVE_FW_PRINT_RAM_ADDR + MVE_FW_PRINT_RAM_SIZE;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int to_mve_profile_v3(unsigned int mvx_profile,
|
|
+ uint16_t *mve_profile)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ switch (mvx_profile) {
|
|
+ case MVX_PROFILE_H264_HIGH_10:
|
|
+ *mve_profile = MVE_OPT_PROFILE_H264_HIGH_10;
|
|
+ break;
|
|
+ default:
|
|
+ ret = mvx_fw_to_mve_profile_v2(mvx_profile, mve_profile);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_fw_construct_v3(struct mvx_fw *fw,
|
|
+ struct mvx_fw_bin *fw_bin,
|
|
+ struct mvx_mmu *mmu,
|
|
+ struct mvx_session *session,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ struct mvx_client_session *csession,
|
|
+ unsigned int ncores,
|
|
+ unsigned char major,
|
|
+ unsigned char minor)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_fw_construct_v2(fw, fw_bin, mmu, session, client_ops,
|
|
+ csession, ncores, major, minor);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ fw->ops.get_region = get_region_v3;
|
|
+ fw->ops_priv.to_mve_profile = to_mve_profile_v3;
|
|
+
|
|
+ if (major == 3 && minor >= 1)
|
|
+ fw->ops_priv.send_idle_ack = mvx_fw_send_idle_ack_v2;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_if.c b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_if.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_if.c
|
|
@@ -0,0 +1,239 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <linux/export.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/printk.h>
|
|
+#include <linux/of_device.h>
|
|
+#include "mvx_ext_if.h"
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx_firmware.h"
|
|
+#include "mvx_firmware_cache.h"
|
|
+#include "mvx_secure.h"
|
|
+#include "mvx_session.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * struct mvx_if_ctx - Device context.
|
|
+ *
|
|
+ * There is one instance of this structure for each device.
|
|
+ */
|
|
+struct mvx_if_ctx {
|
|
+ struct device *dev;
|
|
+ struct mvx_ext_if ext;
|
|
+ struct mvx_fw_cache firmware;
|
|
+ struct mvx_client_ops *client_ops;
|
|
+ struct mvx_if_ops if_ops;
|
|
+ struct mvx_secure secure;
|
|
+ struct kobject kobj;
|
|
+ struct completion kobj_unregister;
|
|
+ struct dentry *dentry;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static variables and functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/* Physical hardware can handle 40 physical bits. */
|
|
+static uint64_t mvx_if_dma_mask = DMA_BIT_MASK(40);
|
|
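+
+/*
+ * For reference (derived from the kernel's DMA_BIT_MASK definition, not from
+ * this patch): DMA_BIT_MASK(40) expands to ((1ULL << 40) - 1) = 0xffffffffff,
+ * matching the 40-bit MVE_PA_BITS limit used by the MMU code in this patch.
+ */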
+
|
|
+static struct mvx_if_ctx *if_ops_to_if_ctx(struct mvx_if_ops *ops)
|
|
+{
|
|
+ return container_of(ops, struct mvx_if_ctx, if_ops);
|
|
+}
|
|
+
|
|
+static void if_release(struct kobject *kobj)
|
|
+{
|
|
+ struct mvx_if_ctx *ctx = container_of(kobj, struct mvx_if_ctx, kobj);
|
|
+
|
|
+ complete(&ctx->kobj_unregister);
|
|
+}
|
|
+
|
|
+static struct kobj_type if_ktype = {
|
|
+ .release = if_release,
|
|
+ .sysfs_ops = &kobj_sysfs_ops
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported variables and functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct mvx_if_ops *mvx_if_create(struct device *dev,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ void *priv)
|
|
+{
|
|
+ struct mvx_if_ctx *ctx;
|
|
+ int ret;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, "probe");
|
|
+
|
|
+ dev->dma_mask = &mvx_if_dma_mask;
|
|
+ dev->coherent_dma_mask = mvx_if_dma_mask;
|
|
+
|
|
+ /*
|
|
+ * This parameter is indirectly used by DMA-API to limit a lookup
|
|
+ * through a hash table with allocated DMA regions. If the value is
|
|
+ * not high enough, a lookup will be terminated too early and a false
|
|
+ * negative warning will be generated for every DMA operation.
|
|
+ *
|
|
+ * To prevent this behavior vb2-dma-contig allocator keeps this value
|
|
+ * set to the maximum requested buffer size. Unfortunately this is not
|
|
+ * done for vb2-dma-sg which we are using, so we have to implement the
|
|
+ * same logic.
|
|
+ *
|
|
+ * In this change the value is permanently set to 2 GB, but in the next
+ * commit functionality similar to vb2-dma-contig will be added.
|
|
+ *
|
|
+ * Mentioned structure also has one more member: segment_boundary_mask.
|
|
+ * It has to be investigated if any value should be assigned to it.
|
|
+ *
|
|
+ * See the following kernel commit for the reference:
|
|
+ * 3f03396918962b2f8b888d02b23cd1e0c88bf5e5
|
|
+ */
|
|
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
|
|
+ if (dev->dma_parms == NULL)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ dma_set_max_seg_size(dev, SZ_2G);
|
|
+
|
|
+#if (KERNEL_VERSION(4, 1, 0) <= LINUX_VERSION_CODE) && IS_ENABLED(CONFIG_OF)
|
|
+ of_dma_configure(dev, dev->of_node, true);
|
|
+#endif
|
|
+
|
|
+ /* Create device context and store pointer in device private data. */
|
|
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
|
|
+ if (ctx == NULL) {
|
|
+ ret = -ENOMEM;
|
|
+ goto free_dma_parms;
|
|
+ }
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ char name[20];
|
|
+
|
|
+ scnprintf(name, sizeof(name), "%s%u", MVX_IF_NAME, dev->id);
|
|
+ ctx->dentry = debugfs_create_dir(name, NULL);
|
|
+ if (IS_ERR_OR_NULL(ctx->dentry)) {
|
|
+ ret = -EINVAL;
|
|
+ goto free_ctx;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Store context in device private data. */
|
|
+ ctx->dev = dev;
|
|
+ ctx->client_ops = client_ops;
|
|
+
|
|
+ /* Initialize if ops. */
|
|
+ ctx->if_ops.irq = mvx_session_irq;
|
|
+
|
|
+ init_completion(&ctx->kobj_unregister);
|
|
+
|
|
+ /* Create sysfs entry for the device */
|
|
+ ret = kobject_init_and_add(&ctx->kobj, &if_ktype,
|
|
+ kernel_kobj, "amvx%u", dev->id);
|
|
+ if (ret != 0) {
|
|
+ kobject_put(&ctx->kobj);
|
|
+ goto remove_debugfs;
|
|
+ }
|
|
+
|
|
+ /* Initialize secure video. */
|
|
+ ret = mvx_secure_construct(&ctx->secure, dev);
|
|
+ if (ret != 0)
|
|
+ goto delete_kobject;
|
|
+
|
|
+ /* Initialize firmware cache. */
|
|
+ ret = mvx_fw_cache_construct(&ctx->firmware, dev, &ctx->secure,
|
|
+ &ctx->kobj);
|
|
+ if (ret != 0)
|
|
+ goto destroy_secure;
|
|
+
|
|
+ /* Create the external device interface. */
|
|
+ ret = mvx_ext_if_construct(&ctx->ext, dev, &ctx->firmware,
|
|
+ ctx->client_ops, ctx->dentry);
|
|
+ if (ret != 0)
|
|
+ goto destroy_fw_cache;
|
|
+
|
|
+ return &ctx->if_ops;
|
|
+
|
|
+destroy_fw_cache:
|
|
+ mvx_fw_cache_destruct(&ctx->firmware);
|
|
+
|
|
+destroy_secure:
|
|
+ mvx_secure_destruct(&ctx->secure);
|
|
+
|
|
+delete_kobject:
|
|
+ kobject_put(&ctx->kobj);
|
|
+
|
|
+remove_debugfs:
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ debugfs_remove_recursive(ctx->dentry);
|
|
+
|
|
+free_ctx:
|
|
+ devm_kfree(dev, ctx);
|
|
+
|
|
+free_dma_parms:
|
|
+ devm_kfree(dev, dev->dma_parms);
|
|
+
|
|
+ return ERR_PTR(ret);
|
|
+}
|
|
+
|
|
+void mvx_if_destroy(struct mvx_if_ops *if_ops)
|
|
+{
|
|
+ struct mvx_if_ctx *ctx = if_ops_to_if_ctx(if_ops);
|
|
+ struct device *dev = ctx->dev;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, "remove");
|
|
+
|
|
+ mvx_ext_if_destruct(&ctx->ext);
|
|
+ mvx_fw_cache_destruct(&ctx->firmware);
|
|
+ mvx_secure_destruct(&ctx->secure);
|
|
+ kobject_put(&ctx->kobj);
|
|
+ wait_for_completion(&ctx->kobj_unregister);
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ debugfs_remove_recursive(ctx->dentry);
|
|
+
|
|
+ devm_kfree(dev, dev->dma_parms);
|
|
+ devm_kfree(dev, ctx);
|
|
+
|
|
+ dev->dma_mask = NULL;
|
|
+ dev->coherent_dma_mask = 0;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_if.h b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_if.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_if.h
|
|
@@ -0,0 +1,471 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_IF_H_
|
|
+#define _MVX_IF_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/kref.h>
|
|
+#include <linux/list.h>
|
|
+#include "mvx_mmu.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * The name of the device driver.
|
|
+ */
|
|
+#define MVX_IF_NAME "amvx_if"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct device;
|
|
+struct mvx_client_ops;
|
|
+struct mvx_client_session;
|
|
+struct mvx_if_ctx;
|
|
+struct platform_device;
|
|
+
|
|
+/**
|
|
+ * enum mvx_direction - Direction from the point of view of the hardware block.
|
|
+ */
|
|
+enum mvx_direction {
|
|
+ MVX_DIR_INPUT,
|
|
+ MVX_DIR_OUTPUT,
|
|
+ MVX_DIR_MAX
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_tristate - Tristate boolean variable.
|
|
+ */
|
|
+enum mvx_tristate {
|
|
+ MVX_TRI_UNSET = -1,
|
|
+ MVX_TRI_TRUE = 0,
|
|
+ MVX_TRI_FALSE = 1
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_format - List of compressed formats and frame formats.
|
|
+ *
|
|
+ * Enumeration of formats that are supported by all know hardware revisions.
|
|
+ *
|
|
+ * The enumeration should start at 0 and should not contain any gaps.
|
|
+ */
|
|
+enum mvx_format {
|
|
+ /* Compressed formats. */
|
|
+ MVX_FORMAT_BITSTREAM_FIRST,
|
|
+ MVX_FORMAT_AVS = MVX_FORMAT_BITSTREAM_FIRST,
|
|
+ MVX_FORMAT_AVS2,
|
|
+ MVX_FORMAT_H263,
|
|
+ MVX_FORMAT_H264,
|
|
+ MVX_FORMAT_HEVC,
|
|
+ MVX_FORMAT_JPEG,
|
|
+ MVX_FORMAT_MPEG2,
|
|
+ MVX_FORMAT_MPEG4,
|
|
+ MVX_FORMAT_RV,
|
|
+ MVX_FORMAT_VC1,
|
|
+ MVX_FORMAT_VP8,
|
|
+ MVX_FORMAT_VP9,
|
|
+ MVX_FORMAT_BITSTREAM_LAST = MVX_FORMAT_VP9,
|
|
+
|
|
+ /* Uncompressed formats. */
|
|
+ MVX_FORMAT_FRAME_FIRST,
|
|
+ MVX_FORMAT_YUV420_AFBC_8 = MVX_FORMAT_FRAME_FIRST,
|
|
+ MVX_FORMAT_YUV420_AFBC_10,
|
|
+ MVX_FORMAT_YUV422_AFBC_8,
|
|
+ MVX_FORMAT_YUV422_AFBC_10,
|
|
+ MVX_FORMAT_YUV420_I420,
|
|
+ MVX_FORMAT_YUV420_NV12,
|
|
+ MVX_FORMAT_YUV420_NV21,
|
|
+ MVX_FORMAT_YUV420_P010,
|
|
+ MVX_FORMAT_YUV420_Y0L2,
|
|
+ MVX_FORMAT_YUV420_AQB1,
|
|
+ MVX_FORMAT_YUV422_YUY2,
|
|
+ MVX_FORMAT_YUV422_UYVY,
|
|
+ MVX_FORMAT_YUV422_Y210,
|
|
+ MVX_FORMAT_RGBA_8888,
|
|
+ MVX_FORMAT_BGRA_8888,
|
|
+ MVX_FORMAT_ARGB_8888,
|
|
+ MVX_FORMAT_ABGR_8888,
|
|
+ MVX_FORMAT_FRAME_LAST = MVX_FORMAT_ABGR_8888,
|
|
+
|
|
+ MVX_FORMAT_MAX
|
|
+};
|
|
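+
+/*
+ * A minimal sketch of why the "start at 0, no gaps" rule matters: the values
+ * can then double as bit indices into the uint64_t mask that get_formats()
+ * (declared further down in this header) fills in. The bitmask convention and
+ * handle_h264_input() are assumptions for illustration only:
+ *
+ *   uint64_t formats;
+ *
+ *   client->get_formats(client, MVX_DIR_INPUT, &formats);
+ *   if (formats & (1ull << MVX_FORMAT_H264))
+ *           handle_h264_input();
+ */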
+
|
|
+/**
|
|
+ * enum mvx_hw_id - Enumeration of known hardware revisions.
|
|
+ */
|
|
+enum mvx_hw_id {
|
|
+ MVE_Unknown = 0x0,
|
|
+ MVE_v500 = 0x500,
|
|
+ MVE_v550 = 0x550,
|
|
+ MVE_v61 = 0x61,
|
|
+ MVE_v52_v76 = 0x5276
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_hw_ver - Hardware version.
|
|
+ */
|
|
+struct mvx_hw_ver {
|
|
+ enum mvx_hw_id id;
|
|
+ uint32_t revision;
|
|
+ uint32_t patch;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_nalu_format - NALU format.
|
|
+ */
|
|
+enum mvx_nalu_format {
|
|
+ MVX_NALU_FORMAT_UNDEFINED,
|
|
+ MVX_NALU_FORMAT_START_CODES,
|
|
+ MVX_NALU_FORMAT_ONE_NALU_PER_BUFFER,
|
|
+ MVX_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD,
|
|
+ MVX_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD,
|
|
+ MVX_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_profile - Profile for encoder.
|
|
+ */
|
|
+enum mvx_profile {
|
|
+ MVX_PROFILE_NONE,
|
|
+
|
|
+ MVX_PROFILE_H264_BASELINE,
|
|
+ MVX_PROFILE_H264_MAIN,
|
|
+ MVX_PROFILE_H264_HIGH,
|
|
+ MVX_PROFILE_H264_HIGH_10,
|
|
+
|
|
+ MVX_PROFILE_H265_MAIN,
|
|
+ MVX_PROFILE_H265_MAIN_STILL,
|
|
+ MVX_PROFILE_H265_MAIN_INTRA,
|
|
+
|
|
+ MVX_PROFILE_VC1_SIMPLE,
|
|
+ MVX_PROFILE_VC1_MAIN,
|
|
+ MVX_PROFILE_VC1_ADVANCED,
|
|
+
|
|
+ MVX_PROFILE_VP8_MAIN
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_level - Level for encoder.
|
|
+ */
|
|
+enum mvx_level {
|
|
+ MVX_LEVEL_NONE,
|
|
+
|
|
+ MVX_LEVEL_H264_1,
|
|
+ MVX_LEVEL_H264_1b,
|
|
+ MVX_LEVEL_H264_11,
|
|
+ MVX_LEVEL_H264_12,
|
|
+ MVX_LEVEL_H264_13,
|
|
+ MVX_LEVEL_H264_2,
|
|
+ MVX_LEVEL_H264_21,
|
|
+ MVX_LEVEL_H264_22,
|
|
+ MVX_LEVEL_H264_3,
|
|
+ MVX_LEVEL_H264_31,
|
|
+ MVX_LEVEL_H264_32,
|
|
+ MVX_LEVEL_H264_4,
|
|
+ MVX_LEVEL_H264_41,
|
|
+ MVX_LEVEL_H264_42,
|
|
+ MVX_LEVEL_H264_5,
|
|
+ MVX_LEVEL_H264_51,
|
|
+ MVX_LEVEL_H264_52,
|
|
+ MVX_LEVEL_H264_6,
|
|
+ MVX_LEVEL_H264_61,
|
|
+ MVX_LEVEL_H264_62,
|
|
+
|
|
+ MVX_LEVEL_H265_MAIN_1,
|
|
+ MVX_LEVEL_H265_HIGH_1,
|
|
+ MVX_LEVEL_H265_MAIN_2,
|
|
+ MVX_LEVEL_H265_HIGH_2,
|
|
+ MVX_LEVEL_H265_MAIN_21,
|
|
+ MVX_LEVEL_H265_HIGH_21,
|
|
+ MVX_LEVEL_H265_MAIN_3,
|
|
+ MVX_LEVEL_H265_HIGH_3,
|
|
+ MVX_LEVEL_H265_MAIN_31,
|
|
+ MVX_LEVEL_H265_HIGH_31,
|
|
+ MVX_LEVEL_H265_MAIN_4,
|
|
+ MVX_LEVEL_H265_HIGH_4,
|
|
+ MVX_LEVEL_H265_MAIN_41,
|
|
+ MVX_LEVEL_H265_HIGH_41,
|
|
+ MVX_LEVEL_H265_MAIN_5,
|
|
+ MVX_LEVEL_H265_HIGH_5,
|
|
+ MVX_LEVEL_H265_MAIN_51,
|
|
+ MVX_LEVEL_H265_HIGH_51,
|
|
+ MVX_LEVEL_H265_MAIN_52,
|
|
+ MVX_LEVEL_H265_HIGH_52,
|
|
+ MVX_LEVEL_H265_MAIN_6,
|
|
+ MVX_LEVEL_H265_HIGH_6,
|
|
+ MVX_LEVEL_H265_MAIN_61,
|
|
+ MVX_LEVEL_H265_HIGH_61,
|
|
+ MVX_LEVEL_H265_MAIN_62,
|
|
+ MVX_LEVEL_H265_HIGH_62
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_gop_type - GOP type for encoder.
|
|
+ */
|
|
+enum mvx_gop_type {
|
|
+ MVX_GOP_TYPE_NONE,
|
|
+ MVX_GOP_TYPE_BIDIRECTIONAL,
|
|
+ MVX_GOP_TYPE_LOW_DELAY,
|
|
+ MVX_GOP_TYPE_PYRAMID
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_entropy_mode - Entropy mode for encoder.
|
|
+ */
|
|
+enum mvx_entropy_mode {
|
|
+ MVX_ENTROPY_MODE_NONE,
|
|
+ MVX_ENTROPY_MODE_CAVLC,
|
|
+ MVX_ENTROPY_MODE_CABAC
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_multi_slice_mode - Multi slice mode.
|
|
+ */
|
|
+enum mvx_multi_slice_mode {
|
|
+ MVX_MULTI_SLICE_MODE_SINGLE,
|
|
+ MVX_MULTI_SLICE_MODE_MAX_MB
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_vp9_prob_update - Probability update method.
|
|
+ */
|
|
+enum mvx_vp9_prob_update {
|
|
+ MVX_VP9_PROB_UPDATE_DISABLED,
|
|
+ MVX_VP9_PROB_UPDATE_IMPLICIT,
|
|
+ MVX_VP9_PROB_UPDATE_EXPLICIT
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_rgb_to_yuv_mode - RGB to YUV conversion mode.
|
|
+ */
|
|
+enum mvx_rgb_to_yuv_mode {
|
|
+ MVX_RGB_TO_YUV_MODE_BT601_STUDIO,
|
|
+ MVX_RGB_TO_YUV_MODE_BT601_FULL,
|
|
+ MVX_RGB_TO_YUV_MODE_BT709_STUDIO,
|
|
+ MVX_RGB_TO_YUV_MODE_BT709_FULL
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_if_session - Structure holding members needed to map a session to
|
|
+ * a hardware device.
|
|
+ * @kref: Reference counter for the session object.
|
|
+ * @release: Function pointer that shall be passed to kref_put. If the
|
|
+ * reference count reaches 0 this function will be called to
|
|
+ * destruct and deallocate the object.
|
|
+ * @ncores: Number of cores this session has been mapped to.
|
|
+ * @l0_pte: Level 0 page table entry. This value is written to the hardware
|
|
+ * MMU CTRL register to point out the location of the L1 page table
|
|
+ * and to set access permissions and bus attributes.
|
|
+ * @securevideo: Secure video enabled.
|
|
+ */
|
|
+struct mvx_if_session {
|
|
+ struct kref kref;
|
|
+ struct mutex *mutex;
|
|
+ void (*release)(struct kref *kref);
|
|
+ unsigned int ncores;
|
|
+ mvx_mmu_pte l0_pte;
|
|
+ bool securevideo;
|
|
+};
|
|
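+
+/*
+ * Minimal usage sketch of the reference counting described above (variable
+ * names are illustrative only): a holder takes a reference with kref_get()
+ * and drops it by passing @release to kref_put(), so the session is only
+ * destructed once the last reference is gone.
+ *
+ *   kref_get(&isession->kref);
+ *   ...
+ *   kref_put(&isession->kref, isession->release);
+ */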
+
|
|
+/**
|
|
+ * struct mvx_if_ops - Function pointers the registered device may use to call
|
|
+ * the if device.
|
|
+ */
|
|
+struct mvx_if_ops {
|
|
+ /**
|
|
+ * irq() - Handle IRQ sent from firmware to driver.
|
|
+ */
|
|
+ void (*irq)(struct mvx_if_session *session);
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_client_ops - Function pointers the if device may use to call
|
|
+ * the registered device.
|
|
+ */
|
|
+struct mvx_client_ops {
|
|
+ struct list_head list;
|
|
+
|
|
+ /**
|
|
+ * get_hw_ver() - Get MVE hardware version
|
|
+ */
|
|
+ void (*get_hw_ver)(struct mvx_client_ops *client,
|
|
+ struct mvx_hw_ver *version);
|
|
+
|
|
+ /**
|
|
+ * get_formats() - Get list of supported formats.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+ void (*get_formats)(struct mvx_client_ops *client,
|
|
+ enum mvx_direction direction,
|
|
+ uint64_t *formats);
|
|
+
|
|
+ /**
|
|
+ * get_ncores() - Get number of cores.
|
|
+ *
|
|
+ * Return: Number of cores on success, else error code.
|
|
+ */
|
|
+ unsigned int (*get_ncores)(struct mvx_client_ops *client);
|
|
+
|
|
+ /*
|
|
+ * SESSION.
|
|
+ */
|
|
+
|
|
+ /**
|
|
+ * register_session() - Register if session with client.
|
|
+ *
|
|
+ * Return: Client session handle on success, else ERR_PTR.
|
|
+ */
|
|
+ struct mvx_client_session
|
|
+ *(*register_session)(struct mvx_client_ops *client,
|
|
+ struct mvx_if_session *session);
|
|
+
|
|
+ /**
|
|
+ * unregister_session() - Unregister session.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+ void (*unregister_session)(struct mvx_client_session *session);
|
|
+
|
|
+ /**
|
|
+ * switch_in() - Switch in session.
|
|
+ *
|
|
+ * After a session has been switched in it must wait for a 'switched
|
|
+ * out' event before it is allowed to request to be switched in again.
|
|
+ * Switching in an already switched-in session is regarded as an error.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+ int (*switch_in)(struct mvx_client_session *session);
|
|
+
|
|
+ /**
|
|
+ * send_irq() - Send IRQ from driver to firmware.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+ int (*send_irq)(struct mvx_client_session *session);
|
|
+
|
|
+ /**
|
|
+ * flush_mmu() - Flush MMU tables.
|
|
+ *
|
|
+ * Flushing MMU tables is only required if pages have been removed
|
|
+ * from the page tables.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+ int (*flush_mmu)(struct mvx_client_session *session);
|
|
+
|
|
+ /**
|
|
+ * print_debug() - Print debug information.
|
|
|
|
+ */
|
|
+ void (*print_debug)(struct mvx_client_session *session);
|
|
+
|
|
+ void (*wait_session_idle)(struct mvx_client_session *session);
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_is_bitstream(): Detect if format is of type bitstream.
|
|
+ * @format: Format.
|
|
+ *
|
|
+ * Return: True if format is bitstream, else false.
|
|
+ */
|
|
+static inline bool mvx_is_bitstream(enum mvx_format format)
|
|
+{
|
|
+ return (format >= MVX_FORMAT_BITSTREAM_FIRST) &&
|
|
+ (format <= MVX_FORMAT_BITSTREAM_LAST);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mvx_is_frame(): Detect if format is of type frame.
|
|
+ * @format: Format.
|
|
+ *
|
|
+ * Return: True if format is frame, else false.
|
|
+ */
|
|
+static inline bool mvx_is_frame(enum mvx_format format)
|
|
+{
|
|
+ return (format >= MVX_FORMAT_FRAME_FIRST) &&
|
|
+ (format <= MVX_FORMAT_FRAME_LAST);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mvx_is_rgb(): Detect if format is of type RGB.
|
|
+ * @format: Format.
|
|
+ *
|
|
+ * Return: True if format is RGB, else false.
|
|
+ */
|
|
+static inline bool mvx_is_rgb(enum mvx_format format)
|
|
+{
|
|
+ return (format >= MVX_FORMAT_RGBA_8888) &&
|
|
+ (format <= MVX_FORMAT_ABGR_8888);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mvx_is_afbc(): Detect if format is of type AFBC.
|
|
+ * @format: Format.
|
|
+ *
|
|
+ * Return: True if format is AFBC, else false.
|
|
+ */
|
|
+static inline bool mvx_is_afbc(enum mvx_format format)
|
|
+{
|
|
+ return (format >= MVX_FORMAT_YUV420_AFBC_8) &&
|
|
+ (format <= MVX_FORMAT_YUV422_AFBC_10);
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_if_create() - Create IF device.
|
|
+ */
|
|
+struct mvx_if_ops *mvx_if_create(struct device *dev,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ void *priv);
|
|
+
|
|
+/**
|
|
+ * mvx_if_destroy() - Destroy IF device.
|
|
+ */
|
|
+void mvx_if_destroy(struct mvx_if_ops *if_ops);
|
|
+
|
|
+#endif /* _MVX_IF_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_mmu.c b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_mmu.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_mmu.c
|
|
@@ -0,0 +1,1366 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/bitmap.h>
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/dma-buf.h>
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <linux/gfp.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/sched.h>
|
|
+#include <asm-generic/memory_model.h>
|
|
+#include "mvx_mmu.h"
|
|
+#include "mvx_log_group.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+/* Number of bits for the physical address space. */
|
|
+#define MVE_PA_BITS 40
|
|
+#define MVE_PA_MASK GENMASK_ULL(MVE_PA_BITS - 1, 0)
|
|
+
|
|
+/* Number of bits for the virtual address space. */
|
|
+#define MVE_VA_BITS 32
|
|
+#define MVE_VA_MASK GENMASK(MVE_VA_BITS - 1, 0)
|
|
+
|
|
+/* Number of bits from the VA used to index a PTE in a page. */
|
|
+#define MVE_INDEX_SHIFT 10
|
|
+#define MVE_INDEX_SIZE (1 << MVE_INDEX_SHIFT)
|
|
+#define MVE_INDEX_MASK GENMASK(MVE_INDEX_SHIFT - 1, 0)
|
|
+
|
|
+/* Access permission defines. */
|
|
+#define MVE_PTE_AP_SHIFT 0
|
|
+#define MVE_PTE_AP_BITS 2
|
|
+#define MVE_PTE_AP_MASK ((1 << MVE_PTE_AP_BITS) - 1)
|
|
+
|
|
+/* Physical address defines. */
|
|
+#define MVE_PTE_PHYSADDR_SHIFT 2
|
|
+#define MVE_PTE_PHYSADDR_BITS 28
|
|
+#define MVE_PTE_PHYSADDR_MASK ((1 << MVE_PTE_PHYSADDR_BITS) - 1)
|
|
+
|
|
+/* Attributes defines. */
|
|
+#define MVE_PTE_ATTR_SHIFT 30
|
|
+#define MVE_PTE_ATTR_BITS 2
|
|
+#define MVE_PTE_ATTR_MASK ((1 << MVE_PTE_ATTR_BITS) - 1)
|
|
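+
+/*
+ * Sketch of the page table entry layout implied by the shift and mask
+ * definitions above (derived from get_pa(), get_attr() and get_ap() below,
+ * not from a hardware manual):
+ *
+ *   bits [31:30]  attributes (enum mvx_mmu_attr)
+ *   bits [29:2]   physical address >> MVE_PAGE_SHIFT
+ *   bits [1:0]    access permissions (enum mvx_mmu_access)
+ *
+ * i.e. roughly pte = (attr << 30) | ((pa >> MVE_PAGE_SHIFT) << 2) | access,
+ * which mvx_mmu_set_pte() is assumed to construct.
+ */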
+
|
|
+/* Number of levels for Page Table Walk. */
|
|
+#define MVE_PTW_LEVELS 2
|
|
+
|
|
+/*
|
|
+ * A Linux physical page can be equal in size or larger than the MVE page size.
|
|
+ * This define calculates how many MVE pages fit in one Linux page.
|
|
+ */
|
|
+#define MVX_PAGES_PER_PAGE (PAGE_SIZE / MVE_PAGE_SIZE)
|
|
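+
+/*
+ * Example (assuming MVE_PAGE_SIZE is 4 KiB): on a kernel built with 4 KiB
+ * pages MVX_PAGES_PER_PAGE is 1, while a 16 KiB PAGE_SIZE would give
+ * MVX_PAGES_PER_PAGE == 4, i.e. one Linux page backs four MVE pages.
+ */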
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * struct mvx_mmu_dma_buf - MVX DMA buffer.
|
|
+ *
|
|
+ * Adds a list head to keep track of DMA buffers.
|
|
+ */
|
|
+struct mvx_mmu_dma_buf {
|
|
+ struct list_head head;
|
|
+ struct dma_buf *dmabuf;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * get_index() - Return the PTE index for a given level.
|
|
+ * @va: Virtual address.
|
|
+ * @level: Level (L1=0, L2=1).
|
|
+ *
|
|
+ * 22 12 0
|
|
+ * +-------------------+-------------------+-----------------------+
|
|
+ * | Level 1 | Level 2 | Page offset |
|
|
+ * +-------------------+-------------------+-----------------------+
|
|
+ */
|
|
+static unsigned int get_index(const mvx_mmu_va va,
|
|
+ const unsigned int level)
|
|
+{
|
|
+ return (va >> (MVE_PAGE_SHIFT + (MVE_PTW_LEVELS - level - 1) *
|
|
+ MVE_INDEX_SHIFT)) & MVE_INDEX_MASK;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * get_offset() - Return the page offset.
|
|
+ * @va: Virtual address.
|
|
+ *
|
|
+ * 22 12 0
|
|
+ * +-------------------+-------------------+-----------------------+
|
|
+ * | Level 1 | Level 2 | Page offset |
|
|
+ * +-------------------+-------------------+-----------------------+
|
|
+ */
|
|
+static unsigned int get_offset(const mvx_mmu_va va)
|
|
+{
|
|
+ return va & MVE_PAGE_MASK;
|
|
+}
|
|
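+
+/*
+ * Worked example of the split shown above, assuming MVE_PAGE_SHIFT == 12 and
+ * MVE_PAGE_MASK == 0xfff (both come from mvx_mmu.h): for va = 0x00403000,
+ *
+ *   get_index(va, 0) = (va >> 22) & 0x3ff = 1   (L1 index)
+ *   get_index(va, 1) = (va >> 12) & 0x3ff = 3   (L2 index)
+ *   get_offset(va)   =  va & 0xfff        = 0   (byte offset within the page)
+ */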
+
|
|
+/**
|
|
+ * get_pa() - Return physical address stored in PTE.
|
|
+ */
|
|
+static phys_addr_t get_pa(const mvx_mmu_pte pte)
|
|
+{
|
|
+ return (((phys_addr_t)pte >> MVE_PTE_PHYSADDR_SHIFT) &
|
|
+ MVE_PTE_PHYSADDR_MASK) << MVE_PAGE_SHIFT;
|
|
+}
|
|
+
|
|
+/* LCOV_EXCL_START */
|
|
+
|
|
+/**
|
|
+ * get_attr() - Return attributes stored in PTE.
|
|
+ */
|
|
+static enum mvx_mmu_attr get_attr(const mvx_mmu_pte pte)
|
|
+{
|
|
+ return (pte >> MVE_PTE_ATTR_SHIFT) & MVE_PTE_ATTR_MASK;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * get_ap() - Return access permissions stored in PTE.
|
|
+ */
|
|
+static enum mvx_mmu_access get_ap(const mvx_mmu_pte pte)
|
|
+{
|
|
+ return (pte >> MVE_PTE_AP_SHIFT) & MVE_PTE_AP_MASK;
|
|
+}
|
|
+
|
|
+/* LCOV_EXCL_STOP */
|
|
+
|
|
+/**
|
|
+ * ptw() - Perform Page Table Walk and return pointer to L2 PTE.
|
|
+ * @mmu: Pointer to MMU context.
|
|
+ * @va: Virtual address.
|
|
+ * @alloc: True if missing L2 page should be allocated.
|
|
+ *
|
|
+ * Return: Pointer to PTE, ERR_PTR on error.
|
|
+ */
|
|
+static mvx_mmu_pte *ptw(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ bool alloc)
|
|
+{
|
|
+ phys_addr_t l2;
|
|
+ mvx_mmu_pte *pte = mmu->page_table;
|
|
+ unsigned int index;
|
|
+
|
|
+ /* Level 1. */
|
|
+ index = get_index(va, 0);
|
|
+ l2 = get_pa(pte[index]);
|
|
+
|
|
+ /* We should never perform a page table walk for a protected page. */
|
|
+ if (test_bit(index, mmu->l2_page_is_external) != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "PTW virtual address to secure L2 page. va=0x%x.",
|
|
+ va);
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
+
|
|
+ /* Map in L2 page if it is missing. */
|
|
+ if (l2 == 0) {
|
|
+ if (alloc == false) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Missing L2 page in PTW. va=0x%x.",
|
|
+ va);
|
|
+ return ERR_PTR(-EFAULT);
|
|
+ }
|
|
+
|
|
+ l2 = mvx_mmu_alloc_page(mmu->dev);
|
|
+ if (l2 == 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to allocate L2 page. va=0x%x.",
|
|
+ va);
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+ }
|
|
+
|
|
+ pte[index] = mvx_mmu_set_pte(MVX_ATTR_PRIVATE, l2,
|
|
+ MVX_ACCESS_READ_ONLY);
|
|
+ dma_sync_single_for_device(mmu->dev,
|
|
+ phys_cpu2vpu(virt_to_phys(&pte[index])),
|
|
+ sizeof(pte[index]), DMA_TO_DEVICE);
|
|
+ }
|
|
+
|
|
+ /* Level 2. */
|
|
+ index = get_index(va, 1);
|
|
+ pte = phys_to_virt(phys_vpu2cpu(l2));
|
|
+
|
|
+ return &pte[index];
|
|
+}
|
|
+
|
|
+/**
|
|
+ * map_page() - Map physical- to virtual address.
|
|
+ * @mmu: Pointer to MMU context.
|
|
+ * @va: MVE virtual address to map.
|
|
+ * @pa: Linux kernel physical address to map.
|
|
+ * @attr: MMU attributes.
|
|
+ * @access: MMU access permissions.
|
|
+ *
|
|
+ * Create new L1 and L2 entries if necessary. If mapping already exist, then
|
|
+ * error is returned.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int map_page(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ phys_addr_t pa,
|
|
+ enum mvx_mmu_attr attr,
|
|
+ enum mvx_mmu_access access)
|
|
+{
|
|
+ mvx_mmu_pte *pte;
|
|
+ phys_addr_t page;
|
|
+
|
|
+ /* Check that both VA and PA are page aligned. */
|
|
+ if ((va | pa) & MVE_PAGE_MASK) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "VA and PA must be page aligned. va=0x%x, pa=0x%llx.",
|
|
+ va, pa);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ /* Check that VA is within valid address range. */
|
|
+ if (va & ~MVE_VA_MASK) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "VA out of valid range. va=0x%x.",
|
|
+ va);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ /* Check that PA is within valid address range. */
|
|
+ if (pa & ~MVE_PA_MASK) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "PA out of valid range. pa=0x%llx.",
|
|
+ pa);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ pte = ptw(mmu, va, true);
|
|
+ if (IS_ERR(pte))
|
|
+ return PTR_ERR(pte);
|
|
+
|
|
+ /* Return error if page already exists. */
|
|
+ page = get_pa(*pte);
|
|
+ if (page != 0)
|
|
+ return -EAGAIN;
|
|
+
|
|
+ /* Map in physical address and flush data. */
|
|
+ *pte = mvx_mmu_set_pte(attr, pa, access);
|
|
+ dma_sync_single_for_device(mmu->dev, phys_cpu2vpu(virt_to_phys(pte)), sizeof(*pte),
|
|
+ DMA_TO_DEVICE);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * unmap_page() - Unmap a page from the virtual address space.
|
|
+ * @mmu: Pointer to MMU context.
|
|
+ * @va: Virtual address.
|
|
+ */
|
|
+static void unmap_page(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va)
|
|
+{
|
|
+ mvx_mmu_pte *pte;
|
|
+
|
|
+ pte = ptw(mmu, va, false);
|
|
+ if (IS_ERR(pte))
|
|
+ return;
|
|
+
|
|
+ /* Unmap virtual address and flush data. */
|
|
+ *pte = 0;
|
|
+ dma_sync_single_for_device(mmu->dev, phys_cpu2vpu(virt_to_phys(pte)), sizeof(*pte),
|
|
+ DMA_TO_DEVICE);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * remap_page() - Remap virtual address.
|
|
+ * @mmu: Pointer to MMU context.
|
|
+ * @va: MVE virtual address to map.
|
|
+ * @pa: Linux kernel physical address to map.
|
|
+ * @attr: MMU attributes.
|
|
+ * @access: MMU access permissions.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int remap_page(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ phys_addr_t pa,
|
|
+ enum mvx_mmu_attr attr,
|
|
+ enum mvx_mmu_access access)
|
|
+{
|
|
+ unmap_page(mmu, va);
|
|
+ return map_page(mmu, va, pa, attr, access);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * remap_pages() - Remap virtual address range.
|
|
+ * @pages: Pointer to pages object.
|
|
+ * @oldcount: Count before object was resized.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int remap_pages(struct mvx_mmu_pages *pages,
|
|
+ size_t oldcount)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (pages->mmu == NULL)
|
|
+ return 0;
|
|
+
|
|
+ /* Remap pages to no access if new count is smaller than old count. */
|
|
+ while (pages->count < oldcount) {
|
|
+ oldcount--;
|
|
+
|
|
+ ret = remap_page(pages->mmu,
|
|
+ pages->va + oldcount * MVE_PAGE_SIZE,
|
|
+ MVE_PAGE_SIZE, MVX_ATTR_PRIVATE,
|
|
+ MVX_ACCESS_NO);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /* Map up pages if new count is larger than old count. */
|
|
+ while (pages->count > oldcount) {
|
|
+ ret = remap_page(pages->mmu,
|
|
+ pages->va + oldcount * MVE_PAGE_SIZE,
|
|
+ pages->pages[oldcount], pages->attr,
|
|
+ pages->access);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ oldcount++;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mapped_count() - Check if level 2 table entries point to mmu mapped pages.
|
|
+ * @pa: Physical address of the table entry to be checked.
|
|
+ *
|
|
+ * Return: the number of mapped pages found.
|
|
+ */
|
|
+static int mapped_count(phys_addr_t pa)
|
|
+{
|
|
+ int count = 0;
|
|
+
|
|
+ if (pa != 0) {
|
|
+ int j;
|
|
+ phys_addr_t pa2;
|
|
+ mvx_mmu_pte *l2 = phys_to_virt(phys_vpu2cpu(pa));
|
|
+
|
|
+ for (j = 0; j < MVE_INDEX_SIZE; j++) {
|
|
+ pa2 = get_pa(l2[j]);
|
|
+ if (pa2 != 0 && pa2 != MVE_PAGE_SIZE)
|
|
+ count++;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * get_sg_table_npages() - Count number of pages in SG table.
|
|
+ * @sgt: Pointer to scatter gather table.
|
|
+ *
|
|
+ * Return: Number of pages.
|
|
+ */
|
|
+static size_t get_sg_table_npages(struct sg_table *sgt)
|
|
+{
|
|
+ struct sg_page_iter piter;
|
|
+ size_t count = 0;
|
|
+
|
|
+ for_each_sg_page(sgt->sgl, &piter, sgt->nents, 0) {
|
|
+ count++;
|
|
+ }
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * append_sg_table() - Append SG table to pages object.
|
|
+ * @pages: Pointer to pages object.
|
|
+ * @sgt: Pointer to scatter gather table.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int append_sg_table(struct mvx_mmu_pages *pages,
|
|
+ struct sg_table *sgt)
|
|
+{
|
|
+ size_t count;
|
|
+ struct sg_dma_page_iter piter;
|
|
+
|
|
+ count = get_sg_table_npages(sgt) * MVX_PAGES_PER_PAGE;
|
|
+
|
|
+ if ((pages->count + count) > pages->capacity) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to append SG table. Pages capacity too small. count=%zu, capacity=%zu, append=%zu.",
|
|
+ pages->count, pages->capacity, count);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ for_each_sg_dma_page(sgt->sgl, &piter, sgt->nents, 0) {
|
|
+ int j;
|
|
+ phys_addr_t base;
|
|
+
|
|
+ base = (phys_addr_t)sg_page_iter_dma_address(&piter) &
|
|
+ PAGE_MASK;
|
|
+
|
|
+ for (j = 0; j < MVX_PAGES_PER_PAGE; ++j)
|
|
+ pages->pages[pages->count++] =
|
|
+ base + j * MVE_PAGE_SIZE;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * stat_show() - Print debugfs info into seq-file.
|
|
+ *
|
|
+ * This is a callback used by debugfs subsystem.
|
|
+ *
|
|
+ * @s: Seq-file
|
|
+ * @v: Unused
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int stat_show(struct seq_file *s,
|
|
+ void *v)
|
|
+{
|
|
+ struct mvx_mmu_pages *pages = s->private;
|
|
+
|
|
+ seq_printf(s, "va: %08x\n", pages->va);
|
|
+ seq_printf(s, "capacity: %zu\n", pages->capacity);
|
|
+ seq_printf(s, "count: %zu\n", pages->count);
|
|
+
|
|
+ if (pages->mmu != NULL) {
|
|
+ seq_printf(s, "attr: %d\n", pages->attr);
|
|
+ seq_printf(s, "access: %d\n", pages->access);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * stat_open() - Open debugfs file.
|
|
+ *
|
|
+ * This is a callback used by debugfs subsystem.
|
|
+ *
|
|
+ * @inode: Inode
|
|
+ * @file: File
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int stat_open(struct inode *inode,
|
|
+ struct file *file)
|
|
+{
|
|
+ return single_open(file, stat_show, inode->i_private);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * File operations for debugfs entry.
|
|
+ */
|
|
+static const struct file_operations stat_fops = {
|
|
+ .open = stat_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+/**
|
|
+ * pages_seq_start() - Iterator over pages list.
|
|
+ */
|
|
+static void *pages_seq_start(struct seq_file *s,
|
|
+ loff_t *pos)
|
|
+{
|
|
+ struct mvx_mmu_pages *pages = s->private;
|
|
+
|
|
+ if (*pos >= pages->count)
|
|
+ return NULL;
|
|
+
|
|
+ seq_puts(s,
|
|
+ "#Page: [ va_start - va_end] -> [ pa_start - pa_end]\n");
|
|
+ return pos;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * pages_seq_next() - Advance iterator over pages list.
|
|
+ */
|
|
+static void *pages_seq_next(struct seq_file *s,
|
|
+ void *v,
|
|
+ loff_t *pos)
|
|
+{
|
|
+ struct mvx_mmu_pages *pages = s->private;
|
|
+
|
|
+ ++*pos;
|
|
+ if (*pos >= pages->count)
|
|
+ return NULL;
|
|
+
|
|
+ return pos;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * pages_seq_stop() - Stop iterator over pages list.
|
|
+ */
|
|
+static void pages_seq_stop(struct seq_file *s,
|
|
+ void *v)
|
|
+{}
|
|
+
|
|
+/**
|
|
+ * pages_seq_show() - Show one entry of the pages list.
|
|
+ */
|
|
+static int pages_seq_show(struct seq_file *s,
|
|
+ void *v)
|
|
+{
|
|
+ struct mvx_mmu_pages *pages = s->private;
|
|
+ loff_t pos = *(loff_t *)v;
|
|
+
|
|
+ mvx_mmu_va va_start = pages->va + pos * MVE_PAGE_SIZE;
|
|
+ mvx_mmu_va va_end = va_start + MVE_PAGE_SIZE - 1;
|
|
+ phys_addr_t pa_start = pages->pages[pos];
|
|
+ phys_addr_t pa_end = pa_start + MVE_PAGE_SIZE - 1;
|
|
+
|
|
+ seq_printf(s, "%5llu: [0x%08x - 0x%08x] -> [%pap - %pap]\n", pos,
|
|
+ va_start, va_end, &pa_start, &pa_end);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * pages_seq_ops - Callbacks used by an iterator over pages list.
|
|
+ */
|
|
+static const struct seq_operations pages_seq_ops = {
|
|
+ .start = pages_seq_start,
|
|
+ .next = pages_seq_next,
|
|
+ .stop = pages_seq_stop,
|
|
+ .show = pages_seq_show
|
|
+};
|
|
+
|
|
+/**
|
|
+ * list_open() - Callback for debugfs entry.
|
|
+ */
|
|
+static int list_open(struct inode *inode,
|
|
+ struct file *file)
|
|
+{
|
|
+ int ret;
|
|
+ struct seq_file *s;
|
|
+
|
|
+ ret = seq_open(file, &pages_seq_ops);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ s = (struct seq_file *)file->private_data;
|
|
+ s->private = inode->i_private;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * File operations for a debugfs entry.
|
|
+ */
|
|
+static const struct file_operations list_fops = {
|
|
+ .open = list_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = seq_release
|
|
+};
|
|
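+
+/*
+ * Sketch of how these file_operations are expected to be registered (the
+ * actual call site is outside this file, so the entry names and the parent
+ * dentry are assumptions): the pages object is passed as the data pointer,
+ * which is how it ends up in inode->i_private for stat_open() and list_open()
+ * above.
+ *
+ *   debugfs_create_file("stat", 0444, parent, pages, &stat_fops);
+ *   debugfs_create_file("list", 0444, parent, pages, &list_fops);
+ */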
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_mmu_construct(struct mvx_mmu *mmu,
|
|
+ struct device *dev)
|
|
+{
|
|
+ phys_addr_t page_table;
|
|
+
|
|
+ mmu->dev = dev;
|
|
+
|
|
+ /* Allocate Page Table Base (the L1 table). */
|
|
+ page_table = mvx_mmu_alloc_page(dev);
|
|
+ if (page_table == 0)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ mmu->page_table = phys_to_virt(phys_vpu2cpu(page_table));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mvx_mmu_destruct(struct mvx_mmu *mmu)
|
|
+{
|
|
+ mvx_mmu_pte *pte = mmu->page_table;
|
|
+ phys_addr_t pa;
|
|
+ int i;
|
|
+ int count = 0;
|
|
+
|
|
+ for (i = 0; i < MVE_INDEX_SIZE; i++) {
|
|
+ pa = get_pa(pte[i]);
|
|
+
|
|
+ /* Only free pages we have allocated ourselves. */
|
|
+ if (test_bit(i, mmu->l2_page_is_external) == 0) {
|
|
+ count += mapped_count(pa);
|
|
+ mvx_mmu_free_page(mmu->dev, pa);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ pa = virt_to_phys(mmu->page_table);
|
|
+ mvx_mmu_free_page(mmu->dev, phys_cpu2vpu(pa));
|
|
+
|
|
+ WARN_ON(count > 0);
|
|
+}
|
|
+
|
|
+phys_addr_t mvx_mmu_alloc_page(struct device *dev)
|
|
+{
|
|
+ struct page *page;
|
|
+ phys_addr_t pa;
|
|
+ dma_addr_t dma_handle;
|
|
+ int retry = 20;
|
|
+
|
|
+ do {
|
|
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL);
|
|
+ } while(retry-- && page == NULL);
|
|
+
|
|
+ if (page == NULL)
|
|
+ return 0;
|
|
+
|
|
+ dma_handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
|
|
+ if (dma_mapping_error(dev, dma_handle) != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Cannot map page to DMA address space. page=%p.",
|
|
+ page);
|
|
+ goto free_page;
|
|
+ }
|
|
+
|
|
+ pa = (phys_addr_t)dma_handle;
|
|
+
|
|
+ dma_sync_single_for_device(dev, pa, PAGE_SIZE, DMA_TO_DEVICE);
|
|
+
|
|
+ return pa;
|
|
+
|
|
+free_page:
|
|
+ __free_page(page);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+phys_addr_t mvx_mmu_dma_alloc_coherent(struct device *dev, void** data)
|
|
+{
|
|
+ phys_addr_t pa;
|
|
+ dma_addr_t dma_handle;
|
|
+ int retry = 20;
|
|
+ void *virt_addr;
|
|
+
|
|
+ do {
|
|
+ virt_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL);
|
|
+ } while(retry-- && virt_addr == NULL);
|
|
+
|
|
+ if (virt_addr == NULL) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "dma alloc coherent buffer faild. retry=%d", retry);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ pa = (phys_addr_t)dma_handle;
|
|
+
|
|
+ *data = virt_addr;
|
|
+
|
|
+ return pa;
|
|
+}
|
|
+
|
|
+void mvx_mmu_dma_free_coherent(struct device *dev,
|
|
+ phys_addr_t pa, void* data)
|
|
+{
|
|
+ if (pa == 0)
|
|
+ return;
|
|
+
|
|
+ dma_free_coherent(dev, PAGE_SIZE, data, (dma_addr_t)pa);
|
|
+}
|
|
+
|
|
+void mvx_mmu_free_contiguous_pages(struct device *dev, phys_addr_t pa, size_t npages)
|
|
+{
|
|
+ struct page *page;
|
|
+
|
|
+ if (pa == 0)
|
|
+ return;
|
|
+
|
|
+ page = phys_to_page(phys_vpu2cpu(pa));
|
|
+
|
|
+ dma_unmap_page(dev, pa, npages << PAGE_SHIFT, DMA_BIDIRECTIONAL);
|
|
+ __free_pages(page, get_order(npages << PAGE_SHIFT));
|
|
+}
|
|
+
|
|
+phys_addr_t mvx_mmu_alloc_contiguous_pages(struct device *dev, size_t npages)
|
|
+{
|
|
+ struct page *page;
|
|
+ phys_addr_t pa;
|
|
+ dma_addr_t dma_handle;
|
|
+ size_t size = (npages << PAGE_SHIFT);
|
|
+ int retry = 20;
|
|
+
|
|
+ do {
|
|
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL, get_order(size));
|
|
+ } while(retry-- && page == NULL);
|
|
+
|
|
+ if (page == NULL)
|
|
+ return 0;
|
|
+
|
|
+ dma_handle = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL);
|
|
+ if (dma_mapping_error(dev, dma_handle) != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Cannot map page to DMA address space. page=%p.",
|
|
+ page);
|
|
+ goto free_pages;
|
|
+ }
|
|
+
|
|
+ pa = (phys_addr_t)dma_handle;
|
|
+
|
|
+ dma_sync_single_for_device(dev, pa, size, DMA_TO_DEVICE);
|
|
+
|
|
+ return pa;
|
|
+
|
|
+free_pages:
|
|
+ __free_pages(page, get_order(size));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mvx_mmu_free_page(struct device *dev,
|
|
+ phys_addr_t pa)
|
|
+{
|
|
+ struct page *page;
|
|
+
|
|
+ if (pa == 0)
|
|
+ return;
|
|
+
|
|
+ page = phys_to_page(phys_vpu2cpu(pa));
|
|
+
|
|
+ dma_unmap_page(dev, pa, PAGE_SIZE, DMA_BIDIRECTIONAL);
|
|
+ __free_page(page);
|
|
+}
|
|
+
|
|
+struct mvx_mmu_pages *mvx_mmu_alloc_pages(struct device *dev,
|
|
+ size_t count,
|
|
+ size_t capacity)
|
|
+{
|
|
+ struct mvx_mmu_pages *pages;
|
|
+ int ret;
|
|
+
|
|
+ count = roundup(count, MVX_PAGES_PER_PAGE);
|
|
+ capacity = roundup(capacity, MVX_PAGES_PER_PAGE);
|
|
+ capacity = max(count, capacity);
|
|
+
|
|
+ pages = vmalloc(sizeof(*pages) + sizeof(phys_addr_t) * capacity);
|
|
+ if (pages == NULL)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ memset(pages, 0, sizeof(*pages) + sizeof(phys_addr_t) * capacity);
|
|
+ pages->dev = dev;
|
|
+ pages->capacity = capacity;
|
|
+ INIT_LIST_HEAD(&pages->dmabuf);
|
|
+
|
|
+ for (pages->count = 0; pages->count < count; ) {
|
|
+ phys_addr_t page;
|
|
+ unsigned int i;
|
|
+
|
|
+ /*
|
|
+ * Allocate a Linux page. It will typically be of the same size
|
|
+ * as the MVE page, but could also be larger.
|
|
+ */
|
|
+ page = mvx_mmu_alloc_page(dev);
|
|
+ if (page == 0) {
|
|
+ ret = -ENOMEM;
|
|
+ goto release_pages;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If the Linux page is larger than the MVE page, then
|
|
+ * we iterate and add physical addresses with an offset from
|
|
+ * the Linux page.
|
|
+ */
|
|
+ for (i = 0; i < MVX_PAGES_PER_PAGE; i++)
|
|
+ pages->pages[pages->count++] =
|
|
+ page + i * MVE_PAGE_SIZE;
|
|
+ }
|
|
+
|
|
+ return pages;
|
|
+
|
|
+release_pages:
|
|
+ mvx_mmu_free_pages(pages);
|
|
+
|
|
+ return ERR_PTR(ret);
|
|
+}
|
|
+
|
|
+struct mvx_mmu_pages *mvx_mmu_alloc_pages_sg(struct device *dev,
|
|
+ struct sg_table *sgt,
|
|
+ size_t capacity)
|
|
+{
|
|
+ struct mvx_mmu_pages *pages;
|
|
+ size_t count;
|
|
+ int ret;
|
|
+
|
|
+ count = get_sg_table_npages(sgt) * MVX_PAGES_PER_PAGE;
|
|
+ capacity = roundup(capacity, MVX_PAGES_PER_PAGE);
|
|
+ capacity = max(count, capacity);
|
|
+
|
|
+ pages = vmalloc(sizeof(*pages) + sizeof(phys_addr_t) * capacity);
|
|
+ if (pages == NULL)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ memset(pages, 0, sizeof(*pages) + sizeof(phys_addr_t) * capacity);
|
|
+ pages->dev = dev;
|
|
+ pages->capacity = capacity;
|
|
+ pages->is_external = true;
|
|
+ pages->offset = sgt->sgl != NULL ? sgt->sgl->offset : 0;
|
|
+ INIT_LIST_HEAD(&pages->dmabuf);
|
|
+
|
|
+ ret = append_sg_table(pages, sgt);
|
|
+ if (ret != 0) {
|
|
+ vfree(pages);
|
|
+ return ERR_PTR(ret);
|
|
+ }
|
|
+
|
|
+ return pages;
|
|
+}
|
|
+
|
|
+struct mvx_mmu_pages *mvx_mmu_alloc_pages_dma_buf(struct device *dev,
|
|
+ struct dma_buf *dmabuf,
|
|
+ size_t capacity)
|
|
+{
|
|
+ struct mvx_mmu_pages *pages;
|
|
+ struct dma_buf_attachment *attach;
|
|
+ struct sg_table *sgt;
|
|
+ struct mvx_mmu_dma_buf *mbuf;
|
|
+
|
|
+ attach = dma_buf_attach(dmabuf, dev);
|
|
+ if (IS_ERR(attach)) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to attach DMA buffer.");
|
|
+ return (struct mvx_mmu_pages *)attach;
|
|
+ }
|
|
+
|
|
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
|
|
+ if (IS_ERR(sgt)) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to get SG table from DMA buffer.");
|
|
+ pages = (struct mvx_mmu_pages *)sgt;
|
|
+ goto detach;
|
|
+ }
|
|
+
|
|
+ pages = mvx_mmu_alloc_pages_sg(dev, sgt, capacity);
|
|
+ if (IS_ERR(pages))
|
|
+ goto unmap;
|
|
+
|
|
+ mbuf = vmalloc(sizeof(*mbuf));
|
|
+ if (mbuf == NULL) {
|
|
+ mvx_mmu_free_pages(pages);
|
|
+ pages = ERR_PTR(-ENOMEM);
|
|
+ goto unmap;
|
|
+ }
|
|
+
|
|
+ memset(mbuf, 0, sizeof(*mbuf));
|
|
+ mbuf->dmabuf = dmabuf;
|
|
+ list_add_tail(&mbuf->head, &pages->dmabuf);
|
|
+
|
|
+unmap:
|
|
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
|
|
+
|
|
+detach:
|
|
+ dma_buf_detach(dmabuf, attach);
|
|
+
|
|
+ return pages;
|
|
+}
|
|
+
|
|
+int mvx_mmu_pages_append_dma_buf(struct mvx_mmu_pages *pages,
|
|
+ struct dma_buf *dmabuf)
|
|
+{
|
|
+ struct dma_buf_attachment *attach;
|
|
+ struct sg_table *sgt;
|
|
+ struct mvx_mmu_dma_buf *mbuf;
|
|
+ size_t oldcount = pages->count;
|
|
+ int ret;
|
|
+
|
|
+ if (pages->is_external == false) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Can't append DMA buffer to internal pages object.");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ attach = dma_buf_attach(dmabuf, pages->dev);
|
|
+ if (IS_ERR(attach)) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to attach DMA buffer.");
|
|
+ return PTR_ERR(attach);
|
|
+ }
|
|
+
|
|
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
|
|
+ if (IS_ERR(sgt)) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to get SG table from DMA buffer.");
|
|
+ ret = PTR_ERR(sgt);
|
|
+ goto detach;
|
|
+ }
|
|
+
|
|
+ ret = append_sg_table(pages, sgt);
|
|
+ if (ret != 0)
|
|
+ goto unmap;
|
|
+
|
|
+ ret = remap_pages(pages, oldcount);
|
|
+ if (ret != 0)
|
|
+ goto unmap;
|
|
+
|
|
+ mbuf = vmalloc(sizeof(*mbuf));
|
|
+ if (mbuf == NULL) {
|
|
+ ret = -ENOMEM;
|
|
+ goto unmap;
|
|
+ }
|
|
+
|
|
+ memset(mbuf, 0, sizeof(*mbuf));
|
|
+ mbuf->dmabuf = dmabuf;
|
|
+ list_add_tail(&mbuf->head, &pages->dmabuf);
|
|
+
|
|
+unmap:
|
|
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
|
|
+
|
|
+detach:
|
|
+ dma_buf_detach(dmabuf, attach);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_mmu_resize_pages(struct mvx_mmu_pages *pages,
|
|
+ size_t npages)
|
|
+{
|
|
+ size_t oldcount = pages->count;
|
|
+
|
|
+ if (pages->is_external != false) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "MMU with externally managed pages cannot be resized.");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ npages = roundup(npages, MVX_PAGES_PER_PAGE);
|
|
+
|
|
+ if (npages > pages->capacity) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "New MMU pages size is larger than capacity. npages=%zu, capacity=%zu.",
|
|
+ npages, pages->capacity);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ /* Free pages if npages is smaller than allocated pages. */
|
|
+ while (pages->count > npages) {
|
|
+ pages->count--;
|
|
+
|
|
+ if ((pages->count % MVX_PAGES_PER_PAGE) == 0)
|
|
+ mvx_mmu_free_page(pages->dev,
|
|
+ pages->pages[pages->count]);
|
|
+
|
|
+ pages->pages[pages->count] = 0;
|
|
+ }
|
|
+
|
|
+ /* Allocate pages if npages is larger than allocated pages. */
|
|
+ while (pages->count < npages) {
|
|
+ phys_addr_t page;
|
|
+ unsigned int i;
|
|
+
|
|
+ page = mvx_mmu_alloc_page(pages->dev);
|
|
+ if (page == 0)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ for (i = 0; i < MVX_PAGES_PER_PAGE; i++)
|
|
+ pages->pages[pages->count++] =
|
|
+ page + i * MVE_PAGE_SIZE;
|
|
+ }
|
|
+
|
|
+ return remap_pages(pages, oldcount);
|
|
+}
|
|
+
|
|
+void mvx_mmu_free_pages(struct mvx_mmu_pages *pages)
|
|
+{
|
|
+ struct mvx_mmu_dma_buf *mbuf;
|
|
+ struct mvx_mmu_dma_buf *tmp;
|
|
+ unsigned int i;
|
|
+
|
|
+ mvx_mmu_unmap_pages(pages);
|
|
+
|
|
+ if (pages->is_external == false)
|
|
+ for (i = 0; i < pages->count; i += MVX_PAGES_PER_PAGE)
|
|
+ mvx_mmu_free_page(pages->dev, pages->pages[i]);
|
|
+
|
|
+ list_for_each_entry_safe(mbuf, tmp, &pages->dmabuf, head) {
|
|
+ dma_buf_put(mbuf->dmabuf);
|
|
+ vfree(mbuf);
|
|
+ }
|
|
+
|
|
+ vfree(pages);
|
|
+}
|
|
+
|
|
+size_t mvx_mmu_size_pages(struct mvx_mmu_pages *pages)
|
|
+{
|
|
+ return pages->count * MVE_PAGE_SIZE;
|
|
+}
|
|
+
|
|
+int mvx_mmu_synch_pages(struct mvx_mmu_pages *pages,
|
|
+ enum dma_data_direction dir, int page_off, int page_count)
|
|
+{
|
|
+ size_t i;
|
|
+ if (page_off + page_count > pages->count)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Illegal mmu sync offset/size (%d/%d).",
|
|
+ page_off, page_count);
|
|
+ page_off = 0;
|
|
+ page_count = pages->count;
|
|
+ }
|
|
+ if (dir == DMA_FROM_DEVICE) {
|
|
+ for (i = 0; i < page_count; i += MVX_PAGES_PER_PAGE)
|
|
+ dma_sync_single_for_cpu(pages->dev, pages->pages[page_off+i],
|
|
+ PAGE_SIZE, DMA_FROM_DEVICE);
|
|
+ } else if (dir == DMA_TO_DEVICE) {
|
|
+ for (i = 0; i < page_count; i += MVX_PAGES_PER_PAGE)
|
|
+ dma_sync_single_for_device(pages->dev, pages->pages[page_off+i],
|
|
+ PAGE_SIZE, DMA_TO_DEVICE);
|
|
+ } else {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Unsupported MMU flush direction. dir=%u.",
|
|
+ dir);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_mmu_map_pages(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ struct mvx_mmu_pages *pages,
|
|
+ enum mvx_mmu_attr attr,
|
|
+ enum mvx_mmu_access access)
|
|
+{
|
|
+ size_t i;
|
|
+ int ret;
|
|
+
|
|
+ /* Map the allocated pages. */
|
|
+ for (i = 0; i < pages->count; i++) {
|
|
+ ret = map_page(mmu, va + i * MVE_PAGE_SIZE, pages->pages[i],
|
|
+ attr, access);
|
|
+ if (ret != 0)
|
|
+ goto unmap_pages;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Reserve the rest of the address range. Adding a dummy page with
|
|
+ * physical address 'PAGE_SIZE' should not lead to memory corruption,
|
|
+ * because the page is marked as 'no access'.
|
|
+ */
|
|
+ for (; i < pages->capacity; i++) {
|
|
+ ret = map_page(mmu, va + i * MVE_PAGE_SIZE, MVE_PAGE_SIZE,
|
|
+ MVX_ATTR_PRIVATE, MVX_ACCESS_NO);
|
|
+ if (ret != 0)
|
|
+ goto unmap_pages;
|
|
+ }
|
|
+
|
|
+ pages->mmu = mmu;
|
|
+ pages->va = va;
|
|
+ pages->attr = attr;
|
|
+ pages->access = access;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+unmap_pages:
|
|
+ while (i-- > 0)
|
|
+ unmap_page(mmu, va + i * MVE_PAGE_SIZE);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void mvx_mmu_unmap_pages(struct mvx_mmu_pages *pages)
|
|
+{
|
|
+ size_t i;
|
|
+
|
|
+ if (pages->mmu == NULL)
|
|
+ return;
|
|
+
|
|
+ for (i = 0; i < pages->capacity; i++)
|
|
+ unmap_page(pages->mmu, pages->va + i * MVE_PAGE_SIZE);
|
|
+
|
|
+ pages->mmu = NULL;
|
|
+ pages->va = 0;
|
|
+}
|
|
+
|
|
+int mvx_mmu_map_pa(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ phys_addr_t pa,
|
|
+ size_t size,
|
|
+ enum mvx_mmu_attr attr,
|
|
+ enum mvx_mmu_access access)
|
|
+{
|
|
+ int ret;
|
|
+ size_t offset;
|
|
+
|
|
+ for (offset = 0; offset < size; offset += MVE_PAGE_SIZE) {
|
|
+ ret = map_page(mmu, va + offset, pa + offset,
|
|
+ attr, access);
|
|
+ if (ret != 0)
|
|
+ goto unmap_pages;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+unmap_pages:
|
|
+ /* Unroll mapped pages. */
|
|
+ while (offset > 0) {
|
|
+ offset -= MVE_PAGE_SIZE;
|
|
+ unmap_page(mmu, va + offset);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
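+/*
+ * Usage sketch (illustrative only): map a physically contiguous region
+ * into the VPU address space. Both 'va' and 'pa' must be MVE page
+ * aligned; on failure any partially created mapping is already unrolled.
+ *
+ *   ret = mvx_mmu_map_pa(mmu, va, pa, size,
+ *                        MVX_ATTR_SHARED_RW, MVX_ACCESS_READ_WRITE);
+ *   ...
+ *   mvx_mmu_unmap_va(mmu, va, size);
+ */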
|
|
+int mvx_mmu_map_l2(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ phys_addr_t pa)
|
|
+{
|
|
+ phys_addr_t l2;
|
|
+ mvx_mmu_pte *pte = mmu->page_table;
|
|
+ unsigned int index;
|
|
+
|
|
+ /* Level 1. */
|
|
+ index = get_index(va, 0);
|
|
+ l2 = get_pa(pte[index]);
|
|
+
|
|
+ if (l2 != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR,
|
|
+ "Failed to map L2 page. Page already exists.");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ set_bit(index, mmu->l2_page_is_external);
|
|
+
|
|
+ pte[index] = mvx_mmu_set_pte(MVX_ATTR_PRIVATE, pa,
|
|
+ MVX_ACCESS_READ_ONLY);
|
|
+ dma_sync_single_for_device(mmu->dev,
|
|
+ phys_cpu2vpu(virt_to_phys(&pte[index])),
|
|
+ sizeof(pte[index]), DMA_TO_DEVICE);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mvx_mmu_unmap_va(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ size_t size)
|
|
+{
|
|
+ size_t offset;
|
|
+
|
|
+ for (offset = 0; offset < size; offset += MVE_PAGE_SIZE)
|
|
+ unmap_page(mmu, va + offset);
|
|
+}
|
|
+
|
|
+int mvx_mmu_va_to_pa(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ phys_addr_t *pa)
|
|
+{
|
|
+ mvx_mmu_pte *pte;
|
|
+ phys_addr_t page;
|
|
+
|
|
+ pte = ptw(mmu, va, false);
|
|
+ if (IS_ERR(pte))
|
|
+ return PTR_ERR(pte);
|
|
+
|
|
+ page = get_pa(*pte);
|
|
+ if (page == 0)
|
|
+ return -EFAULT;
|
|
+
|
|
+ *pa = page | get_offset(va);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* LCOV_EXCL_START */
|
|
+int mvx_mmu_read(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ void *data,
|
|
+ size_t size)
|
|
+{
|
|
+ mvx_mmu_va end = va + size;
|
|
+
|
|
+ while (va < end) {
|
|
+ int ret;
|
|
+ size_t n;
|
|
+ phys_addr_t pa = 0;
|
|
+ void *src;
|
|
+
|
|
+ /* Calculate number of bytes to be copied. */
|
|
+ n = min(end - va, MVE_PAGE_SIZE - (va & MVE_PAGE_MASK));
|
|
+
|
|
+ /* Translate virtual- to physical address. */
|
|
+ ret = mvx_mmu_va_to_pa(mmu, va, &pa);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ /* Invalidate the data range. */
|
|
+ dma_sync_single_for_cpu(mmu->dev, pa, n, DMA_FROM_DEVICE);
|
|
+
|
|
+ /* Convert from physical to Linux logical address. */
|
|
+ src = phys_to_virt(phys_vpu2cpu(pa));
|
|
+ memcpy(data, src, n);
|
|
+
|
|
+ va += n;
|
|
+ data += n;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* LCOV_EXCL_STOP */
|
|
+
|
|
+int mvx_mmu_write(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ const void *data,
|
|
+ size_t size)
|
|
+{
|
|
+ mvx_mmu_va end = va + size;
|
|
+
|
|
+ while (va < end) {
|
|
+ int ret;
|
|
+ size_t n;
|
|
+ phys_addr_t pa = 0;
|
|
+ void *dst;
|
|
+
|
|
+ /* Calculate number of bytes to be copied. */
|
|
+ n = min(end - va, MVE_PAGE_SIZE - (va & MVE_PAGE_MASK));
|
|
+
|
|
+ /* Translate virtual- to physical address. */
|
|
+ ret = mvx_mmu_va_to_pa(mmu, va, &pa);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ /* Convert from physical to Linux logical address. */
|
|
+ dst = phys_to_virt(phys_vpu2cpu(pa));
|
|
+ memcpy(dst, data, n);
|
|
+
|
|
+ /* Flush the data to memory. */
|
|
+ dma_sync_single_for_device(mmu->dev, pa, n, DMA_TO_DEVICE);
|
|
+
|
|
+ va += n;
|
|
+ data += n;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
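+/*
+ * Usage sketch (illustrative only): copy a small value into MVE address
+ * space and read it back; cache maintenance is handled inside
+ * mvx_mmu_write() and mvx_mmu_read().
+ *
+ *   uint32_t magic = 0x4d564500, check = 0;
+ *
+ *   ret = mvx_mmu_write(mmu, va, &magic, sizeof(magic));
+ *   if (ret == 0)
+ *           ret = mvx_mmu_read(mmu, va, &check, sizeof(check));
+ */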
|
|
+mvx_mmu_pte mvx_mmu_set_pte(enum mvx_mmu_attr attr,
|
|
+ phys_addr_t pa,
|
|
+ enum mvx_mmu_access access)
|
|
+{
|
|
+ return (attr << MVE_PTE_ATTR_SHIFT) |
|
|
+ ((pa >> MVE_PAGE_SHIFT) << MVE_PTE_PHYSADDR_SHIFT) |
|
|
+ (access << MVE_PTE_AP_SHIFT);
|
|
+}
|
|
+
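+/*
+ * Worked example (assuming the shift values implied by the PTE layout in
+ * mvx_mmu.h: attribute at bit 30, physical address at bit 2, access at
+ * bit 0): a private, read/write mapping of PA 0x80001000 encodes as
+ *
+ *   (MVX_ATTR_PRIVATE << 30) | ((0x80001000 >> 12) << 2) |
+ *           MVX_ACCESS_READ_WRITE == 0x00200007
+ */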
|
|
+/* LCOV_EXCL_START */
|
|
+void mvx_mmu_print(struct mvx_mmu *mmu)
|
|
+{
|
|
+ unsigned int i;
|
|
+ mvx_mmu_pte *l1 = mmu->page_table;
|
|
+
|
|
+ for (i = 0; i < MVE_INDEX_SIZE; i++) {
|
|
+ phys_addr_t pa = get_pa(l1[i]);
|
|
+ unsigned int j;
|
|
+
|
|
+ if (pa != 0) {
|
|
+ mvx_mmu_pte *l2 = phys_to_virt(phys_vpu2cpu(pa));
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "%-4u: PA=0x%llx, ATTR=%u, ACC=%u",
|
|
+ i, pa, get_attr(l1[i]), get_ap(l1[i]));
|
|
+
|
|
+ for (j = 0; j < MVE_INDEX_SIZE; j++) {
|
|
+ pa = get_pa(l2[j]);
|
|
+ if (pa != 0) {
|
|
+ mvx_mmu_va va;
|
|
+
|
|
+ va = (i << (MVE_INDEX_SHIFT +
|
|
+ MVE_PAGE_SHIFT)) |
|
|
+ (j << MVE_PAGE_SHIFT);
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "|------ %-4u: VA=0x%08x, PA=0x%llx, ATTR=%u, ACC=%u",
|
|
+ j,
|
|
+ va,
|
|
+ pa,
|
|
+ get_attr(l2[j]),
|
|
+ get_ap(l2[j]));
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/* LCOV_EXCL_STOP */
|
|
+
|
|
+int mvx_mmu_pages_debugfs_init(struct mvx_mmu_pages *pages,
|
|
+ char *name,
|
|
+ struct dentry *parent)
|
|
+{
|
|
+ struct dentry *dpages;
|
|
+ struct dentry *dentry;
|
|
+
|
|
+ dpages = debugfs_create_dir(name, parent);
|
|
+ if (IS_ERR_OR_NULL(dpages))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ dentry = debugfs_create_file("stat", 0400, dpages, pages,
|
|
+ &stat_fops);
|
|
+ if (IS_ERR_OR_NULL(dentry))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ dentry = debugfs_create_file("list", 0400, dpages, pages,
|
|
+ &list_fops);
|
|
+ if (IS_ERR_OR_NULL(dentry))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+unsigned long phys_vpu2cpu(unsigned long phys_addr)
|
|
+{
|
|
+ if (phys_addr >= 0x80000000UL) {
|
|
+ phys_addr += 0x80000000UL;
|
|
+ }
|
|
+ return phys_addr;
|
|
+}
|
|
+
|
|
+unsigned long phys_cpu2vpu(unsigned long phys_addr)
|
|
+{
|
|
+ if (phys_addr >= 0x100000000UL) {
|
|
+ phys_addr -= 0x80000000UL;
|
|
+ }
|
|
+ return phys_addr;
|
|
+}
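+/*
+ * Translation sketch (derived from the two helpers above and the address
+ * map in mvx_mmu.h): CPU physical addresses below 0x80000000 pass
+ * through unchanged, while DRAM above the 4 GiB CPU alias is seen by the
+ * VPU starting at 0x80000000. For example, CPU PA 0x100000000 maps to
+ * VPU PA 0x80000000 and back.
+ */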
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_mmu.h b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_mmu.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_mmu.h
|
|
@@ -0,0 +1,492 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_MMU_H_
|
|
+#define _MVX_MMU_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <linux/hashtable.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+/* Page shift: 2^12 = 4 KiB pages. */
|
|
+#define MVE_PAGE_SHIFT 12
|
|
+#define MVE_PAGE_SIZE (1 << MVE_PAGE_SHIFT)
|
|
+#define MVE_PAGE_MASK (MVE_PAGE_SIZE - 1)
|
|
+
|
|
+/* Number of page table entries per page. */
|
|
+#define MVE_PAGE_PTE_PER_PAGE (MVE_PAGE_SIZE / sizeof(mvx_mmu_pte))
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct device;
|
|
+struct dma_buf;
|
|
+struct mvx_mmu;
|
|
+struct page;
|
|
+
|
|
+/**
|
|
+ * typedef mvx_mmu_va - 32 bit virtual address.
|
|
+ *
|
|
+ * This is the address the firmware/MVE will use.
|
|
+ */
|
|
+typedef uint32_t mvx_mmu_va;
|
|
+
|
|
+/**
|
|
+ * typedef mvx_mmu_pte - Page table entry.
|
|
+ *
|
|
+ * A PTE pointer should always point at a Linux kernel virtual address.
|
|
+ *
|
|
+ * AT - Attribute.
|
|
+ * PA - Physical address.
|
|
+ * AP - Access permission.
|
|
+ *
|
|
+ * 30 2 0
|
|
+ * +---+-------------------------------------------------------+---+
|
|
+ * | AT| PA 39:12 | AP|
|
|
+ * +---+-------------------------------------------------------+---+
|
|
+ */
|
|
+typedef uint32_t mvx_mmu_pte;
|
|
+
|
|
+enum mvx_mmu_attr {
|
|
+ MVX_ATTR_PRIVATE = 0,
|
|
+ MVX_ATTR_REFFRAME = 1,
|
|
+ MVX_ATTR_SHARED_RO = 2,
|
|
+ MVX_ATTR_SHARED_RW = 3
|
|
+};
|
|
+
|
|
+enum mvx_mmu_access {
|
|
+ MVX_ACCESS_NO = 0,
|
|
+ MVX_ACCESS_READ_ONLY = 1,
|
|
+ MVX_ACCESS_EXECUTABLE = 2,
|
|
+ MVX_ACCESS_READ_WRITE = 3
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_mmu_pages - Structure used to allocate an array of pages.
|
|
+ * @dev: Pointer to device.
|
|
+ * @node: Hash table node. Used to keep track of allocated pages objects.
|
|
+ * @mmu: Pointer to MMU instance.
|
|
+ * @va: MVE virtual address. Set to 0 if the object is unmapped.
|
|
+ * @offset: Offset from mapped VA to where the data begins.
|
|
+ * @attr: Page table attributes.
|
|
+ * @access: Page table access.
|
|
+ * @capacity: Maximum number of MVE pages this object can hold.
|
|
+ * @count: Current number of allocated pages.
|
|
+ * @is_external: If the physical pages have been externally allocated.
|
|
+ * @dmabuf: List of DMA buffers.
|
|
+ * @pages: Array of pages.
|
|
+ */
|
|
+struct mvx_mmu_pages {
|
|
+ struct device *dev;
|
|
+ struct hlist_node node;
|
|
+ struct mvx_mmu *mmu;
|
|
+ mvx_mmu_va va;
|
|
+ size_t offset;
|
|
+ enum mvx_mmu_attr attr;
|
|
+ enum mvx_mmu_access access;
|
|
+ size_t capacity;
|
|
+ size_t count;
|
|
+ bool is_external;
|
|
+ struct list_head dmabuf;
|
|
+ phys_addr_t pages[0];
|
|
+};
|
|
+
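+/*
+ * Note (assumption based on the 'capacity' field and the flexible
+ * 'pages[0]' array above, and on mvx_mmu_free_pages() releasing the
+ * object with vfree()): the header and the page array presumably share
+ * one vmalloc()-style allocation of roughly
+ *
+ *   sizeof(struct mvx_mmu_pages) + capacity * sizeof(phys_addr_t)
+ */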
|
|
+/**
|
|
+ * struct mvx_mmu - MMU context.
|
|
+ * @dev: Pointer to device.
|
|
+ * @page_table: Virtual address to L1 page.
|
|
+ * @l2_page_is_external: Bitmap of which L2 pages that have been mapped
|
|
+ * externally.
|
|
+ */
|
|
+struct mvx_mmu {
|
|
+ struct device *dev;
|
|
+ mvx_mmu_pte *page_table;
|
|
+ DECLARE_BITMAP(l2_page_is_external, MVE_PAGE_PTE_PER_PAGE);
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+#ifndef phys_to_page
|
|
+
|
|
+/**
|
|
+ * phys_to_page() - Convert a physical address to a pointer to a page.
|
|
+ * @pa: Physical address.
|
|
+ *
|
|
+ * Return: Pointer to page struct.
|
|
+ */
|
|
+static inline struct page *phys_to_page(unsigned long pa)
|
|
+{
|
|
+ return pfn_to_page(__phys_to_pfn(pa));
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_construct() - Construct the MMU object.
|
|
+ * @mmu: Pointer to MMU object.
|
|
+ * @dev: Pointer to device.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_mmu_construct(struct mvx_mmu *mmu,
|
|
+ struct device *dev);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_destruct() - Destroy the MMU object.
|
|
+ * @mmu: Pointer to MMU object.
|
|
+ */
|
|
+void mvx_mmu_destruct(struct mvx_mmu *mmu);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_alloc_contiguous_pages() - Allocate contiguous pages.
|
|
+ * @dev: Pointer to device.
+ * @npages: Number of pages to allocate.
|
|
+ * Return: Physical page address on success, else 0.
|
|
+ */
|
|
+phys_addr_t mvx_mmu_alloc_contiguous_pages(struct device *dev, size_t npages);
|
|
+
|
|
+/*
|
|
+ * mvx_mmu_free_contiguous_pages() - Free contiguous pages.
|
|
+ *
|
|
+ * @dev: Pointer to device.
+ * @pa: Physical page address or 0.
+ * @npages: Number of pages to free.
|
|
+ */
|
|
+void mvx_mmu_free_contiguous_pages(struct device *dev, phys_addr_t pa, size_t npages);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_alloc_page() - Allocate one page.
|
|
+ * @dev: Pointer to device.
|
|
+ *
|
|
+ * Return: Physical page address on success, else 0.
|
|
+ */
|
|
+phys_addr_t mvx_mmu_alloc_page(struct device *dev);
|
|
+
|
|
+/*
|
|
+ * mvx_mmu_free_page() - Free one page.
|
|
+ *
|
|
+ * @dev: Pointer to device.
+ * @pa: Physical page address or 0.
|
|
+ */
|
|
+void mvx_mmu_free_page(struct device *dev,
|
|
+ phys_addr_t pa);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_dma_alloc_coherent() - Allocate one page for non-cacheable attribute.
|
|
+ *
|
|
+ * @dev: Pointer to device.
+ * @data: Virtual address for CPU access (output).
|
|
+ * Return: Physical page address or 0.
|
|
+ */
|
|
+phys_addr_t mvx_mmu_dma_alloc_coherent(struct device *dev, void** data);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_dma_free_coherent() - Free one page for non-cacheable attribute.
|
|
+ *
|
|
+ * @dev: Pointer to device.
+ * @pa: Physical page address or 0.
+ * @data: Virtual address for CPU access.
|
|
+ */
|
|
+void mvx_mmu_dma_free_coherent(struct device *dev, phys_addr_t pa, void* data);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_alloc_pages() - Allocate array of pages.
|
|
+ * @dev: Pointer to device.
|
|
+ * @npages: Number of pages to allocate.
|
|
+ * @capacity: Maximum number of pages this allocation can be resized
|
|
+ * to. If this value is 0 or smaller than npages, then it will be
|
|
+ * set to npages.
|
|
+ *
|
|
+ * Pages are not guaranteed to be physically contiguous.
|
|
+ *
|
|
+ * Return: Valid pointer on success, else ERR_PTR.
|
|
+ */
|
|
+struct mvx_mmu_pages *mvx_mmu_alloc_pages(struct device *dev,
|
|
+ size_t npages,
|
|
+ size_t capacity);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_alloc_pages_sg() - Allocate array of pages from SG table.
|
|
+ * @dev: Pointer to device.
|
|
+ * @sgt: Scatter-gather table with pre-allocated memory pages.
|
|
+ * @capacity: Maximum number of pages this allocation can be resized
|
|
+ * to. If this value is 0 or smaller than number of pages
|
|
+ * in the scatter gather table, then it will be rounded up
+ * to the SG table size.
|
|
+ *
|
|
+ * Pages are not guaranteed to be physically contiguous.
|
|
+ *
|
|
+ * Return: Valid pointer on success, else ERR_PTR.
|
|
+ */
|
|
+struct mvx_mmu_pages *mvx_mmu_alloc_pages_sg(struct device *dev,
|
|
+ struct sg_table *sgt,
|
|
+ size_t capacity);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_alloc_pages_dma_buf() - Allocate pages object from DMA buffer.
|
|
+ * @dev: Pointer to device.
|
|
+ * @dmabuf: Pointer to DMA buffer.
|
|
+ * @capacity: Maximum number of pages this allocation can be resized
|
|
+ * to. If this value is 0 or smaller than number of pages
|
|
+ * in DMA buffer, then it will be rounded up to DMA buffer
|
|
+ * size.
|
|
+ *
|
|
+ * The pages object will take ownership of the DMA buffer and call
|
|
+ * dma_buf_put() when the pages object is destructed.
|
|
+ *
|
|
+ * Return: Valid pointer on success, else ERR_PTR.
|
|
+ */
|
|
+struct mvx_mmu_pages *mvx_mmu_alloc_pages_dma_buf(struct device *dev,
|
|
+ struct dma_buf *dmabuf,
|
|
+ size_t capacity);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_pages_append_dma_buf() - Append DMA buffer to pages object.
|
|
+ * @pages: Pointer to pages object.
|
|
+ * @dmabuf: Pointer to DMA buffer.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_mmu_pages_append_dma_buf(struct mvx_mmu_pages *pages,
|
|
+ struct dma_buf *dmabuf);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_resize_pages() - Resize the page allocation.
|
|
+ * @pages: Pointer to pages object.
|
|
+ * @npages: Number of pages to allocate.
|
|
+ *
|
|
+ * If the number of pages is smaller, then pages will be freed.
|
|
+ *
|
|
+ * If the number of pages is larger, then additional memory will be allocated.
|
|
+ * The already allocated pages will keep their physical addresses.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_mmu_resize_pages(struct mvx_mmu_pages *pages,
|
|
+ size_t npages);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_free_pages() - Free pages.
|
|
+ * @pages: Pointer to pages object.
|
|
+ */
|
|
+void mvx_mmu_free_pages(struct mvx_mmu_pages *pages);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_size_pages() - Get number of allocated bytes.
|
|
+ * @pages: Pointer to pages object.
|
|
+ *
|
|
+ * Return: Size in bytes of pages.
|
|
+ */
|
|
+size_t mvx_mmu_size_pages(struct mvx_mmu_pages *pages);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_synch_pages() - Sync data caches for a pages object.
+ * @pages: Pointer to pages object.
+ * @dir: Which direction to sync.
+ * @page_off: Index of the first page to sync.
+ * @page_count: Number of pages to sync.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_mmu_synch_pages(struct mvx_mmu_pages *pages,
|
|
+ enum dma_data_direction dir, int page_off, int page_count);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_map_pages() - Map an array of pages to a virtual address.
|
|
+ * @mmu: Pointer to MMU object.
|
|
+ * @va: Virtual address.
|
|
+ * @pages: Pointer to pages object.
|
|
+ * @attr: Bus attributes.
|
|
+ * @access: Access permission.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_mmu_map_pages(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ struct mvx_mmu_pages *pages,
|
|
+ enum mvx_mmu_attr attr,
|
|
+ enum mvx_mmu_access access);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_unmap_pages() - Unmap pages object.
|
|
+ * @pages: Pointer to pages object.
|
|
+ */
|
|
+void mvx_mmu_unmap_pages(struct mvx_mmu_pages *pages);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_map_pa() - Map a physical- to a virtual address.
|
|
+ * @mmu: Pointer to MMU object.
|
|
+ * @va: Virtual address.
|
|
+ * @pa: Physical address.
|
|
+ * @size: Size of area to map.
|
|
+ * @attr: Bus attributes.
|
|
+ * @access: Access permission.
|
|
+ *
|
|
+ * Both the VA and PA must be page aligned.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_mmu_map_pa(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ phys_addr_t pa,
|
|
+ size_t size,
|
|
+ enum mvx_mmu_attr attr,
|
|
+ enum mvx_mmu_access access);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_map_l2() - Map a L2 page.
|
|
+ * @mmu: Pointer to MMU object.
|
|
+ * @va: Virtual address.
|
|
+ * @pa: Physical address.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_mmu_map_l2(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ phys_addr_t pa);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_unmap_va() - Unmap a virtual address range.
|
|
+ * @mmu: Pointer to MMU object.
|
|
+ * @va: Virtual address.
|
|
+ * @size: Size of area to unmap.
|
|
+ */
|
|
+void mvx_mmu_unmap_va(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ size_t size);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_va_to_pa() - Map a virtual- to a physical address.
|
|
+ * @mmu: Pointer to MMU object.
|
|
+ * @va: Virtual address.
|
|
+ * @pa: Pointer to physical address.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_mmu_va_to_pa(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ phys_addr_t *pa);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_read() - Read size bytes from virtual address.
|
|
+ * @mmu: Pointer to MMU object.
|
|
+ * @va: Source virtual address.
|
|
+ * @data: Pointer to destination data.
|
|
+ * @size: Number of bytes to copy.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_mmu_read(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ void *data,
|
|
+ size_t size);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_write() - Write size bytes to virtual address.
|
|
+ * @mmu: Pointer to MMU object.
|
|
+ * @va: Destination virtual address.
|
|
+ * @data: Pointer to source data.
|
|
+ * @size: Number of bytes to copy.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_mmu_write(struct mvx_mmu *mmu,
|
|
+ mvx_mmu_va va,
|
|
+ const void *data,
|
|
+ size_t size);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_set_pte() - Construct PTE and return PTE value.
|
|
+ * @attr: Bus attributes.
|
|
+ * @pa: Physical address.
|
|
+ * @access: Access permission.
|
|
+ *
|
|
+ * Return: Page table entry.
|
|
+ */
|
|
+mvx_mmu_pte mvx_mmu_set_pte(enum mvx_mmu_attr attr,
|
|
+ phys_addr_t pa,
|
|
+ enum mvx_mmu_access access);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_print() - Print the MMU table.
|
|
+ * @mmu: Pointer to MMU object.
|
|
+ */
|
|
+void mvx_mmu_print(struct mvx_mmu *mmu);
|
|
+
|
|
+/**
|
|
+ * mvx_mmu_pages_debugfs_init() - Init debugfs entry.
|
|
+ * @pages: Pointer to MMU pages.
|
|
+ * @name: Name of debugfs entry.
|
|
+ * @parent: Parent debugfs entry.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_mmu_pages_debugfs_init(struct mvx_mmu_pages *pages,
|
|
+ char *name,
|
|
+ struct dentry *parent);
|
|
+
|
|
+/***
|
|
+ * CPU
|
|
+ * 0x04 8000 0000 +--> +----------+
|
|
+ * | | |
|
|
+ * DEVICE DRAM | | |
|
|
+ * 0x04 0000 0000+---------+ <------+ +----------+ +---+ | |
|
|
+ * | | | | | |
|
|
+ * | | | | | DDR |
|
|
+ * | | | | | |
|
|
+ * | | | | | |
|
|
+ * | | | | | |
|
|
+ * 0x01 0000 0000| | | | +--> +----------+
|
|
+ * | | | | | | |
|
|
+ * | | | | | | IO |
|
|
+ * 0x00 8000 0000+---------+ <------+ +----------+ +---+--> +----------+
|
|
+ * | | | | | |
|
|
+ * | | | | | DDR |
|
|
+ * 0x00 0000 0000+---------+ <------+ +----------+ +------> +----------+
|
|
+ *
|
|
+ */
|
|
+unsigned long phys_vpu2cpu(unsigned long phys_addr);
|
|
+unsigned long phys_cpu2vpu(unsigned long phys_addr);
|
|
+
|
|
+#endif /* _MVX_MMU_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_secure.c b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_secure.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_secure.c
|
|
@@ -0,0 +1,425 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/device.h>
|
|
+#include <linux/dma-buf.h>
|
|
+#include <linux/kobject.h>
|
|
+#include <linux/wait.h>
|
|
+#include <linux/workqueue.h>
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx_secure.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+#pragma pack(push, 1)
|
|
+struct secure_firmware_desc {
|
|
+ int32_t fd;
|
|
+ uint32_t l2pages;
|
|
+ struct {
|
|
+ uint32_t major;
|
|
+ uint32_t minor;
|
|
+ } protocol;
|
|
+};
|
|
+#pragma pack(pop)
|
|
+
|
|
+struct mvx_secure_firmware_priv {
|
|
+ struct device *dev;
|
|
+ struct kobject kobj;
|
|
+ struct work_struct work;
|
|
+ wait_queue_head_t wait_queue;
|
|
+ struct mvx_secure_firmware fw;
|
|
+ mvx_secure_firmware_done done;
|
|
+ void *done_arg;
|
|
+};
|
|
+
|
|
+struct mvx_secure_mem {
|
|
+ struct device *dev;
|
|
+ struct kobject kobj;
|
|
+ wait_queue_head_t wait_queue;
|
|
+ struct dma_buf *dmabuf;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Secure
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_secure_construct(struct mvx_secure *secure,
|
|
+ struct device *dev)
|
|
+{
|
|
+ secure->dev = dev;
|
|
+#ifndef MODULE
|
|
+ secure->kset = kset_create_and_add("securevideo", NULL, &dev->kobj);
|
|
+ if (secure->kset == NULL) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to create securevideo kset.");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+#endif
|
|
+ secure->workqueue = alloc_workqueue("mvx_securevideo",
|
|
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
|
|
+ if (secure->workqueue == NULL) {
|
|
+#ifndef MODULE
|
|
+ kset_unregister(secure->kset);
|
|
+#endif
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mvx_secure_destruct(struct mvx_secure *secure)
|
|
+{
|
|
+ destroy_workqueue(secure->workqueue);
|
|
+#ifndef MODULE
|
|
+ kset_unregister(secure->kset);
|
|
+#endif
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Secure firmware
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * firmware_store() - Firmware sysfs store function.
|
|
+ *
|
|
+ * Store values from firmware descriptor, get the DMA handle and wake up any
|
|
+ * waiting process.
|
|
+ */
|
|
+static ssize_t firmware_store(struct kobject *kobj,
|
|
+ struct kobj_attribute *attr,
|
|
+ const char *buf,
|
|
+ size_t size)
|
|
+{
|
|
+ struct mvx_secure_firmware_priv *securefw =
|
|
+ container_of(kobj, struct mvx_secure_firmware_priv, kobj);
|
|
+ const struct secure_firmware_desc *desc = (const void *)buf;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "Loaded secure firmware. fd=%d, l2=0x%llx, major=%u, minor=%u.",
|
|
+ desc->fd, desc->l2pages, desc->protocol.major,
|
|
+ desc->protocol.minor);
|
|
+
|
|
+ securefw->fw.l2pages = desc->l2pages;
|
|
+ securefw->fw.protocol.major = desc->protocol.major;
|
|
+ securefw->fw.protocol.minor = desc->protocol.minor;
|
|
+ securefw->fw.dmabuf = dma_buf_get(desc->fd);
|
|
+ if (IS_ERR_OR_NULL(securefw->fw.dmabuf))
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to get DMA buffer from fd. fd=%d.",
|
|
+ desc->fd);
|
|
+
|
|
+ wake_up_interruptible(&securefw->wait_queue);
|
|
+
|
|
+ return size;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * secure_firmware_release() - Release secure firmware.
|
|
+ * @kobj: Pointer to kobject.
|
|
+ */
|
|
+static void secure_firmware_release(struct kobject *kobj)
|
|
+{
|
|
+ struct mvx_secure_firmware_priv *securefw =
|
|
+ container_of(kobj, struct mvx_secure_firmware_priv, kobj);
|
|
+
|
|
+ if (IS_ERR_OR_NULL(securefw->fw.dmabuf) == false)
|
|
+ dma_buf_put(securefw->fw.dmabuf);
|
|
+
|
|
+ devm_kfree(securefw->dev, securefw);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * secure_firmware_wait() - Wait for firmware load.
|
|
+ * @work: Pointer to work member in mvx_secure_firmware_priv.
|
|
+ *
|
|
+ * Worker thread used to wait for a secure firmware load to complete.
|
|
+ */
|
|
+static void secure_firmware_wait(struct work_struct *work)
|
|
+{
|
|
+ struct mvx_secure_firmware_priv *securefw =
|
|
+ container_of(work, struct mvx_secure_firmware_priv, work);
|
|
+ int ret;
|
|
+
|
|
+ ret = wait_event_interruptible_timeout(securefw->wait_queue,
|
|
+ securefw->fw.dmabuf != NULL,
|
|
+ msecs_to_jiffies(10000));
|
|
+ if (ret == 0)
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Firmware load timed out.");
|
|
+
|
|
+ kobject_del(&securefw->kobj);
|
|
+
|
|
+ if (securefw->done != NULL)
|
|
+ securefw->done(&securefw->fw, securefw->done_arg);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * secure_firmware_create() - Create a secure firmware object.
|
|
+ * @secure: Pointer to secure context.
|
|
+ * @name: Name for secure firmware binary.
|
|
+ * @ncores: Number of cores to setup.
|
|
+ * @arg: User argument to callback routine.
|
|
+ * @done: Firmware load callback routine.
|
|
+ *
|
|
+ * Return: Valid pointer on success, else ERR_PTR.
|
|
+ */
|
|
+static struct mvx_secure_firmware_priv *secure_firmware_create(
|
|
+ struct mvx_secure *secure,
|
|
+ const char *name,
|
|
+ unsigned int ncores,
|
|
+ void *arg,
|
|
+ mvx_secure_firmware_done done)
|
|
+{
|
|
+ static struct kobj_attribute attr = __ATTR_WO(firmware);
|
|
+ static struct attribute *attrs[] = {
|
|
+ &attr.attr,
|
|
+ NULL
|
|
+ };
|
|
+ static const struct attribute_group secure_group = {
|
|
+ .name = "",
|
|
+ .attrs = attrs
|
|
+ };
|
|
+
|
|
+ static const struct attribute_group *secure_groups[] = {
|
|
+ &secure_group,
|
|
+ NULL
|
|
+ };
|
|
+ static struct kobj_type secure_ktype = {
|
|
+ .sysfs_ops = &kobj_sysfs_ops,
|
|
+ .release = secure_firmware_release,
|
|
+ .default_groups = secure_groups
|
|
+ };
|
|
+ struct mvx_secure_firmware_priv *securefw;
|
|
+ char numcores_env[32];
|
|
+ char fw_env[140];
|
|
+ char *env[] = { "TYPE=firmware", numcores_env, fw_env, NULL };
|
|
+ size_t n;
|
|
+ int ret;
|
|
+
|
|
+ n = snprintf(fw_env, sizeof(fw_env), "FIRMWARE=%s.enc", name);
|
|
+ if (n >= sizeof(fw_env))
|
|
+ return ERR_PTR(-EINVAL);
|
|
+
|
|
+ n = snprintf(numcores_env, sizeof(numcores_env), "NUMCORES=%u", ncores);
|
|
+ if (n >= sizeof(numcores_env))
|
|
+ return ERR_PTR(-EINVAL);
|
|
+
|
|
+ /* Allocate and initialize the secure firmware object. */
|
|
+ securefw = devm_kzalloc(secure->dev, sizeof(*securefw), GFP_KERNEL);
|
|
+ if (securefw == NULL)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ securefw->dev = secure->dev;
|
|
+ securefw->kobj.kset = secure->kset;
|
|
+ securefw->fw.ncores = ncores;
|
|
+ securefw->done = done;
|
|
+ securefw->done_arg = arg;
|
|
+ init_waitqueue_head(&securefw->wait_queue);
|
|
+
|
|
+ /* Create kobject that the user space helper can interact with. */
|
|
+ ret = kobject_init_and_add(&securefw->kobj, &secure_ktype, NULL, "%p",
|
|
+ securefw);
|
|
+ if (ret != 0)
|
|
+ goto put_kobject;
|
|
+
|
|
+ /* Notify user space helper about the secure firmware load. */
|
|
+ ret = kobject_uevent_env(&securefw->kobj, KOBJ_ADD, env);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to send secure firmware uevent. ret=%d.",
|
|
+ ret);
|
|
+ goto put_kobject;
|
|
+ }
|
|
+
|
|
+ return securefw;
|
|
+
|
|
+put_kobject:
|
|
+ kobject_put(&securefw->kobj);
|
|
+ devm_kfree(secure->dev, securefw);
|
|
+
|
|
+ return ERR_PTR(ret);
|
|
+}
|
|
+
|
|
+int mvx_secure_request_firmware_nowait(struct mvx_secure *secure,
|
|
+ const char *name,
|
|
+ unsigned int ncores,
|
|
+ void *arg,
|
|
+ mvx_secure_firmware_done done)
|
|
+{
|
|
+ struct mvx_secure_firmware_priv *securefw;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "Request secure firmware load nowait. firmware=%s.enc.",
|
|
+ name);
|
|
+
|
|
+ securefw = secure_firmware_create(secure, name, ncores, arg, done);
|
|
+ if (IS_ERR(securefw))
|
|
+ return PTR_ERR(securefw);
|
|
+
|
|
+ INIT_WORK(&securefw->work, secure_firmware_wait);
|
|
+ queue_work(secure->workqueue, &securefw->work);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
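+/*
+ * Caller sketch (illustrative only; the firmware name, 'ctx' and the
+ * callback are hypothetical): the load completes asynchronously and the
+ * result is delivered through the mvx_secure_firmware_done callback.
+ *
+ *   static void fw_done(struct mvx_secure_firmware *fw, void *arg)
+ *   {
+ *           if (IS_ERR_OR_NULL(fw->dmabuf))
+ *                   return;         (load failed or timed out)
+ *   }
+ *
+ *   ret = mvx_secure_request_firmware_nowait(secure, "h264dec", ncores,
+ *                                            ctx, fw_done);
+ */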
|
|
+void mvx_secure_release_firmware(struct mvx_secure_firmware *securefw)
|
|
+{
|
|
+ struct mvx_secure_firmware_priv *sfw =
|
|
+ container_of(securefw, struct mvx_secure_firmware_priv, fw);
|
|
+
|
|
+ kobject_put(&sfw->kobj);
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Secure memory
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * secure_mem_release() - Release the secure memory object.
+ * @kobj: Pointer to the embedded kobject.
|
|
+ */
|
|
+static void secure_mem_release(struct kobject *kobj)
|
|
+{
|
|
+ struct mvx_secure_mem *smem =
|
|
+ container_of(kobj, struct mvx_secure_mem, kobj);
|
|
+
|
|
+ devm_kfree(smem->dev, smem);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * memory_store() - Memory sysfs store function.
|
|
+ *
|
|
+ * Store values from memory descriptor, get the DMA handle and wake up any
|
|
+ * waiting process.
|
|
+ */
|
|
+static ssize_t memory_store(struct kobject *kobj,
|
|
+ struct kobj_attribute *attr,
|
|
+ const char *buf,
|
|
+ size_t size)
|
|
+{
|
|
+ struct mvx_secure_mem *smem =
|
|
+ container_of(kobj, struct mvx_secure_mem, kobj);
|
|
+ const int32_t *fd = (const int32_t *)buf;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "Secure memory allocated. fd=%d.",
|
|
+ *fd);
|
|
+
|
|
+ smem->dmabuf = dma_buf_get(*fd);
|
|
+ if (IS_ERR_OR_NULL(smem->dmabuf))
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to get DMA buffer.");
|
|
+
|
|
+ wake_up_interruptible(&smem->wait_queue);
|
|
+
|
|
+ return size;
|
|
+}
|
|
+
|
|
+struct dma_buf *mvx_secure_mem_alloc(struct mvx_secure *secure,
|
|
+ size_t size)
|
|
+{
|
|
+ static struct kobj_attribute attr = __ATTR_WO(memory);
|
|
+ static struct attribute *attrs[] = {
|
|
+ &attr.attr,
|
|
+ NULL
|
|
+ };
|
|
+ static const struct attribute_group secure_mem_group = {
|
|
+ .name = "",
|
|
+ .attrs = attrs
|
|
+ };
|
|
+
|
|
+ static const struct attribute_group *secure_mem_groups[] = {
|
|
+ &secure_mem_group,
|
|
+ NULL
|
|
+ };
|
|
+ static struct kobj_type secure_mem_ktype = {
|
|
+ .release = secure_mem_release,
|
|
+ .sysfs_ops = &kobj_sysfs_ops,
|
|
+ .default_groups = secure_mem_groups
|
|
+ };
|
|
+ struct mvx_secure_mem *smem;
|
|
+ char size_env[32];
|
|
+ char *env[] = { "TYPE=memory", size_env, NULL };
|
|
+ struct dma_buf *dmabuf = ERR_PTR(-EINVAL);
|
|
+ size_t n;
|
|
+ int ret;
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
|
|
+ "Request secure memory. size=%zu.", size);
|
|
+
|
|
+ n = snprintf(size_env, sizeof(size_env), "SIZE=%zu", size);
|
|
+ if (n >= sizeof(size_env))
|
|
+ return ERR_PTR(-EINVAL);
|
|
+
|
|
+ smem = devm_kzalloc(secure->dev, sizeof(*smem), GFP_KERNEL);
|
|
+ if (smem == NULL)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ smem->dev = secure->dev;
|
|
+ smem->kobj.kset = secure->kset;
|
|
+ init_waitqueue_head(&smem->wait_queue);
|
|
+
|
|
+ /* Create kobject that the user space helper can interact with. */
|
|
+ ret = kobject_init_and_add(&smem->kobj, &secure_mem_ktype, NULL, "%p",
|
|
+ &smem->kobj);
|
|
+ if (ret != 0)
|
|
+ goto put_kobject;
|
|
+
|
|
+ /* Notify user space helper about the secure memory allocation. */
|
|
+ ret = kobject_uevent_env(&smem->kobj, KOBJ_ADD, env);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to send secure memory uevent. ret=%d.",
|
|
+ ret);
|
|
+ goto put_kobject;
|
|
+ }
|
|
+
|
|
+ ret = wait_event_interruptible_timeout(smem->wait_queue,
|
|
+ smem->dmabuf != NULL,
|
|
+ msecs_to_jiffies(1000));
|
|
+ if (ret == 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Secure memory allocation timed out.");
|
|
+ goto put_kobject;
|
|
+ }
|
|
+
|
|
+ dmabuf = smem->dmabuf;
|
|
+
|
|
+put_kobject:
|
|
+ kobject_put(&smem->kobj);
|
|
+
|
|
+ return dmabuf;
|
|
+}
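+/*
+ * Caller sketch (illustrative only): the returned buffer must be
+ * released with dma_buf_put() once it is no longer needed.
+ *
+ *   struct dma_buf *buf = mvx_secure_mem_alloc(secure, size);
+ *
+ *   if (IS_ERR_OR_NULL(buf))
+ *           return -ENOMEM;
+ *   ...
+ *   dma_buf_put(buf);
+ */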
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_secure.h b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_secure.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_secure.h
|
|
@@ -0,0 +1,139 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_SECURE_H_
|
|
+#define _MVX_SECURE_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/types.h>
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct device;
|
|
+struct dma_buf;
|
|
+struct kset;
|
|
+struct mvx_secure_firmware;
|
|
+struct workqueue_struct;
|
|
+
|
|
+/**
|
|
+ * struct mvx_secure - Secure video.
|
|
+ * @dev: Pointer to device.
|
|
+ * @kset: Kset that allows uevents to be sent.
|
|
+ * @workqueue: Work queue used to wait for firmware load.
|
|
+ */
|
|
+struct mvx_secure {
|
|
+ struct device *dev;
|
|
+ struct kset *kset;
|
|
+ struct workqueue_struct *workqueue;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * typedef mvx_secure_firmware_done - Firmware load callback.
|
|
+ */
|
|
+typedef void (*mvx_secure_firmware_done)(struct mvx_secure_firmware *,
|
|
+ void *arg);
|
|
+
|
|
+/**
|
|
+ * struct mvx_secure_firmware - Secure firmware.
|
|
+ * @dmabuf: Pointer to DMA buffer.
|
|
+ * @l2pages: Array of L2 pages. One per core.
|
|
+ * @ncores: Maximum number of cores.
|
|
+ * @major: Firmware protocol major version.
|
|
+ * @minor: Firmware protocol minor version.
|
|
+ */
|
|
+struct mvx_secure_firmware {
|
|
+ struct dma_buf *dmabuf;
|
|
+ phys_addr_t l2pages;
|
|
+ unsigned int ncores;
|
|
+ struct {
|
|
+ unsigned int major;
|
|
+ unsigned int minor;
|
|
+ } protocol;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_secure_construct() - Construct the secure object.
|
|
+ * @secure: Pointer to secure object.
|
|
+ * @dev: Pointer to device.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_secure_construct(struct mvx_secure *secure,
|
|
+ struct device *dev);
|
|
+
|
|
+/**
|
|
+ * mvx_secure_destruct() - Destruct the secure object.
|
|
+ * @secure: Pointer to secure object.
|
|
+ */
|
|
+void mvx_secure_destruct(struct mvx_secure *secure);
|
|
+
|
|
+/**
|
|
+ * mvx_secure_request_firmware_nowait() - Request secure firmware.
|
|
+ * @secure: Pointer to secure object.
|
|
+ * @name: Name of firmware binary.
|
|
+ * @ncores: Number of cores to setup.
|
|
+ * @arg: Callback argument.
|
|
+ * @done: Done callback.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_secure_request_firmware_nowait(struct mvx_secure *secure,
|
|
+ const char *name,
|
|
+ unsigned int ncores,
|
|
+ void *arg,
|
|
+ mvx_secure_firmware_done done);
|
|
+
|
|
+/**
|
|
+ * mvx_secure_release_firmware() - Release secure firmware.
|
|
+ * @securefw: Pointer to secure firmware.
|
|
+ */
|
|
+void mvx_secure_release_firmware(struct mvx_secure_firmware *securefw);
|
|
+
|
|
+/**
|
|
+ * mvx_secure_mem_alloc() - Secure memory allocation.
|
|
+ * @secure: Pointer to secure object.
|
|
+ * @size: Size in bytes to allocate.
|
|
+ *
|
|
+ * Return: Valid pointer on success, else ERR_PTR.
|
|
+ */
|
|
+struct dma_buf *mvx_secure_mem_alloc(struct mvx_secure *secure,
|
|
+ size_t size);
|
|
+
|
|
+#endif /* _MVX_SECURE_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_session.c b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_session.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_session.c
|
|
@@ -0,0 +1,3635 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/poll.h>
|
|
+#include <linux/sched.h>
|
|
+#include <linux/version.h>
|
|
+
|
|
+#include "mvx-v4l2-controls.h"
|
|
+#include "mvx_bitops.h"
|
|
+#include "mvx_firmware.h"
|
|
+#include "mvx_firmware_cache.h"
|
|
+#include "mvx_session.h"
|
|
+#include "mvx_seq.h"
|
|
+#include "mvx_dvfs.h"
|
|
+#include "mvx_v4l2_session.h"
|
|
+#include "mvx_mmu.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Private variables
|
|
+ ****************************************************************************/
|
|
+
|
|
+int session_wait_pending_timeout = 10000; //10s
|
|
+int session_wait_flush_timeout = 2000; //2s
|
|
+
|
|
+int session_watchdog_timeout = 30000; //30s
|
|
+module_param(session_watchdog_timeout, int, 0660);
|
|
+
|
|
+static int fw_watchdog_timeout;
|
|
+module_param(fw_watchdog_timeout, int, 0660);
|
|
+
|
|
+/****************************************************************************
|
|
+ * Private functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+static void watchdog_start(struct mvx_session *session,
|
|
+ unsigned int timeout_ms)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return;
|
|
+
|
|
+ MVX_SESSION_DEBUG(session, "Watchdog start. timeout_ms=%u.",
|
|
+ timeout_ms);
|
|
+
|
|
+ ret = mod_timer(&session->watchdog_timer,
|
|
+ jiffies + msecs_to_jiffies(timeout_ms));
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session, "Failed to start watchdog. ret=%d",
|
|
+ ret);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ kref_get(&session->isession.kref);
|
|
+}
|
|
+
|
|
+static void watchdog_stop(struct mvx_session *session)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = del_timer_sync(&session->watchdog_timer);
|
|
+
|
|
+ /* ret: 0=watchdog expired, 1=watchdog still running */
|
|
+ MVX_SESSION_DEBUG(session, "Watchdog stop. ret=%d", ret);
|
|
+ session->watchdog_count = 0;
|
|
+
|
|
+ /* Decrement the kref if the watchdog was still running. */
|
|
+ if (ret != 0)
|
|
+ kref_put(&session->isession.kref, session->isession.release);
|
|
+}
|
|
+
|
|
+static void watchdog_update(struct mvx_session *session,
|
|
+ unsigned int timeout_ms)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+#ifndef MODULE
|
|
+ ret = mod_timer_pending(&session->watchdog_timer,
|
|
+ jiffies + msecs_to_jiffies(timeout_ms));
|
|
+#else
|
|
+ if (timer_pending(&session->watchdog_timer))
|
|
+ ret = mod_timer(&session->watchdog_timer,
|
|
+ jiffies + msecs_to_jiffies(timeout_ms));
|
|
+#endif
|
|
+
|
|
+ /* ret: 0=no restart, 1=restarted */
|
|
+ MVX_SESSION_DEBUG(session, "Watchdog update. ret=%d, timeout_ms=%u.",
|
|
+ ret, timeout_ms);
|
|
+}
|
|
+
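+/*
+ * Pairing sketch (assumed from the kref handling above): every
+ * watchdog_start() takes a session reference that watchdog_stop() drops
+ * again if the timer had not already expired, while watchdog_update()
+ * only pushes an armed timer forward.
+ *
+ *   watchdog_start(session, session_watchdog_timeout);
+ *   ...
+ *   watchdog_update(session, session_watchdog_timeout);
+ *   ...
+ *   watchdog_stop(session);
+ */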
|
|
+static bool is_fw_loaded(struct mvx_session *session)
|
|
+{
|
|
+ return (IS_ERR_OR_NULL(session->fw_bin) == false);
|
|
+}
|
|
+
|
|
+static void print_debug(struct mvx_session *session)
|
|
+{
|
|
+ MVX_SESSION_INFO(session, "Print debug.");
|
|
+
|
|
+ if (session->csession != NULL)
|
|
+ session->client_ops->print_debug(session->csession);
|
|
+
|
|
+ if (is_fw_loaded(session))
|
|
+ session->fw.ops.print_debug(&session->fw);
|
|
+}
|
|
+
|
|
+static void send_event_error(struct mvx_session *session,
|
|
+ long error)
|
|
+{
|
|
+ session->error = error;
|
|
+ wake_up(&session->waitq);
|
|
+ MVX_SESSION_WARN(session, "send event error. error=%ld", error);
|
|
+ session->event(session, MVX_SESSION_EVENT_ERROR,
|
|
+ (void *)session->error);
|
|
+}
|
|
+
|
|
+static void session_unregister(struct mvx_session *session)
|
|
+{
|
|
+ if (!IS_ERR_OR_NULL(session->csession)) {
|
|
+ mvx_dvfs_unregister_session(session);
|
|
+ session->client_ops->unregister_session(session->csession);
|
|
+ session->csession = NULL;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void release_fw_bin(struct mvx_session *session)
|
|
+{
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ MVX_SESSION_INFO(session, "Release firmware binary.");
|
|
+
|
|
+ if (session->switched_in == true) {
|
|
+ if (IS_ERR_OR_NULL(session->csession)) {
|
|
+ MVX_SESSION_WARN(session, "release_fw_bin. csession is null. mvx_session:%p", session);
|
|
+ } else {
|
|
+ session->client_ops->wait_session_idle(session->csession);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ mvx_fw_destruct(&session->fw);
|
|
+ mvx_fw_cache_put(session->cache, session->fw_bin);
|
|
+ session->fw_bin = NULL;
|
|
+ }
|
|
+
|
|
+ watchdog_stop(session);
|
|
+ session_unregister(session);
|
|
+}
|
|
+
|
|
+static struct mvx_session *kref_to_session(struct kref *kref)
|
|
+{
|
|
+ return container_of(kref, struct mvx_session, isession.kref);
|
|
+}
|
|
+
|
|
+static void session_destructor(struct kref *kref)
|
|
+{
|
|
+ struct mvx_session *session = kref_to_session(kref);
|
|
+
|
|
+ session->destructor(session);
|
|
+}
|
|
+
|
|
+static const char *state_to_string(enum mvx_fw_state state)
|
|
+{
|
|
+ switch (state) {
|
|
+ case MVX_FW_STATE_STOPPED:
|
|
+ return "Stopped";
|
|
+ case MVX_FW_STATE_RUNNING:
|
|
+ return "Running";
|
|
+ default:
|
|
+ return "Unknown";
|
|
+ }
|
|
+}
|
|
+
|
|
+static enum mvx_direction get_bitstream_port(struct mvx_session *session)
|
|
+{
|
|
+ if (mvx_is_bitstream(session->port[MVX_DIR_INPUT].format) &&
|
|
+ mvx_is_frame(session->port[MVX_DIR_OUTPUT].format))
|
|
+ return MVX_DIR_INPUT;
|
|
+ else if (mvx_is_frame(session->port[MVX_DIR_INPUT].format) &&
|
|
+ mvx_is_bitstream(session->port[MVX_DIR_OUTPUT].format))
|
|
+ return MVX_DIR_OUTPUT;
|
|
+
|
|
+ return MVX_DIR_MAX;
|
|
+}
|
|
+
|
|
+static bool is_stream_on(struct mvx_session *session)
|
|
+{
|
|
+ return session->port[MVX_DIR_INPUT].stream_on &&
|
|
+ session->port[MVX_DIR_OUTPUT].stream_on;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * wait_pending() - Wait for procedure to finish.
|
|
+ *
|
|
+ * Wait for the number of pending firmware messages to reach 0, or for an error
|
|
+ * to happen.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int wait_pending(struct mvx_session *session)
|
|
+{
|
|
+ int ret = 0;
|
|
+ int count = 0;
|
|
+ struct timespec64 curtime;
|
|
+ uint64_t timestart;
|
|
+ uint64_t timeend;
|
|
+
|
|
+ while (is_fw_loaded(session) != false &&
|
|
+ session->fw.msg_pending > 0 &&
|
|
+ session->error == 0) {
|
|
+ mutex_unlock(session->isession.mutex);
|
|
+
|
|
+ count++;
|
|
+ ktime_get_boottime_ts64(&curtime);
|
|
+ timestart = curtime.tv_sec*1000*1000l + curtime.tv_nsec/1000l;
|
|
+
|
|
+ ret = wait_event_timeout(
|
|
+ session->waitq,
|
|
+ is_fw_loaded(session) == false ||
|
|
+ session->fw.msg_pending == 0 ||
|
|
+ session->error != 0,
|
|
+ msecs_to_jiffies(session->watchdog_timeout*2));
|
|
+
|
|
+ ktime_get_boottime_ts64(&curtime);
|
|
+ timeend = curtime.tv_sec*1000*1000l + curtime.tv_nsec/1000l;
|
|
+ if (timeend - timestart >= 10000000l) {
|
|
+ MVX_SESSION_WARN(session, "wait_pending cost time is greater than 10 secs. cost_time=%llu us, ret=%d, error=%d, msg_pending=%d, in_fmt=%d, out_fmt=%d", timeend - timestart, ret, session->error, session->fw.msg_pending, session->port[MVX_DIR_INPUT].format, session->port[MVX_DIR_OUTPUT].format);
|
|
+ }
|
|
+
|
|
+ if (ret < 0)
|
|
+ goto lock_mutex;
|
|
+
|
|
+ if (ret == 0) {
|
|
+ send_event_error(session, -ETIME);
|
|
+ ret = -ETIME;
|
|
+ goto lock_mutex;
|
|
+ }
|
|
+
|
|
+ mutex_lock(session->isession.mutex);
|
|
+ }
|
|
+
|
|
+ return session->error;
|
|
+
|
|
+lock_mutex:
|
|
+ mutex_lock(session->isession.mutex);
|
|
+
|
|
+ if (ret < 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Wait pending returned error. ret=%d, error=%d, msg_pending=%d.",
|
|
+ ret, session->error, session->fw.msg_pending);
|
|
+
|
|
+ print_debug(session);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
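+/*
+ * Caller sketch (locking contract assumed from the unlock/lock pairs
+ * above): the session mutex is held on entry, dropped while waiting and
+ * held again on return.
+ *
+ *   mutex_lock(session->isession.mutex);
+ *   ret = fw_state_change(session, MVX_FW_STATE_STOPPED);
+ *   if (ret == 0)
+ *           ret = wait_pending(session);
+ *   mutex_unlock(session->isession.mutex);
+ */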
|
|
+static int wait_flush_done(struct mvx_session *session, enum mvx_direction dir)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct mvx_session_port *port = &session->port[dir];
|
|
+
|
|
+ while (is_fw_loaded(session) != false &&
|
|
+ port->buffer_count > 0 &&
|
|
+ session->error == 0) {
|
|
+ mutex_unlock(session->isession.mutex);
|
|
+
|
|
+ ret = wait_event_timeout(
|
|
+ session->waitq,
|
|
+ is_fw_loaded(session) == false ||
|
|
+ port->buffer_count == 0 ||
|
|
+ session->error != 0,
|
|
+ msecs_to_jiffies(session_wait_flush_timeout));
|
|
+
|
|
+ if (ret < 0)
|
|
+ goto lock_mutex;
|
|
+
|
|
+ if (ret == 0) {
|
|
+ send_event_error(session, -ETIME);
|
|
+ ret = -ETIME;
|
|
+ goto lock_mutex;
|
|
+ }
|
|
+
|
|
+ mutex_lock(session->isession.mutex);
|
|
+ }
|
|
+
|
|
+ return session->error;
|
|
+
|
|
+lock_mutex:
|
|
+ mutex_lock(session->isession.mutex);
|
|
+
|
|
+ if (ret < 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "wait_flush_done returned error. ret=%d, error=%d, msg_pending=%d, buffer_count=%d",
|
|
+ ret, session->error, session->fw.msg_pending, port->buffer_count);
|
|
+
|
|
+ print_debug(session);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int wait_switch_out(struct mvx_session *session)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ while (is_fw_loaded(session) != false &&
|
|
+ session->switched_in == true &&
|
|
+ session->error == 0) {
|
|
+ mutex_unlock(session->isession.mutex);
|
|
+
|
|
+ ret = wait_event_timeout(
|
|
+ session->waitq,
|
|
+ is_fw_loaded(session) == false ||
|
|
+ session->switched_in == false ||
|
|
+ session->error != 0,
|
|
+ msecs_to_jiffies(session_wait_pending_timeout));
|
|
+
|
|
+ if (ret < 0)
|
|
+ goto lock_mutex;
|
|
+
|
|
+ if (ret == 0) {
|
|
+ send_event_error(session, -ETIME);
|
|
+ ret = -ETIME;
|
|
+ goto lock_mutex;
|
|
+ }
|
|
+
|
|
+ mutex_lock(session->isession.mutex);
|
|
+ }
|
|
+
|
|
+ return session->error;
|
|
+
|
|
+lock_mutex:
|
|
+ mutex_lock(session->isession.mutex);
|
|
+
|
|
+ if (ret < 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "wait_switch_out returned error. ret=%d, error=%d, msg_pending=%d, switch_in=%d",
|
|
+ ret, session->error, session->fw.msg_pending, session->switched_in);
|
|
+
|
|
+ print_debug(session);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int send_irq(struct mvx_session *session)
|
|
+{
|
|
+ if (IS_ERR_OR_NULL(session->csession))
|
|
+ return -EINVAL;
|
|
+
|
|
+ return session->client_ops->send_irq(session->csession);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * switch_in() - Request the client device to switch in the session.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int switch_in(struct mvx_session *session)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ session->idle_count = 0;
|
|
+
|
|
+ if (session->switched_in != false)
|
|
+ return 0;
|
|
+
|
|
+ if (IS_ERR_OR_NULL(session->csession))
|
|
+ return -EINVAL;
|
|
+
|
|
+ MVX_SESSION_INFO(session, "Switch in.");
|
|
+
|
|
+ ret = session->client_ops->switch_in(session->csession);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session, "Failed to switch in session.");
|
|
+ send_event_error(session, ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ session->switched_in = true;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * fw_send_msg() - Send firmware message and signal IRQ.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int fw_send_msg(struct mvx_session *session,
|
|
+ struct mvx_fw_msg *msg)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ ret = session->fw.ops.put_message(&session->fw, msg);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to queue firmware message.");
|
|
+ goto send_error;
|
|
+ }
|
|
+
|
|
+ ret = send_irq(session);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session, "Failed to send irq.");
|
|
+ goto send_error;
|
|
+ }
|
|
+
|
|
+ return switch_in(session);
|
|
+
|
|
+send_error:
|
|
+ send_event_error(session, ret);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int fw_send_msg_simple(struct mvx_session *session,
|
|
+ enum mvx_fw_code code,
|
|
+ const char *str)
|
|
+{
|
|
+ struct mvx_fw_msg msg = { .code = code };
|
|
+
|
|
+ MVX_SESSION_INFO(session, "Firmware req: %s.", str);
|
|
+
|
|
+ return fw_send_msg(session, &msg);
|
|
+}
|
|
+
|
|
+static int fw_flush(struct mvx_session *session,
|
|
+ enum mvx_direction dir)
|
|
+{
|
|
+ struct mvx_fw_msg msg = { .code = MVX_FW_CODE_FLUSH, .flush.dir = dir };
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(session, "Firmware req: Flush. dir=%d.", dir);
|
|
+
|
|
+ ret = fw_send_msg(session, &msg);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ session->port[dir].is_flushing = true;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fw_state_change(struct mvx_session *session,
|
|
+ enum mvx_fw_state state)
|
|
+{
|
|
+ struct mvx_fw_msg msg = {
|
|
+ .code = MVX_FW_CODE_STATE_CHANGE,
|
|
+ .state = state
|
|
+ };
|
|
+ int ret = 0;
|
|
+
|
|
+ if (state != session->fw_state) {
|
|
+ MVX_SESSION_INFO(session,
|
|
+ "Firmware req: State change. current=%d, new=%d.",
|
|
+ session->fw_state, state);
|
|
+ ret = fw_send_msg(session, &msg);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int fw_job(struct mvx_session *session,
|
|
+ unsigned int frames)
|
|
+{
|
|
+ struct mvx_fw_msg msg = {
|
|
+ .code = MVX_FW_CODE_JOB,
|
|
+ .job.cores = session->isession.ncores,
|
|
+ .job.frames = frames
|
|
+ };
|
|
+
|
|
+ MVX_SESSION_INFO(session, "Firmware req: Job. frames=%u.", frames);
|
|
+
|
|
+ return fw_send_msg(session, &msg);
|
|
+}
|
|
+
|
|
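+/**
+ * fw_switch_out() - Request the firmware to switch out the session. The idle
+ * counter is restored afterwards, since switch out is the only message that
+ * should not reset it.
+ *
+ * Return: 0 on success, else error code.
+ */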
+static int fw_switch_out(struct mvx_session *session)
|
|
+{
|
|
+ unsigned int idle_count = session->idle_count;
|
|
+ int ret;
|
|
+
|
|
+ ret = fw_send_msg_simple(session, MVX_FW_CODE_SWITCH_OUT,
|
|
+ "Switch out");
|
|
+
|
|
+ /*
|
|
+ * Restore idle count. Switch out is the only message where we do not
|
|
+ * want to reset the idle counter.
|
|
+ */
|
|
+ session->idle_count = idle_count;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int fw_ping(struct mvx_session *session)
|
|
+{
|
|
+ return fw_send_msg_simple(session, MVX_FW_CODE_PING, "Ping");
|
|
+}
|
|
+
|
|
+static int fw_dump(struct mvx_session *session)
|
|
+{
|
|
+ return fw_send_msg_simple(session, MVX_FW_CODE_DUMP, "Dump");
|
|
+}
|
|
+
|
|
+#ifdef MVX_FW_DEBUG_ENABLE
|
|
+static int fw_set_debug(struct mvx_session *session, uint32_t debug_level)
|
|
+{
|
|
+ struct mvx_fw_msg msg = {
|
|
+ .code = MVX_FW_CODE_DEBUG,
|
|
+ .arg = debug_level
|
|
+ };
|
|
+
|
|
+ MVX_SESSION_INFO(session, "Firmware req: Set debug. debug_level=%d.", debug_level);
|
|
+
|
|
+ return fw_send_msg(session, &msg);
|
|
+}
|
|
+#endif
|
|
+
|
|
+static int fw_set_option(struct mvx_session *session,
|
|
+ struct mvx_fw_set_option *option)
|
|
+{
|
|
+ struct mvx_fw_msg msg = {
|
|
+ .code = MVX_FW_CODE_SET_OPTION,
|
|
+ .set_option = *option
|
|
+ };
|
|
+
|
|
+ MVX_SESSION_INFO(session, "Firmware req: Set option. code=%d.",
|
|
+ option->code);
|
|
+
|
|
+ return fw_send_msg(session, &msg);
|
|
+}
|
|
+
|
|
+static bool is_encoder(struct mvx_session *session)
|
|
+{
|
|
+ return get_bitstream_port(session) == MVX_DIR_OUTPUT;
|
|
+}
|
|
+
|
|
+static int fw_eos(struct mvx_session *session)
|
|
+{
|
|
+ struct mvx_fw_msg msg = {
|
|
+ .code = MVX_FW_CODE_EOS,
|
|
+ .eos_is_frame = is_encoder(session) ? true : false
|
|
+ };
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(session, "Firmware req: Buffer EOS.");
|
|
+
|
|
+ ret = fw_send_msg(session, &msg);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ session->port[MVX_DIR_INPUT].flushed = false;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
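+/**
+ * fw_set_qp() - Set a QP related option. A qp value of 0 is treated as
+ * "not set" and silently skipped, negative values are rejected.
+ *
+ * Typical use, as in fw_encoder_setup() below:
+ *
+ *   ret = fw_set_qp(session, MVX_FW_SET_QP_I, session->qp[codec].i_frame);
+ *
+ * Return: 0 on success, else error code.
+ */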
+static int fw_set_qp(struct mvx_session *session,
|
|
+ int code,
|
|
+ int qp)
|
|
+{
|
|
+ struct mvx_fw_set_option option;
|
|
+ int ret;
|
|
+
|
|
+ if (qp < 0)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (qp == 0)
|
|
+ return 0;
|
|
+
|
|
+ option.code = code;
|
|
+ option.qp = qp;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set QP. code=%d, ret=%d.",
|
|
+ code, ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fw_set_roi_regions(struct mvx_session *session,
|
|
+ int code,
|
|
+ struct mvx_roi_config *roi)
|
|
+{
|
|
+ struct mvx_fw_set_option option;
|
|
+ int ret;
|
|
+
|
|
+ if (roi->num_roi < 0)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (roi->num_roi == 0)
|
|
+ return 0;
|
|
+
|
|
+ option.code = code;
|
|
+ option.roi_config = *roi;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set ROI. code=%d, ret=%d.",
|
|
+ code, ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+static int fw_common_setup(struct mvx_session *session)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct mvx_fw_set_option option;
|
|
+ enum mvx_direction dir = get_bitstream_port(session);
|
|
+
|
|
+ if ((session->port[dir].format == MVX_FORMAT_VP8
|
|
+ || session->port[dir].format == MVX_FORMAT_VP9)
|
|
+ && session->nalu_format != MVX_NALU_FORMAT_ONE_NALU_PER_BUFFER) {
|
|
+ session->nalu_format = MVX_NALU_FORMAT_UNDEFINED;
|
|
+ }
|
|
+
|
|
+ if (session->nalu_format != MVX_NALU_FORMAT_UNDEFINED) {
|
|
+ option.code = MVX_FW_SET_NALU_FORMAT;
|
|
+ option.nalu_format = session->nalu_format;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set NALU format.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->stream_escaping != MVX_TRI_UNSET) {
|
|
+ option.code = MVX_FW_SET_STREAM_ESCAPING;
|
|
+ option.stream_escaping = session->stream_escaping;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set stream escaping.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->enable_profiling) {
|
|
+ MVX_SESSION_WARN(session, "[Debug] Set MVX_FW_SET_INDEX_PROFILING to read profiling data.");
|
|
+ session->bus_read_bytes_total = 0;
|
|
+ session->bus_write_bytes_total = 0;
|
|
+ option.code = MVX_FW_SET_INDEX_PROFILING;
|
|
+ option.index_profiling = 1;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set set index profiling");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/* JPEG standard, Annex K */
|
|
+static const uint8_t qtbl_chroma_ref[MVX_FW_QUANT_LEN] = {
|
|
+ 17, 18, 24, 47, 99, 99, 99, 99,
|
|
+ 18, 21, 26, 66, 99, 99, 99, 99,
|
|
+ 24, 26, 56, 99, 99, 99, 99, 99,
|
|
+ 47, 66, 99, 99, 99, 99, 99, 99,
|
|
+ 99, 99, 99, 99, 99, 99, 99, 99,
|
|
+ 99, 99, 99, 99, 99, 99, 99, 99,
|
|
+ 99, 99, 99, 99, 99, 99, 99, 99,
|
|
+ 99, 99, 99, 99, 99, 99, 99, 99
|
|
+};
|
|
+
|
|
+static const uint8_t qtbl_luma_ref[MVX_FW_QUANT_LEN] = {
|
|
+ 16, 11, 10, 16, 24, 40, 51, 61,
|
|
+ 12, 12, 14, 19, 26, 58, 60, 55,
|
|
+ 14, 13, 16, 24, 40, 57, 69, 56,
|
|
+ 14, 17, 22, 29, 51, 87, 80, 62,
|
|
+ 18, 22, 37, 56, 68, 109, 103, 77,
|
|
+ 24, 35, 55, 64, 81, 104, 113, 92,
|
|
+ 49, 64, 78, 87, 103, 121, 120, 101,
|
|
+ 72, 92, 95, 98, 112, 100, 103, 99
|
|
+};
|
|
+
|
|
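+/**
+ * generate_quant_tbl() - Scale a reference JPEG quantization table to the
+ * requested quality using the common IJG-style scale factor
+ * q = 5000 / quality (quality < 50) or q = 200 - 2 * quality (quality >= 50),
+ * clamping each coefficient to [1, 255].
+ *
+ * Worked example: quality=80 gives q=40, so the first luma coefficient
+ * becomes (16 * 40 + 50) / 100 = 6.
+ */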
+void generate_quant_tbl(int quality,
|
|
+ const uint8_t qtbl_ref[MVX_FW_QUANT_LEN],
|
|
+ uint8_t qtbl[MVX_FW_QUANT_LEN])
|
|
+{
|
|
+ int i;
|
|
+ int q;
|
|
+
|
|
+ q = (quality < 50) ? (5000 / quality) : (200 - 2 * quality);
|
|
+
|
|
+ for (i = 0; i < MVX_FW_QUANT_LEN; ++i) {
|
|
+ qtbl[i] = ((qtbl_ref[i] * q) + 50) / 100;
|
|
+ qtbl[i] = min_t(int, qtbl[i], 255);
|
|
+ qtbl[i] = max_t(int, qtbl[i], 1);
|
|
+ }
|
|
+}
|
|
+
|
|
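+/**
+ * fw_encoder_setup() - Push all configured encoder options (profile/level,
+ * rate control, QP, GOP, slices, crop, VUI, LTR, JPEG quantization, ...) to
+ * the firmware before streaming starts.
+ *
+ * Return: 0 on success, else error code.
+ */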
+static int fw_encoder_setup(struct mvx_session *session)
|
|
+{
|
|
+ int ret;
|
|
+ enum mvx_format codec;
|
|
+ struct mvx_fw_set_option option;
|
|
+ enum mvx_direction dir;
|
|
+
|
|
+ dir = get_bitstream_port(session);
|
|
+ codec = session->port[dir].format;
|
|
+
|
|
+ if (session->profile[codec] != MVX_PROFILE_NONE) {
|
|
+ option.code = MVX_FW_SET_PROFILE_LEVEL;
|
|
+ option.profile_level.profile = session->profile[codec];
|
|
+ option.profile_level.level = session->level[codec];
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set profile/level.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (codec != MVX_FORMAT_JPEG) {
|
|
+ option.code = MVX_FW_SET_FRAME_RATE;
|
|
+ option.frame_rate = session->frame_rate;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to put frame rate.");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (session->rc_type) {
|
|
+ option.code = MVX_FW_SET_RATE_CONTROL;
|
|
+ option.rate_control.target_bitrate =
|
|
+ session->rc_type ? session->target_bitrate : 0;
|
|
+ option.rate_control.rate_control_mode = session->rc_type;
|
|
+ if (session->rc_type == MVX_OPT_RATE_CONTROL_MODE_C_VARIABLE) {
|
|
+ option.rate_control.maximum_bitrate = session->maximum_bitrate;
|
|
+ }
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to put target bitrate.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->rc_enabled != false) {
|
|
+ if (session->qp[codec].min < session->qp[codec].max) {
|
|
+ option.code = MVX_FW_SET_QP_RANGE;
|
|
+ option.qp_range.min = session->qp[codec].min;
|
|
+ option.qp_range.max = session->qp[codec].max;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set qp range.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ ret = fw_set_qp(session, MVX_FW_SET_QP_I,
|
|
+ session->qp[codec].i_frame);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = fw_set_qp(session, MVX_FW_SET_QP_P,
|
|
+ session->qp[codec].p_frame);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = fw_set_qp(session, MVX_FW_SET_QP_B,
|
|
+ session->qp[codec].b_frame);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (session->p_frames >= 0) {
|
|
+ option.code = MVX_FW_SET_P_FRAMES;
|
|
+ option.pb_frames = session->p_frames;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set P frames.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->b_frames != 0) {
|
|
+ option.code = MVX_FW_SET_B_FRAMES;
|
|
+ option.pb_frames = session->b_frames;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set B frames.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->gop_type != MVX_GOP_TYPE_NONE) {
|
|
+ option.code = MVX_FW_SET_GOP_TYPE;
|
|
+ option.gop_type = session->gop_type;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set GOP type.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->cyclic_intra_refresh_mb != 0) {
|
|
+ option.code = MVX_FW_SET_INTRA_MB_REFRESH;
|
|
+ option.intra_mb_refresh =
|
|
+ session->cyclic_intra_refresh_mb;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set cyclic intra refresh Mb.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->constr_ipred != MVX_TRI_UNSET &&
|
|
+ (codec == MVX_FORMAT_H264 || codec == MVX_FORMAT_HEVC)) {
|
|
+ option.code = MVX_FW_SET_CONSTR_IPRED;
|
|
+ option.constr_ipred = session->constr_ipred;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set constr ipred.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (codec == MVX_FORMAT_HEVC) {
|
|
+ if (session->entropy_sync != MVX_TRI_UNSET) {
|
|
+ option.code = MVX_FW_SET_ENTROPY_SYNC;
|
|
+ option.entropy_sync = session->entropy_sync;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set entropy sync.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->temporal_mvp != MVX_TRI_UNSET) {
|
|
+ option.code = MVX_FW_SET_TEMPORAL_MVP;
|
|
+ option.temporal_mvp = session->temporal_mvp;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set temporal mvp.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ option.code = MVX_FW_SET_MIN_LUMA_CB_SIZE;
|
|
+ option.min_luma_cb_size = session->min_luma_cb_size;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set min luma cb size.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if ((codec == MVX_FORMAT_HEVC ||
|
|
+ codec == MVX_FORMAT_VP9) &&
|
|
+ (session->tile_rows != 0 ||
|
|
+ session->tile_cols != 0)) {
|
|
+ option.code = MVX_FW_SET_TILES;
|
|
+ option.tile.rows = session->tile_rows;
|
|
+ option.tile.cols = session->tile_cols;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set tile dims.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->entropy_mode != MVX_ENTROPY_MODE_NONE &&
|
|
+ codec == MVX_FORMAT_H264) {
|
|
+ option.code = MVX_FW_SET_ENTROPY_MODE;
|
|
+ option.entropy_mode = session->entropy_mode;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set entropy mode.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (codec == MVX_FORMAT_H264 ||
|
|
+ codec == MVX_FORMAT_HEVC) {
|
|
+ option.code = MVX_FW_SET_SLICE_SPACING_MB;
|
|
+ if (session->multi_slice_mode ==
|
|
+ MVX_MULTI_SLICE_MODE_SINGLE)
|
|
+ option.slice_spacing_mb = 0;
|
|
+ else
|
|
+ option.slice_spacing_mb =
|
|
+ session->multi_slice_max_mb;
|
|
+
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set slice spacing.");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ option.code = MVX_FW_SET_CABAC_INIT_IDC;
|
|
+ option.cabac_init_idc = session->cabac_init_idc;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set CABAC init IDC.");
|
|
+ return ret;
|
|
+ }
|
|
+ if (session->crop_left != 0) {
|
|
+ option.code = MVX_FW_SET_CROP_LEFT;
|
|
+ option.crop_left = session->crop_left;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set crop left");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ if (session->crop_right != 0) {
|
|
+ option.code = MVX_FW_SET_CROP_RIGHT;
|
|
+ option.crop_right = session->crop_right;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set crop right");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ if (session->crop_top != 0) {
|
|
+ option.code = MVX_FW_SET_CROP_TOP;
|
|
+ option.crop_top = session->crop_top;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set crop top");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ if (session->crop_bottom != 0) {
|
|
+ option.code = MVX_FW_SET_CROP_BOTTOM;
|
|
+ option.crop_bottom = session->crop_bottom;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set crop bottom");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ if (session->nHRDBufsize != 0) {
|
|
+ option.code = MVX_FW_SET_HRD_BUF_SIZE;
|
|
+ option.nHRDBufsize = session->nHRDBufsize;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set HRD Buffer Size");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ if (session->color_desc.range != 0 || session->color_desc.matrix != 0 ||
|
|
+ session->color_desc.primaries != 0 || session->color_desc.transfer != 0 ||
|
|
+ session->color_desc.sar_height != 0 || session->color_desc.sar_width != 0 ||
|
|
+ session->color_desc.aspect_ratio_idc != 0) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_COLOUR_DESC;
|
|
+ option.colour_desc = session->color_desc;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set vui colour description");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->sei_userdata.flags) {
|
|
+ option.code = MVX_FW_SET_SEI_USERDATA;
|
|
+ option.userdata = session->sei_userdata;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set sei userdata");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->mvx_ltr.mode != 0 || session->mvx_ltr.period != 0){
|
|
+ option.code = MVX_FW_SET_LONG_TERM_REF;
|
|
+ option.ltr.mode = session->mvx_ltr.mode;
|
|
+ option.ltr.period = session->mvx_ltr.period;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set ltr mode/period");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (codec == MVX_FORMAT_VP9) {
|
|
+ MVX_SESSION_INFO(session, "VP9 option!");
|
|
+ option.code = MVX_FW_SET_VP9_PROB_UPDATE;
|
|
+ option.vp9_prob_update = session->vp9_prob_update;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set VP9 prob update mode.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->mv_h_search_range != 0 &&
|
|
+ session->mv_v_search_range != 0) {
|
|
+ option.code = MVX_FW_SET_MV_SEARCH_RANGE;
|
|
+ option.mv.x = session->mv_h_search_range;
|
|
+ option.mv.y = session->mv_v_search_range;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set motion vector search range.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->bitdepth_chroma != 0 &&
|
|
+ session->bitdepth_luma != 0) {
|
|
+ option.code = MVX_FW_SET_BITDEPTH;
|
|
+ option.bitdepth.chroma = session->bitdepth_chroma;
|
|
+ option.bitdepth.luma = session->bitdepth_luma;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set bitdepth.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->force_chroma_format != 0) {
|
|
+ option.code = MVX_FW_SET_CHROMA_FORMAT;
|
|
+ option.chroma_format = session->force_chroma_format;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set chroma format.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (mvx_is_rgb(session->port[MVX_DIR_INPUT].format) != false) {
|
|
+ option.code = MVX_FW_SET_RGB_TO_YUV_MODE;
|
|
+ if (session->color_desc.primaries == MVX_FW_PRIMARIES_BT709)
|
|
+ {
|
|
+ session->rgb_to_yuv = (session->color_desc.range == MVX_FW_RANGE_FULL) ?
+ MVX_RGB_TO_YUV_MODE_BT709_FULL : MVX_RGB_TO_YUV_MODE_BT709_STUDIO;
|
|
+ }
|
|
+ else if ((session->color_desc.primaries == MVX_FW_PRIMARIES_BT601_625) ||
|
|
+ (session->color_desc.primaries == MVX_FW_PRIMARIES_BT601_525))
|
|
+ {
|
|
+ session->rgb_to_yuv = (session->color_desc.range == MVX_FW_RANGE_FULL) ?
+ MVX_RGB_TO_YUV_MODE_BT601_FULL : MVX_RGB_TO_YUV_MODE_BT601_STUDIO;
|
|
+ }
|
|
+ option.rgb_to_yuv_mode = session->rgb_to_yuv;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set RGB to YUV mode.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->band_limit != 0) {
|
|
+ option.code = MVX_FW_SET_BAND_LIMIT;
|
|
+ option.band_limit = session->band_limit;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set bandwidth limit.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (codec == MVX_FORMAT_JPEG) {
|
|
+ if (session->resync_interval >= 0) {
|
|
+ option.code = MVX_FW_SET_RESYNC_INTERVAL;
|
|
+ option.resync_interval = session->resync_interval;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set resync interval.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->jpeg_quality != 0) {
|
|
+ uint8_t qtbl_chroma[MVX_FW_QUANT_LEN];
|
|
+ uint8_t qtbl_luma[MVX_FW_QUANT_LEN];
|
|
+
|
|
+ generate_quant_tbl(session->jpeg_quality,
|
|
+ qtbl_chroma_ref, qtbl_chroma);
|
|
+ generate_quant_tbl(session->jpeg_quality,
|
|
+ qtbl_luma_ref, qtbl_luma);
|
|
+ option.code = MVX_FW_SET_QUANT_TABLE;
|
|
+ option.quant_tbl.chroma = qtbl_chroma;
|
|
+ option.quant_tbl.luma = qtbl_luma;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set quantization table.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ ret = fw_common_setup(session);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
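+/**
+ * fw_decoder_setup() - Push all configured decoder options (VC1 profile,
+ * stream header handling, frame reordering, internal buffer size and
+ * downscaling) to the firmware before streaming starts.
+ *
+ * Return: 0 on success, else error code.
+ */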
+static int fw_decoder_setup(struct mvx_session *session)
|
|
+{
|
|
+ int ret;
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ enum mvx_format codec;
|
|
+ enum mvx_direction dir;
|
|
+
|
|
+ dir = get_bitstream_port(session);
|
|
+ codec = session->port[dir].format;
|
|
+
|
|
+ if (codec == MVX_FORMAT_VC1 &&
|
|
+ session->profile[codec] != MVX_PROFILE_NONE) {
|
|
+ option.code = MVX_FW_SET_PROFILE_LEVEL;
|
|
+ option.profile_level.profile = session->profile[codec];
|
|
+ option.profile_level.level = session->level[codec];
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set profile/level.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->ignore_stream_headers != MVX_TRI_UNSET) {
|
|
+ option.code = MVX_FW_SET_IGNORE_STREAM_HEADERS;
|
|
+ option.ignore_stream_headers = session->ignore_stream_headers;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set ignore stream headers.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->frame_reordering != MVX_TRI_UNSET) {
|
|
+ option.code = MVX_FW_SET_FRAME_REORDERING;
|
|
+ option.frame_reordering = session->frame_reordering;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set frame reordering.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->intbuf_size != 0) {
|
|
+ option.code = MVX_FW_SET_INTBUF_SIZE;
|
|
+ option.intbuf_size = session->intbuf_size;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set internal buffer size.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->dsl_frame.width != 0 && session->dsl_frame.height != 0) {
|
|
+ option.code = MVX_FW_SET_DSL_FRAME;
|
|
+ option.dsl_frame.width = session->dsl_frame.width;
|
|
+ option.dsl_frame.height = session->dsl_frame.height;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set DSL frame width/height.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (session->dsl_pos_mode >= 0 && session->dsl_pos_mode <= 2) {
|
|
+ option.code = MVX_FW_SET_DSL_MODE;
|
|
+ option.dsl_pos_mode = session->dsl_pos_mode;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to set DSL mode.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ ret = fw_common_setup(session);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
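+/**
+ * fw_initial_setup() - Perform the initial firmware configuration: watchdog
+ * timeout, job scheduling, encoder or decoder options, a state change to
+ * RUNNING and a final ping.
+ *
+ * Return: 0 on success, else error code.
+ */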
+static int fw_initial_setup(struct mvx_session *session)
|
|
+{
|
|
+ int ret;
|
|
+ enum mvx_direction dir;
|
|
+ enum mvx_format codec;
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ MVX_SESSION_INFO(session, "Firmware initial setup.");
|
|
+
|
|
+#ifdef MVX_FW_DEBUG_ENABLE
|
|
+ fw_set_debug(session, 5);
|
|
+#endif
|
|
+
|
|
+ option.code = MVX_FW_SET_WATCHDOG_TIMEOUT;
|
|
+ option.watchdog_timeout = fw_watchdog_timeout;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ dir = get_bitstream_port(session);
|
|
+ codec = session->port[dir].format;
|
|
+
|
|
+ ret = fw_job(session, 1);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ if (is_encoder(session))
|
|
+ ret = fw_encoder_setup(session);
|
|
+ else
|
|
+ ret = fw_decoder_setup(session);
|
|
+
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to perform initial setup.\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = fw_state_change(session, MVX_FW_STATE_RUNNING);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session, "Failed to queue state change.");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = fw_ping(session);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session, "Failed to put ping message.");
|
|
+ send_event_error(session, ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
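+/**
+ * ddr_qos_request_update() - Collect the parameters (codec direction, frame
+ * rate, AFBC usage and frame dimensions) used to estimate the DDR bandwidth
+ * need. The DVFS estimate and QoS update calls are currently commented out.
+ */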
+static void ddr_qos_request_update(struct mvx_session *session)
|
|
+{
|
|
+ struct estimate_ddr_input ddr_input;
|
|
+ //struct estimate_ddr_output ddr_output;
|
|
+
|
|
+ ddr_input.isEnc = is_encoder(session);
|
|
+ ddr_input.fps = session->frame_rate >> 16;
|
|
+
|
|
+ if (ddr_input.isEnc) {
|
|
+ ddr_input.isAFBC = mvx_is_afbc(session->port[MVX_DIR_INPUT].format);
|
|
+ ddr_input.width = session->port[MVX_DIR_INPUT].width;
|
|
+ ddr_input.height = session->port[MVX_DIR_INPUT].height;
|
|
+ } else {
|
|
+ ddr_input.isAFBC = mvx_is_afbc(session->port[MVX_DIR_OUTPUT].format);
|
|
+ ddr_input.width = session->port[MVX_DIR_OUTPUT].width;
|
|
+ ddr_input.height = session->port[MVX_DIR_OUTPUT].height;
|
|
+ }
|
|
+
|
|
+ //mvx_dvfs_estimate_ddr_bandwidth(&ddr_input, &ddr_output);
|
|
+
|
|
+ //session->estimated_ddr_read_throughput = (uint32_t)ddr_output.estimated_read;
|
|
+ //session->estimated_ddr_write_throughput = (uint32_t)ddr_output.estimated_write;
|
|
+
|
|
+ //mvx_dvfs_session_update_ddr_qos(session, session->estimated_ddr_read_throughput, session->estimated_ddr_write_throughput);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * map_buffer() - Memory map buffer to MVE address space.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int map_buffer(struct mvx_session *session,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_buffer *buf)
|
|
+{
|
|
+ mvx_mmu_va begin;
|
|
+ mvx_mmu_va end;
|
|
+ enum mvx_fw_region region;
|
|
+ int ret;
|
|
+
|
|
+ if (mvx_is_bitstream(session->port[dir].format))
|
|
+ region = MVX_FW_REGION_PROTECTED;
|
|
+ else if (mvx_is_frame(session->port[dir].format))
|
|
+ region = MVX_FW_REGION_FRAMEBUF;
|
|
+ else
|
|
+ return -EINVAL;
|
|
+
|
|
+ ret = session->fw.ops.get_region(region, &begin, &end);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = mvx_buffer_map(buf, begin, end);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
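+/**
+ * queue_roi_regions() - Forward a pending ROI configuration to the firmware:
+ * an optional region QP followed by the ROI region list.
+ *
+ * Return: 0 on success, else error code.
+ */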
+static int queue_roi_regions(struct mvx_session *session,
|
|
+ struct mvx_roi_config *roi_cfg)
|
|
+{
|
|
+ int ret = 0;
|
|
+ if ( roi_cfg->qp_present ) {
|
|
+ ret = fw_set_qp(session, MVX_FW_SET_QP_REGION,
|
|
+ roi_cfg->qp);
|
|
+ }
|
|
+ if ( roi_cfg->roi_present ) {
|
|
+ ret = fw_set_roi_regions(session, MVX_FW_SET_ROI_REGIONS,
|
|
+ roi_cfg);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int queue_qp_epr(struct mvx_session *session,
|
|
+ int *qp)
|
|
+{
|
|
+ int ret = 0;
|
|
+ ret = fw_set_qp(session, MVX_FW_SET_QP_REGION,
|
|
+ *qp);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * queue_buffer() - Put buffer to firmware queue.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int queue_buffer(struct mvx_session *session,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_buffer *buf)
|
|
+{
|
|
+ struct mvx_session_port *port = &session->port[dir];
|
|
+ struct mvx_fw_msg msg;
|
|
+ int i;
|
|
+
|
|
+ /*
|
|
+ * Vb2 cannot allocate buffers with bidirectional mapping, therefore
|
|
+ * the proper direction must be set.
|
|
+ */
|
|
+ enum dma_data_direction dma_dir =
|
|
+ (dir == MVX_DIR_OUTPUT) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
|
|
+
|
|
+ int ret = 0; /* the cache maintenance skip path below does not assign ret */
|
|
+ bool mapped = mvx_buffer_is_mapped(buf);
|
|
+
|
|
+ if (dir == MVX_DIR_OUTPUT) {
|
|
+ port->scaling_shift = (buf->flags & MVX_BUFFER_FRAME_FLAG_SCALING_MASK) >> 14;
|
|
+ }
|
|
+ if (mvx_buffer_is_mapped(buf) == false) {
|
|
+ ret = map_buffer(session, dir, buf);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+ buf->flags &= ~MVX_BUFFER_FRAME_NEED_REALLOC;
|
|
+
|
|
+ if (dir == MVX_DIR_OUTPUT && port->isreallocting == true) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_NEED_REALLOC;
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+
|
|
+ if (dir == MVX_DIR_OUTPUT && port->buffer_allocated < port->buffer_min) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_NEED_REALLOC;
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+
|
|
+ if (dir == MVX_DIR_INPUT && (buf->flags & MVX_BUFFER_EOS) != 0) {
|
|
+ session->eos_queued = true;
|
|
+ }
|
|
+
|
|
+ if (dir == MVX_DIR_OUTPUT) {
|
|
+ session->keep_freq_high = false;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Update frame dimensions. They might have changed due to a resolution
|
|
+ * change.
|
|
+ */
|
|
+ if (mvx_is_afbc(port->format) != false) {
|
|
+ port->afbc_width = DIV_ROUND_UP(port->width, 16 << (!!(buf->flags & MVX_BUFFER_AFBC_32X8_SUPERBLOCK)));
|
|
+ ret = mvx_buffer_afbc_set(buf, port->format, port->width,
|
|
+ port->height, port->afbc_width,
|
|
+ port->size[0], port->interlaced);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ } else if (mvx_is_frame(port->format) != false) {
|
|
+ ret = mvx_buffer_frame_set(buf, port->format, port->width,
|
|
+ port->height, port->stride,
|
|
+ port->size,
|
|
+ port->interlaced);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (mapped &&
|
|
+ ((dir == MVX_DIR_OUTPUT) ||
|
|
+ (dir == MVX_DIR_INPUT &&
|
|
+ mvx_is_frame(port->format) &&
|
|
+ (buf->flags & MVX_BUFFER_FLAG_DISABLE_CACHE_MAINTENANCE))))
|
|
+ {
|
|
+ /*
+ * 1. There is no need to invalidate the cache for an output buffer on
+ * every queue; it is only invalidated when the buffer is mapped.
+ * 2. There is no need to clean the cache for an input frame buffer if
+ * there is no CPU write/read usage.
+ */
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ ret = mvx_buffer_synch(buf, dma_dir);
|
|
+ }
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ msg.code = MVX_FW_CODE_BUFFER;
|
|
+ msg.buf = buf;
|
|
+
|
|
+ MVX_SESSION_INFO(session,
|
|
+ "Firmware req: Buffer. dir=%u, len=[%u, %u, %u], flags=0x%08x, eos=%u, interlace=%u",
|
|
+ buf->dir,
|
|
+ buf->planes[0].filled,
|
|
+ buf->planes[1].filled,
|
|
+ buf->planes[2].filled,
|
|
+ buf->flags,
|
|
+ (buf->flags & MVX_BUFFER_EOS) != 0,
|
|
+ (buf->flags & MVX_BUFFER_INTERLACE) != 0);
|
|
+
|
|
+ ret = session->fw.ops.put_message(&session->fw, &msg);
|
|
+ if (ret != 0)
|
|
+ goto send_error;
|
|
+
|
|
+ port->buffer_count++;
|
|
+
|
|
+ if (dir == MVX_DIR_OUTPUT && mvx_is_frame(buf->format)) {
|
|
+ for (i = 0; i < buf->nplanes; i++) {
|
|
+ if (port->buffer_size[i] == 0)
|
|
+ port->buffer_size[i] = mvx_buffer_size(buf, i);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ port->flushed = false;
|
|
+ if (dir == MVX_DIR_OUTPUT && port->isreallocting == true) {
|
|
+ port->isreallocting = false;
|
|
+ }
|
|
+ ret = send_irq(session);
|
|
+ if (ret != 0)
|
|
+ goto send_error;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+send_error:
|
|
+ send_event_error(session, ret);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * queue_pending_buffers() - Queue pending buffers.
|
|
+ *
|
|
+ * Buffers that are queued while the port is still stream off are put in the
+ * pending queue. Once both the input and output ports are stream on, the
+ * pending buffers are forwarded to the firmware.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int queue_pending_buffers(struct mvx_session *session,
|
|
+ enum mvx_direction dir)
|
|
+{
|
|
+ struct mvx_buffer *buf;
|
|
+ struct mvx_buffer *tmp;
|
|
+ int roi_config_num = 0;
|
|
+ int roi_config_index = 0;
|
|
+ int qp_num = 0;
|
|
+ int qp_index = 0;
|
|
+ struct mvx_roi_config roi_config;
|
|
+ int ret = 0;
|
|
+
|
|
+ if (dir == MVX_DIR_INPUT && session->port[dir].roi_config_num > 0) {
|
|
+ roi_config_num = session->port[dir].roi_config_num;
|
|
+ }
|
|
+ if (dir == MVX_DIR_INPUT && session->port[dir].qp_num > 0) {
|
|
+ qp_num = session->port[dir].qp_num;
|
|
+ }
|
|
+ list_for_each_entry_safe(buf, tmp, &session->port[dir].buffer_queue,
|
|
+ head) {
|
|
+ if ((buf->flags & MVX_BUFFER_FRAME_FLAG_ROI) == MVX_BUFFER_FRAME_FLAG_ROI &&
|
|
+ roi_config_index < roi_config_num) {
|
|
+ roi_config = session->port[dir].roi_config_queue[roi_config_index];
|
|
+ ret = queue_roi_regions(session, &roi_config);
|
|
+ roi_config_index++;
|
|
+ }
|
|
+ if ((buf->flags & MVX_BUFFER_FRAME_FLAG_GENERAL) == MVX_BUFFER_FRAME_FLAG_GENERAL &&
|
|
+ qp_index < qp_num) {
|
|
+ ret = queue_qp_epr(session, &session->port[dir].qp_queue[qp_index]);
|
|
+ qp_index++;
|
|
+ }
|
|
+ ret = queue_buffer(session, dir, buf);
|
|
+ if ((buf->flags & MVX_BUFFER_FRAME_NEED_REALLOC) == MVX_BUFFER_FRAME_NEED_REALLOC) {
|
|
+ session->event(session, MVX_SESSION_EVENT_BUFFER, buf);
|
|
+ } else if (ret != 0) {
|
|
+ break;
|
|
+ }
|
|
+ list_del(&buf->head);
|
|
+ }
|
|
+
|
|
+ session->port[dir].roi_config_num = 0;
|
|
+ session->port[dir].qp_num = 0;
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * fw_bin_ready() - Complete firmware configuration.
|
|
+ *
|
|
+ * The firmware binary load has completed and the firmware configuration can
|
|
+ * begin.
|
|
+ *
|
|
+ * If the session is no longer 'stream on' (someone issued 'stream off' before
|
|
+ * the firmware load completed) the firmware binary is put back to the cache.
|
|
+ *
|
|
+ * Otherwise the client session is registered and the firmware instance is
|
|
+ * constructed.
|
|
+ */
|
|
+static void fw_bin_ready(struct mvx_fw_bin *bin,
|
|
+ void *arg,
|
|
+ bool same_thread)
|
|
+{
|
|
+ struct mvx_session *session = arg;
|
|
+ int lock_failed = 1;
|
|
+ int ret;
|
|
+
|
|
+ /*
|
|
+ * Only lock the mutex if the firmware binary was loaded by a
|
|
+ * background thread.
|
|
+ */
|
|
+ if (same_thread == false) {
|
|
+ lock_failed = mutex_lock_interruptible(session->isession.mutex);
|
|
+ if (lock_failed != 0) {
|
|
+ send_event_error(session, lock_failed);
|
|
+ goto put_fw_bin;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Return firmware binary if session is no longer 'stream on'. */
|
|
+ if (!is_stream_on(session))
|
|
+ goto put_fw_bin;
|
|
+
|
|
+ /* Create client session. */
|
|
+ session->isession.ncores = session->client_ops->get_ncores(
|
|
+ session->client_ops);
|
|
+ session->isession.l0_pte = mvx_mmu_set_pte(
|
|
+ MVX_ATTR_PRIVATE, phys_cpu2vpu(virt_to_phys(session->mmu.page_table)),
|
|
+ MVX_ACCESS_READ_WRITE);
|
|
+
|
|
+ session->csession = session->client_ops->register_session(
|
|
+ session->client_ops, &session->isession);
|
|
+
|
|
+ mvx_dvfs_register_session(session, is_encoder(session));
|
|
+
|
|
+ if (IS_ERR(session->csession)) {
|
|
+ ret = PTR_ERR(session->csession);
|
|
+ send_event_error(session, ret);
|
|
+ goto put_fw_bin;
|
|
+ }
|
|
+
|
|
+ /* Construct the firmware instance. */
|
|
+ ret = mvx_fw_factory(&session->fw, bin, &session->mmu,
|
|
+ session, session->client_ops, session->csession,
|
|
+ session->isession.ncores,
|
|
+ session->dentry);
|
|
+ if (ret != 0) {
|
|
+ send_event_error(session, ret);
|
|
+ goto unregister_csession;
|
|
+ }
|
|
+
|
|
+ session->fw_bin = bin;
|
|
+
|
|
+ mvx_fw_cache_log(bin, session->csession);
|
|
+
|
|
+ ret = fw_initial_setup(session);
|
|
+ if (ret != 0)
|
|
+ goto unregister_csession;
|
|
+
|
|
+ ret = queue_pending_buffers(session, MVX_DIR_INPUT);
|
|
+ if (ret != 0)
|
|
+ goto unregister_csession;
|
|
+
|
|
+ ret = queue_pending_buffers(session, MVX_DIR_OUTPUT);
|
|
+ if (ret != 0)
|
|
+ goto unregister_csession;
|
|
+
|
|
+ ret = mvx_session_put(session);
|
|
+ if (ret == 0 && lock_failed == 0)
|
|
+ mutex_unlock(session->isession.mutex);
|
|
+
|
|
+ return;
|
|
+
|
|
+unregister_csession:
|
|
+ mvx_dvfs_unregister_session(session);
|
|
+ session->client_ops->unregister_session(session->csession);
|
|
+ session->csession = NULL;
|
|
+
|
|
+put_fw_bin:
|
|
+ mvx_fw_cache_put(session->cache, bin);
|
|
+ session->fw_bin = NULL;
|
|
+
|
|
+ ret = mvx_session_put(session);
|
|
+ if (ret == 0 && lock_failed == 0)
|
|
+ mutex_unlock(session->isession.mutex);
|
|
+}
|
|
+
|
|
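+/**
+ * calc_afbc_size() - Worst case AFBC buffer size: width and height are
+ * converted to superblocks (32x8 or 16x16) with one extra row, and the size
+ * is the header area plus the payload area, each rounded up to 128 bytes.
+ *
+ * Worked example: 1920x1080, MVX_FORMAT_YUV420_AFBC_8, 16x16 superblocks,
+ * progressive: 120 x 69 blocks, 8280 * 16 + 8280 * 384 = 3312000 bytes.
+ */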
+static int calc_afbc_size(struct mvx_session *session,
|
|
+ enum mvx_format format,
|
|
+ unsigned int width,
|
|
+ unsigned int height,
|
|
+ bool tiled_headers,
|
|
+ bool tiled_body,
|
|
+ bool superblock,
|
|
+ bool interlaced)
|
|
+{
|
|
+ static const unsigned int mb_header_size = 16;
|
|
+ unsigned int payload_align = 128;
|
|
+ unsigned int mb_size;
|
|
+ int size;
|
|
+
|
|
+ /* Calculate width and height in super blocks. */
|
|
+ if (superblock != false) {
|
|
+ width = DIV_ROUND_UP(width, 32);
|
|
+ height = DIV_ROUND_UP(height, 8) + 1;
|
|
+ } else {
|
|
+ width = DIV_ROUND_UP(width, 16);
|
|
+ height = DIV_ROUND_UP(height, 16) + 1;
|
|
+ }
|
|
+
|
|
+ /* Round up size to 8x8 tiles. */
|
|
+ if (tiled_headers != false || tiled_body != false) {
|
|
+ width = roundup(width, 8);
|
|
+ height = roundup(height, 8);
|
|
+ }
|
|
+
|
|
+ switch (format) {
|
|
+ case MVX_FORMAT_YUV420_AFBC_8:
|
|
+ mb_size = 384;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV420_AFBC_10:
|
|
+ mb_size = 480;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV422_AFBC_8:
|
|
+ mb_size = 512;
|
|
+ break;
|
|
+ case MVX_FORMAT_YUV422_AFBC_10:
|
|
+ mb_size = 656;
|
|
+ break;
|
|
+ default:
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Unsupported AFBC format. format=%u.",
|
|
+ format);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* Round up tiled body to 128 byte boundary. */
|
|
+ if (tiled_body != false)
|
|
+ mb_size = roundup(mb_size, payload_align);
|
|
+
|
|
+ if (interlaced != false)
|
|
+ height = DIV_ROUND_UP(height, 2);
|
|
+
|
|
+ /* Calculate the size of the AFBC macroblock headers. */
|
|
+ size = roundup(width * height * mb_header_size, payload_align);
|
|
+ size += roundup(width * height * mb_size, payload_align);
|
|
+
|
|
+ if (interlaced != false)
|
|
+ size *= 2;
|
|
+
|
|
+ return size;
|
|
+}
|
|
+
|
|
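+/**
+ * try_format() - Validate and adjust the requested format: clamp dimensions
+ * to 8192, derive output dimensions from the input port (including scaling
+ * shift and downscale ratio), and compute plane count, stride and size for
+ * AFBC, planar frame and compressed bitstream formats.
+ *
+ * Return: 0 on success, else error code.
+ */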
+static int try_format(struct mvx_session *session,
|
|
+ enum mvx_direction dir,
|
|
+ enum mvx_format format,
|
|
+ unsigned int *width,
|
|
+ unsigned int *height,
|
|
+ uint8_t *nplanes,
|
|
+ unsigned int *stride,
|
|
+ unsigned int *size,
|
|
+ bool *interlaced)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ /* Limit width and height to 8k. */
|
|
+ if (*width == ((unsigned int) (-1))) *width = 0;
|
|
+ if (*height == ((unsigned int) (-1))) *height = 0;
|
|
+ *width = min_t(unsigned int, *width, 8192);
|
|
+ *height = min_t(unsigned int, *height, 8192);
|
|
+
|
|
+ /* Stream dimensions are dictated by the input port. */
|
|
+ if (dir == MVX_DIR_OUTPUT) {
|
|
+ *width = session->port[MVX_DIR_INPUT].width >> session->port[MVX_DIR_OUTPUT].scaling_shift;
|
|
+ *height = session->port[MVX_DIR_INPUT].height >> session->port[MVX_DIR_OUTPUT].scaling_shift;
|
|
+ }
|
|
+ if (session->dsl_frame.width != 0 && session->dsl_frame.height != 0) {
|
|
+ *width = session->dsl_frame.width;
|
|
+ *height = session->dsl_frame.height;
|
|
+ } else if (dir == MVX_DIR_OUTPUT && (session->dsl_ratio.hor != 1 || session->dsl_ratio.ver != 1)) {
|
|
+ *width = session->port[MVX_DIR_INPUT].width / session->dsl_ratio.hor;
|
|
+ *height = session->port[MVX_DIR_INPUT].height / session->dsl_ratio.ver;
|
|
+ *width &= ~(1);
|
|
+ *height &= ~(1);
|
|
+ }
|
|
+ /* Interlaced input is not supported by the firmware. */
|
|
+ if (dir == MVX_DIR_INPUT)
|
|
+ *interlaced = false;
|
|
+
|
|
+ if (mvx_is_afbc(format) != false) {
|
|
+ unsigned int afbc_alloc_bytes =
|
|
+ session->port[dir].afbc_alloc_bytes;
|
|
+ if (*nplanes <= 0)
|
|
+ size[0] = 0;
|
|
+
|
|
+ if (dir == MVX_DIR_INPUT) {
|
|
+ /* This is basically a worst-case calculation based on a size rounded up to the tile size. */
|
|
+ int s1 = calc_afbc_size(session, format, *width,
|
|
+ *height, false, false, false, //*height, false, false, false,
|
|
+ *interlaced);
|
|
+ int s2 = calc_afbc_size(session, format, *width,
|
|
+ *height, false, false, true, //*height, false, false, false,
|
|
+ *interlaced);
|
|
+ int s = max_t(unsigned int, s1, s2);
|
|
+ if (s < 0)
|
|
+ return s;
|
|
+
|
|
+ size[0] = max_t(unsigned int, size[0], s);
|
|
+ }
|
|
+
|
|
+ if (*interlaced != false)
|
|
+ afbc_alloc_bytes *= 2;
|
|
+
|
|
+ size[0] = max_t(unsigned int, size[0],
|
|
+ afbc_alloc_bytes);
|
|
+ size[0] = roundup(size[0], PAGE_SIZE);
|
|
+
|
|
+ *nplanes = 1;
|
|
+ } else if (mvx_is_frame(format) != false) {
|
|
+ ret = mvx_buffer_frame_dim(format, *width, *height, nplanes,
|
|
+ stride, size);
|
|
+ } else {
|
|
+ /*
|
|
+ * For compressed formats the size should be the maximum number
|
|
+ * of bytes an image is expected to become. This is calculated
|
|
+ * as width * height * 2 B/px / 2. Size should be at least one
|
|
+ * page.
|
|
+ */
|
|
+
|
|
+ stride[0] = 0;
|
|
+
|
|
+ if (*nplanes <= 0)
|
|
+ size[0] = 0;
|
|
+
|
|
+ size[0] = max_t(unsigned int, size[0], PAGE_SIZE);
|
|
+ size[0] = max_t(unsigned int, size[0], (*width) * (*height));
|
|
+ size[0] = roundup(size[0], PAGE_SIZE);
|
|
+
|
|
+ *nplanes = 1;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
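+/**
+ * watchdog_work() - Handle a firmware watchdog timeout. On the first timeout
+ * a firmware state dump is requested and the watchdog is restarted with a
+ * 3000 ms timeout; on the second timeout the session is failed with -ETIME.
+ */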
+static void watchdog_work(struct work_struct *work)
|
|
+{
|
|
+ struct mvx_session *session =
|
|
+ container_of(work, struct mvx_session, watchdog_work);
|
|
+ int ret;
|
|
+
|
|
+ mutex_lock(session->isession.mutex);
|
|
+ session->watchdog_count++;
|
|
+
|
|
+ MVX_SESSION_WARN(session, "Watchdog timeout. count=%u. is_encoder=%d. in_fmt=%d, out_fmt=%d",
|
|
+ session->watchdog_count, is_encoder(session), session->port[MVX_DIR_INPUT].format, session->port[MVX_DIR_OUTPUT].format);
|
|
+
|
|
+ /* Print debug information. */
|
|
+ print_debug(session);
|
|
+
|
|
+ if (session->watchdog_count == 1) {
|
|
+ /* Request firmware to dump its state. */
|
|
+ fw_dump(session);
|
|
+
|
|
+ /* Restart watchdog. */
|
|
+ watchdog_start(session, 3000);
|
|
+ } else {
|
|
+ send_event_error(session, -ETIME);
|
|
+ }
|
|
+
|
|
+ ret = kref_put(&session->isession.kref, session->isession.release);
|
|
+ if (ret != 0)
|
|
+ return;
|
|
+
|
|
+ mutex_unlock(session->isession.mutex);
|
|
+}
|
|
+
|
|
+static void watchdog_timeout(struct timer_list *timer)
|
|
+{
|
|
+ struct mvx_session *session =
|
|
+ container_of(timer, struct mvx_session, watchdog_timer);
|
|
+
|
|
+ queue_work(system_unbound_wq, &session->watchdog_work);
|
|
+}
|
|
+
|
|
+#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
|
|
+static void watchdog_timeout_legacy(unsigned long data)
|
|
+{
|
|
+ watchdog_timeout((struct timer_list *)data);
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_session_construct(struct mvx_session *session,
|
|
+ struct device *dev,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ struct mvx_fw_cache *cache,
|
|
+ struct mutex *mutex,
|
|
+ void (*destructor)(struct mvx_session *session),
|
|
+ void (*event)(struct mvx_session *session,
|
|
+ enum mvx_session_event event,
|
|
+ void *arg),
|
|
+ struct dentry *dentry)
|
|
+{
|
|
+ int i;
|
|
+ int ret;
|
|
+
|
|
+ if (event == NULL || destructor == NULL)
|
|
+ return -EINVAL;
|
|
+
|
|
+ memset(session, 0, sizeof(*session));
|
|
+ session->dev = dev;
|
|
+ session->client_ops = client_ops;
|
|
+ session->cache = cache;
|
|
+ kref_init(&session->isession.kref);
|
|
+ session->isession.release = session_destructor;
|
|
+ session->isession.mutex = mutex;
|
|
+ session->destructor = destructor;
|
|
+ session->event = event;
|
|
+ session->fw_event.fw_bin_ready = fw_bin_ready;
|
|
+ session->fw_event.arg = session;
|
|
+ session->fw_state = MVX_FW_STATE_STOPPED;
|
|
+ init_waitqueue_head(&session->waitq);
|
|
+ session->dentry = dentry;
|
|
+ session->port[MVX_DIR_INPUT].buffer_min = 1;
|
|
+ session->port[MVX_DIR_OUTPUT].buffer_min = 1;
|
|
+ session->port[MVX_DIR_OUTPUT].seq_param.afbc.buffers_min = 1;
|
|
+ session->port[MVX_DIR_OUTPUT].seq_param.planar.buffers_min = 1;
|
|
+ session->port[MVX_DIR_INPUT].buffer_allocated = 1;
|
|
+ session->port[MVX_DIR_OUTPUT].buffer_allocated = 1;
|
|
+ session->port[MVX_DIR_INPUT].scaling_shift = 0;
|
|
+ session->port[MVX_DIR_OUTPUT].scaling_shift = 0;
|
|
+ session->stream_escaping = MVX_TRI_UNSET;
|
|
+ session->ignore_stream_headers = MVX_TRI_UNSET;
|
|
+ session->frame_reordering = MVX_TRI_UNSET;
|
|
+ session->constr_ipred = MVX_TRI_UNSET;
|
|
+ session->entropy_sync = MVX_TRI_UNSET;
|
|
+ session->temporal_mvp = MVX_TRI_UNSET;
|
|
+ session->resync_interval = -1;
|
|
+ session->port[MVX_DIR_OUTPUT].roi_config_num = 0;
|
|
+ session->port[MVX_DIR_INPUT].roi_config_num = 0;
|
|
+ session->port[MVX_DIR_OUTPUT].qp_num = 0;
|
|
+ session->port[MVX_DIR_INPUT].qp_num = 0;
|
|
+ session->crop_left = 0;
|
|
+ session->crop_right = 0;
|
|
+ session->crop_top = 0;
|
|
+ session->crop_bottom = 0;
|
|
+ session->dsl_ratio.hor = 1;
|
|
+ session->dsl_ratio.ver = 1;
|
|
+ session->dsl_pos_mode = -1; /* disabled by default */
|
|
+ session->estimated_ddr_read_throughput = 0;
|
|
+ session->estimated_ddr_write_throughput = 0;
|
|
+ session->port[MVX_DIR_OUTPUT].buffer_on_hold_count = 0;
|
|
+ session->port[MVX_DIR_OUTPUT].pending_buffer_on_hold_count = 0;
|
|
+ session->port[MVX_DIR_OUTPUT].isallocparam = false;
|
|
+ session->eos_queued = false;
|
|
+ session->keep_freq_high = true;
|
|
+ session->is_suspend = false;
|
|
+ for (i = 0; i < MVX_BUFFER_NPLANES; i++) {
|
|
+ session->port[MVX_DIR_OUTPUT].buffer_size[i] = 0;
|
|
+ }
|
|
+ session->port[MVX_DIR_OUTPUT].buffer_count = 0;
|
|
+ session->port[MVX_DIR_INPUT].buffer_count = 0;
|
|
+ session->watchdog_count = 0;
|
|
+ session->watchdog_timeout = session_watchdog_timeout;
|
|
+ session->frame_id = 0;
|
|
+
|
|
+ INIT_LIST_HEAD(&session->buffer_corrupt_queue);
|
|
+ ret = mvx_mmu_construct(&session->mmu, session->dev);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ for (i = 0; i < MVX_DIR_MAX; i++)
|
|
+ INIT_LIST_HEAD(&session->port[i].buffer_queue);
|
|
+
|
|
+#if KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE
|
|
+ timer_setup(&session->watchdog_timer, watchdog_timeout, 0);
|
|
+#else
|
|
+ setup_timer(&session->watchdog_timer, watchdog_timeout_legacy,
|
|
+ (uintptr_t)&session->watchdog_timer);
|
|
+#endif
|
|
+ INIT_WORK(&session->watchdog_work, watchdog_work);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mvx_session_destruct(struct mvx_session *session)
|
|
+{
|
|
+ /* Destruct the session object. */
|
|
+ struct mvx_corrupt_buffer* corrupt_buf;
|
|
+ struct mvx_corrupt_buffer* tmp;
|
|
+
|
|
+ if (session->enable_profiling && session->frame_id > 0) {
|
|
+ MVX_SESSION_WARN(session, "[Debug] Destroy session. is_encoder=%d, frame_count=%d", is_encoder(session), session->frame_id);
|
|
+ MVX_SESSION_WARN(session, "[Debug] bus_write_bytes_total=%lu. bus_read_bytes_total=%lu. avg_write_bw=%lu, avg_read_bw=%lu, 30fps_write_bw=%lu, 30fps_read_bw=%lu", session->bus_write_bytes_total, session->bus_read_bytes_total, session->bus_write_bytes_total/session->frame_id, session->bus_read_bytes_total/session->frame_id, session->bus_write_bytes_total/session->frame_id * 30, session->bus_read_bytes_total/session->frame_id * 30);
|
|
+ }
|
|
+
|
|
+ MVX_SESSION_INFO(session, "Destroy session.");
|
|
+
|
|
+ release_fw_bin(session);
|
|
+ mvx_mmu_destruct(&session->mmu);
|
|
+ list_for_each_entry_safe(corrupt_buf, tmp, &session->buffer_corrupt_queue, head) {
|
|
+ list_del(&corrupt_buf->head);
|
|
+ vfree(corrupt_buf);
|
|
+ }
|
|
+}
|
|
+
|
|
+void mvx_session_get(struct mvx_session *session)
|
|
+{
|
|
+ kref_get(&session->isession.kref);
|
|
+}
|
|
+
|
|
+int mvx_session_put(struct mvx_session *session)
|
|
+{
|
|
+ return kref_put(&session->isession.kref,
|
|
+ session->isession.release);
|
|
+}
|
|
+
|
|
+void mvx_session_get_formats(struct mvx_session *session,
|
|
+ enum mvx_direction dir,
|
|
+ uint64_t *formats)
|
|
+{
|
|
+ uint64_t fw_formats;
|
|
+
|
|
+ session->client_ops->get_formats(session->client_ops, dir, formats);
|
|
+ mvx_fw_cache_get_formats(session->cache, dir, &fw_formats);
|
|
+
|
|
+ *formats &= fw_formats;
|
|
+}
|
|
+
|
|
+int mvx_session_try_format(struct mvx_session *session,
|
|
+ enum mvx_direction dir,
|
|
+ enum mvx_format format,
|
|
+ unsigned int *width,
|
|
+ unsigned int *height,
|
|
+ uint8_t *nplanes,
|
|
+ unsigned int *stride,
|
|
+ unsigned int *size,
|
|
+ bool *interlaced)
|
|
+{
|
|
+ return try_format(session, dir, format, width, height, nplanes,
|
|
+ stride, size, interlaced);
|
|
+}
|
|
+
|
|
+int mvx_session_set_format(struct mvx_session *session,
|
|
+ enum mvx_direction dir,
|
|
+ enum mvx_format format,
|
|
+ unsigned int *width,
|
|
+ unsigned int *height,
|
|
+ uint8_t *nplanes,
|
|
+ unsigned int *stride,
|
|
+ unsigned int *size,
|
|
+ bool *interlaced)
|
|
+{
|
|
+ struct mvx_session_port *port = &session->port[dir];
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_stream_on(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ ret = try_format(session, dir, format, width, height, nplanes,
|
|
+ stride, size, interlaced);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ /*
|
|
+ * If the bitstream format changes, then the firmware binary must be
|
|
+ * released.
|
|
+ */
|
|
+ if (mvx_is_bitstream(port->format) != false &&
|
|
+ format != port->format) {
|
|
+ if (IS_ERR(session->fw_bin) != false) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Can't set format when firmware binary is pending. dir=%d.",
|
|
+ dir);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ release_fw_bin(session);
|
|
+ }
|
|
+
|
|
+ /* Update port settings. */
|
|
+ port->format = format;
|
|
+ port->width = *width;
|
|
+ port->height = *height;
|
|
+ port->nplanes = *nplanes;
|
|
+ port->interlaced = *interlaced;
|
|
+ memcpy(port->stride, stride, sizeof(*stride) * MVX_BUFFER_NPLANES);
|
|
+ memcpy(port->size, size, sizeof(*size) * MVX_BUFFER_NPLANES);
|
|
+
|
|
+ /* TODO AFBC width will have to be provided by user space. */
|
|
+ if (dir == MVX_DIR_INPUT)
|
|
+ port->afbc_width = DIV_ROUND_UP(*width, 16);
|
|
+
|
|
+ /* Input dimensions dictate output dimensions. */
|
|
+ if (dir == MVX_DIR_INPUT) {
|
|
+ struct mvx_session_port *p = &session->port[MVX_DIR_OUTPUT];
|
|
+ (void)try_format(session, MVX_DIR_OUTPUT, p->format, &p->width,
|
|
+ &p->height, &p->nplanes, p->stride, p->size,
|
|
+ &p->interlaced);
|
|
+ }
|
|
+
|
|
+ if (dir == MVX_DIR_OUTPUT) {
|
|
+ if (mvx_is_afbc(port->format)) {
|
|
+ port->buffer_min = port->seq_param.afbc.buffers_min;
|
|
+ } else {
|
|
+ port->buffer_min = port->seq_param.planar.buffers_min;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_qbuf(struct mvx_session *session,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_buffer *buf)
|
|
+{
|
|
+ int ret;
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) == false ||
|
|
+ session->port[dir].is_flushing != false) {
|
|
+ list_add_tail(&buf->head, &session->port[dir].buffer_queue);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ ret = queue_buffer(session, dir, buf);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = switch_in(session);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_send_eos(struct mvx_session *session)
|
|
+{
|
|
+ struct mvx_session_port *port = &session->port[MVX_DIR_OUTPUT];
|
|
+ struct mvx_buffer *buf;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return fw_eos(session);
|
|
+
|
|
+ if (list_empty(&port->buffer_queue) != false) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Unable to signal EOS. Output buffer queue empty.");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ buf = list_first_entry(&port->buffer_queue, struct mvx_buffer, head);
|
|
+ list_del(&buf->head);
|
|
+
|
|
+ mvx_buffer_clear(buf);
|
|
+ buf->flags |= MVX_BUFFER_EOS;
|
|
+
|
|
+ session->event(session, MVX_SESSION_EVENT_BUFFER, buf);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
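+/**
+ * mvx_session_streamon() - Mark a port as stream on. Once both ports are
+ * stream on, the firmware binary is requested from the cache and the
+ * configuration continues asynchronously in fw_bin_ready().
+ *
+ * Return: 0 on success, else error code.
+ */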
+int mvx_session_streamon(struct mvx_session *session,
|
|
+ enum mvx_direction dir)
|
|
+{
|
|
+ enum mvx_direction bdir;
|
|
+ struct mvx_hw_ver hw_ver;
|
|
+ enum mvx_direction i;
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(session, "Stream on. dir=%u.", dir);
|
|
+
|
|
+ if (dir == MVX_DIR_OUTPUT && session->port[dir].isallocparam == true) {
|
|
+ session->port[dir].isallocparam = false;
|
|
+ session->port[dir].isreallocting = false;
|
|
+ }
|
|
+
|
|
+ /* Verify that we don't enable an already activated port. */
|
|
+ if (session->port[dir].stream_on != false)
|
|
+ return 0;
|
|
+
|
|
+ session->port[dir].stream_on = true;
|
|
+
|
|
+ /* Check that both ports are stream on. */
|
|
+ if (!is_stream_on(session))
|
|
+ return 0;
|
|
+
|
|
+ ddr_qos_request_update(session);
|
|
+
|
|
+ /* Verify that a firmware binary load is not in progress. */
|
|
+ if (IS_ERR(session->fw_bin)) {
|
|
+ ret = PTR_ERR(session->fw_bin);
|
|
+ goto disable_port;
|
|
+ }
|
|
+
|
|
+ /* If a firmware binary is already loaded, then we are done. */
|
|
+ if (session->fw_bin != NULL) {
|
|
+ ret = wait_pending(session);
|
|
+ if (ret != 0)
|
|
+ goto disable_port;
|
|
+
|
|
+ ret = fw_state_change(session, MVX_FW_STATE_RUNNING);
|
|
+ if (ret != 0)
|
|
+ goto disable_port;
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ bdir = get_bitstream_port(session);
|
|
+ if (bdir >= MVX_DIR_MAX) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Session only support decoding and encoding, but not transcoding. input_format=%u, output_format=%u.",
|
|
+ session->port[MVX_DIR_INPUT].format,
|
|
+ session->port[MVX_DIR_OUTPUT].format);
|
|
+ ret = -EINVAL;
|
|
+ goto disable_port;
|
|
+ }
|
|
+
|
|
+ /* Verify that client can handle input and output formats. */
|
|
+ for (i = MVX_DIR_INPUT; i < MVX_DIR_MAX; i++) {
|
|
+ uint64_t formats;
|
|
+
|
|
+ session->client_ops->get_formats(session->client_ops,
|
|
+ i, &formats);
|
|
+
|
|
+ if (!mvx_test_bit(session->port[i].format, &formats)) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Client cannot support requested formats. input_format=%u, output_format=%u.",
|
|
+ session->port[MVX_DIR_INPUT].format,
|
|
+ session->port[MVX_DIR_OUTPUT].format);
|
|
+ ret = -ENODEV;
|
|
+ goto disable_port;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Increment session reference count and flag fw bin as pending. */
|
|
+ mvx_session_get(session);
|
|
+ session->fw_bin = ERR_PTR(-EINPROGRESS);
|
|
+ session->client_ops->get_hw_ver(session->client_ops, &hw_ver);
|
|
+
|
|
+ /* Requesting firmware binary to be loaded. */
|
|
+ ret = mvx_fw_cache_get(session->cache, session->port[bdir].format,
|
|
+ bdir, &session->fw_event, &hw_ver,
|
|
+ session->isession.securevideo);
|
|
+ if (ret != 0) {
|
|
+ session->port[dir].stream_on = false;
|
|
+ session->fw_bin = NULL;
|
|
+ mvx_session_put(session);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+disable_port:
|
|
+ session->port[dir].stream_on = false;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
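+/**
+ * mvx_session_streamoff() - Mark a port as stream off. If the firmware is
+ * loaded, the port is flushed and the firmware is stopped and switched out;
+ * buffers left in the pending queue are returned to the client.
+ *
+ * Return: 0.
+ */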
+int mvx_session_streamoff(struct mvx_session *session,
|
|
+ enum mvx_direction dir)
|
|
+{
|
|
+ struct mvx_session_port *port = &session->port[dir];
|
|
+ struct mvx_session_port *port_input = &session->port[MVX_DIR_INPUT];
|
|
+ struct mvx_buffer *buf;
|
|
+ struct mvx_buffer *tmp;
|
|
+ bool force_stop = (dir == MVX_DIR_OUTPUT && port_input->stream_on == false);
|
|
+ int ret = 0;
|
|
+ int i;
|
|
+
|
|
+ MVX_SESSION_INFO(session, "Stream off. dir=%u, flushed=%d, is_flushing=%d, isreallocting=%d, isallocparam=%d, force_stop=%d", dir,port->flushed,port->is_flushing,port->isreallocting,port->isallocparam,force_stop);
|
|
+
|
|
+ port->stream_on = false;
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ /*
|
|
+ * Flush the ports if at least one buffer has been queued
|
|
+ * since last flush.
|
|
+ */
|
|
+ /* Workaround for a firmware issue: the firmware does not flush the output
+ * buffers when the output stream is stopped if the flushed flag is true,
+ * which results in no frame buffer output for seek operations. Force a
+ * flush of the output buffers if the input stream is off.
+ */
|
|
+
|
|
+ if ((port->flushed == false && port->is_flushing == false) || force_stop) {
|
|
+ ret = wait_pending(session);
|
|
+ if (ret != 0)
|
|
+ goto dequeue_buffers;
|
|
+ if (!(dir == MVX_DIR_OUTPUT && port->isreallocting == true) || force_stop) {
|
|
+ ret = fw_state_change(session, MVX_FW_STATE_STOPPED);
|
|
+ if (ret != 0)
|
|
+ goto dequeue_buffers;
|
|
+
|
|
+ ret = fw_flush(session, dir);
|
|
+ if (ret != 0) {
|
|
+ goto dequeue_buffers;
|
|
+ }
|
|
+ }
|
|
+ ret = wait_pending(session);
|
|
+ if (ret != 0)
|
|
+ goto dequeue_buffers;
|
|
+
|
|
+ send_irq(session);
|
|
+ }
|
|
+ if (dir == MVX_DIR_OUTPUT && port->isallocparam == true) {
|
|
+ wait_flush_done(session, dir);
|
|
+ for (i = 0; i < MVX_BUFFER_NPLANES; i++)
|
|
+ session->port[MVX_DIR_OUTPUT].buffer_size[i] = 0;
|
|
+ }
|
|
+ if (session->fw_state == MVX_FW_STATE_STOPPED) {
|
|
+ fw_switch_out(session);
|
|
+ wait_switch_out(session);
|
|
+ if (session->switched_in) {
|
|
+ MVX_SESSION_WARN(session, "warn: switch_in is %d when stream off done.", session->switched_in);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+dequeue_buffers:
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session, "stream off error. ret=%d. mvx_session=%p", ret, session);
|
|
+ session_unregister(session);
|
|
+ }
|
|
+
|
|
+ /* Return buffers in pending queue. */
|
|
+ list_for_each_entry_safe(buf, tmp, &port->buffer_queue, head) {
|
|
+ list_del(&buf->head);
|
|
+ session->event(session, MVX_SESSION_EVENT_BUFFER, buf);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
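+/**
+ * handle_fw_message() - Dispatch a message received from the firmware, such
+ * as updated allocation parameters or a returned buffer.
+ */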
+static void handle_fw_message(struct mvx_session *session,
|
|
+ struct mvx_fw_msg *msg)
|
|
+{
|
|
+ switch (msg->code) {
|
|
+ case MVX_FW_CODE_ALLOC_PARAM: {
|
|
+ struct mvx_session_port *input = &session->port[MVX_DIR_INPUT];
|
|
+ struct mvx_session_port *output = &session->port[MVX_DIR_OUTPUT];
|
|
+ unsigned int old_stride[MVX_BUFFER_NPLANES];
|
|
+
|
|
+ /* Update input port. */
|
|
+ input->width = msg->alloc_param.width;
|
|
+ input->height = msg->alloc_param.height;
|
|
+
|
|
+ try_format(session, MVX_DIR_INPUT, input->format, &input->width,
|
|
+ &input->height, &input->nplanes, input->stride,
|
|
+ input->size, &input->interlaced);
|
|
+
|
|
+ /*
|
|
+ * Update output port. Set number of valid planes to 0 to force
|
|
+ * stride to be recalculated.
|
|
+ */
|
|
+
|
|
+ output->nplanes = 0;
|
|
+ output->afbc_alloc_bytes = msg->alloc_param.afbc_alloc_bytes;
|
|
+ output->afbc_width = msg->alloc_param.afbc_width;
|
|
+ old_stride[0] = output->stride[0];
|
|
+ old_stride[1] = output->stride[1];
|
|
+ old_stride[2] = output->stride[2];
|
|
+
|
|
+ try_format(session, MVX_DIR_OUTPUT, output->format,
|
|
+ &output->width, &output->height, &output->nplanes,
|
|
+ output->stride, output->size,
|
|
+ &output->interlaced);
|
|
+
|
|
+ MVX_SESSION_INFO(session,
|
|
+ "Firmware rsp: Alloc param. width=%u, height=%u, nplanes=%u, size=[%u, %u, %u], stride=[%u, %u, %u], interlaced=%d.",
|
|
+ msg->alloc_param.width,
|
|
+ msg->alloc_param.height,
|
|
+ output->nplanes,
|
|
+ output->size[0],
|
|
+ output->size[1],
|
|
+ output->size[2],
|
|
+ output->stride[0],
|
|
+ output->stride[1],
|
|
+ output->stride[2],
|
|
+ output->interlaced);
|
|
+
|
|
+ // Update DDR QoS since the decoder output size changed.
|
|
+ ddr_qos_request_update(session);
|
|
+
|
|
+ if (output->size[0] > output->buffer_size[0] ||
|
|
+ output->size[1] > output->buffer_size[1] ||
|
|
+ output->size[2] > output->buffer_size[2]) {
|
|
+ output->isreallocting = true;
|
|
+ } else {
|
|
+ // Don't update strides for some VP9 cases; the gralloc buffer stride is fixed when there is no realloc.
|
|
+ output->stride[0] = old_stride[0];
|
|
+ output->stride[1] = old_stride[1];
|
|
+ output->stride[2] = old_stride[2];
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_BUFFER_GENERAL: {
|
|
+ struct mvx_buffer *buf = msg->buf;
|
|
+ session->port[buf->dir].buffer_count--;
|
|
+ session->event(session, MVX_SESSION_EVENT_BUFFER, buf);
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_BUFFER: {
|
|
+ struct mvx_buffer *buf = msg->buf;
|
|
+ struct mvx_session_port *output =
|
|
+ &session->port[MVX_DIR_OUTPUT];
|
|
+ struct mvx_v4l2_session *vsession =
|
|
+ container_of(session, struct mvx_v4l2_session, session);
|
|
+ MVX_SESSION_INFO(session,
|
|
+ "Firmware rsp: Buffer. dir=%u, len=[%u, %u, %u], flags=0x%08x, eos=%u",
|
|
+ buf->dir,
|
|
+ buf->planes[0].filled,
|
|
+ buf->planes[1].filled,
|
|
+ buf->planes[2].filled,
|
|
+ buf->flags,
|
|
+ (buf->flags & MVX_BUFFER_EOS) != 0);
|
|
+
|
|
+ session->port[buf->dir].buffer_count--;
|
|
+
|
|
+ if (buf->dir == MVX_DIR_OUTPUT && (buf->flags & MVX_BUFFER_EOS) != 0) {
|
|
+ session->eos_queued = false;
|
|
+ }
|
|
+
|
|
+ /*
+ * There is no point in flushing or invalidating an input buffer
+ * after it has been returned from the HW.
+ */
|
|
+ if (buf->dir == MVX_DIR_OUTPUT && mvx_is_frame(buf->format)) {
|
|
+ if (!(buf->flags & MVX_BUFFER_FRAME_PRESENT)) {
|
|
+ if (output->size[0] > mvx_buffer_size(buf, 0) ||
|
|
+ output->size[1] > mvx_buffer_size(buf, 1) ||
|
|
+ output->size[2] > mvx_buffer_size(buf, 2) ||
|
|
+ session->port[buf->dir].buffer_allocated < session->port[buf->dir].buffer_min) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_NEED_REALLOC;
|
|
+ //output->isreallocting = true;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (buf->dir == MVX_DIR_OUTPUT && vsession->port[MVX_DIR_OUTPUT].q_set == true)
|
|
+ mvx_buffer_synch(buf, DMA_FROM_DEVICE);
|
|
+
|
|
+ if (buf->dir == MVX_DIR_OUTPUT && !is_encoder(session)) {
|
|
+ session->port[MVX_DIR_OUTPUT].buffer_on_hold_count = session->port[MVX_DIR_OUTPUT].pending_buffer_on_hold_count;
|
|
+ }
|
|
+
|
|
+ session->event(session, MVX_SESSION_EVENT_BUFFER, buf);
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_COLOR_DESC: {
|
|
+ MVX_SESSION_INFO(session,
|
|
+ "Firmware rsp: Color desc.");
|
|
+ session->color_desc = msg->color_desc;
|
|
+ session->event(session, MVX_SESSION_EVENT_COLOR_DESC, NULL);
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_ERROR: {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Firmware rsp: Error. code=%u, message=%s.",
|
|
+ msg->error.error_code, msg->error.message);
|
|
+ print_debug(session);
|
|
+ fw_dump(session);
|
|
+
|
|
+ /*
|
|
+ * Release the dev session. It will prevent a dead session from
|
|
+ * blocking the scheduler.
|
|
+ */
|
|
+ watchdog_stop(session);
|
|
+ session_unregister(session);
|
|
+ send_event_error(session, -EINVAL);
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_FLUSH: {
|
|
+ MVX_SESSION_INFO(session, "Firmware rsp: Flushed. dir=%d.",
|
|
+ msg->flush.dir);
|
|
+ session->port[msg->flush.dir].is_flushing = false;
|
|
+ session->port[msg->flush.dir].flushed = true;
|
|
+ (void)queue_pending_buffers(session, msg->flush.dir);
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_IDLE: {
|
|
+ int ret;
|
|
+ struct mvx_fw_msg msg_ack;
|
|
+
|
|
+ MVX_SESSION_INFO(session, "Firmware rsp: Idle.");
|
|
+
|
|
+ session->idle_count++;
|
|
+
|
|
+ if (session->idle_count == 2)
|
|
+ fw_switch_out(session);
|
|
+
|
|
+ msg_ack.code = MVX_FW_CODE_IDLE_ACK;
|
|
+ ret = session->fw.ops.put_message(&session->fw, &msg_ack);
|
|
+ if (ret == 0)
|
|
+ ret = send_irq(session);
|
|
+
|
|
+ if (ret != 0)
|
|
+ send_event_error(session, ret);
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_JOB: {
|
|
+ MVX_SESSION_INFO(session, "Firmware rsp: Job.");
|
|
+ (void)fw_job(session, 1);
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_PONG:
|
|
+ MVX_SESSION_INFO(session, "Firmware rsp: Pong.");
|
|
+ break;
|
|
+ case MVX_FW_CODE_SEQ_PARAM: {
|
|
+ struct mvx_session_port *p = &session->port[MVX_DIR_OUTPUT];
|
|
+
|
|
+ MVX_SESSION_INFO(session,
|
|
+ "Firmware rsp: Seq param. planar={buffers_min=%u}, afbc={buffers_min=%u}, interlaced=%d.",
|
|
+ msg->seq_param.planar.buffers_min,
|
|
+ msg->seq_param.afbc.buffers_min,
|
|
+ p->interlaced);
|
|
+
|
|
+ if (mvx_is_afbc(p->format) != false)
|
|
+ p->buffer_min = msg->seq_param.afbc.buffers_min;
|
|
+ else
|
|
+ p->buffer_min = msg->seq_param.planar.buffers_min;
|
|
+ p->seq_param = msg->seq_param;
|
|
+
|
|
+ (void)fw_flush(session, MVX_DIR_OUTPUT);
|
|
+
|
|
+ // Force reallocation when a seq_param message is received, since it will flush the output buffers.
|
|
+ p->isreallocting = true;
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_SET_OPTION: {
|
|
+ MVX_SESSION_INFO(session, "Firmware rsp: Set option.");
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_STATE_CHANGE: {
|
|
+ MVX_SESSION_INFO(session,
|
|
+ "Firmware rsp: State changed. old=%s, new=%s.",
|
|
+ state_to_string(session->fw_state),
|
|
+ state_to_string(msg->state));
|
|
+ session->fw_state = msg->state;
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_SWITCH_IN: {
|
|
+ watchdog_start(session, session->watchdog_timeout);
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_SWITCH_OUT: {
|
|
+ MVX_SESSION_INFO(session, "Firmware rsp: Switched out.");
|
|
+
|
|
+ watchdog_stop(session);
|
|
+ session->switched_in = false;
|
|
+
|
|
+ if (session->is_suspend == false && ((session->fw_state == MVX_FW_STATE_RUNNING &&
|
|
+ session->idle_count < 2) ||
|
|
+ session->fw.msg_pending > 0))
|
|
+ switch_in(session);
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_DUMP:
|
|
+ break;
|
|
+ case MVX_FW_CODE_DEBUG:
|
|
+ break;
|
|
+ case MVX_FW_CODE_UNKNOWN: {
|
|
+ print_debug(session);
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_DPB_HELD_FRAMES: {
|
|
+ session->port[MVX_DIR_OUTPUT].pending_buffer_on_hold_count = msg->arg;
|
|
+ break;
|
|
+ }
|
|
+ case MVX_FW_CODE_MAX:
|
|
+ break;
|
|
+ default:
|
|
+ MVX_SESSION_WARN(session, "Unknown fw msg code. code=%u.",
|
|
+ msg->code);
|
|
+ }
|
|
+}
|
|
+
|
|
+void mvx_session_irq(struct mvx_if_session *isession)
|
|
+{
|
|
+ struct mvx_session *session = mvx_if_session_to_session(isession);
|
|
+ int ret;
|
|
+ struct mvx_session_port *output = &session->port[MVX_DIR_OUTPUT];
|
|
+
|
|
+ if (is_fw_loaded(session) == false)
|
|
+ return;
|
|
+
|
|
+ if (IS_ERR_OR_NULL(session->csession)) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ ret = session->fw.ops.handle_rpc(&session->fw);
|
|
+ if (ret < 0) {
|
|
+ send_event_error(session, ret);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ do {
|
|
+ struct mvx_fw_msg msg;
|
|
+
|
|
+ watchdog_update(session, session->watchdog_timeout);
|
|
+
|
|
+ ret = session->fw.ops.get_message(&session->fw, &msg);
|
|
+ if (ret < 0) {
|
|
+ session_unregister(session);
|
|
+ send_event_error(session, ret);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (ret > 0)
|
|
+ handle_fw_message(session, &msg);
|
|
+ } while (ret > 0 && session->error == 0);
|
|
+
|
|
+ if (output->isallocparam == false && output->isreallocting == true) {
|
|
+ output->isallocparam = true;
|
|
+ session->event(session, MVX_SESSION_EVENT_PORT_CHANGED, (void *)MVX_DIR_OUTPUT);
|
|
+ }
|
|
+
|
|
+#ifdef MVX_FW_DEBUG_ENABLE
|
|
+ ret = session->fw.ops.handle_fw_ram_print(&session->fw);
|
|
+ if (ret < 0) {
|
|
+ send_event_error(session, ret);
|
|
+ return;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ wake_up(&session->waitq);
|
|
+}
|
|
+
|
|
+void mvx_session_port_show(struct mvx_session_port *port,
|
|
+ struct seq_file *s)
|
|
+{
|
|
+ mvx_seq_printf(s, "mvx_session_port", 0, "%p\n", port);
|
|
+ mvx_seq_printf(s, "format", 1, "%08x\n", port->format);
|
|
+ mvx_seq_printf(s, "width", 1, "%u\n", port->width);
|
|
+ mvx_seq_printf(s, "height", 1, "%u\n", port->height);
|
|
+ mvx_seq_printf(s, "buffer_min", 1, "%u\n", port->buffer_min);
|
|
+ mvx_seq_printf(s, "buffer_count", 1, "%u\n", port->buffer_count);
|
|
+}
|
|
+
|
|
+int mvx_session_set_securevideo(struct mvx_session *session,
|
|
+ bool securevideo)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->isession.securevideo = securevideo;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_frame_rate(struct mvx_session *session,
|
|
+ int64_t frame_rate)
|
|
+{
|
|
+ int ret;
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ /* Frame rate values are limited to the range of 1 to 256 frames per second. */
|
|
+ if (frame_rate < (1 << 16)) frame_rate = (1 << 16); /*1 fps*/
|
|
+ else if (frame_rate > (256 << 16)) frame_rate = (256 << 16); /*256 fps*/
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_FRAME_RATE;
|
|
+ option.frame_rate = frame_rate;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ // Update DDR QoS since the frame rate changed.
|
|
+ if (frame_rate != session->frame_rate) {
|
|
+ session->frame_rate = frame_rate;
|
|
+ ddr_qos_request_update(session);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_rate_control(struct mvx_session *session,
|
|
+ bool enabled)
|
|
+{
|
|
+ int ret;
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->rc_enabled = enabled;
|
|
+ //set default rc type.
|
|
+ session->rc_type = enabled?((session->rc_type)?session->rc_type:MVX_OPT_RATE_CONTROL_MODE_STANDARD):0;
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_TARGET_BITRATE;
|
|
+ option.target_bitrate =
|
|
+ (session->rc_enabled != false) ?
|
|
+ session->target_bitrate : 0;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_bitrate(struct mvx_session *session,
|
|
+ int bitrate)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->target_bitrate = bitrate;
|
|
+
|
|
+ if (is_fw_loaded(session) != false && session->rc_enabled != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_RATE_CONTROL;
|
|
+ option.rate_control.target_bitrate = session->target_bitrate;
|
|
+ option.rate_control.rate_control_mode = session->rc_type;
|
|
+
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_bitrate_control(struct mvx_session *session,
|
|
+ struct mvx_buffer_param_rate_control *rc){
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->rc_type = rc->rate_control_mode;
|
|
+ session->target_bitrate = rc->target_bitrate;
|
|
+ session->maximum_bitrate = rc->maximum_bitrate;
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_RATE_CONTROL;
|
|
+ option.rate_control.target_bitrate = rc->target_bitrate;
|
|
+ option.rate_control.rate_control_mode = rc->rate_control_mode;
|
|
+ if (rc->rate_control_mode == MVX_OPT_RATE_CONTROL_MODE_C_VARIABLE) {
|
|
+ option.rate_control.maximum_bitrate = rc->maximum_bitrate;
|
|
+ }
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_crop_left(struct mvx_session * session, int32_t left){
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->crop_left = left;
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_CROP_LEFT;
|
|
+ option.crop_left = left;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+
|
|
+}
|
|
+
|
|
+int mvx_session_set_crop_right(struct mvx_session * session, int32_t right){
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->crop_right = right;
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_CROP_RIGHT;
|
|
+ option.crop_right = right;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+
|
|
+}
|
|
+
|
|
+int mvx_session_set_crop_top(struct mvx_session * session, int32_t top){
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->crop_top = top;
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_CROP_TOP;
|
|
+ option.crop_top = top;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+
|
|
+}
|
|
+
|
|
+int mvx_session_set_crop_bottom(struct mvx_session * session, int32_t bottom){
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->crop_bottom = bottom;
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_CROP_BOTTOM;
|
|
+ option.crop_bottom = bottom;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+
|
|
+}
|
|
+
|
|
+int mvx_session_set_nalu_format(struct mvx_session *session,
|
|
+ enum mvx_nalu_format fmt)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->nalu_format = fmt;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_stream_escaping(struct mvx_session *session,
|
|
+ enum mvx_tristate status)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->stream_escaping = status;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_profile(struct mvx_session *session,
|
|
+ enum mvx_format format,
|
|
+ enum mvx_profile profile)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->profile[format] = profile;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_level(struct mvx_session *session,
|
|
+ enum mvx_format format,
|
|
+ enum mvx_level level)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->level[format] = level;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_ignore_stream_headers(struct mvx_session *session,
|
|
+ enum mvx_tristate status)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->ignore_stream_headers = status;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_frame_reordering(struct mvx_session *session,
|
|
+ enum mvx_tristate status)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->frame_reordering = status;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_intbuf_size(struct mvx_session *session,
|
|
+ int size)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->intbuf_size = size;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_p_frames(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->p_frames = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_b_frames(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->b_frames = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_gop_type(struct mvx_session *session,
|
|
+ enum mvx_gop_type gop_type)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->gop_type = gop_type;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_cyclic_intra_refresh_mb(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->cyclic_intra_refresh_mb = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_constr_ipred(struct mvx_session *session,
|
|
+ enum mvx_tristate status)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->constr_ipred = status;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_entropy_sync(struct mvx_session *session,
|
|
+ enum mvx_tristate status)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->entropy_sync = status;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_temporal_mvp(struct mvx_session *session,
|
|
+ enum mvx_tristate status)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->temporal_mvp = status;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_tile_rows(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->tile_rows = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_tile_cols(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->tile_cols = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_min_luma_cb_size(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->min_luma_cb_size = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_mb_mask(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ /*
+ * This control is not implemented.
+ */
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->mb_mask = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_entropy_mode(struct mvx_session *session,
|
|
+ enum mvx_entropy_mode mode)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->entropy_mode = mode;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_multi_slice_mode(struct mvx_session *session,
|
|
+ enum mvx_multi_slice_mode mode)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->multi_slice_mode = mode;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_multi_slice_max_mb(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->multi_slice_max_mb = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_vp9_prob_update(struct mvx_session *session,
|
|
+ enum mvx_vp9_prob_update mode)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->vp9_prob_update = mode;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_mv_h_search_range(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->mv_h_search_range = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_mv_v_search_range(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->mv_v_search_range = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_bitdepth_chroma(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->bitdepth_chroma = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_bitdepth_luma(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->bitdepth_luma = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_force_chroma_format(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->force_chroma_format = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_rgb_to_yuv_mode(struct mvx_session *session,
|
|
+ enum mvx_rgb_to_yuv_mode mode)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->rgb_to_yuv = mode;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_band_limit(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->band_limit = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_cabac_init_idc(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->cabac_init_idc = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_i_frame_qp(struct mvx_session *session,
|
|
+ enum mvx_format fmt,
|
|
+ int qp)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ enum mvx_direction dir = get_bitstream_port(session);
|
|
+
|
|
+ fmt = session->port[dir].format;
|
|
+ ret = fw_set_qp(session, MVX_FW_SET_QP_I, qp);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ session->qp[fmt].i_frame = qp;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_p_frame_qp(struct mvx_session *session,
|
|
+ enum mvx_format fmt,
|
|
+ int qp)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ enum mvx_direction dir = get_bitstream_port(session);
|
|
+
|
|
+ fmt = session->port[dir].format;
|
|
+ ret = fw_set_qp(session, MVX_FW_SET_QP_P, qp);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ session->qp[fmt].p_frame = qp;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_b_frame_qp(struct mvx_session *session,
|
|
+ enum mvx_format fmt,
|
|
+ int qp)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ enum mvx_direction dir = get_bitstream_port(session);
|
|
+
|
|
+ fmt = session->port[dir].format;
|
|
+ ret = fw_set_qp(session, MVX_FW_SET_QP_B, qp);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ session->qp[fmt].b_frame = qp;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_min_qp(struct mvx_session *session,
|
|
+ enum mvx_format fmt,
|
|
+ int qp)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+ enum mvx_direction dir = get_bitstream_port(session);
|
|
+ int codec = session->port[dir].format;
|
|
+
|
|
+ option.code = MVX_FW_SET_QP_RANGE;
|
|
+ option.qp_range.min = qp;
|
|
+ option.qp_range.max = session->qp[codec].max;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ session->qp[fmt].min = qp;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_max_qp(struct mvx_session *session,
|
|
+ enum mvx_format fmt,
|
|
+ int qp)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+ enum mvx_direction dir = get_bitstream_port(session);
|
|
+ int codec = session->port[dir].format;
|
|
+
|
|
+ option.code = MVX_FW_SET_QP_RANGE;
|
|
+ option.qp_range.min = session->qp[codec].min;
|
|
+ option.qp_range.max = qp;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ session->qp[fmt].max = qp;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_resync_interval(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->resync_interval = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_jpeg_quality(struct mvx_session *session,
|
|
+ int val)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->jpeg_quality = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_get_color_desc(struct mvx_session *session,
|
|
+ struct mvx_fw_color_desc *color_desc)
|
|
+{
|
|
+ *color_desc = session->color_desc;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_color_desc(struct mvx_session *session,
|
|
+ struct mvx_fw_color_desc *color_desc)
|
|
+{
|
|
+ int ret = 0;
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->color_desc = *color_desc;
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_COLOUR_DESC;
|
|
+ option.colour_desc = *color_desc;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_roi_regions(struct mvx_session *session,
|
|
+ struct mvx_roi_config *roi)
|
|
+{
|
|
+ int ret = 0;
|
|
+ int roi_config_num = 0;
|
|
+ if (is_fw_loaded(session) == false ||
|
|
+ session->port[MVX_DIR_INPUT].is_flushing != false) {
|
|
+ roi_config_num = session->port[MVX_DIR_INPUT].roi_config_num;
|
|
+ if (roi_config_num < MVX_ROI_QP_NUMS) {
|
|
+ MVX_SESSION_INFO(session, "fw is not ready!!!, pending roi num:%d",roi_config_num);
|
|
+ session->port[MVX_DIR_INPUT].roi_config_queue[roi_config_num] = *roi;
|
|
+ session->port[MVX_DIR_INPUT].roi_config_num++;
|
|
+ } else {
|
|
+ MVX_SESSION_ERR(session, "fw is not ready for long time, too many roi pending:%d",roi_config_num);
|
|
+ }
|
|
+ return 0;
|
|
+ }
|
|
+ ret = queue_roi_regions(session, roi);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_qp_epr(struct mvx_session *session,
|
|
+ int *qp)
|
|
+{
|
|
+ int ret = 0;
|
|
+ int qp_num = 0;
|
|
+ if (is_fw_loaded(session) == false ||
|
|
+ session->port[MVX_DIR_INPUT].is_flushing != false) {
|
|
+ qp_num = session->port[MVX_DIR_INPUT].qp_num;
|
|
+ if (qp_num < MVX_ROI_QP_NUMS) {
|
|
+ MVX_SESSION_WARN(session, "fw is not ready!!!, pending qp num:%d",qp_num);
|
|
+ session->port[MVX_DIR_INPUT].qp_queue[qp_num] = *qp;
|
|
+ session->port[MVX_DIR_INPUT].qp_num++;
|
|
+ } else {
|
|
+ MVX_SESSION_ERR(session, "fw is not ready for long time, too many qp pending:%d",qp_num);
|
|
+ }
|
|
+ return 0;
|
|
+ }
|
|
+ ret = queue_qp_epr(session, qp);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_sei_userdata(struct mvx_session *session,
|
|
+ struct mvx_sei_userdata *userdata)
|
|
+{
|
|
+ int ret = 0;
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->sei_userdata = *userdata;
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_SEI_USERDATA;
|
|
+ option.userdata = *userdata;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_session_set_hrd_buffer_size(struct mvx_session *session,
|
|
+ int size)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->nHRDBufsize = size;
|
|
+
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_HRD_BUF_SIZE;
|
|
+ option.nHRDBufsize = size;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_dsl_frame(struct mvx_session *session,
|
|
+ struct mvx_dsl_frame *dsl)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->dsl_frame.width = dsl->width;
|
|
+ session->dsl_frame.height = dsl->height;
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_DSL_FRAME;
|
|
+ option.dsl_frame.width = dsl->width;
|
|
+ option.dsl_frame.height = dsl->height;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_dsl_ratio(struct mvx_session *session,
|
|
+ struct mvx_dsl_ratio *dsl)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->dsl_ratio.hor = dsl->hor;
|
|
+ session->dsl_ratio.ver = dsl->ver;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_long_term_ref(struct mvx_session *session,
|
|
+ struct mvx_long_term_ref *ltr)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ session->mvx_ltr.mode = ltr->mode;
|
|
+ session->mvx_ltr.period = ltr->period;
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+
|
|
+ option.code = MVX_FW_SET_LONG_TERM_REF;
|
|
+ option.ltr.mode = ltr->mode;
|
|
+ option.ltr.period = ltr->period;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_dsl_mode(struct mvx_session *session,
|
|
+ int *mode)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->dsl_pos_mode = *mode;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+int mvx_session_set_force_idr(struct mvx_session *session)
|
|
+{
|
|
+ int ret;
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+ if (is_fw_loaded(session) != false) {
|
|
+ struct mvx_fw_set_option option;
|
|
+ /*reset GOP type to force idr frame.*/
|
|
+ option.code = MVX_FW_SET_GOP_RESET;
|
|
+ ret = fw_set_option(session, &option);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to GOP reset.");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_watchdog_timeout(struct mvx_session *session, int timeout)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
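+ /* Descriptive note (inferred from the conversion below and the watchdog_timeout field in mvx_session.h): the timeout argument appears to be given in seconds and is stored in milliseconds. */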
+ session->watchdog_timeout = timeout*1000;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_session_set_profiling(struct mvx_session *session, int enable)
|
|
+{
|
|
+ if (session->error != 0)
|
|
+ return session->error;
|
|
+
|
|
+ if (is_fw_loaded(session) != false)
|
|
+ return -EBUSY;
|
|
+
|
|
+ session->enable_profiling = enable;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/mvx_session.h b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_session.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/mvx_session.h
|
|
@@ -0,0 +1,1144 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_SESSION_H_
|
|
+#define _MVX_SESSION_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/kref.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/wait.h>
|
|
+#include "mvx_buffer.h"
|
|
+#include "mvx_firmware.h"
|
|
+#include "mvx_firmware_cache.h"
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_log_group.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define MVX_SESSION_LOG(severity, session, msg, ...) \
|
|
+ MVX_LOG_PRINT_SESSION(&mvx_log_session_if, severity, session, \
|
|
+ msg, ## __VA_ARGS__)
|
|
+
|
|
+#define MVX_SESSION_VERBOSE(session, msg, ...) \
|
|
+ MVX_SESSION_LOG(MVX_LOG_VERBOSE, session, msg, ## __VA_ARGS__)
|
|
+
|
|
+#define MVX_SESSION_DEBUG(session, msg, ...) \
|
|
+ MVX_SESSION_LOG(MVX_LOG_DEBUG, session, msg, ## __VA_ARGS__)
|
|
+
|
|
+#define MVX_SESSION_INFO(session, msg, ...) \
|
|
+ MVX_SESSION_LOG(MVX_LOG_INFO, session, msg, ## __VA_ARGS__)
|
|
+
|
|
+#define MVX_SESSION_WARN(session, msg, ...) \
|
|
+ MVX_SESSION_LOG(MVX_LOG_WARNING, session, msg, ## __VA_ARGS__)
|
|
+
|
|
+#define MVX_SESSION_ERR(session, msg, ...) \
|
|
+ MVX_SESSION_LOG(MVX_LOG_ERROR, session, msg, ## __VA_ARGS__)
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct device;
|
|
+struct file;
|
|
+struct mvx_csched;
|
|
+struct mvx_fw_cache;
|
|
+struct poll_table_struct;
|
|
+
|
|
+/**
|
|
+ * enum mvx_session_event - Session events.
|
|
+ * @MVX_SESSION_EVENT_BUFFER: struct mvx_buffer.
|
|
+ * @MVX_SESSION_EVENT_PORT_CHANGED: enum mvx_direction.
|
|
+ * @MVX_SESSION_EVENT_COLOR_DESC: struct mvx_fw_color_desc.
|
|
+ * @MVX_SESSION_EVENT_ERROR: void
|
|
+ */
|
|
+enum mvx_session_event {
|
|
+ MVX_SESSION_EVENT_BUFFER,
|
|
+ MVX_SESSION_EVENT_PORT_CHANGED,
|
|
+ MVX_SESSION_EVENT_COLOR_DESC,
|
|
+ MVX_SESSION_EVENT_ERROR
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_session_port - Session input and output port settings.
|
|
+ * @format: Port format.
|
|
+ * @width: Width in pixels.
|
|
+ * @height: Height in pixels.
|
|
+ * @nplanes: Number of planes for the current format.
|
|
+ * @stride: Stride per line in bytes for each plane.
|
|
+ * @size: Size in bytes for each plane.
|
|
+ * @afbc_alloc_bytes: Minimum number of bytes required for AFBC.
|
|
+ * @afbc_width: AFBC width in superblocks.
|
|
+ * @stream_on: Boolean if the port has been enabled.
|
|
+ * @buffer_min: Minimum number of buffers required.
|
|
+ * @buffer_count: Number of buffers currently queued to firmware.
|
|
+ * @buffer_queue: Buffers waiting to be queued to the firmware.
|
|
+ * @is_flushing: Set true when port is waiting for a fw flush confirm.
|
|
+ * @flushed: Port has been flushed and no buffers have been queued.
|
|
+ * @interlaced: True if frames are interlaced.
|
|
+ */
|
|
+struct mvx_session_port {
|
|
+ enum mvx_format format;
|
|
+ unsigned int width;
|
|
+ unsigned int height;
|
|
+ uint8_t nplanes;
|
|
+ unsigned int stride[MVX_BUFFER_NPLANES];
|
|
+ unsigned int size[MVX_BUFFER_NPLANES];
|
|
+ unsigned int afbc_alloc_bytes;
|
|
+ unsigned int afbc_width;
|
|
+ bool stream_on;
|
|
+ unsigned int buffer_min;
|
|
+ unsigned int buffer_count;
|
|
+ unsigned int buffer_allocated;
|
|
+ struct list_head buffer_queue;
|
|
+ bool is_flushing;
|
|
+ bool flushed;
|
|
+ bool interlaced;
|
|
+ unsigned int scaling_shift;
|
|
+ struct mvx_roi_config roi_config_queue[MVX_ROI_QP_NUMS];
|
|
+ int qp_queue[MVX_ROI_QP_NUMS];
|
|
+ unsigned int roi_config_num;
|
|
+ unsigned int qp_num;
|
|
+ bool isreallocting;
|
|
+ bool isallocparam;
|
|
+ struct mvx_fw_seq_param seq_param;
|
|
+ uint32_t buffer_on_hold_count; /**< Number of buffers that the firmware has finished processing,
+ * but is holding on to for frame reordering. */
|
|
+ uint32_t pending_buffer_on_hold_count; /**< Some FW versions signal the DPB in the form of a message
|
|
+ * that will be valid once the next frame has been returned
|
|
+ * by the FW. In these cases, the new DPB is stored in this
|
|
+ * variable and copied to buffer_on_hold_count once the next
|
|
+ * buffer arrives. */
|
|
+ unsigned int buffer_size[MVX_BUFFER_NPLANES];
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_session_qp - QP settings.
|
|
+ * @i_frame: QP for I frame.
|
|
+ * @p_frame: QP for P frame.
|
|
+ * @b_frame: QP for B frame.
|
|
+ * @min: Minimum QP value.
|
|
+ * @max: Maximum QP value.
|
|
+ */
|
|
+struct mvx_session_qp {
|
|
+ int i_frame;
|
|
+ int p_frame;
|
|
+ int b_frame;
|
|
+ int min;
|
|
+ int max;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_session - Session instance.
|
|
+ * @dev: Pointer to device.
|
|
+ * @cache: Pointer to firmware cache.
|
|
+ * @isession: This instance is used to register the session to the
|
|
+ * client.
|
|
+ * @client_ops: Client operations.
|
|
+ * @csession: Client session.
|
|
+ * @destructor: When the isession.kref reaches zero and after the
|
|
+ * session
|
|
+ * object has been destructed, this callback routine is
|
|
+ * invoked
|
|
+ * to allow the owner of the session object to clean up any
|
|
+ * allocated resources.
|
|
+ * @event: Event callback routine.
|
|
+ * @mutex: Mutex protecting the session objects.
|
|
+ * @port: Input and output port settings.
|
|
+ * @mmu: MMU instance.
|
|
+ * @fw: Firmware instance.
|
|
+ * @fw_bin: Pointer to firmware binary.
|
|
+ * @fw_event: Event handler for loading a firmware binary.
|
|
+ * @fw_state: Current firmware state.
|
|
+ * @waitq: Wait queue to signal changes to the session.
|
|
+ * @dentry: Debugfs directory entry for the session.
|
|
+ * @frame_rate: Frame rate in Q16 format.
|
|
+ * @target_bitrate: Bitrate.
|
|
+ * @rc_enabled: Defines if rate control is enabled for the session.
|
|
+ * @profile: Profile for encoder.
|
|
+ * @level: Level for encoder.
|
|
+ * @nalu_format: NALU format.
|
|
+ * @stream_escaping: Defines if stream escaping is enabled.
|
|
+ * @ignore_stream_headers:Defines if decoder should ignore stream headers.
|
|
+ * @frame_reordering: Defines if decoder should reorder frames.
|
|
+ * @intbuf_size: Suggested internal buffer size.
|
|
+ * @p_frames: Number of P-frames for encoder.
|
|
+ * @b_frames: Number of B-frames for encoder.
|
|
+ * @gop_type: GOP type.
|
|
+ * @cyclic_intra_refresh_mb:Intra MB refresh.
|
|
+ * @constr_ipred: Constrained intra prediction.
|
|
+ * @entropy_sync: Enable entropy synchronization.
|
|
+ * @temporal_mvp: Enable temporal motion vector prediction.
|
|
+ * @tile_rows: Tile size.
|
|
+ * @tile_cols: Tile size.
|
|
+ * @min_luma_cb_size: Minimum luma coding block size.
|
|
+ * @mb_mask: MB mask.
|
|
+ * @entropy_mode: Entropy mode.
|
|
+ * @multi_slice_mode: Multi slice mode.
|
|
+ * @multi_slice_max_mb: Maximum number of macroblocks in a slice.
|
|
+ * @vp9_prob_update: Probability update method.
|
|
+ * @mv_h_search_range: Horizontal search range.
|
|
+ * @mv_v_search_range: Vertical search range.
|
|
+ * @bitdepth_chroma: Bitdepth for chroma.
|
|
+ * @bitdepth_luma: Bitdepth for luma.
|
|
+ * @force_chroma_format:Chroma format.
|
|
+ * @rgb_to_yuv: RGB to YUV conversion mode.
|
|
+ * @band_limit: Maximum bandwidth limit.
|
|
+ * @cabac_init_idc: CABAC initialization table.
|
|
+ * @qp: QP settings per codec.
|
|
+ * @resync_interval: JPEG resync interval.
|
|
+ * @jpeg_quality: JPEG quality level.
|
|
+ * @color_desc: HDR color description.
|
|
+ *
|
|
+ * There is one session for each file handle that has been opened from the
|
|
+ * video device.
|
|
+ *
|
|
+ * There is a separate set of QP controls for every codec. There is no
|
|
+ * information on which codec will be used when controls are initialized with
|
|
+ * their default values. That's why a set of QP-settings is maintained for
|
|
+ * every codec.
|
|
+ */
|
|
+struct mvx_session {
|
|
+ struct device *dev;
|
|
+ struct mvx_fw_cache *cache;
|
|
+ struct mvx_if_session isession;
|
|
+ struct mvx_client_ops *client_ops;
|
|
+ struct mvx_client_session *csession;
|
|
+ void (*destructor)(struct mvx_session *session);
|
|
+ void (*event)(struct mvx_session *session,
|
|
+ enum mvx_session_event event,
|
|
+ void *arg);
|
|
+ struct mvx_session_port port[MVX_DIR_MAX];
|
|
+ struct mvx_mmu mmu;
|
|
+ struct mvx_fw fw;
|
|
+ struct mvx_fw_bin *fw_bin;
|
|
+ struct mvx_fw_event fw_event;
|
|
+ enum mvx_fw_state fw_state;
|
|
+ wait_queue_head_t waitq;
|
|
+ struct timer_list watchdog_timer;
|
|
+ struct work_struct watchdog_work;
|
|
+ unsigned int watchdog_count;
|
|
+ bool switched_in;
|
|
+ unsigned int idle_count;
|
|
+ long error;
|
|
+ struct dentry *dentry;
|
|
+
|
|
+ int64_t frame_rate;
|
|
+ unsigned int target_bitrate;
|
|
+ unsigned int maximum_bitrate;
|
|
+ bool rc_enabled;
|
|
+ int rc_type;
|
|
+ enum mvx_profile profile[MVX_FORMAT_BITSTREAM_LAST + 1];
|
|
+ enum mvx_level level[MVX_FORMAT_BITSTREAM_LAST + 1];
|
|
+ enum mvx_nalu_format nalu_format;
|
|
+ enum mvx_tristate stream_escaping;
|
|
+ enum mvx_tristate ignore_stream_headers;
|
|
+ enum mvx_tristate frame_reordering;
|
|
+ int64_t intbuf_size;
|
|
+ int p_frames;
|
|
+ int b_frames;
|
|
+ enum mvx_gop_type gop_type;
|
|
+ int cyclic_intra_refresh_mb;
|
|
+ enum mvx_tristate constr_ipred;
|
|
+ enum mvx_tristate entropy_sync;
|
|
+ enum mvx_tristate temporal_mvp;
|
|
+ int tile_rows;
|
|
+ int tile_cols;
|
|
+ int min_luma_cb_size;
|
|
+ int mb_mask;
|
|
+ enum mvx_entropy_mode entropy_mode;
|
|
+ enum mvx_multi_slice_mode multi_slice_mode;
|
|
+ int multi_slice_max_mb;
|
|
+ enum mvx_vp9_prob_update vp9_prob_update;
|
|
+ int mv_h_search_range;
|
|
+ int mv_v_search_range;
|
|
+ int bitdepth_chroma;
|
|
+ int bitdepth_luma;
|
|
+ int force_chroma_format;
|
|
+ enum mvx_rgb_to_yuv_mode rgb_to_yuv;
|
|
+ int band_limit;
|
|
+ int cabac_init_idc;
|
|
+ struct mvx_session_qp qp[MVX_FORMAT_BITSTREAM_LAST + 1];
|
|
+ int resync_interval;
|
|
+ int jpeg_quality;
|
|
+ struct mvx_fw_color_desc color_desc;
|
|
+ unsigned int crop_left;
|
|
+ unsigned int crop_right;
|
|
+ unsigned int crop_top;
|
|
+ unsigned int crop_bottom;
|
|
+ struct mvx_sei_userdata sei_userdata;
|
|
+ unsigned int nHRDBufsize;
|
|
+ struct mvx_dsl_frame dsl_frame;
|
|
+ struct mvx_dsl_ratio dsl_ratio;
|
|
+ struct mvx_long_term_ref mvx_ltr;
|
|
+ int dsl_pos_mode;
|
|
+ /*variable for DDR qos setting. */
|
|
+ uint32_t estimated_ddr_read_throughput; /**< Estimated DDR read throughput requirement for this session. */
|
|
+ uint32_t estimated_ddr_write_throughput; /**< Estimated DDR write throughput requirement for this session. */
|
|
+ bool eos_queued; /**< Indicates last incoming buffer has EOS flag */
|
|
+ bool keep_freq_high; /**< Control DVFS by avoiding low frequency at start of usecase */
|
|
+ bool is_suspend;
|
|
+ struct list_head buffer_corrupt_queue;
|
|
+ unsigned int watchdog_timeout; //timeout value[ms] for watchdog
|
|
+ /*for debug. */
|
|
+ uint64_t bus_write_bytes_total;
|
|
+ uint64_t bus_read_bytes_total;
|
|
+ uint64_t frame_id;
|
|
+ int enable_profiling;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_session_construct - Construct the session object.
|
|
+ * @session: Pointer to session.
|
|
+ * @dev: Pointer to device.
|
|
+ * @client_ops: Pointer to client ops.
|
|
+ * @cache: Pointer to firmware cache.
|
|
+ * @mutex: Pointer to mutex protecting the session object.
|
|
+ * @destructor: Destructor that will be invoked after the session reference count
|
|
+ * has reached zero. The destructor may be NULL if the owner of the
|
|
+ * session object does not need to be notified.
|
|
+ * @event: Event notification from the session to the client. This function
|
|
+ * must not call session API which could take mvx_session mutex.
|
|
+ * @dsession: Debugfs directory entry for the session.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_session_construct(struct mvx_session *session,
|
|
+ struct device *dev,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ struct mvx_fw_cache *cache,
|
|
+ struct mutex *mutex,
|
|
+ void (*destructor)(struct mvx_session *session),
|
|
+ void (*event)(struct mvx_session *session,
|
|
+ enum mvx_session_event event,
|
|
+ void *arg),
|
|
+ struct dentry *dsession);
|
|
+
|
|
+/**
|
|
+ * mvx_session_destruct - Destruct the session object.
|
|
+ * @session: Pointer to session.
|
|
+ */
|
|
+void mvx_session_destruct(struct mvx_session *session);
|
|
+
|
|
+/**
|
|
+ * mvx_session_get - Increment the session reference count.
|
|
+ * @session: Pointer to session.
|
|
+ */
|
|
+void mvx_session_get(struct mvx_session *session);
|
|
+
|
|
+/**
|
|
+ * mvx_session_put - Decrement the session reference count.
|
|
+ * @session: Pointer to session.
|
|
+ *
|
|
+ * If the reference count reaches 0 the session object will be destructed.
|
|
+ *
|
|
+ * Return: 1 if session was removed, else 0.
|
|
+ */
|
|
+int mvx_session_put(struct mvx_session *session);
|
|
+
|
|
+/**
|
|
+ * mvx_session_get_formats() - Get bitmask of supported formats.
|
|
+ * @session: Pointer to session.
|
|
+ * @dir: Which direction to get formats for.
|
|
+ * @formats: Pointer to bitmask listing supported formats.
|
|
+ */
|
|
+void mvx_session_get_formats(struct mvx_session *session,
|
|
+ enum mvx_direction dir,
|
|
+ uint64_t *formats);
|
|
+
|
|
+/**
|
|
+ * mvx_session_try_format() - Validate port format.
|
|
+ * @session: Pointer to session.
|
|
+ * @dir: Which direction to get formats for.
|
|
+ * @format: MVX format.
|
|
+ * @width: Width. Only valid for frame formats.
|
|
+ * @height: Height. Only valid for frame formats.
|
|
+ * @nplanes: Number of planes.
+ * @stride: Horizontal stride in bytes for each plane.
+ * @size: Size in bytes for each plane.
+ * @interlaced: True if frames are interlaced.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_session_try_format(struct mvx_session *session,
|
|
+ enum mvx_direction dir,
|
|
+ enum mvx_format format,
|
|
+ unsigned int *width,
|
|
+ unsigned int *height,
|
|
+ uint8_t *nplanes,
|
|
+ unsigned int *stride,
|
|
+ unsigned int *size,
|
|
+ bool *interlaced);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_format() - Validate and set port format.
|
|
+ * @session: Pointer to session.
|
|
+ * @dir: Which direction to get formats for.
|
|
+ * @format: MVX format.
|
|
+ * @width: Width. Only valid for frame formats.
|
|
+ * @height: Height. Only valid for frame formats.
|
|
+ * @nplanes: Number of planes.
+ * @stride: Horizontal stride in bytes for each plane.
|
|
+ * @size: Size in bytes for each plane.
|
|
+ * @interlaced: True if frames are interlaced.
|
|
+ *
|
|
+ * If *nplanes is 0, then the values of stride and size should be ignored, else
|
|
+ * size and stride should be used when setting the format.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_session_set_format(struct mvx_session *session,
|
|
+ enum mvx_direction dir,
|
|
+ enum mvx_format format,
|
|
+ unsigned int *width,
|
|
+ unsigned int *height,
|
|
+ uint8_t *nplanes,
|
|
+ unsigned int *stride,
|
|
+ unsigned int *size,
|
|
+ bool *interlaced);
|
|
+
|
|
+/**
|
|
+ * mvx_session_qbuf() - Queue a buffer.
|
|
+ * @session: Pointer to session.
+ * @dir: Port direction.
+ * @buf: Pointer to buffer.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_session_qbuf(struct mvx_session *session,
|
|
+ enum mvx_direction dir,
|
|
+ struct mvx_buffer *buf);
|
|
+
|
|
+/**
|
|
+ * mvx_session_send_eos() - Queue an empty buffer with EOS flag.
|
|
+ * @session: Pointer to session.
|
|
+ *
|
|
+ * If firmware is loaded an empty input buffer will be queued with the EOS flag
|
|
+ * set. EOS will be propagated by the firmware to the output queue.
|
|
+ *
|
|
+ * If the firmware is not loaded a buffer will be dequeued from the output
|
|
+ * queue, cleared and returned with the EOS flag set.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_session_send_eos(struct mvx_session *session);
|
|
+
|
|
+/**
|
|
+ * mvx_session_streamon() - Enable stream on input or output port.
|
|
+ * @session: Pointer to session.
|
|
+ * @dir: Port direction.
|
|
+ *
|
|
+ * Both input and output ports must be enabled for streaming to begin.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_session_streamon(struct mvx_session *session,
|
|
+ enum mvx_direction dir);
|
|
+
|
|
+/**
|
|
+ * mvx_session_streamoff() - Disable stream on input or output port.
|
|
+ * @session: Pointer to session.
|
|
+ * @dir: Port direction.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_session_streamoff(struct mvx_session *session,
|
|
+ enum mvx_direction dir);
|
|
+
|
|
+/**
|
|
+ * mvx_session_irq() - Handle IRQ event from the client.
|
|
+ * @isession: Pointer to if-session.
|
|
+ */
|
|
+void mvx_session_irq(struct mvx_if_session *isession);
|
|
+
|
|
+/**
|
|
+ * mvx_if_session_to_session() - Convert mvx_if_session to mvx_session.
|
|
+ * @session: Pointer to mvx_if_session object.
|
|
+ *
|
|
+ * Return: Pointer to mvx_session object.
|
|
+ */
|
|
+static inline struct mvx_session *mvx_if_session_to_session(
|
|
+ struct mvx_if_session *session)
|
|
+{
|
|
+ return container_of(session, struct mvx_session, isession);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mvx_session_port_show() - Print debug information into seq-file.
|
|
+ * @port: Pointer to port.
|
|
+ * @s: Seq-file to print to.
|
|
+ */
|
|
+void mvx_session_port_show(struct mvx_session_port *port,
|
|
+ struct seq_file *s);
|
|
+
|
|
+/*
+ * The functions below implement the different settings for a session.
+ *
+ * Most options can only be set when the FW is in the STOPPED state or not
+ * loaded. In that case the value is stored in the mvx_session structure
+ * and applied later in fw_initial_setup().
+ *
+ * Some options support runtime modification. For those a command is issued
+ * to the mvx_fw module if the FW is loaded. For the others -EBUSY is
+ * returned if the FW is loaded.
+ *
+ * ATTENTION. Currently there is no way to query from the mvx_fw API or from
+ * the mvx_session API whether an option supports runtime configuration.
+ */
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_securevideo() - Enable or disable secure video.
|
|
+ * @session: Session.
|
|
+ * @securevideo:Enable or disable secure video.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_session_set_securevideo(struct mvx_session *session,
|
|
+ bool securevideo);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_frame_rate() - Set frame rate.
|
|
+ * @session: Session.
|
|
+ * @frame_rate: Frame rate in Q16 format.
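+ *              Q16 fixed point, i.e. the frame rate multiplied by 65536
+ *              (for example, 30 fps is 30 << 16). Values are clamped to
+ *              1-256 fps by mvx_session_set_frame_rate().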
|
|
+ *
|
|
+ * This option can be set at runtime.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_frame_rate(struct mvx_session *session,
|
|
+ int64_t frame_rate);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_rate_control() - Enable/disable rate controller.
|
|
+ * @session: Session.
|
|
+ * @enabled: Rate controller status.
|
|
+ *
|
|
+ * This option can be set at runtime.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_rate_control(struct mvx_session *session,
|
|
+ bool enabled);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_bitrate() - Set the target bitrate.
|
|
+ * @session: Session.
|
|
+ * @bitrate: Bitrate in bits per second.
|
|
+ *
|
|
+ * This option can be set at runtime.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_bitrate(struct mvx_session *session,
|
|
+ int bitrate);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_bitrate_control() - Set the bitrate rate control.
|
|
+ * @session: Session.
|
|
+ * @rc_type: bitrate rate control type.
|
|
+ *
|
|
+ * This option can be set at runtime.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+
|
|
+int mvx_session_set_bitrate_control(struct mvx_session *session,
|
|
+ struct mvx_buffer_param_rate_control *rc_type);
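+/*
+ * Minimal usage sketch (illustrative only, values are hypothetical):
+ *
+ *   struct mvx_buffer_param_rate_control rc = {
+ *           .rate_control_mode = MVX_OPT_RATE_CONTROL_MODE_C_VARIABLE,
+ *           .target_bitrate    = 4000000,
+ *           .maximum_bitrate   = 6000000,
+ *   };
+ *   ret = mvx_session_set_bitrate_control(session, &rc);
+ *
+ * maximum_bitrate is only passed to the firmware when rate_control_mode is
+ * MVX_OPT_RATE_CONTROL_MODE_C_VARIABLE (see mvx_session_set_bitrate_control()
+ * in mvx_session.c).
+ */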
|
|
+/**
|
|
+ * mvx_session_set_crop_left() - Set crop left.
|
|
+ * @session: Session.
|
|
+ * @left: encoder SPS crop param, left offset.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+
|
|
+int mvx_session_set_crop_left(struct mvx_session *session,
|
|
+ int32_t left);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_crop_right() - Set crop right.
|
|
+ * @session: Session.
|
|
+ * @right: encoder SPS crop param, right offset.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+
|
|
+int mvx_session_set_crop_right(struct mvx_session *session,
|
|
+ int32_t right);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_crop_top() - Set crop top.
|
|
+ * @session: Session.
|
|
+ * @top: encoder SPS crop param, top offset.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+
|
|
+int mvx_session_set_crop_top(struct mvx_session *session,
|
|
+ int32_t top);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_crop_bottom() - Set crop bottom.
|
|
+ * @session: Session.
|
|
+ * @bottom: encoder SPS crop param, bottom offset.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+
|
|
+int mvx_session_set_crop_bottom(struct mvx_session *session,
|
|
+ int32_t bottom);
|
|
+/**
|
|
+ * mvx_session_set_nalu_format() - Set NALU format.
|
|
+ * @session: Session.
|
|
+ * @fmt: NALU format.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_nalu_format(struct mvx_session *session,
|
|
+ enum mvx_nalu_format fmt);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_stream_escaping() - Enable/disable stream escaping
|
|
+ * @session: Session.
|
|
+ * @status: Status
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_stream_escaping(struct mvx_session *session,
|
|
+ enum mvx_tristate status);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_profile() - Set profile for encoder.
|
|
+ * @session: Session.
|
|
+ * @format: Format.
|
|
+ * @profile: Encoder profile.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_profile(struct mvx_session *session,
|
|
+ enum mvx_format format,
|
|
+ enum mvx_profile profile);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_level() - Set level for encoder.
|
|
+ *
|
|
+ * @session: Session.
|
|
+ * @format: Format.
|
|
+ * @level: Encoder level.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_level(struct mvx_session *session,
|
|
+ enum mvx_format format,
|
|
+ enum mvx_level level);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_ignore_stream_headers() - Enable/disable stream headers
|
|
+ * ignore.
|
|
+ * @session: Session.
|
|
+ * @status: Status.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_ignore_stream_headers(struct mvx_session *session,
|
|
+ enum mvx_tristate status);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_frame_reordering() - Enable/disable frames reordering.
|
|
+ * @session: Session.
|
|
+ * @status: Status.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_frame_reordering(struct mvx_session *session,
|
|
+ enum mvx_tristate status);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_intbuf_size() - Set internal buffer size.
|
|
+ * @session: Session.
|
|
+ * @size: Size.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_intbuf_size(struct mvx_session *session,
|
|
+ int size);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_p_frames() - Set number of P-frames.
|
|
+ * @session: Session.
|
|
+ * @val: Number of P-frames.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_p_frames(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_b_frames() - Set number of B-frames.
|
|
+ * @session: Session.
|
|
+ * @val: Number of B-frames.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_b_frames(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_gop_type() - Set GOP type.
|
|
+ * @session: Session.
|
|
+ * @gop_type: GOP type.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_gop_type(struct mvx_session *session,
|
|
+ enum mvx_gop_type gop_type);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_cyclic_intra_refresh_mb() - Set intra MB refresh.
|
|
+ * @session: Session.
|
|
+ * @val: Value.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_cyclic_intra_refresh_mb(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_constr_ipred() - Enable/disable constrained intra
|
|
+ * prediction.
|
|
+ * @session: Session.
|
|
+ * @status: Status.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_constr_ipred(struct mvx_session *session,
|
|
+ enum mvx_tristate status);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_entropy_sync() - Enable/disable entropy synchronization.
|
|
+ * @session: Session.
|
|
+ * @status: Status.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_entropy_sync(struct mvx_session *session,
|
|
+ enum mvx_tristate status);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_temporal_mvp() - Enable/disable temporal MVP.
|
|
+ * @session: Session.
|
|
+ * @status: Status.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_temporal_mvp(struct mvx_session *session,
|
|
+ enum mvx_tristate status);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_tile_rows() - Set number of tile rows.
|
|
+ * @session: Session.
|
|
+ * @val: Value.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_tile_rows(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_tile_cols() - Set number of tile columns.
|
|
+ * @session: Session.
|
|
+ * @val: Value.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_tile_cols(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_min_luma_cb_size() - Set minimum luma coding block size.
|
|
+ * @session: Session.
|
|
+ * @val: Value.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_min_luma_cb_size(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_mb_mask() - Set MB mask.
|
|
+ * @session: Session.
|
|
+ * @val: Value.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_mb_mask(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_entropy_mode() - Set entropy mode.
|
|
+ * @session: Session.
|
|
+ * @mode: Entropy mode.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_entropy_mode(struct mvx_session *session,
|
|
+ enum mvx_entropy_mode mode);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_multi_slice_mode() - Set multi slice mode.
|
|
+ * @session: Session.
|
|
+ * @mode: Mode.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_multi_slice_mode(struct mvx_session *session,
|
|
+ enum mvx_multi_slice_mode mode);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_multi_slice_max_mb() - Set suggested number of CTUs in a
|
|
+ * slice.
|
|
+ * @session: Session.
|
|
+ * @val: Value.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_multi_slice_max_mb(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_vp9_prob_update() - Set probability update mode.
|
|
+ * @session: Session.
|
|
+ * @mode: Mode.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_vp9_prob_update(struct mvx_session *session,
|
|
+ enum mvx_vp9_prob_update mode);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_mv_h_search_range() - Set horizontal search range for motion
|
|
+ * vectors.
|
|
+ * @session: Session.
|
|
+ * @val: Value.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_mv_h_search_range(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_mv_v_search_range() - Set vertical search range for motion
|
|
+ * vectors.
|
|
+ * @session: Session.
|
|
+ * @val: Value.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_mv_v_search_range(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_bitdepth_chroma() - Set bitdepth.
|
|
+ * @session: Session.
|
|
+ * @val: Value.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_bitdepth_chroma(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_bitdepth_luma() - Set bitdepth.
|
|
+ * @session: Session.
|
|
+ * @val: Value.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_bitdepth_luma(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_force_chroma_format() - Set chroma format.
|
|
+ * @session: Session.
|
|
+ * @fmt: chroma format.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_force_chroma_format(struct mvx_session *session,
|
|
+ int fmt);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_rgb_to_yuv_mode() - Set RGB to YUV conversion mode.
|
|
+ * @session: Session.
|
|
+ * @mode: Mode.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_rgb_to_yuv_mode(struct mvx_session *session,
|
|
+ enum mvx_rgb_to_yuv_mode mode);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_band_limit() - Set maximum bandwidth limit.
|
|
+ * @session: Session.
|
|
+ * @val: Value.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_band_limit(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_cabac_init_idc() - Set CABAC initialization table.
|
|
+ * @session: Session.
|
|
+ * @val: Value.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_cabac_init_idc(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_i_frame_qp() - Set QP for I frames.
|
|
+ * @session: Session.
|
|
+ * @format: Format.
|
|
+ * @qp: Quantization parameter.
|
|
+ *
|
|
+ * This option could be set in runtime.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_i_frame_qp(struct mvx_session *session,
|
|
+ enum mvx_format format,
|
|
+ int qp);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_p_frame_qp() - Set QP for P frames.
|
|
+ * @session: Session.
|
|
+ * @format: Format.
|
|
+ * @qp: Quantization parameter.
|
|
+ *
|
|
+ * This option could be set in runtime.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_p_frame_qp(struct mvx_session *session,
|
|
+ enum mvx_format format,
|
|
+ int qp);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_b_frame_qp() - Set QP for B frames.
|
|
+ * @session: Session.
|
|
+ * @format: Format.
|
|
+ * @qp: Quantization parameter.
|
|
+ *
|
|
+ * This option could be set in runtime.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_b_frame_qp(struct mvx_session *session,
|
|
+ enum mvx_format format,
|
|
+ int qp);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_min_qp() - Set minimum value of QP range.
|
|
+ * @session: Session.
|
|
+ * @format: Format.
|
|
+ * @qp: Quantization parameter.
|
|
+ *
|
|
+ * This option could be set in runtime.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_min_qp(struct mvx_session *session,
|
|
+ enum mvx_format format,
|
|
+ int qp);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_max_qp() - Set maximum value of QP range.
|
|
+ * @session: Session.
|
|
+ * @format: Format.
|
|
+ * @qp: Quantization parameter.
|
|
+ *
|
|
+ * This option could be set in runtime.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_max_qp(struct mvx_session *session,
|
|
+ enum mvx_format format,
|
|
+ int qp);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_resync_interval() - Set resync interval for JPEG encoder.
|
|
+ * @session: Session.
|
|
+ * @val: Resync interval.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+int mvx_session_set_resync_interval(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_jpeg_quality() - Set JPEG quality.
|
|
+ * @session: Session.
|
|
+ * @val: Quality level (1-100).
|
|
+ *
|
|
+ * Return: 0 in case of success, error otherwise.
|
|
+ */
|
|
+int mvx_session_set_jpeg_quality(struct mvx_session *session,
|
|
+ int val);
|
|
+
|
|
+/**
|
|
+ * mvx_session_get_color_desc() - Get color description.
|
|
+ * @session: Pointer to session.
|
|
+ * @color_desc: Color description.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_session_get_color_desc(struct mvx_session *session,
|
|
+ struct mvx_fw_color_desc *color_desc);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_color_desc() - Set color description.
|
|
+ * @session: Pointer to session.
|
|
+ * @color_desc: Color description.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_session_set_color_desc(struct mvx_session *session,
|
|
+ struct mvx_fw_color_desc *color_desc);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_roi_regions() - Set ROI regions.
|
|
+ * @session: Pointer to session.
|
|
+ * @roi: ROI regions.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_session_set_roi_regions(struct mvx_session *session,
|
|
+ struct mvx_roi_config *roi);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_qp_epr() - Set QP for EPR config.
+ * @session: Pointer to session.
+ * @qp: Pointer to QP value.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_session_set_qp_epr(struct mvx_session *session,
|
|
+ int *qp);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_sei_userdata() - Set SEI userdata.
|
|
+ * @session: Pointer to session.
|
|
+ * @userdata: SEI userdata.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_session_set_sei_userdata(struct mvx_session *session,
|
|
+ struct mvx_sei_userdata *userdata);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_hrd_buffer_size() - Set hrd buffer size.
|
|
+ * @session: Pointer to session.
|
|
+ * @size: hrd buffer size.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_session_set_hrd_buffer_size(struct mvx_session *session,
|
|
+ int size);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_dsl_frame() - Set DownScale dst frame.
|
|
+ * @session: Pointer to session.
|
|
+ * @dsl: DownScale dst frame.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_session_set_dsl_frame(struct mvx_session *session,
|
|
+ struct mvx_dsl_frame *dsl);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_dsl_ratio() - Set DownScale ratio.
|
|
+ * @session: Pointer to session.
|
|
+ * @dsl: DownScale ratio.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_session_set_dsl_ratio(struct mvx_session *session,
|
|
+ struct mvx_dsl_ratio *dsl);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_long_term_ref() - Set long term ref.
|
|
+ * @session: Pointer to session.
|
|
+ * @ltr: long term ref.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_session_set_long_term_ref(struct mvx_session *session,
|
|
+ struct mvx_long_term_ref *ltr);
|
|
+
|
|
+/**
|
|
+ * mvx_session_set_dsl_mode() - Set DownScale mode.
|
|
+ * @session: Pointer to session.
|
|
+ * @mode: DownScale mode, only enabled in high precision mode.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_session_set_dsl_mode(struct mvx_session *session,
|
|
+ int *mode);
|
|
+
|
|
+/* Force insertion of an IDR frame. */
|
|
+int mvx_session_set_force_idr(struct mvx_session *session);
|
|
+
|
|
+/* Set timeout value for the watchdog. */
|
|
+int mvx_session_set_watchdog_timeout(struct mvx_session *session, int timeout);
|
|
+
|
|
+/* Enable/disable profiling for the VPU. */
|
|
+int mvx_session_set_profiling(struct mvx_session *session, int enable);
|
|
+
|
|
+#endif /* _MVX_SESSION_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx-v4l2-controls.h b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx-v4l2-controls.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx-v4l2-controls.h
|
|
@@ -0,0 +1,478 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_V4L2_CONTROLS_H_
|
|
+#define _MVX_V4L2_CONTROLS_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/videodev2.h>
|
|
+#include <linux/v4l2-controls.h>
|
|
+#include <drm/drm_fourcc.h>
|
|
+
|
|
+/****************************************************************************
|
|
+ * Pixel formats
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define V4L2_PIX_FMT_YUV420_AFBC_8 v4l2_fourcc('Y', '0', 'A', '8')
|
|
+#define V4L2_PIX_FMT_YUV420_AFBC_10 v4l2_fourcc('Y', '0', 'A', 'A')
|
|
+#define V4L2_PIX_FMT_YUV422_AFBC_8 v4l2_fourcc('Y', '2', 'A', '8')
|
|
+#define V4L2_PIX_FMT_YUV422_AFBC_10 v4l2_fourcc('Y', '2', 'A', 'A')
|
|
+#define V4L2_PIX_FMT_Y210 v4l2_fourcc('Y', '2', '1', '0')
|
|
+//#define V4L2_PIX_FMT_P010 v4l2_fourcc('Y', '0', 'P', '1')
|
|
+#define V4L2_PIX_FMT_Y0L2 v4l2_fourcc('Y', '0', 'Y', 'L')
|
|
+#define V4L2_PIX_FMT_RV v4l2_fourcc('R', 'V', '0', '0')
|
|
+
|
|
+#ifndef V4L2_PIX_FMT_HEVC
|
|
+#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C')
|
|
+#endif
|
|
+
|
|
+#ifndef V4L2_PIX_FMT_VP9
|
|
+#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0')
|
|
+#endif
|
|
+
|
|
+#define V4L2_PIX_FMT_AVS v4l2_fourcc('A', 'V', 'S', '1')
|
|
+#define V4L2_PIX_FMT_AVS2 v4l2_fourcc('A', 'V', 'S', '2')
|
|
+
|
|
+/****************************************************************************
|
|
+ * Buffers
|
|
+ * @see v4l2_buffer
|
|
+ ****************************************************************************/
|
|
+
|
|
+/*
|
|
+ * Extended buffer flags.
|
|
+ */
|
|
+/*
|
|
+#define V4L2_BUF_FLAG_MVX_DECODE_ONLY 0x01000000
|
|
+#define V4L2_BUF_FLAG_MVX_CODEC_CONFIG 0x02000000
|
|
+#define V4L2_BUF_FLAG_MVX_AFBC_TILED_HEADERS 0x10000000
|
|
+#define V4L2_BUF_FLAG_MVX_AFBC_TILED_BODY 0x20000000
|
|
+#define V4L2_BUF_FLAG_MVX_AFBC_32X8_SUPERBLOCK 0x40000000
|
|
+#define V4L2_BUF_FLAG_MVX_MASK 0xff000000
|
|
+#define V4L2_BUF_FLAG_END_OF_SUB_FRAME 0x04000000
|
|
+#define V4L2_BUF_FLAG_MVX_BUFFER_FRAME_PRESENT 0x08000000
|
|
+#define V4L2_BUF_FLAG_MVX_BUFFER_NEED_REALLOC 0x07000000
|
|
+
|
|
+
|
|
+#define V4L2_BUF_FRAME_FLAG_ROTATION_90 0x81000000
|
|
+#define V4L2_BUF_FRAME_FLAG_ROTATION_180 0x82000000
|
|
+#define V4L2_BUF_FRAME_FLAG_ROTATION_270 0x83000000
|
|
+#define V4L2_BUF_FRAME_FLAG_ROTATION_MASK 0x83000000
|
|
+#define V4L2_BUF_FRAME_FLAG_MIRROR_HORI 0x90000000
|
|
+#define V4L2_BUF_FRAME_FLAG_MIRROR_VERT 0xA0000000
|
|
+#define V4L2_BUF_FRAME_FLAG_MIRROR_MASK 0xB0000000
|
|
+#define V4L2_BUF_FRAME_FLAG_SCALING_2 0x84000000
|
|
+#define V4L2_BUF_FRAME_FLAG_SCALING_4 0x88000000
|
|
+#define V4L2_BUF_FRAME_FLAG_SCALING_MASK 0x8C000000
|
|
+
|
|
+#define V4L2_BUF_FLAG_MVX_BUFFER_EPR 0xC0000000
|
|
+#define V4L2_BUF_FLAG_MVX_BUFFER_ROI 0x70000000
|
|
+*/
|
|
+// Redefinition of these flags.
+/* The flags below are set and interpreted per encode/decode frame/bitstream context. */
|
|
+
|
|
+#define V4L2_BUF_FLAG_MVX_MASK 0xff000000
|
|
+
|
|
+//for decode frame flag
|
|
+#define V4L2_BUF_FRAME_FLAG_ROTATION_90 0x01000000 /* Frame is rotated 90 degrees */
|
|
+#define V4L2_BUF_FRAME_FLAG_ROTATION_180 0x02000000 /* Frame is rotated 180 degrees */
|
|
+#define V4L2_BUF_FRAME_FLAG_ROTATION_270 0x03000000 /* Frame is rotated 270 degrees */
|
|
+#define V4L2_BUF_FRAME_FLAG_ROTATION_MASK 0x03000000
|
|
+#define V4L2_BUF_FRAME_FLAG_SCALING_2 0x04000000 /* Frame is scaled by half */
|
|
+#define V4L2_BUF_FRAME_FLAG_SCALING_4 0x08000000 /* Frame is scaled by quarter */
|
|
+#define V4L2_BUF_FRAME_FLAG_SCALING_MASK 0x0C000000
|
|
+#define V4L2_BUF_FLAG_MVX_BUFFER_FRAME_PRESENT 0x10000000
|
|
+#define V4L2_BUF_FLAG_MVX_BUFFER_NEED_REALLOC 0x20000000
|
|
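For orientation, a hedged sketch of how a client might decode the rotation bits above from a dequeued buffer's flags field; the helper name is illustrative and not part of this header.

static unsigned int example_rotation_degrees(const struct v4l2_buffer *b)
{
	switch (b->flags & V4L2_BUF_FRAME_FLAG_ROTATION_MASK) {
	case V4L2_BUF_FRAME_FLAG_ROTATION_90:
		return 90;
	case V4L2_BUF_FRAME_FLAG_ROTATION_180:
		return 180;
	case V4L2_BUF_FRAME_FLAG_ROTATION_270:
		return 270;
	default:
		return 0;
	}
}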
+
|
|
+//for decode bitstream flag
|
|
+#define V4L2_BUF_FLAG_MVX_CODEC_CONFIG 0xC1000000
|
|
+#define V4L2_BUF_FLAG_END_OF_SUB_FRAME 0xC2000000
|
|
+#define V4L2_BUF_FLAG_MVX_DECODE_ONLY 0xC4000000
|
|
+
|
|
+//for encode frame flag
|
|
+#define V4L2_BUF_FRAME_FLAG_MIRROR_HORI 0x81000000
|
|
+#define V4L2_BUF_FRAME_FLAG_MIRROR_VERT 0x82000000
|
|
+#define V4L2_BUF_FRAME_FLAG_MIRROR_MASK 0x83000000
|
|
+#define V4L2_BUF_FLAG_MVX_BUFFER_ROI 0x84000000 /* this buffer has a roi region */
|
|
+#define V4L2_BUF_FLAG_MVX_BUFFER_EPR 0x88000000 /* EPR buffer flag */
|
|
+
|
|
+//afbc flag
|
|
+#define V4L2_BUF_FLAG_MVX_AFBC_TILED_HEADERS 0x01000000
|
|
+#define V4L2_BUF_FLAG_MVX_AFBC_TILED_BODY 0x02000000
|
|
+#define V4L2_BUF_FLAG_MVX_AFBC_32X8_SUPERBLOCK 0x04000000
|
|
+#define V4L2_BUF_FLAG_MVX_AFBC_BLOCK_SPLIT 0x08000000
|
|
+
|
|
+#define V4L2_BUF_FLAG_MVX_DISABLE_CACHE_MAINTENANCE 0x50000000 /*disable cache maintenance for buffer.*/
|
|
+
|
|
+/****************************************************************************
|
|
+ * HDR color description.
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define V4L2_EVENT_MVX_COLOR_DESC V4L2_EVENT_PRIVATE_START
|
|
+#define V4L2_MVX_MAX_FRAME_REGIONS 16
|
|
+
|
|
+enum v4l2_mvx_range {
|
|
+ V4L2_MVX_RANGE_UNSPECIFIED,
|
|
+ V4L2_MVX_RANGE_FULL,
|
|
+ V4L2_MVX_RANGE_LIMITED
|
|
+};
|
|
+
|
|
+enum v4l2_mvx_primaries {
|
|
+ V4L2_MVX_PRIMARIES_UNSPECIFIED,
|
|
+ V4L2_MVX_PRIMARIES_BT709, /* Rec.ITU-R BT.709 */
|
|
+ V4L2_MVX_PRIMARIES_BT470M, /* Rec.ITU-R BT.470 System M */
|
|
+ V4L2_MVX_PRIMARIES_BT601_625, /* Rec.ITU-R BT.601 625 */
|
|
+ V4L2_MVX_PRIMARIES_BT601_525, /* Rec.ITU-R BT.601 525 */
|
|
+ V4L2_MVX_PRIMARIES_GENERIC_FILM, /* Generic Film */
|
|
+ V4L2_MVX_PRIMARIES_BT2020 /* Rec.ITU-R BT.2020 */
|
|
+};
|
|
+
|
|
+enum v4l2_mvx_transfer {
|
|
+ V4L2_MVX_TRANSFER_UNSPECIFIED,
|
|
+ V4L2_MVX_TRANSFER_LINEAR, /* Linear transfer characteristics */
|
|
+ V4L2_MVX_TRANSFER_SRGB, /* sRGB */
|
|
+ V4L2_MVX_TRANSFER_SMPTE170M, /* SMPTE 170M */
|
|
+ V4L2_MVX_TRANSFER_GAMMA22, /* Assumed display gamma 2.2 */
|
|
+ V4L2_MVX_TRANSFER_GAMMA28, /* Assumed display gamma 2.8 */
|
|
+ V4L2_MVX_TRANSFER_ST2084, /* SMPTE ST 2084 */
|
|
+ V4L2_MVX_TRANSFER_HLG, /* ARIB STD-B67 hybrid-log-gamma */
|
|
+ V4L2_MVX_TRANSFER_SMPTE240M, /* SMPTE 240M */
|
|
+ V4L2_MVX_TRANSFER_XVYCC, /* IEC 61966-2-4 */
|
|
+ V4L2_MVX_TRANSFER_BT1361, /* Rec.ITU-R BT.1361 extended gamut */
|
|
+ V4L2_MVX_TRANSFER_ST428 /* SMPTE ST 428-1 */
|
|
+};
|
|
+
|
|
+enum v4l2_mvx_matrix {
|
|
+ V4L2_MVX_MATRIX_UNSPECIFIED,
|
|
+ V4L2_MVX_MATRIX_BT709, /* Rec.ITU-R BT.709 */
|
|
+ V4L2_MVX_MATRIX_BT470M, /* KR=0.30, KB=0.11 */
|
|
+ V4L2_MVX_MATRIX_BT601, /* Rec.ITU-R BT.601 625 */
|
|
+ V4L2_MVX_MATRIX_SMPTE240M, /* SMPTE 240M or equivalent */
|
|
+ V4L2_MVX_MATRIX_BT2020, /* Rec.ITU-R BT.2020 non-const lum */
|
|
+ V4L2_MVX_MATRIX_BT2020Constant /* Rec.ITU-R BT.2020 constant lum */
|
|
+};
|
|
+
|
|
+enum v4l2_nalu_format {
|
|
+ V4L2_OPT_NALU_FORMAT_START_CODES,
|
|
+ V4L2_OPT_NALU_FORMAT_ONE_NALU_PER_BUFFER,
|
|
+ V4L2_OPT_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD,
|
|
+ V4L2_OPT_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD,
|
|
+ V4L2_OPT_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD,
|
|
+ V4L2_OPT_NALU_FORMAT_ONE_FRAME_PER_BUFFER
|
|
+};
|
|
+
|
|
+struct v4l2_mvx_primary {
|
|
+ uint16_t x;
|
|
+ uint16_t y;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct v4l2_mvx_color_desc - HDR color description.
|
|
+ * @flags: Flags which fields that are valid.
|
|
+ * @range: enum v4l2_mvx_range.
|
|
+ * @primaries: enum v4l2_mvx_primaries.
|
|
+ * @transfer: enum v4l2_mvx_transfer.
|
|
+ * @matrix: enum v4l2_mvx_matrix.
|
|
+ * @display.r: Red point.
|
|
+ * @display.g: Green point.
|
|
+ * @display.b: Blue point.
|
|
+ * @display.w: White point.
|
|
+ * @display.luminance_min: Minimum display luminance.
|
|
+ * @display.luminance_max: Maximum display luminance.
|
|
+ * @content.luminance_max: Maximum content luminance.
|
|
+ * @content.luminance_average: Average content luminance.
|
|
+ *
|
|
+ * Color- and white point primaries are given in increments of 0.00002
|
|
+ * and in the range of 0 to 50000.
|
|
+ *
|
|
+ * Luminance is given in increments of 0.0001 candelas per m2.
|
|
+ */
|
|
+struct v4l2_mvx_color_desc {
|
|
+ uint32_t flags;
|
|
+ #define V4L2_BUFFER_PARAM_COLOUR_FLAG_MASTERING_DISPLAY_DATA_VALID (1)
|
|
+ #define V4L2_BUFFER_PARAM_COLOUR_FLAG_CONTENT_LIGHT_DATA_VALID (2)
|
|
+ uint8_t range;
|
|
+ uint8_t primaries;
|
|
+ uint8_t transfer;
|
|
+ uint8_t matrix;
|
|
+ struct {
|
|
+ struct v4l2_mvx_primary r;
|
|
+ struct v4l2_mvx_primary g;
|
|
+ struct v4l2_mvx_primary b;
|
|
+ struct v4l2_mvx_primary w;
|
|
+ uint16_t luminance_min;
|
|
+ uint16_t luminance_max;
|
|
+ } display;
|
|
+ struct {
|
|
+ uint16_t luminance_max;
|
|
+ uint16_t luminance_average;
|
|
+ } content;
|
|
+
|
|
+ uint8_t video_format;
|
|
+ uint8_t aspect_ratio_idc;
|
|
+ uint16_t sar_width;
|
|
+ uint16_t sar_height;
|
|
+ uint32_t num_units_in_tick;
|
|
+ uint32_t time_scale;
|
|
+} __attribute__ ((packed));
|
|
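As a worked example of the fixed-point conventions documented above (values and helper name are illustrative only): the D65 white point x = 0.3127, y = 0.3290 becomes 0.3127 / 0.00002 = 15635 and 0.3290 / 0.00002 = 16450.

static void example_fill_white_point(struct v4l2_mvx_color_desc *desc)
{
	desc->display.w.x = 15635;	/* 0.3127 in 0.00002 steps */
	desc->display.w.y = 16450;	/* 0.3290 in 0.00002 steps */
	desc->flags |= V4L2_BUFFER_PARAM_COLOUR_FLAG_MASTERING_DISPLAY_DATA_VALID;
}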
+
|
|
+struct v4l2_buffer_param_region
|
|
+{
|
|
+ uint16_t mbx_left; /**< X coordinate of the left most macroblock */
|
|
+ uint16_t mbx_right; /**< X coordinate of the right most macroblock */
|
|
+ uint16_t mby_top; /**< Y coordinate of the top most macroblock */
|
|
+ uint16_t mby_bottom; /**< Y coordinate of the bottom most macroblock */
|
|
+ int16_t qp_delta; /**< QP delta value. This region will be encoded
|
|
+ * with qp = qp_default + qp_delta. */
|
|
+};
|
|
+
|
|
+struct v4l2_mvx_roi_regions
|
|
+{
|
|
+ unsigned int pic_index;
|
|
+ unsigned char qp_present;
|
|
+ unsigned char qp;
|
|
+ unsigned char roi_present;
|
|
+ unsigned char num_roi;
|
|
+ struct v4l2_buffer_param_region roi[V4L2_MVX_MAX_FRAME_REGIONS];
|
|
+};
|
|
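A minimal sketch of filling one ROI region; the helper and the chosen coordinates are illustrative, and whether the macroblock bounds are inclusive is an assumption. The filled structure would later be handed to the driver via VIDIOC_S_MVX_ROI_REGIONS, defined further down in this header.

static void example_fill_roi(struct v4l2_mvx_roi_regions *r)
{
	r->pic_index = 0;
	r->roi_present = 1;
	r->num_roi = 1;
	r->roi[0].mbx_left = 0;
	r->roi[0].mbx_right = 3;
	r->roi[0].mby_top = 0;
	r->roi[0].mby_bottom = 3;
	r->roi[0].qp_delta = -4;	/* encode this region with qp_default - 4 */
}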
+
|
|
+struct v4l2_sei_user_data
|
|
+{
|
|
+ uint8_t flags;
|
|
+ #define V4L2_BUFFER_PARAM_USER_DATA_UNREGISTERED_VALID (1)
|
|
+ uint8_t uuid[16];
|
|
+ char user_data[256 - 35];
|
|
+ uint8_t user_data_len;
|
|
+};
|
|
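A hedged sketch of filling the SEI user-data payload; the helper is illustrative, the UUID is left to the caller, and <string.h> is assumed for a user-space build.

#include <string.h>

static int example_fill_sei(struct v4l2_sei_user_data *sei, const char *msg)
{
	size_t len = strlen(msg);

	if (len > sizeof(sei->user_data))
		return -1;

	memcpy(sei->user_data, msg, len);
	sei->user_data_len = len;
	sei->flags = V4L2_BUFFER_PARAM_USER_DATA_UNREGISTERED_VALID;
	return 0;
}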
+
|
|
+struct v4l2_rate_control
|
|
+{
|
|
+ uint32_t rc_type;
|
|
+ #define V4L2_OPT_RATE_CONTROL_MODE_OFF (0)
|
|
+ #define V4L2_OPT_RATE_CONTROL_MODE_STANDARD (1)
|
|
+ #define V4L2_OPT_RATE_CONTROL_MODE_VARIABLE (2)
|
|
+ #define V4L2_OPT_RATE_CONTROL_MODE_CONSTANT (3)
|
|
+ #define V4L2_OPT_RATE_CONTROL_MODE_C_VARIABLE (4)
|
|
+ uint32_t target_bitrate;
|
|
+ uint32_t maximum_bitrate;
|
|
+};
|
|
+
|
|
+struct v4l2_mvx_dsl_frame
|
|
+{
|
|
+ uint32_t width;
|
|
+ uint32_t height;
|
|
+};
|
|
+
|
|
+struct v4l2_mvx_dsl_ratio
|
|
+{
|
|
+ uint32_t hor;
|
|
+ uint32_t ver;
|
|
+};
|
|
+
|
|
+struct v4l2_mvx_long_term_ref
|
|
+{
|
|
+ uint32_t mode;
|
|
+ uint32_t period;
|
|
+};
|
|
+
|
|
+#define V4L2_MVX_COLOR_DESC_DISPLAY_VALID 0x1
|
|
+#define V4L2_MVX_COLOR_DESC_CONTENT_VALID 0x2
|
|
+
|
|
+/****************************************************************************
|
|
+ * Custom IOCTL
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define VIDIOC_G_MVX_COLORDESC _IOWR('V', BASE_VIDIOC_PRIVATE, \
|
|
+ struct v4l2_mvx_color_desc)
|
|
+#define VIDIOC_S_MVX_ROI_REGIONS _IOWR('V', BASE_VIDIOC_PRIVATE + 1, \
|
|
+ struct v4l2_mvx_roi_regions)
|
|
+#define VIDIOC_S_MVX_QP_EPR _IOWR('V', BASE_VIDIOC_PRIVATE + 2, \
|
|
+ int)
|
|
+#define VIDIOC_S_MVX_COLORDESC _IOWR('V', BASE_VIDIOC_PRIVATE + 3, \
|
|
+ struct v4l2_mvx_color_desc)
|
|
+#define VIDIOC_S_MVX_SEI_USERDATA _IOWR('V', BASE_VIDIOC_PRIVATE + 4, \
|
|
+ struct v4l2_sei_user_data)
|
|
+#define VIDIOC_S_MVX_RATE_CONTROL _IOWR('V', BASE_VIDIOC_PRIVATE + 5, \
|
|
+ struct v4l2_rate_control)
|
|
+#define VIDIOC_S_MVX_DSL_FRAME _IOWR('V', BASE_VIDIOC_PRIVATE + 6, \
|
|
+ struct v4l2_mvx_dsl_frame)
|
|
+#define VIDIOC_S_MVX_DSL_RATIO _IOWR('V', BASE_VIDIOC_PRIVATE + 7, \
|
|
+ struct v4l2_mvx_dsl_ratio)
|
|
+#define VIDIOC_S_MVX_LONG_TERM_REF _IOWR('V', BASE_VIDIOC_PRIVATE + 8, \
|
|
+ struct v4l2_mvx_long_term_ref)
|
|
+#define VIDIOC_S_MVX_DSL_MODE _IOWR('V', BASE_VIDIOC_PRIVATE + 9, \
|
|
+ int)
|
|
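From user space these private ioctls are issued like any other V4L2 ioctl. A hedged sketch for the rate-control request follows; the open video device file descriptor, error handling, and the inclusion of this header plus <sys/ioctl.h> are assumptions.

#include <sys/ioctl.h>

static int example_set_rate_control(int fd)
{
	struct v4l2_rate_control rc = {
		.rc_type = V4L2_OPT_RATE_CONTROL_MODE_STANDARD,
		.target_bitrate = 4000000,	/* bits per second */
	};

	return ioctl(fd, VIDIOC_S_MVX_RATE_CONTROL, &rc);
}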
+/****************************************************************************
|
|
+ * Custom controls
|
|
+ ****************************************************************************/
|
|
+
|
|
+/*
|
|
+ * Video for Linux 2 custom controls.
|
|
+ */
|
|
+enum v4l2_cid_mve_video {
|
|
+ V4L2_CID_MVE_VIDEO_FRAME_RATE = V4L2_CTRL_CLASS_CODEC + 0x2000,
|
|
+ V4L2_CID_MVE_VIDEO_NALU_FORMAT,
|
|
+ V4L2_CID_MVE_VIDEO_STREAM_ESCAPING,
|
|
+ V4L2_CID_MVE_VIDEO_H265_PROFILE,
|
|
+ V4L2_CID_MVE_VIDEO_VC1_PROFILE,
|
|
+ V4L2_CID_MVE_VIDEO_H265_LEVEL,
|
|
+ V4L2_CID_MVE_VIDEO_IGNORE_STREAM_HEADERS,
|
|
+ V4L2_CID_MVE_VIDEO_FRAME_REORDERING,
|
|
+ V4L2_CID_MVE_VIDEO_INTBUF_SIZE,
|
|
+ V4L2_CID_MVE_VIDEO_P_FRAMES,
|
|
+ V4L2_CID_MVE_VIDEO_GOP_TYPE,
|
|
+ V4L2_CID_MVE_VIDEO_CONSTR_IPRED,
|
|
+ V4L2_CID_MVE_VIDEO_ENTROPY_SYNC,
|
|
+ V4L2_CID_MVE_VIDEO_TEMPORAL_MVP,
|
|
+ V4L2_CID_MVE_VIDEO_TILE_ROWS,
|
|
+ V4L2_CID_MVE_VIDEO_TILE_COLS,
|
|
+ V4L2_CID_MVE_VIDEO_MIN_LUMA_CB_SIZE,
|
|
+ V4L2_CID_MVE_VIDEO_MB_MASK,
|
|
+ V4L2_CID_MVE_VIDEO_VP9_PROB_UPDATE,
|
|
+ V4L2_CID_MVE_VIDEO_BITDEPTH_CHROMA,
|
|
+ V4L2_CID_MVE_VIDEO_BITDEPTH_LUMA,
|
|
+ V4L2_CID_MVE_VIDEO_FORCE_CHROMA_FORMAT,
|
|
+ V4L2_CID_MVE_VIDEO_RGB_TO_YUV_MODE,
|
|
+ V4L2_CID_MVE_VIDEO_BANDWIDTH_LIMIT,
|
|
+ V4L2_CID_MVE_VIDEO_CABAC_INIT_IDC,
|
|
+ V4L2_CID_MVE_VIDEO_VPX_B_FRAME_QP,
|
|
+ V4L2_CID_MVE_VIDEO_SECURE_VIDEO,
|
|
+ V4L2_CID_MVE_VIDEO_CROP_LEFT,
|
|
+ V4L2_CID_MVE_VIDEO_CROP_RIGHT,
|
|
+ V4L2_CID_MVE_VIDEO_CROP_TOP,
|
|
+ V4L2_CID_MVE_VIDEO_CROP_BOTTOM,
|
|
+ V4L2_CID_MVE_VIDEO_HRD_BUFFER_SIZE,
|
|
+ V4L2_CID_MVE_VIDEO_WATCHDOG_TIMEOUT,
|
|
+ V4L2_CID_MVE_VIDEO_PROFILING
|
|
+};
|
|
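These IDs are ordinary V4L2 controls, so they can be set with the standard VIDIOC_S_CTRL ioctl. A hedged sketch, reusing the includes from the previous example and assuming the driver's control handler accepts the value:

static int example_set_nalu_format(int fd)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_MVE_VIDEO_NALU_FORMAT,
		.value = V4L2_OPT_NALU_FORMAT_START_CODES,
	};

	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}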
+
|
|
+/* Block configuration uncompressed rows header. This configures the size of the
+ * uncompressed body. */
|
|
+struct v4l2_buffer_general_rows_uncomp_hdr
|
|
+{
|
|
+ uint8_t n_cols_minus1; /* number of quad cols in picture minus 1 */
|
|
+ uint8_t n_rows_minus1; /* number of quad rows in picture minus 1 */
|
|
+ uint8_t reserved[2];
|
|
+};
|
|
+
|
|
+struct v4l2_buffer_general_block_configs
|
|
+{
|
|
+ uint8_t blk_cfg_type;
|
|
+ #define V4L2_BLOCK_CONFIGS_TYPE_NONE (0x00)
|
|
+ #define V4L2_BLOCK_CONFIGS_TYPE_ROW_UNCOMP (0xff)
|
|
+ uint8_t reserved[3];
|
|
+ union
|
|
+ {
|
|
+ struct v4l2_buffer_general_rows_uncomp_hdr rows_uncomp;
|
|
+ } blk_cfgs;
|
|
+};
|
|
+
|
|
+/* input for encoder */
|
|
+struct v4l2_buffer_param_qp
|
|
+{
|
|
+ /* QP (quantization parameter) for encode.
|
|
+ *
|
|
+ * When used to set fixed QP for encode, with rate control
|
|
+ * disabled, then the valid ranges are:
|
|
+ * H264: 0-51
|
|
+ * HEVC: 0-51
|
|
+ * VP8: 0-63
|
|
+ * VP9: 0-63
|
|
+ * Note: The QP must be set separately for I, P and B frames.
|
|
+ *
|
|
+ * But when this message is used with the regions-feature,
|
|
+ * then the valid ranges are the internal bitstream ranges:
|
|
+ * H264: 0-51
|
|
+ * HEVC: 0-51
|
|
+ * VP8: 0-127
|
|
+ * VP9: 0-255
|
|
+ */
|
|
+ int32_t qp;
|
|
+};
|
|
+
|
|
+/* The block parameter record specifies the various properties of a quad. */
|
|
+struct v4l2_block_param_record
|
|
+{
|
|
+ uint16_t qp_delta;
|
|
+ /* Bitset of four 4-bit QP delta values for a quad.
|
|
+ * For H.264 and HEVC these are qp delta values in the range -8 to +7.
|
|
+ * For Vp9 these are segment map values in the range 0 to 7.
|
|
+ */
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16 (0)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16_SZ (4)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16 (4)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16_SZ (4)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16 (8)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16_SZ (4)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16 (12)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16_SZ (4)
|
|
+
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_VP9_SEGID_TOP_LEFT_16X16 (0)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_VP9_SEGID_TOP_LEFT_16X16_SZ (3)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_VP9_SEGID_TOP_RIGHT_16X16 (4)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_VP9_SEGID_TOP_RIGHT_16X16_SZ (3)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_VP9_SEGID_BOT_LEFT_16X16 (8)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_VP9_SEGID_BOT_LEFT_16X16_SZ (3)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_VP9_SEGID_BOT_RIGHT_16X16 (12)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_VP9_SEGID_BOT_RIGHT_16X16_SZ (3)
|
|
+
|
|
+ uint8_t force;
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_FORCE_NONE (0x00)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_FORCE_QP (0x01)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_FORCE_32X32 (0x02)
|
|
+ #define V4L2_BLOCK_PARAM_RECORD_FORCE_RB (0x04)
|
|
+
|
|
+ uint8_t reserved;
|
|
+};
|
|
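Reading the _16X16 defines above as bit offsets into qp_delta (the _SZ defines give the field widths), a quad's four signed 4-bit deltas can be packed as in the sketch below; the helper name is illustrative and not part of this header.

static uint16_t example_pack_qp_delta(int tl, int tr, int bl, int br)
{
	/* Each delta is truncated to its signed 4-bit two's-complement field (-8..+7). */
	return ((uint16_t)(tl & 0xf) << V4L2_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16) |
	       ((uint16_t)(tr & 0xf) << V4L2_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16) |
	       ((uint16_t)(bl & 0xf) << V4L2_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16) |
	       ((uint16_t)(br & 0xf) << V4L2_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16);
}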
+
|
|
+struct v4l2_buffer_general_rows_uncomp_body
|
|
+{
|
|
+ /* the size of this array is variable and not necessarily equal to 1.
|
|
+ * therefore the sizeof operator should not be used
|
|
+ */
|
|
+ struct v4l2_block_param_record bpr[1];
|
|
+};
|
|
+
|
|
+struct v4l2_core_buffer_header_general
|
|
+{
|
|
+ //uint64_t user_data_tag; // User supplied tracking identifier
|
|
+ //uint64_t app_handle; // Host buffer handle number
|
|
+ uint16_t type; // type of config, value is one of V4L2_BUFFER_GENERAL_TYPE_X
|
|
+ #define V4L2_BUFFER_GENERAL_TYPE_BLOCK_CONFIGS (1) /* block_configs */
|
|
+ #define V4L2_BUFFER_GENERAL_TYPE_ENCODER_STATS (4) /* encoder_stats */
|
|
+ uint16_t config_size; // size of the configuration
|
|
+ uint32_t buffer_size;
|
|
+ struct v4l2_buffer_general_block_configs config;
|
|
+};
|
|
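A hedged sketch of initialising the general buffer header for the row-uncompressed block configuration; the helper is illustrative, and the exact meaning of buffer_size is an assumption left to the caller.

static void example_init_block_configs(struct v4l2_core_buffer_header_general *hdr,
				       unsigned int quad_cols, unsigned int quad_rows)
{
	hdr->type = V4L2_BUFFER_GENERAL_TYPE_BLOCK_CONFIGS;
	hdr->config_size = sizeof(hdr->config);
	hdr->config.blk_cfg_type = V4L2_BLOCK_CONFIGS_TYPE_ROW_UNCOMP;
	hdr->config.blk_cfgs.rows_uncomp.n_cols_minus1 = quad_cols - 1;
	hdr->config.blk_cfgs.rows_uncomp.n_rows_minus1 = quad_rows - 1;
}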
+
|
|
+#endif /* _MVX_V4L2_CONTROLS_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_ext_if.h b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_ext_if.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_ext_if.h
|
|
@@ -0,0 +1,87 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_EXT_H_
|
|
+#define _MVX_EXT_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/mutex.h>
|
|
+#include <media/v4l2-dev.h>
|
|
+#include <media/v4l2-device.h>
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct device;
|
|
+struct mvx_csched;
|
|
+struct mvx_fw_cache;
|
|
+
|
|
+struct mvx_ext_if {
|
|
+ struct mutex lock;
|
|
+ struct device *dev;
|
|
+ struct mvx_fw_cache *cache;
|
|
+ struct mvx_client_ops *client_ops;
|
|
+ struct video_device vdev;
|
|
+ struct v4l2_device v4l2_dev;
|
|
+ struct dentry *dsessions;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_ext_if_construct() - Construct the external interface object.
|
|
+ * @ext: Pointer to interface object.
|
|
+ * @dev: Pointer to device struct.
|
|
+ * @cache: Pointer to firmware cache.
|
|
+ * @client_ops: Pointer to client client_ops.
|
|
+ * @parent: Parent debugfs directory entry.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_ext_if_construct(struct mvx_ext_if *ext,
|
|
+ struct device *dev,
|
|
+ struct mvx_fw_cache *cache,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ struct dentry *parent);
|
|
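For context, a hedged sketch of the expected construct/destruct pairing from the owning driver; the surrounding dev, cache, client_ops and debugfs parent are assumptions, and mvx_ext_if would normally be embedded in a larger device structure rather than kept as a file-scope variable.

static struct mvx_ext_if example_ext;

static int example_probe(struct device *dev, struct mvx_fw_cache *cache,
			 struct mvx_client_ops *client_ops, struct dentry *parent)
{
	/* Registers the v4l2_device and the video device node. */
	return mvx_ext_if_construct(&example_ext, dev, cache, client_ops, parent);
}

static void example_remove(void)
{
	mvx_ext_if_destruct(&example_ext);
}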
+
|
|
+/**
|
|
+ * mvx_ext_if_destruct() - Destroy external interface instance.
|
|
+ * @ext: Pointer to interface object.
|
|
+ */
|
|
+void mvx_ext_if_destruct(struct mvx_ext_if *ext);
|
|
+
|
|
+#endif /* _MVX_EXT_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_ext_v4l2.c b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_ext_v4l2.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_ext_v4l2.c
|
|
@@ -0,0 +1,182 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/err.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/videodev2.h>
|
|
+#include <media/v4l2-ctrls.h>
|
|
+#include <media/v4l2-device.h>
|
|
+#include <media/v4l2-event.h>
|
|
+#include <media/v4l2-ioctl.h>
|
|
+#include "mvx_bitops.h"
|
|
+#include "mvx_buffer.h"
|
|
+#include "mvx_ext_if.h"
|
|
+#include "mvx_firmware.h"
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_mmu.h"
|
|
+#include "mvx_session.h"
|
|
+
|
|
+#include "mvx_v4l2_buffer.h"
|
|
+#include "mvx_v4l2_session.h"
|
|
+#include "mvx_v4l2_vidioc.h"
|
|
+#include "mvx_v4l2_fops.h"
|
|
+#include "mvx_log_group.h"
|
|
+
|
|
+static const struct v4l2_file_operations mvx_v4l2_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = mvx_v4l2_open,
|
|
+ .release = mvx_v4l2_release,
|
|
+ .poll = mvx_v4l2_poll,
|
|
+ .unlocked_ioctl = video_ioctl2,
|
|
+#ifdef CONFIG_COMPAT
|
|
+ .compat_ioctl32 = video_ioctl2, /* for mvx_v4l2_vidioc_default() */
|
|
+#endif
|
|
+ .mmap = mvx_v4l2_mmap
|
|
+};
|
|
+
|
|
+static const struct v4l2_ioctl_ops mvx_v4l2_ioctl_ops = {
|
|
+ .vidioc_querycap = mvx_v4l2_vidioc_querycap,
|
|
+ .vidioc_enum_fmt_vid_cap = mvx_v4l2_vidioc_enum_fmt_vid_cap,
|
|
+ .vidioc_enum_fmt_vid_out = mvx_v4l2_vidioc_enum_fmt_vid_out,
|
|
+ //.vidioc_enum_fmt_vid_cap_mplane = mvx_v4l2_vidioc_enum_fmt_vid_cap,
|
|
+ //.vidioc_enum_fmt_vid_out_mplane = mvx_v4l2_vidioc_enum_fmt_vid_out,
|
|
+ .vidioc_enum_framesizes = mvx_v4l2_vidioc_enum_framesizes,
|
|
+ .vidioc_g_fmt_vid_cap = mvx_v4l2_vidioc_g_fmt_vid_cap,
|
|
+ .vidioc_g_fmt_vid_cap_mplane = mvx_v4l2_vidioc_g_fmt_vid_cap,
|
|
+ .vidioc_g_fmt_vid_out = mvx_v4l2_vidioc_g_fmt_vid_out,
|
|
+ .vidioc_g_fmt_vid_out_mplane = mvx_v4l2_vidioc_g_fmt_vid_out,
|
|
+ .vidioc_s_fmt_vid_cap = mvx_v4l2_vidioc_s_fmt_vid_cap,
|
|
+ .vidioc_s_fmt_vid_cap_mplane = mvx_v4l2_vidioc_s_fmt_vid_cap,
|
|
+ .vidioc_s_fmt_vid_out = mvx_v4l2_vidioc_s_fmt_vid_out,
|
|
+ .vidioc_s_fmt_vid_out_mplane = mvx_v4l2_vidioc_s_fmt_vid_out,
|
|
+ .vidioc_try_fmt_vid_cap = mvx_v4l2_vidioc_try_fmt_vid_cap,
|
|
+ .vidioc_try_fmt_vid_cap_mplane = mvx_v4l2_vidioc_try_fmt_vid_cap,
|
|
+ .vidioc_try_fmt_vid_out = mvx_v4l2_vidioc_try_fmt_vid_out,
|
|
+ .vidioc_try_fmt_vid_out_mplane = mvx_v4l2_vidioc_try_fmt_vid_out,
|
|
+ .vidioc_g_selection = mvx_v4l2_vidioc_g_selection,
|
|
+ .vidioc_s_selection = mvx_v4l2_vidioc_s_selection,
|
|
+ .vidioc_streamon = mvx_v4l2_vidioc_streamon,
|
|
+ .vidioc_streamoff = mvx_v4l2_vidioc_streamoff,
|
|
+ .vidioc_encoder_cmd = mvx_v4l2_vidioc_encoder_cmd,
|
|
+ .vidioc_try_encoder_cmd = mvx_v4l2_vidioc_try_encoder_cmd,
|
|
+ .vidioc_decoder_cmd = mvx_v4l2_vidioc_decoder_cmd,
|
|
+ .vidioc_try_decoder_cmd = mvx_v4l2_vidioc_try_decoder_cmd,
|
|
+ .vidioc_reqbufs = mvx_v4l2_vidioc_reqbufs,
|
|
+ .vidioc_create_bufs = mvx_v4l2_vidioc_create_bufs,
|
|
+ .vidioc_querybuf = mvx_v4l2_vidioc_querybuf,
|
|
+ .vidioc_qbuf = mvx_v4l2_vidioc_qbuf,
|
|
+ .vidioc_dqbuf = mvx_v4l2_vidioc_dqbuf,
|
|
+ .vidioc_subscribe_event = mvx_v4l2_vidioc_subscribe_event,
|
|
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
|
|
+ .vidioc_default = mvx_v4l2_vidioc_default,
|
|
+ .vidioc_g_parm = mvx_v4l2_vidioc_g_parm,
|
|
+ .vidioc_s_parm = mvx_v4l2_vidioc_s_parm,
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions and variables
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_ext_if_construct(struct mvx_ext_if *ext,
|
|
+ struct device *dev,
|
|
+ struct mvx_fw_cache *cache,
|
|
+ struct mvx_client_ops *client_ops,
|
|
+ struct dentry *parent)
|
|
+{
|
|
+ int ret;
|
|
+ const char name[] = "mvx";
|
|
+
|
|
+ ext->dev = dev;
|
|
+ ext->cache = cache;
|
|
+ ext->client_ops = client_ops;
|
|
+ mutex_init(&ext->lock);
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ ext->dsessions = debugfs_create_dir("session", parent);
|
|
+ if (IS_ERR_OR_NULL(ext->dsessions))
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ ret = v4l2_device_register(dev, &ext->v4l2_dev);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR,
|
|
+ "Failed to register V4L2 device. ret=%d.", ret);
|
|
+ goto delete_dentry;
|
|
+ }
|
|
+
|
|
+ /* Video device. */
|
|
+ ext->vdev.fops = &mvx_v4l2_fops;
|
|
+ ext->vdev.ioctl_ops = &mvx_v4l2_ioctl_ops;
|
|
+ ext->vdev.release = video_device_release_empty;
|
|
+ ext->vdev.vfl_dir = VFL_DIR_M2M;
|
|
+ ext->vdev.v4l2_dev = &ext->v4l2_dev;
|
|
+ ext->vdev.device_caps = V4L2_CAP_VIDEO_M2M |
|
|
+ V4L2_CAP_VIDEO_M2M_MPLANE |
|
|
+ V4L2_CAP_EXT_PIX_FORMAT |
|
|
+ V4L2_CAP_STREAMING;
|
|
+ strncpy(ext->vdev.name, name, sizeof(ext->vdev.name));
|
|
+
|
|
+ video_set_drvdata(&ext->vdev, ext);
|
|
+
|
|
+ ret = video_register_device(&ext->vdev, VFL_TYPE_VIDEO, -1);
|
|
+ if (ret != 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR,
|
|
+ "Failed to register video device. ret=%d.",
|
|
+ ret);
|
|
+ goto unregister_device;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+unregister_device:
|
|
+ v4l2_device_unregister(&ext->v4l2_dev);
|
|
+
|
|
+delete_dentry:
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ debugfs_remove_recursive(ext->dsessions);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void mvx_ext_if_destruct(struct mvx_ext_if *ext)
|
|
+{
|
|
+ video_unregister_device(&ext->vdev);
|
|
+ v4l2_device_unregister(&ext->v4l2_dev);
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ debugfs_remove_recursive(ext->dsessions);
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_buffer.c b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_buffer.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_buffer.c
|
|
@@ -0,0 +1,515 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/time.h>
|
|
+#include <linux/version.h>
|
|
+#include "mvx_ext_if.h"
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx_seq.h"
|
|
+#include "mvx_v4l2_buffer.h"
|
|
+#include "mvx-v4l2-controls.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static functions and variables
|
|
+ ****************************************************************************/
|
|
+
|
|
+static void v4l2_buffer_show(struct mvx_v4l2_buffer *buffer,
|
|
+ struct seq_file *s)
|
|
+{
|
|
+ struct vb2_v4l2_buffer *v4l2 = &buffer->vb2_v4l2_buffer;
|
|
+ struct vb2_buffer *vb2 = &v4l2->vb2_buf;
|
|
+ int is_multi = V4L2_TYPE_IS_MULTIPLANAR(vb2->type);
|
|
+ int i;
|
|
+ int ind = 0;
|
|
+
|
|
+ mvx_seq_printf(s, "mvx_v4l2_buffer", ind, "%p\n", buffer);
|
|
+
|
|
+ ind++;
|
|
+ mvx_seq_printf(s, "vb2", ind, "%p\n", vb2);
|
|
+
|
|
+ ind++;
|
|
+ mvx_seq_printf(s, "index", ind, "%u\n", vb2->index);
|
|
+ mvx_seq_printf(s, "type", ind, "%u (multi: %s)\n",
|
|
+ vb2->type, is_multi ? "yes" : "no");
|
|
+ mvx_seq_printf(s, "flags", ind, "0x%08x\n", v4l2->flags);
|
|
+ mvx_seq_printf(s, "field", ind, "%u\n", v4l2->field);
|
|
+
|
|
+#if KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE
|
|
+ mvx_seq_printf(s, "timestamp", ind, "%llu\n", vb2->timestamp);
|
|
+#else
|
|
+ mvx_seq_printf(s, "timestamp", ind, "\n");
|
|
+ ind++;
|
|
+ mvx_seq_printf(s, "tv_sec", ind, "%lu\n", v4l2->vb2_buf.timestamp.tv_sec);
|
|
+ mvx_seq_printf(s, "tv_usec", ind, "%lu\n", v4l2->vb2_buf.timestamp.tv_usec);
|
|
+ ind--;
|
|
+#endif
|
|
+ mvx_seq_printf(s, "timecode", ind, "\n");
|
|
+ ind++;
|
|
+ mvx_seq_printf(s, "type", ind, "%u\n", v4l2->timecode.type);
|
|
+ mvx_seq_printf(s, "flags", ind, "%u\n", v4l2->timecode.flags);
|
|
+ mvx_seq_printf(s, "frames", ind, "%u\n", v4l2->timecode.frames);
|
|
+ mvx_seq_printf(s, "seconds", ind, "%u\n", v4l2->timecode.seconds);
|
|
+ mvx_seq_printf(s, "minutes", ind, "%u\n", v4l2->timecode.minutes);
|
|
+ mvx_seq_printf(s, "hours", ind, "%u\n", v4l2->timecode.hours);
|
|
+ ind--;
|
|
+
|
|
+ mvx_seq_printf(s, "sequence", ind, "%u\n", v4l2->sequence);
|
|
+ mvx_seq_printf(s, "memory", ind, "%u\n", vb2->memory);
|
|
+
|
|
+ mvx_seq_printf(s, "num_planes", ind, "%u\n", vb2->num_planes);
|
|
+
|
|
+ mvx_seq_printf(s, "planes", ind, "\n");
|
|
+ ind++;
|
|
+ for (i = 0; i < vb2->num_planes; ++i) {
|
|
+ char tag[10];
|
|
+ struct vb2_plane *plane = &vb2->planes[i];
|
|
+
|
|
+ scnprintf(tag, sizeof(tag), "#%d", i);
|
|
+ mvx_seq_printf(s, tag, ind,
|
|
+ "bytesused: %10u, length: %10u, m.offset: %10u, m.userptr: %10lu, m.fd: %10d, data_offset: %10u\n",
|
|
+ plane->bytesused,
|
|
+ plane->length,
|
|
+ plane->m.offset,
|
|
+ plane->m.userptr,
|
|
+ plane->m.fd,
|
|
+ plane->data_offset);
|
|
+ }
|
|
+
|
|
+ ind--;
|
|
+}
|
|
+
|
|
+static int buffer_stat_show(struct seq_file *s,
|
|
+ void *v)
|
|
+{
|
|
+ struct mvx_v4l2_buffer *vbuf = s->private;
|
|
+
|
|
+ v4l2_buffer_show(vbuf, s);
|
|
+ seq_puts(s, "\n");
|
|
+ mvx_buffer_show(&vbuf->buf, s);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int buffer_stat_open(struct inode *inode,
|
|
+ struct file *file)
|
|
+{
|
|
+ return single_open(file, buffer_stat_show, inode->i_private);
|
|
+}
|
|
+
|
|
+static const struct file_operations buffer_stat_fops = {
|
|
+ .open = buffer_stat_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = seq_release
|
|
+};
|
|
+
|
|
+static int buffer_debugfs_init(struct dentry *parent,
|
|
+ struct mvx_v4l2_buffer *vbuf)
|
|
+{
|
|
+ char name[20];
|
|
+ struct dentry *dentry;
|
|
+
|
|
+ scnprintf(name, sizeof(name), "buffer%u", to_vb2_buf(vbuf)->index);
|
|
+ vbuf->dentry = debugfs_create_dir(name, parent);
|
|
+ if (IS_ERR_OR_NULL(vbuf->dentry))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ dentry = debugfs_create_file("stat", 0400, vbuf->dentry, vbuf,
|
|
+ &buffer_stat_fops);
|
|
+ if (IS_ERR_OR_NULL(dentry))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * get_bytesused() - Get total number of bytes used for Vb2 buffer.
|
|
+ */
|
|
+static size_t get_bytesused(struct vb2_buffer *b)
|
|
+{
|
|
+ size_t size;
|
|
+ uint32_t i;
|
|
+
|
|
+ for (i = 0, size = 0; i < b->num_planes; i++)
|
|
+ size += b->planes[i].bytesused;
|
|
+
|
|
+ return size;
|
|
+}
|
|
+
|
|
+static int clear_bytesused(struct vb2_buffer *b)
|
|
+{
|
|
+ uint32_t i;
|
|
+
|
|
+ for (i = 0; i < b->num_planes; i++)
|
|
+ b->planes[i].bytesused = 0;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* Update mvx_buffer flags from vb2_buffer flags */
|
|
+static int update_mvx_flags(struct mvx_buffer *buf,
|
|
+ struct vb2_buffer *b)
|
|
+{
|
|
+ struct vb2_v4l2_buffer *vb2_v4l2 = to_vb2_v4l2_buffer(b);
|
|
+ __u32 flags = vb2_v4l2->flags;
|
|
+ __u32 rtt_flags = (buf->flags & MVX_BUFFER_FRAME_FLAG_ROTATION_MASK);
|
|
+ buf->flags = 0;
|
|
+
|
|
+ if (V4L2_TYPE_IS_OUTPUT(b->type) != false && get_bytesused(b) == 0)
|
|
+ flags |= V4L2_BUF_FLAG_LAST;
|
|
+
|
|
+ if (flags & V4L2_BUF_FLAG_LAST)
|
|
+ buf->flags |= MVX_BUFFER_EOS;
|
|
+
|
|
+ if ((flags & V4L2_BUF_FLAG_MVX_DISABLE_CACHE_MAINTENANCE) == V4L2_BUF_FLAG_MVX_DISABLE_CACHE_MAINTENANCE)
|
|
+ {
|
|
+ buf->flags |= MVX_BUFFER_FLAG_DISABLE_CACHE_MAINTENANCE;
|
|
+ }
|
|
+ if (mvx_is_afbc(buf->format)) {
|
|
+ if ((flags & V4L2_BUF_FLAG_MVX_AFBC_TILED_HEADERS) == V4L2_BUF_FLAG_MVX_AFBC_TILED_HEADERS)
|
|
+ buf->flags |= MVX_BUFFER_AFBC_TILED_HEADERS;
|
|
+
|
|
+ if ((flags & V4L2_BUF_FLAG_MVX_AFBC_TILED_BODY) == V4L2_BUF_FLAG_MVX_AFBC_TILED_BODY)
|
|
+ buf->flags |= MVX_BUFFER_AFBC_TILED_BODY;
|
|
+
|
|
+ if ((flags & V4L2_BUF_FLAG_MVX_AFBC_32X8_SUPERBLOCK) == V4L2_BUF_FLAG_MVX_AFBC_32X8_SUPERBLOCK)
|
|
+ buf->flags |= MVX_BUFFER_AFBC_32X8_SUPERBLOCK;
|
|
+
|
|
+ if ((flags & V4L2_BUF_FLAG_MVX_AFBC_BLOCK_SPLIT) == V4L2_BUF_FLAG_MVX_AFBC_BLOCK_SPLIT)
|
|
+ buf->flags |= MVX_BUFFER_AFBC_BLOCK_SPLIT;
|
|
+ } else if (mvx_is_bitstream(buf->format)) {
|
|
+ if (buf->dir == MVX_DIR_INPUT) {
|
|
+ //decode bitstream port
|
|
+ if ((flags & V4L2_BUF_FLAG_END_OF_SUB_FRAME) == V4L2_BUF_FLAG_END_OF_SUB_FRAME){
|
|
+ buf->flags |= MVX_BUFFER_END_OF_SUB_FRAME;
|
|
+ }
|
|
+ if (flags & V4L2_BUF_FLAG_KEYFRAME)
|
|
+ buf->flags |= MVX_BUFFER_EOF;
|
|
+ if ((flags & V4L2_BUF_FLAG_MVX_CODEC_CONFIG) == V4L2_BUF_FLAG_MVX_CODEC_CONFIG)
|
|
+ buf->flags |= MVX_BUFFER_CODEC_CONFIG;
|
|
+ }
|
|
+ } else if (mvx_is_frame(buf->format)) {
|
|
+ if (buf->dir == MVX_DIR_OUTPUT) {
|
|
+ //decode frame port
|
|
+ if (flags & V4L2_BUF_FRAME_FLAG_ROTATION_MASK) {
|
|
+ if ((flags & V4L2_BUF_FRAME_FLAG_ROTATION_MASK) == V4L2_BUF_FRAME_FLAG_ROTATION_90) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_ROTATION_90;
|
|
+ } else if ((flags & V4L2_BUF_FRAME_FLAG_ROTATION_MASK) == V4L2_BUF_FRAME_FLAG_ROTATION_180) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_ROTATION_180;
|
|
+ } else if ((flags & V4L2_BUF_FRAME_FLAG_ROTATION_MASK) == V4L2_BUF_FRAME_FLAG_ROTATION_270) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_ROTATION_270;
|
|
+ }
|
|
+ }
|
|
+ if (flags & V4L2_BUF_FRAME_FLAG_SCALING_MASK) {
|
|
+ if ((flags & V4L2_BUF_FRAME_FLAG_SCALING_MASK) == V4L2_BUF_FRAME_FLAG_SCALING_2) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_SCALING_2;
|
|
+ } else if ((flags & V4L2_BUF_FRAME_FLAG_SCALING_MASK) == V4L2_BUF_FRAME_FLAG_SCALING_4) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_SCALING_4;
|
|
+ }
|
|
+ }
|
|
+ } else if (buf->dir == MVX_DIR_INPUT) {
|
|
+ //encode frame port
|
|
+ if (flags & V4L2_BUF_FRAME_FLAG_MIRROR_MASK) {
|
|
+ if ((flags & V4L2_BUF_FRAME_FLAG_MIRROR_MASK) == V4L2_BUF_FRAME_FLAG_MIRROR_HORI) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_MIRROR_HORI;
|
|
+ } else if ((flags & V4L2_BUF_FRAME_FLAG_MIRROR_MASK) == V4L2_BUF_FRAME_FLAG_MIRROR_VERT) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_MIRROR_VERT;
|
|
+ }
|
|
+ }
|
|
+ buf->flags |= rtt_flags;
|
|
+ if ((flags & V4L2_BUF_FLAG_MVX_BUFFER_EPR) == V4L2_BUF_FLAG_MVX_BUFFER_EPR) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_GENERAL;
|
|
+ }
|
|
+ if ((flags & V4L2_BUF_FLAG_MVX_BUFFER_ROI) == V4L2_BUF_FLAG_MVX_BUFFER_ROI) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_ROI;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ } else {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "unrecognized buffer format!.");
|
|
+
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* Update mvx_buffer from mvx_v4l2_buffer */
|
|
+static int update_mvx_buffer(struct mvx_v4l2_buffer *vbuf)
|
|
+{
|
|
+ struct vb2_buffer *vb2 = to_vb2_buf(vbuf);
|
|
+ struct mvx_buffer *mvx_buf = &vbuf->buf;
|
|
+ int i;
|
|
+ int ret;
|
|
+
|
|
+ if (vb2->num_planes != mvx_buf->nplanes) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "VB2 and MVX buffers have different number of planes. vb2_planes=%u, mvx_planes=%u.",
|
|
+ vb2->num_planes, mvx_buf->nplanes);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+#if KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE
|
|
+ mvx_buf->user_data = vb2->timestamp;
|
|
+#else
|
|
+ {
|
|
+ struct timeval *ts = &vbuf->vb2_v4l2_buffer.vb2_buf.timestamp;
|
|
+
|
|
+ mvx_buf->user_data = ((uint64_t)ts->tv_sec << 32) |
|
|
+ (ts->tv_usec & 0xffffffff);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ for (i = 0; i < vb2->num_planes; i++) {
|
|
+ unsigned int offset = vb2->planes[i].data_offset;
|
|
+ unsigned int filled = vb2->planes[i].bytesused;
|
|
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE == vb2->type &&
|
|
+ vb2->memory == V4L2_MEMORY_DMABUF && offset > 0)
|
|
+ {
|
|
+ // TODO(crbug.com/901264): The way to pass an offset within a DMA-buf is not defined
|
|
+ // in V4L2 specification, so we abuse data_offset for now. Fix it when we have the
|
|
+ // right interface, including any necessary validation and potential alignment.
|
|
+ //workaround: for MPLANAR, if using vb2->planes[i].data_offset, bytesused should be
|
|
+ // larger than data_offset, otherwise __verify_length() will fail in vb2_qbuf()
|
|
+ // so 'bytesused - data_offset' is the exact filled data size.
|
|
+ filled = (vb2->planes[i].bytesused - offset);
|
|
+ }
|
|
+ else if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == vb2->type &&
|
|
+ vb2->memory == V4L2_MEMORY_DMABUF && i > 0)
|
|
+ {
|
|
+ //'data_offset' is not filled in by the v4l2 core in "vb2_fill_vb2_v4l2_buffer" for this buffer type,
+ //so it is always 0 here and the plane offset has to be computed manually.
|
|
+ int k;
|
|
+ offset = 0;
|
|
+ for (k = i; k > 0; k--)
+ offset += vb2->planes[i - k].length;
|
|
+ }
|
|
+ /*
|
|
+ * For single planar mmap buffers the offset is carried by
|
|
+ * the lower part of the offset.
|
|
+ */
|
|
+ if (vb2->memory == V4L2_MEMORY_MMAP)
|
|
+ offset += vb2->planes[i].m.offset & ~PAGE_MASK;
|
|
+
|
|
+ ret = mvx_buffer_filled_set(mvx_buf, i,
|
|
+ filled, offset);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = update_mvx_flags(mvx_buf, to_vb2_buf(vbuf));
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
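The pre-4.5 fallback above packs the buffer timestamp into the 64-bit user_data field; a minimal stand-alone sketch of that packing (illustrative only, not part of the patch):

#include <stdint.h>

/* Pack seconds into the high 32 bits and microseconds into the low 32 bits,
 * mirroring the user_data layout used by the legacy branch above. */
static uint64_t pack_timestamp(uint32_t tv_sec, uint32_t tv_usec)
{
	return ((uint64_t)tv_sec << 32) | (tv_usec & 0xffffffff);
}

/* Reverse of pack_timestamp(); matches the unpacking done when the buffer
 * is handed back in mvx_v4l2_buffer_update(). */
static void unpack_timestamp(uint64_t user_data, uint32_t *tv_sec, uint32_t *tv_usec)
{
	*tv_sec = user_data >> 32;
	*tv_usec = user_data & 0xffffffff;
}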
|
|
+
|
|
+static int update_v4l2_bytesused(struct mvx_v4l2_buffer *vbuf)
|
|
+{
|
|
+ struct vb2_buffer *b = to_vb2_buf(vbuf);
|
|
+ struct mvx_buffer *buf = &vbuf->buf;
|
|
+ int i;
|
|
+
|
|
+ if (b->num_planes != buf->nplanes) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "VB2 and MVX buffers have different number of planes. vb2_planes=%u, mvx_planes=%u.",
|
|
+ b->num_planes, buf->nplanes);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * MVX filled is the number of bytes excluding the offset. The total
|
|
+ * length is calculated as 'filled + offset' and should be <= length.
|
|
+ *
|
|
+ * V4L2 bytesused is the total length including the offset.
|
|
+ * bytesused should be <= length and bytesused >= offset.
|
|
+ */
|
|
+ for (i = 0; i < b->num_planes; i++) {
|
|
+ b->planes[i].bytesused =
|
|
+ buf->planes[i].filled + buf->planes[i].offset;
|
|
+ b->planes[i].data_offset = buf->planes[i].offset;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
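A small illustrative helper for the relation described in the comment above (hypothetical struct, not part of the patch): with offset = 64 and filled = 1000, bytesused is 1064, and both bytesused <= length and bytesused >= offset must hold.

/* Hypothetical plane description used only for this example. */
struct plane_example {
	unsigned int length; /* total plane size */
	unsigned int offset; /* data_offset */
	unsigned int filled; /* payload size, excluding the offset */
};

/* V4L2 bytesused includes the offset: bytesused = filled + offset. */
static unsigned int example_bytesused(const struct plane_example *p)
{
	return p->filled + p->offset;
}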
|
|
+
|
|
+static int update_vb2_flags(struct mvx_v4l2_buffer *vbuf)
|
|
+{
|
|
+ struct vb2_v4l2_buffer *b = &vbuf->vb2_v4l2_buffer;
|
|
+ struct mvx_buffer *buf = &vbuf->buf;
|
|
+
|
|
+ b->flags &= ~(V4L2_BUF_FLAG_ERROR |
|
|
+ V4L2_BUF_FLAG_KEYFRAME |
|
|
+ V4L2_BUF_FLAG_LAST);
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_EOS)
|
|
+ b->flags |= V4L2_BUF_FLAG_LAST;
|
|
+
|
|
+ if ((buf->flags & MVX_BUFFER_EOF) && (buf->flags & MVX_BUFFER_FRAME_FLAG_IFRAME))
|
|
+ b->flags |= V4L2_BUF_FLAG_KEYFRAME;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_CORRUPT)
|
|
+ b->flags |= V4L2_BUF_FLAG_ERROR;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_REJECTED)
|
|
+ clear_bytesused(&b->vb2_buf);
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_DECODE_ONLY)
|
|
+ b->flags |= V4L2_BUF_FLAG_MVX_DECODE_ONLY;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_CODEC_CONFIG)
|
|
+ b->flags |= V4L2_BUF_FLAG_MVX_CODEC_CONFIG;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_AFBC_TILED_HEADERS)
|
|
+ b->flags |= V4L2_BUF_FLAG_MVX_AFBC_TILED_HEADERS;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_AFBC_TILED_BODY)
|
|
+ b->flags |= V4L2_BUF_FLAG_MVX_AFBC_TILED_BODY;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_AFBC_32X8_SUPERBLOCK)
|
|
+ b->flags |= V4L2_BUF_FLAG_MVX_AFBC_32X8_SUPERBLOCK;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_FRAME_PRESENT)
|
|
+ b->flags |= V4L2_BUF_FLAG_MVX_BUFFER_FRAME_PRESENT;
|
|
+
|
|
+ if (buf->flags & MVX_BUFFER_FRAME_NEED_REALLOC)
|
|
+ b->flags |= V4L2_BUF_FLAG_MVX_BUFFER_NEED_REALLOC;
|
|
+
|
|
+ if ((buf->flags & MVX_BUFFER_EOF) && (buf->flags & MVX_BUFFER_FRAME_FLAG_PFRAME))
|
|
+ b->flags |= V4L2_BUF_FLAG_PFRAME;
|
|
+
|
|
+ if ((buf->flags & MVX_BUFFER_EOF) && (buf->flags & MVX_BUFFER_FRAME_FLAG_BFRAME))
|
|
+ b->flags |= V4L2_BUF_FLAG_BFRAME;
|
|
+
|
|
+ return 0;
|
|
+}
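For reference, a hedged userspace-side sketch of consuming these flags after VIDIOC_DQBUF (the file descriptor and buffer setup are assumed):

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Dequeue a capture buffer and report whether the driver marked it as the
 * last one (update_vb2_flags() sets V4L2_BUF_FLAG_LAST from MVX_BUFFER_EOS). */
static bool dequeue_is_last(int fd, struct v4l2_buffer *buf)
{
	if (ioctl(fd, VIDIOC_DQBUF, buf) != 0)
		return false;

	return (buf->flags & V4L2_BUF_FLAG_LAST) != 0;
}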
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions and variables
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_v4l2_buffer_construct(struct mvx_v4l2_buffer *vbuf,
|
|
+ struct mvx_v4l2_session *vsession,
|
|
+ enum mvx_direction dir,
|
|
+ unsigned int nplanes,
|
|
+ struct sg_table **sgt)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_buffer_construct(&vbuf->buf, vsession->ext->dev,
|
|
+ &vsession->session.mmu, dir,
|
|
+ nplanes, sgt);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ struct mvx_v4l2_port *vport = &vsession->port[dir];
|
|
+
|
|
+ ret = buffer_debugfs_init(vport->dentry, vbuf);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(&vsession->session,
|
|
+ "Failed to create buffer debugfs entry.");
|
|
+ goto destruct_buffer;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+destruct_buffer:
|
|
+ mvx_buffer_destruct(&vbuf->buf);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void mvx_v4l2_buffer_destruct(struct mvx_v4l2_buffer *vbuf)
|
|
+{
|
|
+ mvx_buffer_destruct(&vbuf->buf);
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ debugfs_remove_recursive(vbuf->dentry);
|
|
+}
|
|
+
|
|
+struct mvx_v4l2_buffer *mvx_buffer_to_v4l2_buffer(struct mvx_buffer *buffer)
|
|
+{
|
|
+ return container_of(buffer, struct mvx_v4l2_buffer, buf);
|
|
+}
|
|
+
|
|
+/* Update mvx_v4l2_buffer from vb2_buffer */
|
|
+int mvx_v4l2_buffer_set(struct mvx_v4l2_buffer *vbuf,
|
|
+ struct vb2_buffer *b)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = update_mvx_buffer(vbuf);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+enum vb2_buffer_state mvx_v4l2_buffer_update(struct mvx_v4l2_buffer *vbuf)
|
|
+{
|
|
+ struct vb2_buffer *vb2 = to_vb2_buf(vbuf);
|
|
+ struct mvx_buffer *mvx_buf = &vbuf->buf;
|
|
+ int ret;
|
|
+
|
|
+ if (V4L2_TYPE_IS_OUTPUT(vb2->type) != false)
|
|
+ ret = clear_bytesused(vb2);
|
|
+ else
|
|
+ ret = update_v4l2_bytesused(vbuf);
|
|
+
|
|
+ if (ret != 0)
|
|
+ goto error;
|
|
+
|
|
+ ret = update_vb2_flags(vbuf);
|
|
+
|
|
+
|
|
+#if KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE
|
|
+ vb2->timestamp = mvx_buf->user_data;
|
|
+#else
|
|
+ {
|
|
+ struct timeval *ts = &vbuf->vb2_v4l2_buffer.timestamp;
|
|
+
|
|
+ ts->tv_sec = mvx_buf->user_data >> 32;
|
|
+ ts->tv_usec = mvx_buf->user_data & 0xffffffff;
|
|
+ }
|
|
+#endif
|
|
+ if (ret != 0 || (vbuf->vb2_v4l2_buffer.flags & V4L2_BUF_FLAG_ERROR) != 0) {
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, "buf flag is V4L2_BUF_FLAG_ERROR user_data:%llx dir:%d", mvx_buf->user_data, mvx_buf->dir);
|
|
+ goto error;
|
|
+ }
|
|
+
|
|
+ return VB2_BUF_STATE_DONE;
|
|
+
|
|
+error:
|
|
+ return VB2_BUF_STATE_ERROR;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_buffer.h b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_buffer.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_buffer.h
|
|
@@ -0,0 +1,167 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_V4L2_BUFFER_H_
|
|
+#define _MVX_V4L2_BUFFER_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/types.h>
|
|
+#include <media/videobuf2-v4l2.h>
|
|
+#include "mvx_buffer.h"
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_v4l2_session.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define vb2_v4l2_to_mvx_v4l2_buffer(v4l2) \
|
|
+ container_of(v4l2, struct mvx_v4l2_buffer, vb2_v4l2_buffer)
|
|
+
|
|
+#define vb2_to_mvx_v4l2_buffer(vb2) \
|
|
+ vb2_v4l2_to_mvx_v4l2_buffer(to_vb2_v4l2_buffer(vb2))
|
|
+
|
|
+#define to_vb2_buf(vbuf) (&((vbuf)->vb2_v4l2_buffer.vb2_buf))
|
|
+
|
|
+/**
|
|
+ * struct mvx_v4l2_buffer - MVX V4L2 buffer.
|
|
+ * @vb2_v4l2_buffer: VB2 V4L2 buffer.
|
|
+ * @buf: MVX buffer.
|
|
+ * @dentry: Debug file system entry.
|
|
+ */
|
|
+struct mvx_v4l2_buffer {
|
|
+ struct vb2_v4l2_buffer vb2_v4l2_buffer;
|
|
+ struct mvx_buffer buf;
|
|
+ struct dentry *dentry;
|
|
+};
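A minimal sketch of how the cast macros above are typically used from a vb2 callback (the callback name is illustrative, not part of the patch):

/* Recover the driver buffer from the vb2_buffer handed to a queue callback. */
static void example_buf_queue(struct vb2_buffer *vb)
{
	struct mvx_v4l2_buffer *vbuf = vb2_to_mvx_v4l2_buffer(vb);
	struct mvx_buffer *buf = &vbuf->buf;

	(void)buf; /* pass 'buf' on to the session/firmware layer here */
}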
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_buffer_construct() - Construct MVX V4L2 buffer object.
|
|
+ * @vbuf: Pointer to MVX V4L2 buffer.
|
|
+ * @vsession: Pointer to V4L2 session.
|
|
+ * @dir: Direction of the buffer.
|
|
+ * @nplanes: Number of planes.
|
|
+ * @sgt: Array of pointers to scatter-gather lists. Each SG list
|
|
+ * contains memory pages for a corresponding plane.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_v4l2_buffer_construct(struct mvx_v4l2_buffer *vbuf,
|
|
+ struct mvx_v4l2_session *vsession,
|
|
+ enum mvx_direction dir,
|
|
+ unsigned int nplanes,
|
|
+ struct sg_table **sgt);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_buffer_destruct() - Destruct v4l2 buffer object.
|
|
+ * @vbuf: Pointer to MVX V4L2 buffer.
|
|
+ */
|
|
+void mvx_v4l2_buffer_destruct(struct mvx_v4l2_buffer *vbuf);
|
|
+
|
|
+/**
|
|
+ * mvx_buffer_to_v4l2_buffer() - Cast mvx_buffer to mvx_v4l2_buffer.
|
|
+ * @buf: Pointer MVX buffer.
|
|
+ *
|
|
+ * This function casts a pointer to struct mvx_buffer to a pointer to
|
|
+ * a corresponding struct mvx_v4l2_buffer.
|
|
+ *
|
|
+ * Return: Pointer to corresponding mvx_v4l2_buffer object.
|
|
+ */
|
|
+struct mvx_v4l2_buffer *mvx_buffer_to_v4l2_buffer(struct mvx_buffer *buf);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_buffer_set_status() - Set status for a buffer.
|
|
+ * @vbuf: Pointer to MVX V4L2 buffer.
|
|
+ * @status: Status to set.
|
|
+ *
|
|
+ * Status is a combination of the following flags:
|
|
+ * V4L2_BUF_FLAG_QUEUED,
|
|
+ * V4L2_BUF_FLAG_DONE,
|
|
+ * V4L2_BUF_FLAG_PREPARED,
|
|
+ * V4L2_BUF_FLAG_ERROR
|
|
+ */
|
|
+void mvx_v4l2_buffer_set_status(struct mvx_v4l2_buffer *vbuf,
|
|
+ uint32_t status);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_buffer_get_status() - Get the buffer status.
|
|
+ * @vbuf: Pointer to MVX V4L2 buffer.
|
|
+ *
|
|
+ * Return: Buffer status.
|
|
+ */
|
|
+uint32_t mvx_v4l2_buffer_get_status(struct mvx_v4l2_buffer *vbuf);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_buffer_set() - Copy Vb2 buffer to VBUF.
|
|
+ * @vbuf: Destination MVX V4L2 buffer.
|
|
+ * @b: Source Vb2 buffer.
|
|
+ *
|
|
+ * Copies and validates parameters from 'b' to 'vbuf'.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_v4l2_buffer_set(struct mvx_v4l2_buffer *vbuf,
|
|
+ struct vb2_buffer *b);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_buffer_get() - Copy VBUF to V4L2 buffer.
|
|
+ * @vbuf: Source MVX V4L2 buffer.
|
|
+ * @b: Destination V4L2 buffer.
|
|
+ *
|
|
+ * Copies parameters from 'vbuf' to 'b'.
|
|
+ */
|
|
+void mvx_v4l2_buffer_get(struct mvx_v4l2_buffer *vbuf,
|
|
+ struct v4l2_buffer *b);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_buffer_update() - Update the V4L2 buffer.
|
|
+ * @vbuf: Pointer to MVX V4L2 buffer.
|
|
+ *
|
|
+ * This function copies parameters from the MVX buffer to the V4L2 buffer.
|
|
+ * It also sets the time stamp and validates that the buffer length is correct.
|
|
+ * If an error is detected the buffer length is cleared and the error flag
|
|
+ * is set.
|
|
+ *
|
|
+ * This function should be called after the MVX buffer has changed, for example
|
|
+ * after it has been returned by the firmware or flushed.
|
|
+ *
|
|
+ * Return: VB2_BUF_STATE_DONE on success, else VB2_BUF_STATE_ERROR.
|
|
+ */
|
|
+enum vb2_buffer_state mvx_v4l2_buffer_update(struct mvx_v4l2_buffer *vbuf);
|
|
+
|
|
+#endif /* _MVX_V4L2_BUFFER_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_ctrls.c b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_ctrls.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_ctrls.c
|
|
@@ -0,0 +1,1446 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include "mvx_bitops.h"
|
|
+#include "mvx_v4l2_ctrls.h"
|
|
+#include "mvx_v4l2_session.h"
|
|
+#include "mvx-v4l2-controls.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static functions and variables
|
|
+ ****************************************************************************/
|
|
+
|
|
+/*
|
|
+ * V4L2_CID_MVE_VIDEO_NALU_FORMAT control defines.
|
|
+ */
|
|
+static const char *const nalu_format_str[] = {
|
|
+ "Start codes",
|
|
+ "One nalu per buffer",
|
|
+ "One byte length field",
|
|
+ "Two byte length field",
|
|
+ "Four byte length field"
|
|
+};
|
|
+
|
|
+static const enum mvx_nalu_format mvx_nalu_format_list[] = {
|
|
+ MVX_NALU_FORMAT_START_CODES,
|
|
+ MVX_NALU_FORMAT_ONE_NALU_PER_BUFFER,
|
|
+ MVX_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD,
|
|
+ MVX_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD,
|
|
+ MVX_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD
|
|
+};
|
|
+
|
|
+/*
|
|
+ * V4L2_CID_MVE_VIDEO_H265_PROFILE control defines.
|
|
+ */
|
|
+static const char *const h265_profile_str[] = {
|
|
+ "Main",
|
|
+ "Main still",
|
|
+ "Main intra"
|
|
+};
|
|
+
|
|
+static const int mvx_h265_profile_list[] = {
|
|
+ MVX_PROFILE_H265_MAIN,
|
|
+ MVX_PROFILE_H265_MAIN_STILL,
|
|
+ MVX_PROFILE_H265_MAIN_INTRA
|
|
+};
|
|
+
|
|
+/*
|
|
+ * V4L2_CID_MVE_VIDEO_VC1_PROFILE control defines.
|
|
+ */
|
|
+static const char *const vc1_profile_str[] = {
|
|
+ "Simple",
|
|
+ "Main",
|
|
+ "Advanced"
|
|
+};
|
|
+
|
|
+static const int mvx_vc1_profile_list[] = {
|
|
+ MVX_PROFILE_VC1_SIMPLE,
|
|
+ MVX_PROFILE_VC1_MAIN,
|
|
+ MVX_PROFILE_VC1_ADVANCED
|
|
+};
|
|
+
|
|
+/*
|
|
+ * V4L2_CID_MPEG_VIDEO_H264_PROFILE control defines.
|
|
+ */
|
|
+static const uint8_t h264_profile_list[] = {
|
|
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
|
|
+ V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
|
|
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
|
|
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10
|
|
+};
|
|
+static const uint8_t hevc_profile_list[] = {
|
|
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
|
|
+ //V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE,
|
|
+ //V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10
|
|
+};
|
|
+
|
|
+static const enum mvx_profile mvx_h264_profile_list[] = {
|
|
+ MVX_PROFILE_H264_BASELINE,
|
|
+ MVX_PROFILE_H264_MAIN,
|
|
+ MVX_PROFILE_H264_HIGH,
|
|
+ MVX_PROFILE_H264_HIGH_10
|
|
+};
|
|
+
|
|
+/*
|
|
+ * V4L2_CID_MPEG_VIDEO_H264_LEVEL control defines.
|
|
+ */
|
|
+static uint8_t h264_level_list[] = {
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_1B,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_1_1,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_1_2,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_1_3,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_2_0,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_2_1,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_2_2,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_3_0,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_3_1,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_3_2,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_1,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_0,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1
|
|
+};
|
|
+static uint8_t hevc_level_list[] = {
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_2,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_3,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_4,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_5,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_6,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
|
|
+};
|
|
+
|
|
+static const int mvx_h264_level_list[] = {
|
|
+ MVX_LEVEL_H264_1,
|
|
+ MVX_LEVEL_H264_1b,
|
|
+ MVX_LEVEL_H264_11,
|
|
+ MVX_LEVEL_H264_12,
|
|
+ MVX_LEVEL_H264_13,
|
|
+ MVX_LEVEL_H264_2,
|
|
+ MVX_LEVEL_H264_21,
|
|
+ MVX_LEVEL_H264_22,
|
|
+ MVX_LEVEL_H264_3,
|
|
+ MVX_LEVEL_H264_31,
|
|
+ MVX_LEVEL_H264_32,
|
|
+ MVX_LEVEL_H264_4,
|
|
+ MVX_LEVEL_H264_41,
|
|
+ MVX_LEVEL_H264_42,
|
|
+ MVX_LEVEL_H264_5,
|
|
+ MVX_LEVEL_H264_51
|
|
+};
|
|
+
|
|
+/*
|
|
+ * V4L2_CID_MVE_VIDEO_H265_LEVEL control defines.
|
|
+ */
|
|
+static const char *const h265_level_str[] = {
|
|
+ "Main 1",
|
|
+ "High 1",
|
|
+
|
|
+ "Main 2",
|
|
+ "High 2",
|
|
+ "Main 2.1",
|
|
+ "High 2.1",
|
|
+
|
|
+ "Main 3",
|
|
+ "High 3",
|
|
+ "Main 3.1",
|
|
+ "High 3.1",
|
|
+
|
|
+ "Main 4",
|
|
+ "High 4",
|
|
+ "Main 4.1",
|
|
+ "High 4.1",
|
|
+
|
|
+ "Main 5",
|
|
+ "High 5",
|
|
+ "Main 5.1",
|
|
+ "High 5.1",
|
|
+ "Main 5.2",
|
|
+ "High 5.2",
|
|
+
|
|
+ "Main 6",
|
|
+ "High 6",
|
|
+ "Main 6.1",
|
|
+ "High 6.1",
|
|
+ "Main 6.2",
|
|
+ "High 6.2"
|
|
+};
|
|
+
|
|
+static const int mvx_h265_level_list[] = {
|
|
+ MVX_LEVEL_H265_MAIN_1,
|
|
+ MVX_LEVEL_H265_HIGH_1,
|
|
+
|
|
+ MVX_LEVEL_H265_MAIN_2,
|
|
+ MVX_LEVEL_H265_HIGH_2,
|
|
+ MVX_LEVEL_H265_MAIN_21,
|
|
+ MVX_LEVEL_H265_HIGH_21,
|
|
+
|
|
+ MVX_LEVEL_H265_MAIN_3,
|
|
+ MVX_LEVEL_H265_HIGH_3,
|
|
+ MVX_LEVEL_H265_MAIN_31,
|
|
+ MVX_LEVEL_H265_HIGH_31,
|
|
+
|
|
+ MVX_LEVEL_H265_MAIN_4,
|
|
+ MVX_LEVEL_H265_HIGH_4,
|
|
+ MVX_LEVEL_H265_MAIN_41,
|
|
+ MVX_LEVEL_H265_HIGH_41,
|
|
+
|
|
+ MVX_LEVEL_H265_MAIN_5,
|
|
+ MVX_LEVEL_H265_HIGH_5,
|
|
+ MVX_LEVEL_H265_MAIN_51,
|
|
+ MVX_LEVEL_H265_HIGH_51,
|
|
+ MVX_LEVEL_H265_MAIN_52,
|
|
+ MVX_LEVEL_H265_HIGH_52,
|
|
+
|
|
+ MVX_LEVEL_H265_MAIN_6,
|
|
+ MVX_LEVEL_H265_HIGH_6,
|
|
+ MVX_LEVEL_H265_MAIN_61,
|
|
+ MVX_LEVEL_H265_HIGH_61,
|
|
+ MVX_LEVEL_H265_MAIN_62,
|
|
+ MVX_LEVEL_H265_HIGH_62
|
|
+};
|
|
+
|
|
+/*
|
|
+ * V4L2_CID_MVE_VIDEO_GOP_TYPE control defines.
|
|
+ */
|
|
+static const char *const gop_type_str[] = {
|
|
+ "None",
|
|
+ "Bidirectional",
|
|
+ "Low delay",
|
|
+ "Pyramid"
|
|
+};
|
|
+
|
|
+static const enum mvx_gop_type mvx_gop_type_list[] = {
|
|
+ MVX_GOP_TYPE_NONE,
|
|
+ MVX_GOP_TYPE_BIDIRECTIONAL,
|
|
+ MVX_GOP_TYPE_LOW_DELAY,
|
|
+ MVX_GOP_TYPE_PYRAMID
|
|
+};
|
|
+
|
|
+/*
|
|
+ * V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE control defines.
|
|
+ */
|
|
+#define V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_NONE 2
|
|
+
|
|
+static const uint8_t h264_entropy_mode_list[] = {
|
|
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
|
|
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
|
|
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_NONE
|
|
+};
|
|
+
|
|
+static const enum mvx_entropy_mode mvx_h264_entropy_mode_list[] = {
|
|
+ MVX_ENTROPY_MODE_CAVLC,
|
|
+ MVX_ENTROPY_MODE_CABAC,
|
|
+ MVX_ENTROPY_MODE_NONE
|
|
+};
|
|
+
|
|
+static const char *const h264_entropy_mode_str[] = {
|
|
+ "CAVLC",
|
|
+ "CABAC",
|
|
+ "None"
|
|
+};
|
|
+
|
|
+/*
|
|
+ * V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE controls list.
|
|
+ */
|
|
+static uint8_t multi_slice_mode_list[] = {
|
|
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
|
|
+
|
|
+ /* Misspelling in the header file */
|
|
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB
|
|
+};
|
|
+
|
|
+static const enum mvx_multi_slice_mode mvx_multi_slice_mode_list[] = {
|
|
+ MVX_MULTI_SLICE_MODE_SINGLE,
|
|
+ MVX_MULTI_SLICE_MODE_MAX_MB
|
|
+};
|
|
+
|
|
+/*
|
|
+ * V4L2_CID_MVE_VIDEO_VP9_PROB_UPDATE control defines.
|
|
+ */
|
|
+static const char *const vp9_prob_update_str[] = {
|
|
+ "Disabled",
|
|
+ "Implicit",
|
|
+ "Explicit"
|
|
+};
|
|
+
|
|
+static const enum mvx_vp9_prob_update mvx_vp9_prob_update_list[] = {
|
|
+ MVX_VP9_PROB_UPDATE_DISABLED,
|
|
+ MVX_VP9_PROB_UPDATE_IMPLICIT,
|
|
+ MVX_VP9_PROB_UPDATE_EXPLICIT
|
|
+};
|
|
+
|
|
+/*
|
|
+ * V4L2_CID_MVE_VIDEO_RGB_TO_YUV_MODE control defines.
|
|
+ */
|
|
+static const char *const rgb_to_yuv_mode_str[] = {
|
|
+ "BT601 studio",
|
|
+ "BT601 full",
|
|
+ "BT709 studio",
|
|
+ "BT709 full"
|
|
+};
|
|
+
|
|
+static const enum mvx_rgb_to_yuv_mode mvx_rgb_to_yuv_mode_list[] = {
|
|
+ MVX_RGB_TO_YUV_MODE_BT601_STUDIO,
|
|
+ MVX_RGB_TO_YUV_MODE_BT601_FULL,
|
|
+ MVX_RGB_TO_YUV_MODE_BT709_STUDIO,
|
|
+ MVX_RGB_TO_YUV_MODE_BT709_FULL
|
|
+};
|
|
+
|
|
+/**
|
|
+ * find_idx() - Find index of a value in an array.
|
|
+ * @list: Pointer to an array.
|
|
+ * @size: Size of an array.
|
|
+ * @val: Value to look for.
|
|
+ *
|
|
+ * Return: Index of the first occurrence of 'val' in 'list',
|
|
+ * or -EINVAL when not found.
|
|
+ */
|
|
+static int find_idx(const uint8_t *list,
|
|
+ size_t size,
|
|
+ uint8_t val)
|
|
+{
|
|
+ while (size--)
|
|
+ if (list[size] == val)
|
|
+ return size;
|
|
+
|
|
+ return -EINVAL;
|
|
+}
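A hedged usage sketch of find_idx() with the parallel menu tables defined above (the wrapper function is illustrative only):

/* Translate a V4L2 H.264 profile menu value into the driver's enum. */
static int example_lookup_h264_profile(uint8_t v4l2_profile, enum mvx_profile *out)
{
	int idx = find_idx(h264_profile_list,
			   ARRAY_SIZE(h264_profile_list), v4l2_profile);

	if (idx < 0)
		return idx; /* -EINVAL: value is not part of the menu */

	*out = mvx_h264_profile_list[idx];
	return 0;
}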
|
|
+
|
|
+/**
|
|
+ * set_ctrl() - Callback used by V4L2 framework to set a control.
|
|
+ * @ctrl: V4L2 control.
|
|
+ *
|
|
+ * Return: 0 on success, error code otherwise.
|
|
+ */
|
|
+static int set_ctrl(struct v4l2_ctrl *ctrl)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct mvx_v4l2_session *vsession =
|
|
+ container_of(ctrl->handler, struct mvx_v4l2_session,
|
|
+ v4l2_ctrl);
|
|
+ struct mvx_session *session = &vsession->session;
|
|
+ enum mvx_nalu_format nalu_fmt;
|
|
+ enum mvx_profile mvx_profile;
|
|
+ enum mvx_level mvx_level;
|
|
+ enum mvx_gop_type gop_type;
|
|
+ enum mvx_entropy_mode entropy_mode;
|
|
+ enum mvx_multi_slice_mode multi_slice_mode;
|
|
+ enum mvx_vp9_prob_update vp9_prob_update;
|
|
+ enum mvx_rgb_to_yuv_mode rgb_to_yuv_mode;
|
|
+ int32_t i32_val;
|
|
+ int64_t i64_val;
|
|
+ bool bool_val;
|
|
+ enum mvx_tristate tri_val;
|
|
+ struct mvx_buffer_param_rate_control rate_control_param;
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ switch (ctrl->id) {
|
|
+ case V4L2_CID_MVE_VIDEO_SECURE_VIDEO:
|
|
+ bool_val = *ctrl->p_new.p_s32 != 0;
|
|
+ ret = mvx_session_set_securevideo(session, bool_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_FRAME_RATE:
|
|
+ i64_val = (ctrl->type == V4L2_CTRL_TYPE_INTEGER) ? *ctrl->p_new.p_s32 : *ctrl->p_new.p_s64;
|
|
+ ret = mvx_session_set_frame_rate(session, i64_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
|
|
+ bool_val = *ctrl->p_new.p_s32 != 0;
|
|
+ ret = mvx_session_set_rate_control(session, bool_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_bitrate(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
|
|
+ {
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ rate_control_param.rate_control_mode = session->rc_type;
|
|
+ rate_control_param.target_bitrate = session->target_bitrate;
|
|
+ rate_control_param.maximum_bitrate = session->maximum_bitrate;
|
|
+ if (i32_val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) {
+ rate_control_param.rate_control_mode = MVX_OPT_RATE_CONTROL_MODE_VARIABLE;
+ } else if (i32_val == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) {
+ rate_control_param.rate_control_mode = MVX_OPT_RATE_CONTROL_MODE_CONSTANT;
+ } else if (i32_val == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) {
+ rate_control_param.rate_control_mode = MVX_OPT_RATE_CONTROL_MODE_OFF;
+ }
|
|
+ ret = mvx_session_set_bitrate_control(session, &rate_control_param);
|
|
+ break;
|
|
+ }
|
|
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
|
|
+ {
|
|
+ rate_control_param.rate_control_mode = session->rc_type;
|
|
+ rate_control_param.target_bitrate = session->target_bitrate;
|
|
+ rate_control_param.maximum_bitrate = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_bitrate_control(session, &rate_control_param);
|
|
+ break;
|
|
+ }
|
|
+ case V4L2_CID_MVE_VIDEO_CROP_LEFT:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_crop_left(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_CROP_RIGHT:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_crop_right(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_CROP_TOP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_crop_top(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_CROP_BOTTOM:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_crop_bottom(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_HRD_BUFFER_SIZE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_hrd_buffer_size(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_NALU_FORMAT:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ nalu_fmt = mvx_nalu_format_list[i32_val];
|
|
+ ret = mvx_session_set_nalu_format(session, nalu_fmt);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_STREAM_ESCAPING:
|
|
+ tri_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_stream_escaping(session, tri_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = find_idx(h264_profile_list,
|
|
+ ARRAY_SIZE(h264_profile_list), i32_val);
|
|
+ if (ret == -EINVAL)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ mvx_profile = mvx_h264_profile_list[ret];
|
|
+ ret = mvx_session_set_profile(session,
|
|
+ MVX_FORMAT_H264,
|
|
+ mvx_profile);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = find_idx(hevc_profile_list,
|
|
+ ARRAY_SIZE(hevc_profile_list), i32_val);
|
|
+ if (ret == -EINVAL)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ mvx_profile = mvx_h265_profile_list[ret];
|
|
+ ret = mvx_session_set_profile(session,
|
|
+ MVX_FORMAT_HEVC,
|
|
+ mvx_profile);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_H265_PROFILE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ mvx_profile = mvx_h265_profile_list[i32_val];
|
|
+ ret = mvx_session_set_profile(session,
|
|
+ MVX_FORMAT_HEVC,
|
|
+ mvx_profile);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_VC1_PROFILE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ mvx_profile = mvx_vc1_profile_list[i32_val];
|
|
+ ret = mvx_session_set_profile(session,
|
|
+ MVX_FORMAT_VC1,
|
|
+ mvx_profile);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = find_idx(h264_level_list,
|
|
+ ARRAY_SIZE(h264_level_list), i32_val);
|
|
+ if (ret == -EINVAL)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ mvx_level = mvx_h264_level_list[ret];
|
|
+ ret = mvx_session_set_level(session,
|
|
+ MVX_FORMAT_H264,
|
|
+ mvx_level);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = find_idx(hevc_level_list,
|
|
+ ARRAY_SIZE(hevc_level_list), i32_val);
|
|
+ if (ret == -EINVAL)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ mvx_level = mvx_h265_level_list[ret*2];
|
|
+ ret = mvx_session_set_level(session,
|
|
+ MVX_FORMAT_HEVC,
|
|
+ mvx_level);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_H265_LEVEL:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ mvx_level = mvx_h265_level_list[i32_val];
|
|
+ ret = mvx_session_set_level(session,
|
|
+ MVX_FORMAT_HEVC,
|
|
+ mvx_level);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_IGNORE_STREAM_HEADERS:
|
|
+ tri_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_ignore_stream_headers(session, tri_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_FRAME_REORDERING:
|
|
+ tri_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_frame_reordering(session, tri_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_INTBUF_SIZE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_intbuf_size(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_P_FRAMES:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_p_frames(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_b_frames(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_GOP_TYPE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ gop_type = mvx_gop_type_list[i32_val];
|
|
+ ret = mvx_session_set_gop_type(session, gop_type);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_cyclic_intra_refresh_mb(session,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_CONSTR_IPRED:
|
|
+ tri_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_constr_ipred(session, tri_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_ENTROPY_SYNC:
|
|
+ tri_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_entropy_sync(session, tri_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_TEMPORAL_MVP:
|
|
+ tri_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_temporal_mvp(session, tri_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_TILE_ROWS:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_tile_rows(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_TILE_COLS:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_tile_cols(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_MIN_LUMA_CB_SIZE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_min_luma_cb_size(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_MB_MASK:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_mb_mask(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = find_idx(h264_entropy_mode_list,
|
|
+ ARRAY_SIZE(h264_entropy_mode_list), i32_val);
|
|
+ if (ret == -EINVAL)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ entropy_mode = mvx_h264_entropy_mode_list[ret];
|
|
+ ret = mvx_session_set_entropy_mode(session, entropy_mode);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
|
|
+ mvx_session_set_force_idr(session);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = find_idx(multi_slice_mode_list,
|
|
+ ARRAY_SIZE(multi_slice_mode_list), i32_val);
|
|
+ if (ret == -EINVAL)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ multi_slice_mode = mvx_multi_slice_mode_list[ret];
|
|
+ ret = mvx_session_set_multi_slice_mode(session,
|
|
+ multi_slice_mode);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_multi_slice_max_mb(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_VP9_PROB_UPDATE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ vp9_prob_update = mvx_vp9_prob_update_list[i32_val];
|
|
+ ret = mvx_session_set_vp9_prob_update(session,
|
|
+ vp9_prob_update);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_mv_h_search_range(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_mv_v_search_range(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_BITDEPTH_CHROMA:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_bitdepth_chroma(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_BITDEPTH_LUMA:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_bitdepth_luma(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_FORCE_CHROMA_FORMAT:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_force_chroma_format(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_RGB_TO_YUV_MODE:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ rgb_to_yuv_mode = mvx_rgb_to_yuv_mode_list[i32_val];
|
|
+ ret = mvx_session_set_rgb_to_yuv_mode(session,
|
|
+ rgb_to_yuv_mode);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_BANDWIDTH_LIMIT:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_band_limit(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_CABAC_INIT_IDC:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_cabac_init_idc(session, i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_i_frame_qp(session, MVX_FORMAT_H263,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_p_frame_qp(session, MVX_FORMAT_H263,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_b_frame_qp(session, MVX_FORMAT_H263,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H263_MIN_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_min_qp(session, MVX_FORMAT_H263,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H263_MAX_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_max_qp(session, MVX_FORMAT_H263,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_i_frame_qp(session, MVX_FORMAT_H264,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_p_frame_qp(session, MVX_FORMAT_H264,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_b_frame_qp(session, MVX_FORMAT_H264,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_min_qp(session, MVX_FORMAT_H264,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_max_qp(session, MVX_FORMAT_H264,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_i_frame_qp(session, MVX_FORMAT_HEVC,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_p_frame_qp(session, MVX_FORMAT_HEVC,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_b_frame_qp(session, MVX_FORMAT_HEVC,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_min_qp(session, MVX_FORMAT_HEVC,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_max_qp(session, MVX_FORMAT_HEVC,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_i_frame_qp(session, MVX_FORMAT_VP9,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_p_frame_qp(session, MVX_FORMAT_VP9,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_VPX_B_FRAME_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_b_frame_qp(session, MVX_FORMAT_VP9,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_min_qp(session, MVX_FORMAT_VP9,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ ret = mvx_session_set_max_qp(session, MVX_FORMAT_VP9,
|
|
+ i32_val);
|
|
+ break;
|
|
+ case V4L2_CID_JPEG_RESTART_INTERVAL:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ if (i32_val != -1)
|
|
+ ret = mvx_session_set_resync_interval(session, i32_val);
|
|
+
|
|
+ break;
|
|
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ if (i32_val != 0)
|
|
+ ret = mvx_session_set_jpeg_quality(session, i32_val);
|
|
+
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_WATCHDOG_TIMEOUT:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ if (i32_val != 0)
|
|
+ ret = mvx_session_set_watchdog_timeout(session, i32_val);
|
|
+
|
|
+ break;
|
|
+ case V4L2_CID_MVE_VIDEO_PROFILING:
|
|
+ i32_val = *ctrl->p_new.p_s32;
|
|
+ if (i32_val != 0)
|
|
+ ret = mvx_session_set_profiling(session, i32_val);
|
|
+
|
|
+ break;
|
|
+ }
|
|
+
|
|
+unlock_mutex:
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ return ret;
|
|
+}
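For context, a hedged userspace sketch of driving this callback through VIDIOC_S_CTRL (file descriptor setup assumed; not part of the patch):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Request a target bitrate; the kernel side ends up in set_ctrl() above. */
static int example_set_bitrate(int fd, int bps)
{
	struct v4l2_control ctrl;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE;
	ctrl.value = bps;

	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}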
|
|
+
|
|
+/**
|
|
+ * get_volatile_ctrl() - Get control value.
|
|
+ * @ctrl: V4L2 control.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+static int get_volatile_ctrl(struct v4l2_ctrl *ctrl)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession =
|
|
+ container_of(ctrl->handler, struct mvx_v4l2_session,
|
|
+ v4l2_ctrl);
|
|
+
|
|
+ switch (ctrl->id) {
|
|
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
|
|
+ ctrl->val = vsession->session.port[MVX_DIR_INPUT].buffer_min;
|
|
+ break;
|
|
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
|
|
+ ctrl->val = vsession->session.port[MVX_DIR_OUTPUT].buffer_min;
|
|
+ break;
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Unsupported get control. id=%u.",
|
|
+ ctrl->id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Callbacks required by V4L2 framework to implement controls support.
|
|
+ */
|
|
+static const struct v4l2_ctrl_ops ctrl_ops = {
|
|
+ .g_volatile_ctrl = get_volatile_ctrl,
|
|
+ .s_ctrl = set_ctrl
|
|
+};
|
|
+
|
|
+/**
|
|
+ * get_skip_mask() - Calculate V4L2 menu skip mask.
|
|
+ * @list: Array of menu items.
|
|
+ * @cnt: Number of menu items.
|
|
+ *
|
|
+ * Return: V4L2 menu skip mask.
|
|
+ */
|
|
+static uint64_t get_skip_mask(const uint8_t *list,
|
|
+ size_t cnt)
|
|
+{
|
|
+ uint64_t mask = 0;
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < cnt; ++i)
|
|
+ mvx_set_bit(list[i], &mask);
|
|
+
|
|
+ return ~mask;
|
|
+}
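A small worked example of the skip mask (hypothetical menu, not part of the patch): for a menu where only items 0 and 2 are supported, the mask has bits 0 and 2 cleared and every other bit set, which is what v4l2_ctrl_new_std_menu() expects.

/* Hypothetical two-entry menu used only to illustrate get_skip_mask(). */
static const uint8_t example_menu_items[] = { 0, 2 };

static uint64_t example_skip_mask(void)
{
	/* Returns ~0b101: every menu index except 0 and 2 is skipped. */
	return get_skip_mask(example_menu_items, ARRAY_SIZE(example_menu_items));
}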
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_ctrl_new_custom_int() - Create custom V4L2 integer control.
|
|
+ * @hnd: V4L2 handler.
|
|
+ * @id: Id of a control.
|
|
+ * @name: Name of a control.
|
|
+ * @min: Minimum allowed value.
|
|
+ * @max: Maximum allowed value.
|
|
+ * @def: Default value.
|
|
+ * @step: Step.
|
|
+ *
|
|
+ * Return: Pointer to v4l2_ctrl structure in case of success,
|
|
+ * or NULL in case of failure.
|
|
+ */
|
|
+static struct v4l2_ctrl *mvx_v4l2_ctrl_new_custom_int(
|
|
+ struct v4l2_ctrl_handler *hnd,
|
|
+ int id,
|
|
+ const char *name,
|
|
+ int64_t min,
|
|
+ int64_t max,
|
|
+ int64_t def,
|
|
+ int32_t step)
|
|
+{
|
|
+ struct v4l2_ctrl_config cfg;
|
|
+
|
|
+ memset(&cfg, 0, sizeof(cfg));
|
|
+
|
|
+ cfg.id = id;
|
|
+ cfg.ops = &ctrl_ops;
|
|
+ cfg.type = V4L2_CTRL_TYPE_INTEGER;
|
|
+ cfg.name = name;
|
|
+ cfg.min = min;
|
|
+ cfg.max = max;
|
|
+ cfg.def = def;
|
|
+ cfg.step = step;
|
|
+
|
|
+ return v4l2_ctrl_new_custom(hnd, &cfg, NULL);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_ctrl_new_custom_tristate() - Create custom V4L2 tristate control.
|
|
+ * @hnd: V4L2 handler.
|
|
+ * @id: Id of a control.
|
|
+ * @name: Name of a control.
|
|
+ * @def: Default value.
|
|
+ *
|
|
+ * Return: Pointer to v4l2_ctrl structure in case of success,
|
|
+ * or NULL in case of failure.
|
|
+ */
|
|
+static struct v4l2_ctrl *mvx_v4l2_ctrl_new_custom_tristate(
|
|
+ struct v4l2_ctrl_handler *hnd,
|
|
+ int id,
|
|
+ const char *name,
|
|
+ enum mvx_tristate def)
|
|
+{
|
|
+ struct v4l2_ctrl_config cfg;
|
|
+
|
|
+ memset(&cfg, 0, sizeof(cfg));
|
|
+
|
|
+ cfg.id = id;
|
|
+ cfg.ops = &ctrl_ops;
|
|
+ cfg.type = V4L2_CTRL_TYPE_INTEGER;
|
|
+ cfg.name = name;
|
|
+ cfg.min = -1;
|
|
+ cfg.max = 1;
|
|
+ cfg.def = def;
|
|
+ cfg.step = 1;
|
|
+
|
|
+ return v4l2_ctrl_new_custom(hnd, &cfg, NULL);
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_v4l2_ctrls_init(struct v4l2_ctrl_handler *hnd)
|
|
+{
|
|
+ int ret;
|
|
+ struct v4l2_ctrl_config cfg;
|
|
+ struct v4l2_ctrl *ctrl;
|
|
+
|
|
+ ret = v4l2_ctrl_handler_init(hnd, 128);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_SECURE_VIDEO,
|
|
+ "secure video", 0, 1, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_FRAME_RATE,
|
|
+ "frame rate", 0, 0x10000000, 30 << 16, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_CROP_LEFT,
|
|
+ "bitrate control mode", 0, 10000000, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_CROP_RIGHT,
|
|
+ "bitrate control mode", 0, 10000000, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_CROP_TOP,
|
|
+ "bitrate control mode", 0, 10000000, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_CROP_BOTTOM,
|
|
+ "bitrate control mode", 0, 10000000, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_HRD_BUFFER_SIZE,
|
|
+ "HRD buffer size", 0, 1073741823, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
|
|
+ 0, 1, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std_menu(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
|
|
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CQ, ~7, V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
|
|
+ 1000, 1000000000, 1, 64000);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE,
|
|
+ 1000, 1000000000, 1, 64000);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ memset(&cfg, 0, sizeof(cfg));
|
|
+ cfg.id = V4L2_CID_MVE_VIDEO_NALU_FORMAT;
|
|
+ cfg.ops = &ctrl_ops;
|
|
+ cfg.type = V4L2_CTRL_TYPE_MENU;
|
|
+ cfg.name = "nalu format";
|
|
+ cfg.max = ARRAY_SIZE(nalu_format_str) - 1;
|
|
+ cfg.def = 0;
|
|
+ cfg.qmenu = nalu_format_str;
|
|
+ ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_tristate(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_STREAM_ESCAPING,
|
|
+ "stream escaping", MVX_TRI_UNSET);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std_menu(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
|
|
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10,
|
|
+ get_skip_mask(h264_profile_list,
|
|
+ ARRAY_SIZE(h264_profile_list)),
|
|
+ V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std_menu(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
|
|
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
|
|
+ get_skip_mask(hevc_profile_list,
|
|
+ ARRAY_SIZE(hevc_profile_list)),
|
|
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ memset(&cfg, 0, sizeof(cfg));
|
|
+ cfg.id = V4L2_CID_MVE_VIDEO_H265_PROFILE;
|
|
+ cfg.ops = &ctrl_ops;
|
|
+ cfg.type = V4L2_CTRL_TYPE_MENU;
|
|
+ cfg.name = "h265 profile";
|
|
+ cfg.max = ARRAY_SIZE(h265_profile_str) - 1;
|
|
+ cfg.def = 0;
|
|
+ cfg.qmenu = h265_profile_str;
|
|
+ ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ memset(&cfg, 0, sizeof(cfg));
|
|
+ cfg.id = V4L2_CID_MVE_VIDEO_VC1_PROFILE;
|
|
+ cfg.ops = &ctrl_ops;
|
|
+ cfg.type = V4L2_CTRL_TYPE_MENU;
|
|
+ cfg.name = "vc1 profile";
|
|
+ cfg.max = ARRAY_SIZE(vc1_profile_str) - 1;
|
|
+ cfg.def = 0;
|
|
+ cfg.qmenu = vc1_profile_str;
|
|
+ ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std_menu(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
|
|
+ get_skip_mask(h264_level_list, ARRAY_SIZE(h264_level_list)),
|
|
+ V4L2_MPEG_VIDEO_H264_LEVEL_1_0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std_menu(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
|
|
+ get_skip_mask(hevc_level_list, ARRAY_SIZE(hevc_level_list)),
|
|
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ memset(&cfg, 0, sizeof(cfg));
|
|
+ cfg.id = V4L2_CID_MVE_VIDEO_H265_LEVEL;
|
|
+ cfg.ops = &ctrl_ops;
|
|
+ cfg.type = V4L2_CTRL_TYPE_MENU;
|
|
+ cfg.name = "h265 level";
|
|
+ cfg.max = ARRAY_SIZE(h265_level_str) - 1;
|
|
+ cfg.def = cfg.max;
|
|
+ cfg.qmenu = h265_level_str;
|
|
+ ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_tristate(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_IGNORE_STREAM_HEADERS,
|
|
+ "ignore stream headers", MVX_TRI_UNSET);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_tristate(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_FRAME_REORDERING,
|
|
+ "frame reordering", MVX_TRI_UNSET);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_INTBUF_SIZE,
|
|
+ "internal buffer size", 0, INT_MAX, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_P_FRAMES,
|
|
+ "video P frames", 0, INT_MAX, 30, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_B_FRAMES,
|
|
+ 0, INT_MAX, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ memset(&cfg, 0, sizeof(cfg));
|
|
+ cfg.id = V4L2_CID_MVE_VIDEO_GOP_TYPE;
|
|
+ cfg.ops = &ctrl_ops;
|
|
+ cfg.type = V4L2_CTRL_TYPE_MENU;
|
|
+ cfg.name = "GOP type";
|
|
+ cfg.max = ARRAY_SIZE(gop_type_str) - 1;
|
|
+ cfg.def = 0;
|
|
+ cfg.qmenu = gop_type_str;
|
|
+ ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB,
|
|
+ 0, INT_MAX, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_tristate(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_CONSTR_IPRED,
|
|
+ "constrained intra prediction", MVX_TRI_UNSET);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_tristate(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_ENTROPY_SYNC, "entropy sync",
|
|
+ MVX_TRI_UNSET);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_tristate(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_TEMPORAL_MVP,
|
|
+ "temporal mvp", MVX_TRI_UNSET);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_TILE_ROWS,
|
|
+ "tile rows", 0, 65536, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_TILE_COLS,
|
|
+ "tile columns", 0, 65536, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_MIN_LUMA_CB_SIZE,
|
|
+ "min luma cb size", 8, 16, 8, 8);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ memset(&cfg, 0, sizeof(cfg));
|
|
+ cfg.id = V4L2_CID_MVE_VIDEO_MB_MASK;
|
|
+ cfg.ops = &ctrl_ops;
|
|
+ cfg.type = V4L2_CTRL_TYPE_BITMASK;
|
|
+ cfg.name = "macroblocks mask";
|
|
+ cfg.def = 0x7fff;
|
|
+ cfg.min = 0;
|
|
+ cfg.max = 0x7fff;
|
|
+ cfg.step = 0;
|
|
+ ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ memset(&cfg, 0, sizeof(cfg));
|
|
+ cfg.id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE;
|
|
+ cfg.ops = &ctrl_ops;
|
|
+ cfg.type = V4L2_CTRL_TYPE_MENU;
|
|
+ cfg.name = "H264 Entropy Mode";
|
|
+ cfg.max = ARRAY_SIZE(h264_entropy_mode_str) - 1;
|
|
+ cfg.def = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_NONE;
|
|
+ cfg.qmenu = h264_entropy_mode_str;
|
|
+ ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std_menu(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
|
|
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB,
|
|
+ get_skip_mask(multi_slice_mode_list,
|
|
+ ARRAY_SIZE(multi_slice_mode_list)),
|
|
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
|
|
+ 0, INT_MAX, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ memset(&cfg, 0, sizeof(cfg));
|
|
+ cfg.id = V4L2_CID_MVE_VIDEO_VP9_PROB_UPDATE;
|
|
+ cfg.ops = &ctrl_ops;
|
|
+ cfg.type = V4L2_CTRL_TYPE_MENU;
|
|
+ cfg.name = "VP9 prob update";
|
|
+ cfg.max = ARRAY_SIZE(vp9_prob_update_str) - 1;
|
|
+ cfg.def = cfg.max;
|
|
+ cfg.qmenu = vp9_prob_update_str;
|
|
+ ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE,
|
|
+ 0, INT_MAX, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE,
|
|
+ 0, INT_MAX, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_BITDEPTH_CHROMA,
|
|
+ "bitdepth chroma", 0, 0xff, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_BITDEPTH_LUMA,
|
|
+ "bitdepth luma", 0, 0xff, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_FORCE_CHROMA_FORMAT,
|
|
+ "force chroma format", -1, INT_MAX, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ memset(&cfg, 0, sizeof(cfg));
|
|
+ cfg.id = V4L2_CID_MVE_VIDEO_RGB_TO_YUV_MODE;
|
|
+ cfg.ops = &ctrl_ops;
|
|
+ cfg.type = V4L2_CTRL_TYPE_MENU;
|
|
+ cfg.name = "RGB to YUV conversion mode";
|
|
+ cfg.max = ARRAY_SIZE(rgb_to_yuv_mode_str) - 1;
|
|
+ cfg.def = cfg.max;
|
|
+ cfg.qmenu = rgb_to_yuv_mode_str;
|
|
+ ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_BANDWIDTH_LIMIT,
|
|
+ "bandwidth limit", 0, INT_MAX, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_CABAC_INIT_IDC,
|
|
+ "CABAC init IDC", 0, 4, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_WATCHDOG_TIMEOUT,
|
|
+ "watchdog timeout", 5, 60, 30, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_PROFILING,
|
|
+ "enable profiling", 0, 1, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP,
|
|
+ 0, 31, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP,
|
|
+ 0, 31, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP,
|
|
+ 0, 31, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H263_MIN_QP,
|
|
+ 1, 31, 1, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H263_MAX_QP,
|
|
+ 1, 31, 1, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP,
|
|
+ 0, 51, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
|
|
+ 0, 51, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP,
|
|
+ 0, 51, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
|
|
+ 1, 51, 1, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
|
|
+ 1, 51, 1, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP,
|
|
+ 0, 51, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP,
|
|
+ 0, 51, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP,
|
|
+ 0, 51, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
|
|
+ 1, 51, 1, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP,
|
|
+ 1, 51, 1, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP,
|
|
+ 0, 51, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP,
|
|
+ 0, 51, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = mvx_v4l2_ctrl_new_custom_int(
|
|
+ hnd, V4L2_CID_MVE_VIDEO_VPX_B_FRAME_QP,
|
|
+ "VPx B frame QP value",
|
|
+ 0, 51, 0, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_VPX_MIN_QP,
|
|
+ 1, 51, 1, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_VPX_MAX_QP,
|
|
+ 1, 51, 1, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
|
|
+ 1, 32, 1, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
|
|
+ 1, 32, 1, 1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_JPEG_RESTART_INTERVAL,
|
|
+ -1, 0xffff, 1, -1);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY,
|
|
+ 0, 100, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ctrl = v4l2_ctrl_new_std(
|
|
+ hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME,
|
|
+ 0, 1, 1, 0);
|
|
+ if (ctrl == NULL)
|
|
+ goto handler_free;
|
|
+
|
|
+ ret = v4l2_ctrl_handler_setup(hnd);
|
|
+ if (ret != 0)
|
|
+ goto handler_free;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+handler_free:
|
|
+ v4l2_ctrl_handler_free(hnd);
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+void mvx_v4l2_ctrls_done(struct v4l2_ctrl_handler *hnd)
+{
+ v4l2_ctrl_handler_free(hnd);
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_ctrls.h b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_ctrls.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_ctrls.h
|
|
@@ -0,0 +1,64 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_V4L2_CTRLS_H_
|
|
+#define _MVX_V4L2_CTRLS_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <media/v4l2-ctrls.h>
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_ctrls_init() - Initialize V4L2 control handler.
+ * @hnd: V4L2 control handler.
+ *
+ * This function initializes V4L2 controls for handler @hnd.
+ * Controls are set to their default values.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
|
|
+int mvx_v4l2_ctrls_init(struct v4l2_ctrl_handler *hnd);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_ctrls_done() - Destroy V4L2 control handler.
+ * @hnd: V4L2 control handler.
+ *
+ * This function destroys the V4L2 control handler.
+ */
|
|
+void mvx_v4l2_ctrls_done(struct v4l2_ctrl_handler *hnd);
|
|
+
|
|
+#endif /* _MVX_V4L2_CTRLS_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_fops.c b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_fops.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_fops.c
|
|
@@ -0,0 +1,208 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include <linux/fs.h>
|
|
+#include <media/v4l2-dev.h>
|
|
+#include <media/v4l2-event.h>
|
|
+#include "mvx_ext_if.h"
|
|
+#include "mvx_v4l2_buffer.h"
|
|
+#include "mvx_v4l2_ctrls.h"
|
|
+#include "mvx_v4l2_fops.h"
|
|
+#include "mvx_v4l2_session.h"
|
|
+#include "mvx_v4l2_vidioc.h"
|
|
+#include "mvx_log_group.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions and variables
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_v4l2_open(struct file *file)
|
|
+{
|
|
+ struct mvx_ext_if *ctx = video_drvdata(file);
|
|
+ struct mvx_v4l2_session *session;
|
|
+ struct v4l2_format fmt = { 0 };
|
|
+ int ret;
|
|
+
|
|
+ session = devm_kzalloc(ctx->dev, sizeof(*session), GFP_KERNEL);
|
|
+ if (session == NULL) {
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Failed to allocate V4L2 session.");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ MVX_SESSION_INFO(&session->session, "v4l2: Open device. id=%u.",
|
|
+ ctx->dev->id);
|
|
+
|
|
+ ret = mvx_v4l2_session_construct(session, ctx);
|
|
+ if (ret != 0)
|
|
+ goto free_session;
|
|
+
|
|
+ file->private_data = &session->fh;
|
|
+ v4l2_fh_init(&session->fh, &ctx->vdev);
|
|
+ v4l2_fh_add(&session->fh);
|
|
+
|
|
+ /* Set default port formats. */
|
|
+ fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
|
|
+ fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420M;
|
|
+ fmt.fmt.pix.width = 2;
|
|
+ fmt.fmt.pix.height = 2;
|
|
+ (void)mvx_v4l2_vidioc_s_fmt_vid_out(file, NULL, &fmt);
|
|
+
|
|
+ fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
|
|
+ (void)mvx_v4l2_vidioc_s_fmt_vid_cap(file, NULL, &fmt);
|
|
+
|
|
+ ret = mvx_v4l2_ctrls_init(&session->v4l2_ctrl);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(&session->session,
|
|
+ "Failed to register V4L2 controls handler. ret=%x",
|
|
+ ret);
|
|
+ goto put_session;
|
|
+ }
|
|
+
|
|
+ session->fh.ctrl_handler = &session->v4l2_ctrl;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+put_session:
|
|
+
|
|
+ /*
|
|
+ * Session was completely constructed, so we have to destroy it
|
|
+ * gracefully using reference counting.
|
|
+ */
|
|
+ mvx_session_put(&session->session);
|
|
+ return ret;
|
|
+
|
|
+free_session:
|
|
+ devm_kfree(ctx->dev, session);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_release(struct file *file)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ int i;
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session, "v4l2: Release.");
|
|
+
|
|
+ mutex_lock(&vsession->mutex);
|
|
+
|
|
+ mvx_v4l2_ctrls_done(vsession->fh.ctrl_handler);
|
|
+
|
|
+ for (i = 0; i < MVX_DIR_MAX; i++)
|
|
+ if (vsession->port[i].q_set != false) {
|
|
+ vb2_queue_release(&vsession->port[i].vb2_queue);
|
|
+ vsession->port[i].q_set = false;
|
|
+ }
|
|
+
|
|
+ ret = mvx_session_put(&vsession->session);
|
|
+ if (ret == 0)
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ file->private_data = NULL;
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session, "v4l2: Release exit.");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+unsigned int mvx_v4l2_poll(struct file *file,
|
|
+ struct poll_table_struct *wait)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ unsigned long events = poll_requested_events(wait);
|
|
+ unsigned int revents = 0;
|
|
+
|
|
+ mutex_lock(&vsession->mutex);
|
|
+
|
|
+ if (vsession->session.error != 0) {
|
|
+ revents = POLLERR;
|
|
+ goto unlock_mutex;
|
|
+ }
|
|
+
|
|
+ /* POLLPRI events are handled by Vb2 */
|
|
+ if (vsession->port[MVX_DIR_INPUT].q_set)
|
|
+ revents |= vb2_poll(&vsession->port[MVX_DIR_INPUT].vb2_queue,
|
|
+ file, wait);
|
|
+ if (vsession->port[MVX_DIR_OUTPUT].q_set)
|
|
+ revents |= vb2_poll(&vsession->port[MVX_DIR_OUTPUT].vb2_queue,
|
|
+ file, wait);
|
|
+#ifndef MODULE
|
|
+ MVX_SESSION_VERBOSE(&vsession->session,
|
|
+ "v4l2: Poll. events=0x%lx, revents=0x%x, nevents=%d.",
|
|
+ events, revents, v4l2_event_pending(&vsession->fh));
|
|
+#else
|
|
+ MVX_SESSION_VERBOSE(&vsession->session,
|
|
+ "v4l2: Poll. events=0x%lx, revents=0x%x",
|
|
+ events, revents);
|
|
+#endif
|
|
+unlock_mutex:
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ return revents;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_mmap(struct file *file,
|
|
+ struct vm_area_struct *vma)
|
|
+{
|
|
+ struct mvx_v4l2_session *session = file_to_session(file);
|
|
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
|
|
+ enum mvx_direction dir;
|
|
+ struct mvx_v4l2_port *vport;
|
|
+ struct vb2_queue *q;
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(&session->session,
|
|
+ "v4l2: Memory map. start=0x%08lx, end=0x%08lx, pgoff=0x%08lx, flags=0x%08lx.",
|
|
+ vma->vm_start, vma->vm_end,
|
|
+ vma->vm_pgoff, vma->vm_flags);
|
|
+
|
|
+ if (offset >= DST_QUEUE_OFF_BASE) {
|
|
+ dir = MVX_DIR_OUTPUT;
|
|
+ vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
|
|
+ } else {
|
|
+ dir = MVX_DIR_INPUT;
|
|
+ }
|
|
+
|
|
+ vport = &session->port[dir];
|
|
+ q = &vport->vb2_queue;
|
|
+
|
|
+ ret = vb2_mmap(q, vma);
|
|
+ if (ret != 0) {
|
|
+ MVX_SESSION_WARN(&session->session,
|
|
+ "Failed to memory map buffer. q=%p, pgoff=0x%08lx, dir=%d, ret=%d",
|
|
+ q, vma->vm_pgoff, dir, ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_fops.h b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_fops.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_fops.h
|
|
@@ -0,0 +1,64 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_V4L2_FOPS_H_
|
|
+#define _MVX_V4L2_FOPS_H_
|
|
+
|
|
+/*
|
|
+ * Callbacks for struct v4l2_file_operations.
+ *
+ * Prototypes declared below represent callbacks required by the v4l2 framework.
+ * They are needed to implement certain syscalls.
+ */
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_open() - Callback needed to implement the open() syscall.
|
|
+ */
|
|
+int mvx_v4l2_open(struct file *file);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_release() - Callback needed to implement the release() syscall.
|
|
+ */
|
|
+int mvx_v4l2_release(struct file *file);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_poll() - Callback needed to implement the poll() syscall.
|
|
+ */
|
|
+unsigned int mvx_v4l2_poll(struct file *file,
|
|
+ struct poll_table_struct *wait);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_mmap() - Callback needed to implement the mmap() syscall.
|
|
+ */
|
|
+int mvx_v4l2_mmap(struct file *file,
|
|
+ struct vm_area_struct *vma);
|
|
+
|
|
+#endif /* _MVX_V4L2_FOPS_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_session.c b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_session.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_session.c
|
|
@@ -0,0 +1,620 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/sched.h>
|
|
+#include <media/v4l2-event.h>
|
|
+#include <media/videobuf2-v4l2.h>
|
|
+#include <media/videobuf2-dma-sg.h>
|
|
+#include "mvx_ext_if.h"
|
|
+#include "mvx_seq.h"
|
|
+#include "mvx_v4l2_buffer.h"
|
|
+#include "mvx_v4l2_session.h"
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx-v4l2-controls.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported and static functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+static void set_format(struct v4l2_pix_format_mplane *pix_mp,
|
|
+ unsigned int width,
|
|
+ unsigned int height,
|
|
+ unsigned int num_planes,
|
|
+ unsigned int *sizeimage,
|
|
+ unsigned int *bytesperline)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ pix_mp->width = width;
|
|
+ pix_mp->height = height;
|
|
+ pix_mp->num_planes = num_planes;
|
|
+
|
|
+ for (i = 0; i < num_planes; ++i) {
|
|
+ pix_mp->plane_fmt[i].sizeimage = sizeimage[i];
|
|
+ pix_mp->plane_fmt[i].bytesperline = bytesperline[i];
|
|
+ }
|
|
+}
|
|
+
|
|
+static void v4l2_port_show(struct mvx_v4l2_port *port,
|
|
+ struct seq_file *s)
|
|
+{
|
|
+ mvx_seq_printf(s, "mvx_v4l2_port", 0, "%p\n", port);
|
|
+ mvx_seq_printf(s, "pixelformat", 1, "0x%x\n",
|
|
+ port->pix_mp.pixelformat);
|
|
+ mvx_seq_printf(s, "vb2_queue", 1, "\n");
|
|
+ mvx_seq_printf(s, "memory", 2, "%u\n",
|
|
+ port->vb2_queue.memory);
|
|
+ mvx_seq_printf(s, "min_buffers_needed", 2, "%u\n",
|
|
+ port->vb2_queue.min_buffers_needed);
|
|
+ mvx_seq_printf(s, "num_buffers", 2, "%u\n",
|
|
+ port->vb2_queue.num_buffers);
|
|
+ mvx_seq_printf(s, "queued_count", 2, "%u\n",
|
|
+ port->vb2_queue.queued_count);
|
|
+ mvx_seq_printf(s, "streaming", 2, "%u\n",
|
|
+ port->vb2_queue.streaming);
|
|
+ mvx_seq_printf(s, "error", 2, "%u\n",
|
|
+ port->vb2_queue.error);
|
|
+ mvx_seq_printf(s, "last_buffer_dequeued", 2, "%u\n",
|
|
+ port->vb2_queue.last_buffer_dequeued);
|
|
+}
|
|
+
|
|
+static int port_stat_show(struct seq_file *s,
|
|
+ void *v)
|
|
+{
|
|
+ struct mvx_v4l2_port *vport = s->private;
|
|
+ struct mvx_session_port *sport = vport->port;
|
|
+
|
|
+ mvx_session_port_show(sport, s);
|
|
+ seq_puts(s, "\n");
|
|
+ v4l2_port_show(vport, s);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int port_stat_open(struct inode *inode,
|
|
+ struct file *file)
|
|
+{
|
|
+ return single_open(file, port_stat_show, inode->i_private);
|
|
+}
|
|
+
|
|
+static const struct file_operations port_stat_fops = {
|
|
+ .open = port_stat_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+static int port_debugfs_init(struct device *dev,
|
|
+ unsigned int i,
|
|
+ struct mvx_v4l2_port *vport,
|
|
+ struct mvx_session_port *sport,
|
|
+ struct dentry *parent)
|
|
+{
|
|
+ char name[20];
|
|
+ struct dentry *dentry;
|
|
+
|
|
+ scnprintf(name, sizeof(name), "port%u", i);
|
|
+ vport->dentry = debugfs_create_dir(name, parent);
|
|
+ if (IS_ERR_OR_NULL(vport->dentry))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ dentry = debugfs_create_file("stat", 0400, vport->dentry, vport,
|
|
+ &port_stat_fops);
|
|
+ if (IS_ERR_OR_NULL(dentry))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int session_debugfs_init(struct mvx_v4l2_session *session,
|
|
+ struct dentry *parent)
|
|
+{
|
|
+ int ret;
|
|
+ char name[20];
|
|
+ int i;
|
|
+
|
|
+ scnprintf(name, sizeof(name), "%lx", (unsigned long)(&session->session));
|
|
+ session->dentry = debugfs_create_dir(name, parent);
|
|
+ if (IS_ERR_OR_NULL(session->dentry))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ for (i = 0; i < MVX_DIR_MAX; i++) {
|
|
+ struct mvx_v4l2_port *vport = &session->port[i];
|
|
+ struct mvx_session_port *mport = &session->session.port[i];
|
|
+
|
|
+ ret = port_debugfs_init(session->ext->dev, i, vport, mport,
|
|
+ session->dentry);
|
|
+ if (ret != 0)
|
|
+ goto remove_dentry;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+remove_dentry:
|
|
+ debugfs_remove_recursive(session->dentry);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static struct mvx_v4l2_session *mvx_session_to_v4l2_session(
|
|
+ struct mvx_session *session)
|
|
+{
|
|
+ return container_of(session, struct mvx_v4l2_session, session);
|
|
+}
|
|
+
|
|
+static void free_session(struct mvx_session *session)
|
|
+{
|
|
+ struct mvx_v4l2_session *s = mvx_session_to_v4l2_session(session);
|
|
+
|
|
+ MVX_SESSION_INFO(session, "v4l2: Destroy session.");
|
|
+
|
|
+ mvx_session_destruct(session);
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ debugfs_remove_recursive(s->dentry);
|
|
+
|
|
+ v4l2_fh_del(&s->fh);
|
|
+ v4l2_fh_exit(&s->fh);
|
|
+ if (mutex_is_locked(&s->mutex)) {
|
|
+ mutex_unlock(&s->mutex);
|
|
+ }
|
|
+ devm_kfree(s->ext->dev, s);
|
|
+}
|
|
+#ifdef MODULE
|
|
+static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
|
|
+{
|
|
+ idx += sev->first;
|
|
+ return idx >= sev->elems ? idx - sev->elems : idx;
|
|
+}
|
|
+
|
|
+/* Caller must hold fh->vdev->fh_lock! */
|
|
+static struct v4l2_subscribed_event *v4l2_event_subscribed(
|
|
+ struct v4l2_fh *fh, u32 type, u32 id)
|
|
+{
|
|
+ struct v4l2_subscribed_event *sev;
|
|
+
|
|
+ assert_spin_locked(&fh->vdev->fh_lock);
|
|
+
|
|
+ list_for_each_entry(sev, &fh->subscribed, list)
|
|
+ if (sev->type == type && sev->id == id)
|
|
+ return sev;
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
|
|
+ const struct v4l2_event *ev, u64 ts)
|
|
+{
|
|
+ struct v4l2_subscribed_event *sev;
|
|
+ struct v4l2_kevent *kev;
|
|
+ bool copy_payload = true;
|
|
+
|
|
+ /* Are we subscribed? */
|
|
+ sev = v4l2_event_subscribed(fh, ev->type, ev->id);
|
|
+ if (sev == NULL)
|
|
+ return;
|
|
+
|
|
+ /* Increase event sequence number on fh. */
|
|
+ fh->sequence++;
|
|
+
|
|
+ /* Do we have any free events? */
|
|
+ if (sev->in_use == sev->elems) {
|
|
+ /* no, remove the oldest one */
|
|
+ kev = sev->events + sev_pos(sev, 0);
|
|
+ list_del(&kev->list);
|
|
+ sev->in_use--;
|
|
+ sev->first = sev_pos(sev, 1);
|
|
+ fh->navailable--;
|
|
+ if (sev->elems == 1) {
|
|
+ if (sev->ops && sev->ops->replace) {
|
|
+ sev->ops->replace(&kev->event, ev);
|
|
+ copy_payload = false;
|
|
+ }
|
|
+ } else if (sev->ops && sev->ops->merge) {
|
|
+ struct v4l2_kevent *second_oldest =
|
|
+ sev->events + sev_pos(sev, 0);
|
|
+ sev->ops->merge(&kev->event, &second_oldest->event);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Take one and fill it. */
|
|
+ kev = sev->events + sev_pos(sev, sev->in_use);
|
|
+ kev->event.type = ev->type;
|
|
+ if (copy_payload)
|
|
+ kev->event.u = ev->u;
|
|
+ kev->event.id = ev->id;
|
|
+ kev->ts = ts;
|
|
+ kev->event.sequence = fh->sequence;
|
|
+ sev->in_use++;
|
|
+ list_add_tail(&kev->list, &fh->available);
|
|
+
|
|
+ fh->navailable++;
|
|
+
|
|
+ wake_up_all(&fh->wait);
|
|
+}
|
|
+
|
|
+void spacemit_v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ u64 ts = ktime_get_ns();
|
|
+
|
|
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
|
|
+ __v4l2_event_queue_fh(fh, ev, ts);
|
|
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
|
|
+}
|
|
+void spacemit_vb2_queue_error(struct vb2_queue *q)
+{
+ q->error = 1;
+
+ wake_up_all(&q->done_wq);
+}
|
|
+
|
|
+#endif
|
|
+static void handle_event(struct mvx_session *session,
|
|
+ enum mvx_session_event event,
|
|
+ void *arg)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession =
|
|
+ mvx_session_to_v4l2_session(session);
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session,
|
|
+ "Event. event=%d, arg=%p.", event, arg);
|
|
+
|
|
+ switch (event) {
|
|
+ case MVX_SESSION_EVENT_BUFFER: {
|
|
+ struct mvx_v4l2_buffer *vbuf = mvx_buffer_to_v4l2_buffer(arg);
|
|
+ struct vb2_buffer *vb = &vbuf->vb2_v4l2_buffer.vb2_buf;
|
|
+
|
|
+ /*
|
|
+ * When streaming is stopped we don't always receive all
+ * buffers back from the FW, so we just return them all to Vb2.
+ * If the FW later returns a buffer to us, we could silently
+ * skip it.
+ */
|
|
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
|
|
+ enum vb2_buffer_state state =
|
|
+ mvx_v4l2_buffer_update(vbuf);
|
|
+
|
|
+ vb2_buffer_done(vb, state);
|
|
+ }
|
|
+
|
|
+ if (vbuf->buf.dir == MVX_DIR_OUTPUT && vb->state != VB2_BUF_STATE_QUEUED
|
|
+ && !waitqueue_active(&vb->vb2_queue->done_wq)
|
|
+ && vsession->port[MVX_DIR_INPUT].q_set
|
|
+ && waitqueue_active(&vsession->port[MVX_DIR_INPUT].vb2_queue.done_wq)) {
|
|
+ /* Wake up waiters of the input queue, because the output queue may be reallocated and not registered in the waiters list. */
|
|
+ wake_up(&vsession->port[MVX_DIR_INPUT].vb2_queue.done_wq);
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ case MVX_SESSION_EVENT_PORT_CHANGED: {
|
|
+ enum mvx_direction dir = (enum mvx_direction)arg;
|
|
+ struct mvx_v4l2_port *vport = &vsession->port[dir];
|
|
+ struct mvx_session_port *port = &session->port[dir];
|
|
+ const struct v4l2_event event = {
|
|
+ .type = V4L2_EVENT_SOURCE_CHANGE,
|
|
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION
|
|
+ };
|
|
+ struct v4l2_pix_format_mplane *p = &vport->pix_mp;
|
|
+ p->field = port->interlaced ? V4L2_FIELD_SEQ_TB : V4L2_FIELD_NONE;
|
|
+ set_format(&vport->pix_mp, port->width, port->height,
|
|
+ port->nplanes, port->size, port->stride);
|
|
+#ifndef MODULE
|
|
+ v4l2_event_queue_fh(&vsession->fh, &event);
|
|
+#else
|
|
+ spacemit_v4l2_event_queue_fh(&vsession->fh, &event);
|
|
+#endif
|
|
+ break;
|
|
+ }
|
|
+ case MVX_SESSION_EVENT_COLOR_DESC: {
|
|
+ const struct v4l2_event event = {
|
|
+ .type = V4L2_EVENT_MVX_COLOR_DESC,
|
|
+ };
|
|
+#ifndef MODULE
|
|
+ v4l2_event_queue_fh(&vsession->fh, &event);
|
|
+#else
|
|
+ spacemit_v4l2_event_queue_fh(&vsession->fh, &event);
|
|
+#endif
|
|
+ break;
|
|
+ }
|
|
+ case MVX_SESSION_EVENT_ERROR: {
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < MVX_DIR_MAX; ++i) {
|
|
+ struct vb2_queue *q = &vsession->port[i].vb2_queue;
|
|
+#ifndef MODULE
|
|
+ vb2_queue_error(q);
|
|
+#else
|
|
+ spacemit_vb2_queue_error(q);
|
|
+#endif
|
|
+ }
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ default:
|
|
+ MVX_SESSION_WARN(&vsession->session,
|
|
+ "Unsupported session event. event=%d", event);
|
|
+ }
|
|
+}
|
|
+
|
|
+int mvx_v4l2_session_construct(struct mvx_v4l2_session *vsession,
|
|
+ struct mvx_ext_if *ctx)
|
|
+{
|
|
+ int i;
|
|
+ int ret;
|
|
+
|
|
+ vsession->ext = ctx;
|
|
+ mutex_init(&vsession->mutex);
|
|
+
|
|
+ for (i = 0; i < MVX_DIR_MAX; i++) {
|
|
+ struct mvx_v4l2_port *vport = &vsession->port[i];
|
|
+
|
|
+ vport->port = &vsession->session.port[i];
|
|
+ vport->vsession = vsession;
|
|
+ vport->dir = i;
|
|
+ vport->q_set = false;
|
|
+ }
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ ret = session_debugfs_init(vsession, ctx->dsessions);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = mvx_session_construct(&vsession->session, ctx->dev,
|
|
+ ctx->client_ops, ctx->cache,
|
|
+ &vsession->mutex,
|
|
+ free_session, handle_event,
|
|
+ vsession->dentry);
|
|
+ if (ret != 0)
|
|
+ goto remove_dentry;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+remove_dentry:
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ debugfs_remove_recursive(vsession->dentry);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+struct mvx_v4l2_session *v4l2_fh_to_session(struct v4l2_fh *fh)
|
|
+{
|
|
+ return container_of(fh, struct mvx_v4l2_session, fh);
|
|
+}
|
|
+
|
|
+struct mvx_v4l2_session *file_to_session(struct file *file)
|
|
+{
|
|
+ return v4l2_fh_to_session(file->private_data);
|
|
+}
|
|
+
|
|
+int mvx_v4l2_session_get_color_desc(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_mvx_color_desc *c)
|
|
+{
|
|
+ int ret;
|
|
+ struct mvx_fw_color_desc cd;
|
|
+
|
|
+ ret = mvx_session_get_color_desc(&vsession->session, &cd);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ c->flags = 0;
|
|
+
|
|
+ /* Convert between generic fw_color_desc and V4L2 color desc. */
|
|
+ c->range = cd.range;
|
|
+ c->primaries = cd.primaries;
|
|
+ c->transfer = cd.transfer;
|
|
+ c->matrix = cd.matrix;
|
|
+
|
|
+ if (cd.flags & MVX_FW_COLOR_DESC_DISPLAY_VALID) {
|
|
+ c->flags |= V4L2_MVX_COLOR_DESC_DISPLAY_VALID;
|
|
+ c->display.r.x = cd.display.r.x;
|
|
+ c->display.r.y = cd.display.r.y;
|
|
+ c->display.g.x = cd.display.g.x;
|
|
+ c->display.g.y = cd.display.g.y;
|
|
+ c->display.b.x = cd.display.b.x;
|
|
+ c->display.b.y = cd.display.b.y;
|
|
+ c->display.w.x = cd.display.w.x;
|
|
+ c->display.w.y = cd.display.w.y;
|
|
+ c->display.luminance_min = cd.display.luminance_min;
|
|
+ c->display.luminance_max = cd.display.luminance_max;
|
|
+ }
|
|
+
|
|
+ if (cd.flags & MVX_FW_COLOR_DESC_CONTENT_VALID) {
|
|
+ c->flags |= V4L2_MVX_COLOR_DESC_CONTENT_VALID;
|
|
+ c->content.luminance_max = cd.content.luminance_max;
|
|
+ c->content.luminance_average = cd.content.luminance_average;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_session_set_color_desc(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_mvx_color_desc *c){
|
|
+ int ret;
|
|
+ struct mvx_fw_color_desc cd;
|
|
+ memset(&cd, 0, sizeof(cd));
|
|
+ cd.flags = c->flags;
|
|
+ cd.range = c->range;
|
|
+ cd.matrix = c->matrix;
|
|
+ cd.primaries = c->primaries;
|
|
+ cd.transfer = c->transfer;
|
|
+ cd.aspect_ratio_idc = c->aspect_ratio_idc;
|
|
+ cd.num_units_in_tick = c->num_units_in_tick;
|
|
+ cd.sar_height = c->sar_height;
|
|
+ cd.sar_width = c->sar_width;
|
|
+ cd.video_format = c->video_format;
|
|
+ cd.time_scale = c->time_scale;
|
|
+ cd.content.luminance_average = c->content.luminance_average;
|
|
+ cd.content.luminance_max = c->content.luminance_max;
|
|
+ //memcpy(&cd.display, &c->display, sizeof(c->display));
|
|
+ cd.display.r.x = c->display.r.x;
|
|
+ cd.display.r.y = c->display.r.y;
|
|
+ cd.display.g.x = c->display.g.x;
|
|
+ cd.display.g.y = c->display.g.y;
|
|
+ cd.display.b.x = c->display.b.x;
|
|
+ cd.display.b.y = c->display.b.y;
|
|
+ cd.display.w.x = c->display.w.x;
|
|
+ cd.display.w.y = c->display.w.y;
|
|
+ cd.display.luminance_min = c->display.luminance_min;
|
|
+ cd.display.luminance_max = c->display.luminance_max;
|
|
+ ret = mvx_session_set_color_desc(&vsession->session, &cd);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_session_set_roi_regions(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_mvx_roi_regions *roi)
|
|
+{
|
|
+ int ret;
|
|
+ struct mvx_roi_config roi_regions;
|
|
+ roi_regions.pic_index = roi->pic_index;
|
|
+ roi_regions.num_roi = roi->num_roi;
|
|
+ roi_regions.qp_present = roi->qp_present;
|
|
+ roi_regions.roi_present = roi->roi_present;
|
|
+ roi_regions.qp = roi->qp;
|
|
+
|
|
+ if (roi_regions.roi_present && roi_regions.num_roi > 0) {
|
|
+ int i = 0;
|
|
+ for (;i < roi_regions.num_roi; i++) {
|
|
+ roi_regions.roi[i].mbx_left = roi->roi[i].mbx_left;
|
|
+ roi_regions.roi[i].mbx_right = roi->roi[i].mbx_right;
|
|
+ roi_regions.roi[i].mby_top = roi->roi[i].mby_top;
|
|
+ roi_regions.roi[i].mby_bottom = roi->roi[i].mby_bottom;
|
|
+ roi_regions.roi[i].qp_delta = roi->roi[i].qp_delta;
|
|
+ }
|
|
+ }
|
|
+ ret = mvx_session_set_roi_regions(&vsession->session, &roi_regions);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_session_set_qp_epr(struct mvx_v4l2_session *vsession,
|
|
+ int *qp)
|
|
+{
|
|
+ int ret;
|
|
+ ret = mvx_session_set_qp_epr(&vsession->session, qp);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_session_set_sei_userdata(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_sei_user_data *sei_userdata)
|
|
+{
|
|
+ int ret;
|
|
+ struct mvx_sei_userdata userdata;
|
|
+ userdata.flags = sei_userdata->flags;
|
|
+ userdata.user_data_len = sei_userdata->user_data_len;
|
|
+ memcpy(&userdata.user_data, &sei_userdata->user_data, sizeof(userdata.user_data));
|
|
+ memcpy(&userdata.uuid, &sei_userdata->uuid, sizeof(userdata.uuid));
|
|
+ ret = mvx_session_set_sei_userdata(&vsession->session, &userdata);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_session_set_rate_control(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_rate_control *rc)
|
|
+{
|
|
+ int ret;
|
|
+ struct mvx_buffer_param_rate_control mvx_rc;
|
|
+ mvx_rc.rate_control_mode = rc->rc_type;
|
|
+ mvx_rc.target_bitrate = rc->target_bitrate;
|
|
+ mvx_rc.maximum_bitrate = rc->maximum_bitrate;
|
|
+ ret = mvx_session_set_bitrate_control(&vsession->session, &mvx_rc);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_session_set_dsl_frame(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_mvx_dsl_frame *dsl)
|
|
+{
|
|
+ int ret;
|
|
+ struct mvx_dsl_frame dsl_frame;
|
|
+ dsl_frame.width = dsl->width;
|
|
+ dsl_frame.height = dsl->height;
|
|
+ ret = mvx_session_set_dsl_frame(&vsession->session, &dsl_frame);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_session_set_dsl_ratio(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_mvx_dsl_ratio *dsl)
|
|
+{
|
|
+ int ret;
|
|
+ struct mvx_dsl_ratio dsl_ratio;
|
|
+ dsl_ratio.hor = dsl->hor;
|
|
+ dsl_ratio.ver = dsl->ver;
|
|
+
|
|
+ ret = mvx_session_set_dsl_ratio(&vsession->session, &dsl_ratio);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_session_set_long_term_ref(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_mvx_long_term_ref *ltr)
|
|
+{
|
|
+ int ret;
|
|
+ struct mvx_long_term_ref mvx_ltr;
|
|
+ mvx_ltr.mode = ltr->mode;
|
|
+ mvx_ltr.period = ltr->period;
|
|
+ ret = mvx_session_set_long_term_ref(&vsession->session, &mvx_ltr);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+}
|
|
+
|
|
+int mvx_v4l2_session_set_dsl_mode(struct mvx_v4l2_session *vsession,
|
|
+ int *mode)
|
|
+{
|
|
+ int ret;
|
|
+ ret = mvx_session_set_dsl_mode(&vsession->session, mode);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_session.h b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_session.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_session.h
|
|
@@ -0,0 +1,243 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_V4L2_SESSION_H_
|
|
+#define _MVX_V4L2_SESSION_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/videodev2.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/wait.h>
|
|
+#include <media/v4l2-ctrls.h>
|
|
+#include <media/v4l2-fh.h>
|
|
+#include <media/videobuf2-v4l2.h>
|
|
+
|
|
+#include "mvx_session.h"
|
|
+#include "mvx-v4l2-controls.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * Offset used to distinguish between input and output port.
|
|
+ */
|
|
+#define DST_QUEUE_OFF_BASE (1 << 30)
|
|
+
|
|
+/**
|
|
+ * struct mvx_v4l2_port - V4L2 port type.
+ *
+ * Most of this structure will become redundant when buffer management
+ * is transferred to the Vb2 framework.
+ *
+ * @vsession: Pointer to the corresponding session.
+ * @port: Pointer to the corresponding mvx port.
+ * @dir: Direction of the port.
+ * @type: V4L2 port type.
+ * @pix_mp: V4L2 multi-planar pixel format.
+ * @crop: V4L2 cropping information.
+ * @dentry: Debugfs directory entry for the port.
+ * @q_set: Indicates whether the Vb2 queue has been set up.
+ * @vb2_queue: Vb2 queue.
+ */
|
|
+struct mvx_v4l2_port {
|
|
+ struct mvx_v4l2_session *vsession;
|
|
+ struct mvx_session_port *port;
|
|
+ enum mvx_direction dir;
|
|
+ enum v4l2_buf_type type;
|
|
+ struct v4l2_pix_format_mplane pix_mp;
|
|
+ struct v4l2_rect crop;
|
|
+ struct dentry *dentry;
|
|
+ bool q_set;
|
|
+ struct vb2_queue vb2_queue;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_v4l2_session - V4L2 session type.
|
|
+ * @ext: Pointer to external interface object.
|
|
+ * @fh: V4L2 file handler.
|
|
+ * @mutex: Mutex protecting the session object.
|
|
+ * @session: Session object.
|
|
+ * @port: Array of v4l2 ports.
|
|
+ * @dentry: Debugfs directory entry representing a session.
|
|
+ * @v4l2_ctrl: v4l2 controls handler.
|
|
+ */
|
|
+struct mvx_v4l2_session {
|
|
+ struct mvx_ext_if *ext;
|
|
+ struct v4l2_fh fh;
|
|
+ struct mutex mutex;
|
|
+ struct mvx_session session;
|
|
+ struct mvx_v4l2_port port[MVX_DIR_MAX];
|
|
+ struct dentry *dentry;
|
|
+ struct v4l2_ctrl_handler v4l2_ctrl;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_session_construct() - Construct v4l2 session object.
|
|
+ * @vsession: Pointer to a session object.
|
|
+ * @ctx: Pointer to an external interface object.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_v4l2_session_construct(struct mvx_v4l2_session *vsession,
|
|
+ struct mvx_ext_if *ctx);
|
|
+
|
|
+/**
|
|
+ * v4l2_fh_to_session() - Cast v4l2 file handler to mvx_v4l2_session.
|
|
+ * @fh: v4l2 file handler.
|
|
+ *
|
|
+ * Return: Pointer to a corresponding mvx_v4l2_session object.
|
|
+ */
|
|
+struct mvx_v4l2_session *v4l2_fh_to_session(struct v4l2_fh *fh);
|
|
+
|
|
+/**
|
|
+ * file_to_session() - Cast file object to mvx_v4l2_session.
|
|
+ * @file: Pointer to a file object.
|
|
+ *
|
|
+ * Return: Pointer to a corresponding mvx_v4l2_session object.
|
|
+ */
|
|
+struct mvx_v4l2_session *file_to_session(struct file *file);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_session_get_color_desc() - Get color description.
|
|
+ * @vsession: Pointer to v4l2 session.
|
|
+ * @color_desc: Color description.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_v4l2_session_get_color_desc(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_mvx_color_desc *color_desc);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_session_set_color_desc() - Set color description.
|
|
+ * @vsession: Pointer to v4l2 session.
|
|
+ * @color_desc: Color description.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_v4l2_session_set_color_desc(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_mvx_color_desc *color_desc);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_session_set_sei_userdata() - Set SEI userdata.
|
|
+ * @vsession: Pointer to v4l2 session.
|
|
+ * @sei_userdata: SEI userdata.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_v4l2_session_set_sei_userdata(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_sei_user_data *sei_userdata);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_session_set_roi_regions() - Set ROI regions.
|
|
+ * @vsession: Pointer to v4l2 session.
|
|
+ * @roi: ROI regions.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_v4l2_session_set_roi_regions(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_mvx_roi_regions *roi);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_session_set_qp_epr() - Set qp.
|
|
+ * @vsession: Pointer to v4l2 session.
|
|
+ * @qp: qp value.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_v4l2_session_set_qp_epr(struct mvx_v4l2_session *vsession,
|
|
+ int *qp);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_session_set_rate_control() - Set rate Control.
|
|
+ * @vsession: Pointer to v4l2 session.
|
|
+ * @rc: rate control type.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_v4l2_session_set_rate_control(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_rate_control *rc);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_session_set_dsl_frame() - Set DownScale dst frame.
|
|
+ * @vsession: Pointer to v4l2 session.
|
|
+ * @dsl: DownScale dst frame.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_v4l2_session_set_dsl_frame(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_mvx_dsl_frame *dsl);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_session_set_dsl_ratio() - Set DownScale ratio.
|
|
+ * @vsession: Pointer to v4l2 session.
|
|
+ * @dsl: DownScale ratio.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_v4l2_session_set_dsl_ratio(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_mvx_dsl_ratio *dsl);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_session_set_long_term_ref() - Set long term ref.
|
|
+ * @vsession: Pointer to v4l2 session.
|
|
+ * @ltr: long term ref.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+
|
|
+int mvx_v4l2_session_set_long_term_ref(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_mvx_long_term_ref *ltr);
|
|
+
|
|
+/**
|
|
+ * mvx_v4l2_session_set_dsl_mode() - Set DownScale mode.
+ * @vsession: Pointer to v4l2 session.
+ * @mode: DownScale mode, only enabled in high precision mode.
+ *
+ * Return: 0 on success, else error code.
+ */
|
|
+
|
|
+int mvx_v4l2_session_set_dsl_mode(struct mvx_v4l2_session *vsession,
|
|
+ int *mode);
|
|
+#endif /* _MVX_V4L2_SESSION_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_vidioc.c b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_vidioc.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_vidioc.c
|
|
@@ -0,0 +1,1663 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include <linux/device.h>
|
|
+#include <linux/sched.h>
|
|
+#include <linux/version.h>
|
|
+#include <media/v4l2-ctrls.h>
|
|
+#include <media/v4l2-event.h>
|
|
+#include <media/videobuf2-v4l2.h>
|
|
+#include <media/videobuf2-dma-sg.h>
|
|
+#include "mvx_bitops.h"
|
|
+#include "mvx_ext_if.h"
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_v4l2_buffer.h"
|
|
+#include "mvx_v4l2_session.h"
|
|
+#include "mvx_v4l2_vidioc.h"
|
|
+#include "mvx-v4l2-controls.h"
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct mvx_format_map {
|
|
+ enum mvx_format format;
|
|
+ uint32_t flags;
|
|
+ uint32_t pixelformat;
|
|
+ const char *description;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Static functions and variables
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct mvx_format_map mvx_fmts[] = {
|
|
+ { MVX_FORMAT_AVS,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_AVS,
|
|
+ "AVS" },
|
|
+ { MVX_FORMAT_AVS2,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_AVS2,
|
|
+ "AVS2" },
|
|
+ { MVX_FORMAT_H263,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_H263,
|
|
+ "H.263" },
|
|
+ { MVX_FORMAT_H264,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_H264,
|
|
+ "H.264" },
|
|
+ { MVX_FORMAT_H264,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_H264_MVC,
|
|
+ "H.264 MVC" },
|
|
+ { MVX_FORMAT_H264,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_H264_NO_SC,
|
|
+ "H.264 (No Start Codes)" },
|
|
+ { MVX_FORMAT_HEVC,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_HEVC,
|
|
+ "HEVC" },
|
|
+ { MVX_FORMAT_MPEG2,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_MPEG2,
|
|
+ "MPEG-2 ES" },
|
|
+ { MVX_FORMAT_MPEG4,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_MPEG4,
|
|
+ "MPEG-4 part 2 ES" },
|
|
+ { MVX_FORMAT_RV,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_RV,
|
|
+ "Real Video" },
|
|
+ { MVX_FORMAT_VC1,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_VC1_ANNEX_G,
|
|
+ "VC-1 (SMPTE 412M Annex G)" },
|
|
+ { MVX_FORMAT_VC1,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_VC1_ANNEX_L,
|
|
+ "VC-1 (SMPTE 412M Annex L)" },
|
|
+ { MVX_FORMAT_VP8,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_VP8,
|
|
+ "VP8" },
|
|
+ { MVX_FORMAT_VP9,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_VP9,
|
|
+ "VP9" },
|
|
+ { MVX_FORMAT_JPEG,
|
|
+ V4L2_FMT_FLAG_COMPRESSED,
|
|
+ V4L2_PIX_FMT_JPEG,
|
|
+ "JPEG" },
|
|
+ { MVX_FORMAT_YUV420_AFBC_8,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_YUV420_AFBC_8,
|
|
+ "YUV420 AFBC 8 bit" },
|
|
+ { MVX_FORMAT_YUV420_AFBC_10,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_YUV420_AFBC_10,
|
|
+ "YUV420 AFBC 10 bit" },
|
|
+ { MVX_FORMAT_YUV422_AFBC_8,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_YUV422_AFBC_8,
|
|
+ "YUV422 AFBC 8 bit" },
|
|
+ { MVX_FORMAT_YUV422_AFBC_10,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_YUV422_AFBC_10,
|
|
+ "YUV422 AFBC 10 bit" },
|
|
+ { MVX_FORMAT_YUV420_I420,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_YUV420M,
|
|
+ "Planar YUV 4:2:0 (N-C)" },
|
|
+ { MVX_FORMAT_YUV420_NV12,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_NV12,
|
|
+ "Y/CbCr 4:2:0" },
|
|
+ { MVX_FORMAT_YUV420_NV21,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_NV21,
|
|
+ "Y/CrCb 4:2:0 (N-C)" },
|
|
+ { MVX_FORMAT_YUV420_NV12,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_NV12M,
|
|
+ "Y/CbCr 4:2:0" },
|
|
+ { MVX_FORMAT_YUV420_P010,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_P010,
|
|
+ "YUV 4:2:0 P010 (Microsoft format)" },
|
|
+ { MVX_FORMAT_YUV420_Y0L2,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_Y0L2,
|
|
+ "YUV 4:2:0 Y0L2 (ARM format)" },
|
|
+ { MVX_FORMAT_YUV420_AQB1,
|
|
+ 0,
|
|
+ v4l2_fourcc('Y', '0', 'A', 'B'),
|
|
+ "YUV 4:2:0 AQB1 (ARM format)" },
|
|
+ { MVX_FORMAT_YUV422_YUY2,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_YUYV,
|
|
+ "YYUV 4:2:2" },
|
|
+ { MVX_FORMAT_YUV422_UYVY,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_UYVY,
|
|
+ "UYVY 4:2:2" },
|
|
+ { MVX_FORMAT_YUV422_Y210,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_Y210,
|
|
+ "YUV 4:2:2 Y210 (Microsoft format)" },
|
|
+
|
|
+ /* ARGB */
|
|
+ { MVX_FORMAT_ARGB_8888,
|
|
+ 0,
|
|
+ DRM_FORMAT_BGRA8888, /* Equal to V4L2_PIX_FMT_ARGB32. */
|
|
+ "32-bit ARGB 8-8-8-8" },
|
|
+ { MVX_FORMAT_ARGB_8888,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_RGB32,
|
|
+ "32-bit ARGB 8-8-8-8" },
|
|
+
|
|
+ /* ABGR */
|
|
+ { MVX_FORMAT_ABGR_8888,
|
|
+ 0,
|
|
+ DRM_FORMAT_RGBA8888,
|
|
+ "32-bit ABGR-8-8-8-8" },
|
|
+
|
|
+ /* RGBA */
|
|
+ { MVX_FORMAT_RGBA_8888,
|
|
+ 0,
|
|
+ DRM_FORMAT_ABGR8888,
|
|
+ "32-bit RGBA 8-8-8-8" },
|
|
+
|
|
+ /* BGRA (new and legacy format) */
|
|
+ { MVX_FORMAT_BGRA_8888,
|
|
+ 0,
|
|
+ DRM_FORMAT_ARGB8888, /* Equal to V4L2_PIX_FMT_ABGR32. */
|
|
+ "32-bit BGRA 8-8-8-8" },
|
|
+ { MVX_FORMAT_BGRA_8888,
|
|
+ 0,
|
|
+ V4L2_PIX_FMT_BGR32,
|
|
+ "32-bit BGRA 8-8-8-8" }
|
|
+};
|
|
+
|
|
+/*
|
|
+ * Search for the format map that matches the given pixel format.
|
|
+ */
|
|
+static struct mvx_format_map *mvx_find_format(uint32_t pixelformat)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(mvx_fmts); i++)
|
|
+ if (mvx_fmts[i].pixelformat == pixelformat)
|
|
+ return &mvx_fmts[i];
|
|
+
|
|
+ return ERR_PTR(-EINVAL);
|
|
+}
|
|
+
|
|
+static int to_v4l2_format(struct v4l2_format *f,
|
|
+ enum v4l2_buf_type type,
|
|
+ struct v4l2_pix_format_mplane *pix,
|
|
+ unsigned int *stride,
|
|
+ unsigned int *size,
|
|
+ bool interlaced)
|
|
+{
|
|
+ f->type = type;
|
|
+
|
|
+ switch (f->type) {
|
|
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
|
|
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
|
|
+ struct v4l2_pix_format *p = &f->fmt.pix;
|
|
+ uint32_t i;
|
|
+
|
|
+ p->width = pix->width;
|
|
+ p->height = pix->height;
|
|
+ p->pixelformat = pix->pixelformat;
|
|
+ p->field = interlaced ? V4L2_FIELD_SEQ_TB : V4L2_FIELD_NONE;
|
|
+ p->colorspace = pix->colorspace;
|
|
+ p->flags = pix->flags;
|
|
+ p->ycbcr_enc = pix->ycbcr_enc;
|
|
+ p->quantization = pix->quantization;
|
|
+ p->xfer_func = pix->xfer_func;
|
|
+
|
|
+ p->sizeimage = 0;
|
|
+ p->bytesperline = stride[0];
|
|
+ for (i = 0; i < pix->num_planes; ++i)
|
|
+ p->sizeimage += size[i];
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
|
|
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
|
|
+ struct v4l2_pix_format_mplane *p = &f->fmt.pix_mp;
|
|
+ int i;
|
|
+
|
|
+ memcpy(p, pix, sizeof(*p));
|
|
+ memset(p->reserved, 0, sizeof(p->reserved));
|
|
+ p->field = interlaced ? V4L2_FIELD_SEQ_TB : V4L2_FIELD_NONE;
|
|
+
|
|
+ for (i = 0; i < pix->num_planes; i++) {
|
|
+ p->plane_fmt[i].bytesperline = stride[i];
|
|
+ p->plane_fmt[i].sizeimage = size[i];
|
|
+ memset(p->plane_fmt[i].reserved, 0,
|
|
+ sizeof(p->plane_fmt[i].reserved));
|
|
+ }
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int from_v4l2_format(struct mvx_v4l2_session *vsession,
|
|
+ struct v4l2_format *f,
|
|
+ struct v4l2_pix_format_mplane *pix,
|
|
+ enum mvx_format *format,
|
|
+ unsigned int *stride,
|
|
+ unsigned int *size,
|
|
+ bool *interlaced)
|
|
+{
|
|
+ struct mvx_format_map *map;
|
|
+
|
|
+ switch (f->type) {
|
|
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
|
|
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
|
|
+ struct v4l2_pix_format *p = &f->fmt.pix;
|
|
+
|
|
+ memset(pix, 0, sizeof(*pix));
|
|
+
|
|
+ pix->width = p->width;
|
|
+ pix->height = p->height;
|
|
+ pix->pixelformat = p->pixelformat;
|
|
+ pix->field = p->field;
|
|
+ pix->colorspace = p->colorspace;
|
|
+ pix->flags = p->flags;
|
|
+
|
|
+ if (p->priv != V4L2_PIX_FMT_PRIV_MAGIC) {
|
|
+ pix->ycbcr_enc = V4L2_COLORSPACE_DEFAULT;
|
|
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
|
|
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
|
|
+ }
|
|
+
|
|
+ pix->num_planes = 1;
|
|
+ pix->plane_fmt[0].sizeimage = p->sizeimage;
|
|
+ pix->plane_fmt[0].bytesperline = p->bytesperline;
|
|
+
|
|
+ size[0] = p->sizeimage;
|
|
+ stride[0] = p->bytesperline;
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
|
|
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
|
|
+ struct v4l2_pix_format_mplane *p = &f->fmt.pix_mp;
|
|
+ unsigned int i;
|
|
+
|
|
+ if (p->num_planes > MVX_BUFFER_NPLANES)
|
|
+ MVX_SESSION_WARN(&vsession->session,
|
|
+ "Too many planes for format. format=0x%08x, num_planes=%u.",
|
|
+ pix->pixelformat, p->num_planes);
|
|
+
|
|
+ memcpy(pix, p, sizeof(*pix));
|
|
+
|
|
+ for (i = 0;
|
|
+ i < min_t(unsigned int, MVX_BUFFER_NPLANES, p->num_planes);
|
|
+ i++) {
|
|
+ size[i] = p->plane_fmt[i].sizeimage;
|
|
+ stride[i] = p->plane_fmt[i].bytesperline;
|
|
+ }
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* Adjust default field and color spaces. */
|
|
+
|
|
+ if (pix->field == V4L2_FIELD_SEQ_TB) {
|
|
+ *interlaced = true;
|
|
+ } else {
|
|
+ pix->field = V4L2_FIELD_NONE;
|
|
+ *interlaced = false;
|
|
+ }
|
|
+
|
|
+ if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
|
|
+ pix->colorspace = V4L2_COLORSPACE_REC709;
|
|
+
|
|
+ if (pix->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
|
|
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_709;
|
|
+
|
|
+ if (pix->quantization == V4L2_QUANTIZATION_DEFAULT)
|
|
+ pix->quantization = V4L2_QUANTIZATION_FULL_RANGE;
|
|
+
|
|
+ if (pix->xfer_func == V4L2_XFER_FUNC_DEFAULT)
|
|
+ pix->xfer_func = V4L2_XFER_FUNC_709;
|
|
+
|
|
+ /* Find mapping between pixel format and mvx format. */
|
|
+ map = mvx_find_format(pix->pixelformat);
|
|
+ if (IS_ERR(map)) {
|
|
+ MVX_SESSION_WARN(&vsession->session,
|
|
+ "Unsupported V4L2 pixel format. format=0x%08x.",
|
|
+ pix->pixelformat);
|
|
+ return PTR_ERR(map);
|
|
+ }
|
|
+
|
|
+ *format = map->format;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * print_format() - Print V4L2 format.
|
|
+ * @session: Pointer to MVX session.
|
|
+ * @f: V4L2 format.
|
|
+ * @prefix: Prefix string.
|
|
+ */
|
|
+static void print_format(struct mvx_session *session,
|
|
+ struct v4l2_format *f,
|
|
+ const char *prefix)
|
|
+{
|
|
+ if (V4L2_TYPE_IS_MULTIPLANAR(f->type) != false) {
|
|
+ struct v4l2_pix_format_mplane *p = &f->fmt.pix_mp;
|
|
+
|
|
+ MVX_SESSION_INFO(session,
|
|
+ "v4l2: %s. type=%u, pixelformat=0x%08x, width=%u, height=%u, num_planes=%u.",
|
|
+ prefix,
|
|
+ f->type, p->pixelformat,
|
|
+ p->width, p->height,
|
|
+ p->num_planes);
|
|
+ } else {
|
|
+ struct v4l2_pix_format *p = &f->fmt.pix;
|
|
+
|
|
+ MVX_SESSION_INFO(session,
|
|
+ "v4l2: %s. type=%u, pixelformat=0x%08x, width=%u, height=%u.",
|
|
+ prefix,
|
|
+ f->type, p->pixelformat,
|
|
+ p->width, p->height);
|
|
+ }
|
|
+}
|
|
+
|
|
+/**
|
|
+ * queue_setup() - Initialize or verify queue parameters.
|
|
+ * @q: Videobuf2 queue.
|
|
+ * @buf_cnt: Requested/required buffer count.
|
|
+ * @plane_cnt: Required number of planes.
|
|
+ * @plane_size: Required size of each plane.
|
|
+ * @alloc_devs: Device to allocate memory from.
|
|
+ *
|
|
+ * This callback is used to query parameters of a queue from the driver.
|
|
+ * Vb2 sets buf_cnt to requested amount of buffers, but a driver is free to
|
|
+ * choose another value and return it. Vb2 will then call queue_setup() again
|
|
+ * to verify that the new value is accepted by a driver.
|
|
+ *
|
|
+ * Vb2 also uses plane_cnt parameter to signal if queue_setup() was called
|
|
+ * from create_bufs() or reqbufs().
|
|
+ *
|
|
+ * No locking is required in this function. The reason is that it will be called
|
|
+ * from within vb2_reqbufs() or vb2_create_bufs() which are executed from our
|
|
+ * code with session mutex already taken.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+#if KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE
|
|
+static int queue_setup(struct vb2_queue *q,
|
|
+ unsigned int *buf_cnt,
|
|
+ unsigned int *plane_cnt,
|
|
+ unsigned int plane_size[],
|
|
+ struct device *alloc_devs[])
|
|
+#else
|
|
+static int queue_setup(struct vb2_queue *q,
|
|
+ const void *unused,
|
|
+ unsigned int *buf_cnt,
|
|
+ unsigned int *plane_cnt,
|
|
+ unsigned int plane_size[],
|
|
+ void *alloc_devs[])
|
|
+#endif
|
|
+{
|
|
+ struct mvx_v4l2_port *vport = vb2_get_drv_priv(q);
|
|
+ struct mvx_session_port *port = vport->port;
|
|
+ struct mvx_v4l2_session *vsession = vport->vsession;
|
|
+ struct mvx_session *session = &vsession->session;
|
|
+ unsigned int i;
|
|
+
|
|
+ /*
|
|
+ * If the output frame resolution is not known, then there is no need
|
|
+ * to allocate buffers yet. But 1 buffer will be needed to carry
|
|
+ * information about 'resolution change' and 'end of stream'.
|
|
+ */
|
|
+ if (vport->dir == MVX_DIR_OUTPUT &&
|
|
+ mvx_is_frame(port->format) != false &&
|
|
+ (port->width == 0 || port->height == 0))
|
|
+ *buf_cnt = 1;
|
|
+
|
|
+ memset(plane_size, 0, sizeof(plane_size[0]) * VB2_MAX_PLANES);
|
|
+ *plane_cnt = port->nplanes;
|
|
+ for (i = 0; i < port->nplanes; ++i) {
|
|
+ /* The Vb2 allocator does not handle zero-sized buffers well. */
|
|
+ plane_size[i] = max_t(unsigned int, port->size[i], 1);
|
|
+ alloc_devs[i] = session->dev;
|
|
+ }
|
|
+
|
|
+ MVX_SESSION_VERBOSE(session,
|
|
+ "queue_setup. vsession=%p, vport=%p, vb2_queue=%p, dir=%d, format=0x%x, width=%u, height=%u, nplanes=%u, plane_size=[%u, %u, %u]",
|
|
+ vsession, vport, q, vport->dir, port->format,
|
|
+ port->width, port->height, port->nplanes,
|
|
+ plane_size[0], plane_size[1], plane_size[2]);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * buf_init() - Perform initialization for Vb2 buffer.
|
|
+ * @b: Pointer to Vb2 buffer.
|
|
+ *
|
|
+ * Vb2 framework calls this function once for every allocated buffer.
|
|
+ * A driver fetches a list of memory pages and constructs MVX V4L2 buffers.
|
|
+ *
|
|
+ * No locking is required in this function. The reason is that it will be called
|
|
+ * from within vb2_reqbufs() or vb2_create_bufs() which are executed from our
|
|
+ * code with session mutex already taken.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+static int buf_init(struct vb2_buffer *b)
|
|
+{
|
|
+ struct mvx_v4l2_buffer *vbuf = vb2_to_mvx_v4l2_buffer(b);
|
|
+
|
|
+ int ret;
|
|
+ unsigned int i;
|
|
+ struct sg_table *sgt[MVX_BUFFER_NPLANES] = { 0 };
|
|
+ struct vb2_queue *q = b->vb2_queue;
|
|
+ struct mvx_v4l2_port *vport = vb2_get_drv_priv(q);
|
|
+ struct mvx_v4l2_session *vsession = vport->vsession;
|
|
+ struct mvx_session *session = &vsession->session;
|
|
+ unsigned int flags = vbuf->buf.flags;
|
|
+
|
|
+ MVX_SESSION_VERBOSE(session,
|
|
+ "v4l2: Initialize buffer. vb=%p, type=%u, index=%u, num_planes=%u.",
|
|
+ b, b->type, b->index, b->num_planes);
|
|
+
|
|
+ if (b->num_planes > MVX_BUFFER_NPLANES) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Failed to initialize buffer. Too many planes. vb=%p, num_planes=%u.",
|
|
+ b, b->num_planes);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < b->num_planes; ++i) {
|
|
+ sgt[i] = vb2_dma_sg_plane_desc(b, i);
|
|
+ if (sgt[i] == NULL) {
|
|
+ MVX_SESSION_WARN(session,
|
|
+ "Cannot fetch SG descriptor. vb=%p, plane=%u.",
|
|
+ b, i);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ ret = mvx_v4l2_buffer_construct(vbuf, vsession, vport->dir,
|
|
+ b->num_planes, sgt);
|
|
+
|
|
+ vbuf->buf.flags = flags;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * buf_cleanup() - Destroy data associated to Vb2 buffer.
|
|
+ * @b: Pointer to Vb2 buffer.
|
|
+ *
|
|
+ * Vb2 framework calls this function while destroying a buffer.
|
|
+ */
|
|
+static void buf_cleanup(struct vb2_buffer *b)
|
|
+{
|
|
+ struct vb2_queue *q = b->vb2_queue;
|
|
+ struct mvx_v4l2_port *vport = vb2_get_drv_priv(q);
|
|
+ struct mvx_v4l2_session *vsession = vport->vsession;
|
|
+ struct mvx_session *session = &vsession->session;
|
|
+ struct mvx_v4l2_buffer *vbuf = vb2_to_mvx_v4l2_buffer(b);
|
|
+
|
|
+ if (session->port[vport->dir].stream_on) {
|
|
+ MVX_SESSION_VERBOSE(session,
|
|
+ "v4l2: Cleanup buffer in the coding process. Will remap mve va and pa address. dir=%d, type=%u, index=%u, vb=%p, vbuf=%p.",
|
|
+ vport->dir, b->type, b->index, b, vbuf);
|
|
+ } else {
|
|
+ MVX_SESSION_VERBOSE(session,
|
|
+ "v4l2: Cleanup buffer. type=%u, index=%u, vb=%p, vbuf=%p.",
|
|
+ b->type, b->index, b, vbuf);
|
|
+ }
|
|
+
|
|
+ mvx_v4l2_buffer_destruct(vbuf);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * start_streaming() - Start streaming for queue.
|
|
+ * @q: Pointer to a queue.
|
|
+ * @cnt: Amount of buffers already owned by a driver.
|
|
+ *
|
|
+ * Vb2 calls this function when it is ready to start streaming for a queue.
|
|
+ * Vb2 ensures that minimum required amount of buffers were enqueued to the
|
|
+ * driver before calling this function.
|
|
+ *
|
|
+ * Return: 0 in case of success, error code otherwise.
|
|
+ */
|
|
+static int start_streaming(struct vb2_queue *q,
|
|
+ unsigned int cnt)
|
|
+{
|
|
+ /*
|
|
+ * Parameter cnt is not used so far.
|
|
+ */
|
|
+ struct mvx_v4l2_port *vport = vb2_get_drv_priv(q);
|
|
+ struct mvx_v4l2_session *vsession = vport->vsession;
|
|
+ struct mvx_session *session = &vsession->session;
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_VERBOSE(session,
|
|
+ "v4l2: Start streaming. queue=%p, type=%u, cnt=%u.",
|
|
+ q, q->type, cnt);
|
|
+
|
|
+ ret = mvx_session_streamon(&vsession->session, vport->dir);
|
|
+
|
|
+ /*
|
|
+ * If attempt was not successful, we should return all owned buffers
|
|
+ * to Vb2 with vb2_buffer_done() with state VB2_BUF_STATE_QUEUED.
|
|
+ */
|
|
+ if (ret != 0 && atomic_read(&q->owned_by_drv_count) > 0) {
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < q->num_buffers; ++i)
|
|
+ if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
|
|
+ vb2_buffer_done(q->bufs[i],
|
|
+ VB2_BUF_STATE_QUEUED);
|
|
+
|
|
+ WARN_ON(atomic_read(&q->owned_by_drv_count));
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * stop_streaming() - Stop streaming for a queue.
|
|
+ * @q: Pointer to a queue.
|
|
+ *
|
|
+ * Vb2 calls this function when streaming should be terminated.
|
|
+ * The driver must ensure that no DMA transfers are ongoing and
|
|
+ * return all buffers to Vb2 with vb2_buffer_done().
|
|
+ */
|
|
+static void stop_streaming(struct vb2_queue *q)
|
|
+{
|
|
+ struct mvx_v4l2_port *vport = vb2_get_drv_priv(q);
|
|
+ struct mvx_v4l2_session *vsession = vport->vsession;
|
|
+ struct mvx_session *session = &vsession->session;
|
|
+
|
|
+ MVX_SESSION_VERBOSE(session,
|
|
+ "v4l2: Stop streaming. queue=%p, type=%u.",
|
|
+ q, q->type);
|
|
+
|
|
+ mvx_session_streamoff(&vsession->session, vport->dir);
|
|
+
|
|
+ /*
|
|
+ * We have to return all owned buffers to Vb2 before exiting from
|
|
+ * this callback.
|
|
+ *
|
|
+ * Note: there must be no access to buffers after they are returned.
|
|
+ */
|
|
+ if (atomic_read(&q->owned_by_drv_count) > 0) {
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < q->num_buffers; ++i)
|
|
+ if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
|
|
+ vb2_buffer_done(q->bufs[i],
|
|
+ VB2_BUF_STATE_ERROR);
|
|
+
|
|
+ WARN_ON(atomic_read(&q->owned_by_drv_count));
|
|
+ }
|
|
+}
|
|
+
|
|
+/**
|
|
+ * buf_queue() - Enqueue buffer to a driver.
|
|
+ * @b: Pointer to Vb2 buffer structure.
|
|
+ *
|
|
+ * Vb2 calls this function to enqueue a buffer to a driver.
|
|
+ * A driver should later return a buffer to Vb2 with vb2_buffer_done().
|
|
+ *
|
|
+ * This callback has no return value; a failed buffer is returned to Vb2 via vb2_buffer_done().
|
|
+ */
|
|
+static void buf_queue(struct vb2_buffer *b)
|
|
+{
|
|
+ struct vb2_queue *q = b->vb2_queue;
|
|
+ struct mvx_v4l2_port *vport = vb2_get_drv_priv(q);
|
|
+ struct mvx_session_port *port = vport->port;
|
|
+ enum mvx_direction dir = vport->dir;
|
|
+ struct mvx_v4l2_session *vsession = vport->vsession;
|
|
+ struct mvx_session *session = &vsession->session;
|
|
+ struct mvx_v4l2_buffer *vbuf = vb2_to_mvx_v4l2_buffer(b);
|
|
+
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_VERBOSE(session,
|
|
+ "v4l2: Queue buffer. b=%p, type=%u, index=%u.",
|
|
+ b, b->type, b->index);
|
|
+ vbuf->buf.format = vport->port->format;
|
|
+ ret = mvx_v4l2_buffer_set(vbuf, b);
|
|
+ if (ret != 0) {
|
|
+ goto failed;
|
|
+ }
|
|
+ ret = mvx_session_qbuf(&vsession->session, dir, &vbuf->buf);
|
|
+ if (ret != 0) {
|
|
+ goto failed;
|
|
+ }
|
|
+ return;
|
|
+
|
|
+failed:
|
|
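+ /* If the frame needs reallocation, hand the buffer back to user space as DONE with the realloc flag set instead of reporting an error. */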
+ if (vbuf->buf.flags & MVX_BUFFER_FRAME_NEED_REALLOC) {
|
|
+ vbuf->vb2_v4l2_buffer.flags |= V4L2_BUF_FLAG_MVX_BUFFER_NEED_REALLOC;
|
|
+ port->isreallocting = true;
|
|
+ vb2_buffer_done(b, VB2_BUF_STATE_DONE);
|
|
+ return;
|
|
+ }
|
|
+ vb2_buffer_done(b, VB2_BUF_STATE_ERROR);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * buf_finish() - Finish buffer before it is returned to user space.
|
|
+ * @vb: Pointer to Vb2 buffer structure.
|
|
+ */
|
|
+static void buf_finish(struct vb2_buffer *vb)
|
|
+{
|
|
+ struct mvx_v4l2_port *vport = vb2_get_drv_priv(vb->vb2_queue);
|
|
+ struct mvx_v4l2_buffer *vbuf = vb2_to_mvx_v4l2_buffer(vb);
|
|
+
|
|
+ vport->crop.left = vbuf->buf.crop_left;
|
|
+ vport->crop.top = vbuf->buf.crop_top;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * wait_prepare() - Prepare driver for waiting
|
|
+ * @q: Pointer to Vb2 queue.
|
|
+ *
|
|
+ * Vb2 calls this function when it is about to wait for more buffers to
|
|
+ * be received. A driver should release any locks taken while calling Vb2
|
|
+ * functions.
|
|
+ * This is required to avoid a deadlock.
|
|
+ *
|
|
+ * This callback is not called directly by the driver; it is only invoked from Vb2.
|
|
+ */
|
|
+static void wait_prepare(struct vb2_queue *q)
|
|
+{
|
|
+ struct mvx_v4l2_port *vport = vb2_get_drv_priv(q);
|
|
+ struct mvx_v4l2_session *vsession = vport->vsession;
|
|
+ struct mvx_session *session = &vsession->session;
|
|
+
|
|
+ MVX_SESSION_VERBOSE(session, "v4l2: Wait prepare. queue=%p.", q);
|
|
+
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * wait_finish() - Wake up after sleep.
|
|
+ * @q: Pointer to Vb2 queue.
|
|
+ *
|
|
+ * Requires that the session mutex was released by wait_prepare() beforehand.
|
|
+ *
|
|
+ * This callback is not called directly by the driver; it is only invoked from Vb2.
|
|
+ */
|
|
+static void wait_finish(struct vb2_queue *q)
|
|
+{
|
|
+ struct mvx_v4l2_port *vport = vb2_get_drv_priv(q);
|
|
+ struct mvx_v4l2_session *vsession = vport->vsession;
|
|
+ struct mvx_session *session = &vsession->session;
|
|
+ int ignore;
|
|
+
|
|
+ MVX_SESSION_VERBOSE(session, "v4l2: Wait finish. queue=%p.", q);
|
|
+
|
|
+ /*
|
|
+ * mutex_lock_interruptible is declared with attribute
|
|
+ * warn_unused_result, but we have no way to return a status
|
|
+ * from wait_finish().
|
|
+ */
|
|
+ ignore = mutex_lock_interruptible(&vsession->mutex);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * mvx_vb2_ops - Callbacks for Vb2 framework
|
|
+ * Not all possible callbacks are implemented as some of them are optional.
|
|
+ */
|
|
+const struct vb2_ops mvx_vb2_ops = {
|
|
+ .queue_setup = queue_setup,
|
|
+ .buf_init = buf_init,
|
|
+ .buf_finish = buf_finish,
|
|
+ .buf_cleanup = buf_cleanup,
|
|
+ .start_streaming = start_streaming,
|
|
+ .stop_streaming = stop_streaming,
|
|
+ .buf_queue = buf_queue,
|
|
+ .wait_prepare = wait_prepare,
|
|
+ .wait_finish = wait_finish
|
|
+};
|
|
+
|
|
+/**
|
|
+ * setup_vb2_queue() - Initialize vb2_queue before it can be used by Vb2.
|
|
+ */
|
|
+static int setup_vb2_queue(struct mvx_v4l2_port *vport)
|
|
+{
|
|
+ struct vb2_queue *q = &vport->vb2_queue;
|
|
+#if KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE
|
|
+ struct device *dev = vport->vsession->ext->dev;
|
|
+#endif
|
|
+ int ret;
|
|
+
|
|
+ q->drv_priv = vport;
|
|
+ q->type = vport->type;
|
|
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
|
|
+#if KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE
|
|
+ q->dev = dev;
|
|
+#endif
|
|
+ q->ops = &mvx_vb2_ops;
|
|
+ q->mem_ops = &vb2_dma_sg_memops;
|
|
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
|
|
+ q->allow_zero_bytesused = true;
|
|
+
|
|
+ /* Let Vb2 handle mvx_v4l2_buffer allocations. */
|
|
+ q->buf_struct_size = sizeof(struct mvx_v4l2_buffer);
|
|
+
|
|
+ ret = vb2_queue_init(q);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions and variables
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_v4l2_vidioc_querycap(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_capability *cap)
|
|
+{
|
|
+ struct mvx_v4l2_session *session = file_to_session(file);
|
|
+
|
|
+ MVX_SESSION_INFO(&session->session, "v4l2: Query capabilities.");
|
|
+
|
|
+ strlcpy(cap->driver, "mvx", sizeof(cap->driver));
|
|
+ strlcpy(cap->card, "Linlon Video device", sizeof(cap->card));
|
|
+ strlcpy(cap->bus_info, "platform:mvx", sizeof(cap->bus_info));
|
|
+
|
|
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS |
|
|
+ V4L2_CAP_VIDEO_M2M |
|
|
+ V4L2_CAP_VIDEO_M2M_MPLANE |
|
|
+ V4L2_CAP_EXT_PIX_FORMAT |
|
|
+ V4L2_CAP_STREAMING;
|
|
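+ /* device_caps mirrors capabilities but must not contain V4L2_CAP_DEVICE_CAPS itself. */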
+ cap->device_caps = cap->capabilities & ~V4L2_CAP_DEVICE_CAPS;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Loop over the mvx_fmts searching for pixelformat at offset f->index.
|
|
+ *
|
|
+ * Formats that are not present in the 'formats' bitmask will be skipped.
|
|
+ * Which pixelformat is mapped to which index will consequently depend
|
|
+ * on which mvx_formats are enabled.
|
|
+ */
|
|
+static int mvx_v4l2_vidioc_enum_fmt_vid(struct mvx_v4l2_session *session,
|
|
+ struct v4l2_fmtdesc *f,
|
|
+ enum mvx_direction dir)
|
|
+{
|
|
+ uint64_t formats;
|
|
+ int index;
|
|
+ int i;
|
|
+
|
|
+ mvx_session_get_formats(&session->session, dir, &formats);
|
|
+
|
|
+ for (i = 0, index = 0; i < ARRAY_SIZE(mvx_fmts); i++)
|
|
+ if (mvx_test_bit(mvx_fmts[i].format, &formats)) {
|
|
+ if (f->index == index) {
|
|
+ f->flags = mvx_fmts[i].flags;
|
|
+ f->pixelformat = mvx_fmts[i].pixelformat;
|
|
+ strlcpy(f->description, mvx_fmts[i].description,
|
|
+ sizeof(f->description));
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ index++;
|
|
+ }
|
|
+
|
|
+ if (i >= ARRAY_SIZE(mvx_fmts))
|
|
+ return -EINVAL;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_enum_fmt_vid_cap(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_fmtdesc *f)
|
|
+{
|
|
+ struct mvx_v4l2_session *session = file_to_session(file);
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_v4l2_vidioc_enum_fmt_vid(session, f, MVX_DIR_OUTPUT);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_enum_fmt_vid_out(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_fmtdesc *f)
|
|
+{
|
|
+ struct mvx_v4l2_session *session = file_to_session(file);
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_v4l2_vidioc_enum_fmt_vid(session, f, MVX_DIR_INPUT);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_enum_framesizes(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_frmsizeenum *fsize)
|
|
+{
|
|
+ struct mvx_format_map *format;
|
|
+
|
|
+ /* Verify that format is supported. */
|
|
+ format = mvx_find_format(fsize->pixel_format);
|
|
+ if (IS_ERR(format))
|
|
+ return PTR_ERR(format);
|
|
+
|
|
+ /* For stepwise/continuous frame size the index must be 0. */
|
|
+ if (fsize->index != 0)
|
|
+ return -EINVAL;
|
|
+
|
|
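+ /* Report a stepwise range: even widths and heights from 2x2 up to 8192x8192. */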
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
|
|
+ fsize->stepwise.min_width = 2;
|
|
+ fsize->stepwise.max_width = 8192;
|
|
+ fsize->stepwise.step_width = 2;
|
|
+ fsize->stepwise.min_height = 2;
|
|
+ fsize->stepwise.max_height = 8192;
|
|
+ fsize->stepwise.step_height = 2;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mvx_v4l2_vidioc_g_fmt_vid(struct file *file,
|
|
+ struct v4l2_format *f,
|
|
+ enum mvx_direction dir)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ struct mvx_v4l2_port *vport = &vsession->port[dir];
|
|
+ struct mvx_session_port *port = &vsession->session.port[dir];
|
|
+ int ret;
|
|
+
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ to_v4l2_format(f, f->type, &vport->pix_mp, port->stride, port->size,
|
|
+ port->interlaced);
|
|
+
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ print_format(&vsession->session, f, "Get format");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_g_fmt_vid_cap(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_format *f)
|
|
+{
|
|
+ return mvx_v4l2_vidioc_g_fmt_vid(file, f, MVX_DIR_OUTPUT);
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_g_fmt_vid_out(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_format *f)
|
|
+{
|
|
+ return mvx_v4l2_vidioc_g_fmt_vid(file, f, MVX_DIR_INPUT);
|
|
+}
|
|
+
|
|
+static int mvx_v4l2_vidioc_s_fmt_vid(struct file *file,
|
|
+ struct v4l2_format *f,
|
|
+ enum mvx_direction dir)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ struct mvx_v4l2_port *vport = &vsession->port[dir];
|
|
+ struct v4l2_pix_format_mplane pix_mp;
|
|
+ enum mvx_format format;
|
|
+ unsigned int stride[MVX_BUFFER_NPLANES];
|
|
+ unsigned int size[MVX_BUFFER_NPLANES];
|
|
+ bool interlaced = false;
|
|
+ int ret;
|
|
+
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ if (vport->q_set != false && vb2_is_busy(&vport->vb2_queue) != false) {
|
|
+ MVX_SESSION_WARN(&vsession->session,
|
|
+ "Can't set format when there there buffers allocated to the port.");
|
|
+ ret = -EBUSY;
|
|
+ goto unlock_mutex;
|
|
+ }
|
|
+
|
|
+ /* Convert V4L2 format to V4L2 multi planar pixel format. */
|
|
+ ret = from_v4l2_format(vsession, f, &pix_mp, &format, stride, size,
|
|
+ &interlaced);
|
|
+ if (ret != 0)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ /* Validate and adjust settings. */
|
|
+ ret = mvx_session_set_format(&vsession->session, dir, format,
|
|
+ &pix_mp.width, &pix_mp.height,
|
|
+ &pix_mp.num_planes,
|
|
+ stride, size, &interlaced);
|
|
+ if (ret != 0)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ /* Convert V4L2 multi planar pixel format to format. */
|
|
+ ret = to_v4l2_format(f, f->type, &pix_mp, stride, size, interlaced);
|
|
+ if (ret != 0)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ vport->type = f->type;
|
|
+ vport->pix_mp = pix_mp;
|
|
+
|
|
+unlock_mutex:
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ print_format(&vsession->session, f, "Set format");
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_s_fmt_vid_cap(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_format *f)
|
|
+{
|
|
+ return mvx_v4l2_vidioc_s_fmt_vid(file, f, MVX_DIR_OUTPUT);
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_s_fmt_vid_out(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_format *f)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ struct v4l2_pix_format_mplane *in =
|
|
+ &vsession->port[MVX_DIR_INPUT].pix_mp;
|
|
+ struct v4l2_pix_format_mplane *out =
|
|
+ &vsession->port[MVX_DIR_OUTPUT].pix_mp;
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_v4l2_vidioc_s_fmt_vid(file, f, MVX_DIR_INPUT);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ /* Copy input formats to output port. */
|
|
+ out->colorspace = in->colorspace;
|
|
+ out->ycbcr_enc = in->ycbcr_enc;
|
|
+ out->quantization = in->quantization;
|
|
+ out->xfer_func = in->xfer_func;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mvx_v4l2_vidioc_try_fmt_vid(struct file *file,
|
|
+ struct v4l2_format *f,
|
|
+ enum mvx_direction dir)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ struct v4l2_pix_format_mplane pix;
|
|
+ enum mvx_format format;
|
|
+ unsigned int stride[MVX_BUFFER_NPLANES];
|
|
+ unsigned int size[MVX_BUFFER_NPLANES];
|
|
+ bool interlaced = false;
|
|
+ int ret;
|
|
+
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = from_v4l2_format(vsession, f, &pix, &format, stride, size,
|
|
+ &interlaced);
|
|
+ if (ret != 0)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ ret = mvx_session_try_format(&vsession->session, dir, format,
|
|
+ &pix.width, &pix.height, &pix.num_planes,
|
|
+ stride, size, &interlaced);
|
|
+ if (ret != 0)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ ret = to_v4l2_format(f, f->type, &pix, stride, size, interlaced);
|
|
+ if (ret != 0)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+unlock_mutex:
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ print_format(&vsession->session, f, "Try format");
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_try_fmt_vid_cap(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_format *f)
|
|
+{
|
|
+ return mvx_v4l2_vidioc_try_fmt_vid(file, f, MVX_DIR_OUTPUT);
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_try_fmt_vid_out(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_format *f)
|
|
+{
|
|
+ return mvx_v4l2_vidioc_try_fmt_vid(file, f, MVX_DIR_INPUT);
|
|
+}
|
|
+int mvx_v4l2_vidioc_g_crop(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_crop *a)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(a->type) ?
|
|
+ MVX_DIR_INPUT : MVX_DIR_OUTPUT;
|
|
+ struct mvx_v4l2_port *vport = &vsession->port[dir];
|
|
+ struct mvx_session_port *port = &vsession->session.port[dir];
|
|
+
|
|
+ mutex_lock(&vsession->mutex);
|
|
+
|
|
+ a->c.left = vport->crop.left;
|
|
+ a->c.top = vport->crop.top;
|
|
+ a->c.width = port->width - vport->crop.left;
|
|
+ a->c.height = port->height - vport->crop.top;
|
|
+
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session,
|
|
+ "v4l2: Get crop. dir=%u, crop={left=%u, top=%u, width=%u, height=%u.",
|
|
+ dir, a->c.left, a->c.top, a->c.width, a->c.height);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+int mvx_v4l2_vidioc_g_selection(struct file *file, void *fh,
|
|
+ struct v4l2_selection *s)
|
|
+{
|
|
+ struct v4l2_crop crop = { .type = s->type };
|
|
+ int ret;
|
|
+ ret = mvx_v4l2_vidioc_g_crop(file, fh, &crop);
|
|
+ if (ret == 0)
|
|
+ {
|
|
+ s->r = crop.c;
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+int mvx_v4l2_vidioc_s_selection(struct file *file, void *fh,
|
|
+ struct v4l2_selection *s)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ int ret = 0;
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ ret = mvx_session_set_crop_left(&vsession->session, s->r.left);
|
|
+ if (ret == 0)
+  ret = mvx_session_set_crop_top(&vsession->session, s->r.top);
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_s_parm(struct file *file, void *fh,
|
|
+ struct v4l2_streamparm *a)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ int ret = 0;
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ if (V4L2_TYPE_IS_OUTPUT(a->type)) {
|
|
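+ /* Convert timeperframe to a frame rate in Q16.16 fixed point (fps = denominator / numerator). */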
+ int64_t framerate = ((int64_t)a->parm.output.timeperframe.denominator << 16)/a->parm.output.timeperframe.numerator;
|
|
+ ret = mvx_session_set_frame_rate(&vsession->session, framerate);
|
|
+ }
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+ return ret;
|
|
+}
|
|
+int mvx_v4l2_vidioc_g_parm(struct file *file, void *fh,
|
|
+ struct v4l2_streamparm *a)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ int ret = 0;
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+ if (V4L2_TYPE_IS_OUTPUT(a->type)) {
|
|
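+ /* frame_rate is kept in Q16.16 fixed point, so timeperframe is reported as (1 << 16) / frame_rate. */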
+ a->parm.output.timeperframe.numerator = 1 << 16;
|
|
+ a->parm.output.timeperframe.denominator = (int32_t)vsession->session.frame_rate;
|
|
+ }
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_streamon(struct file *file,
|
|
+ void *priv,
|
|
+ enum v4l2_buf_type type)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(type) ?
|
|
+ MVX_DIR_INPUT : MVX_DIR_OUTPUT;
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session, "v4l2: Stream on. dir=%u.", dir);
|
|
+
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = vb2_streamon(&vsession->port[dir].vb2_queue, type);
|
|
+ if (ret != 0)
|
|
+ MVX_SESSION_WARN(&vsession->session,
|
|
+ "v4l2: Failed to stream on. dir=%u.", dir);
|
|
+
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_streamoff(struct file *file,
|
|
+ void *priv,
|
|
+ enum v4l2_buf_type type)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(type) ?
|
|
+ MVX_DIR_INPUT : MVX_DIR_OUTPUT;
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session, "v4l2: Stream off. dir=%u.", dir);
|
|
+
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = vb2_streamoff(&vsession->port[dir].vb2_queue, type);
|
|
+ if (ret != 0)
|
|
+ MVX_SESSION_WARN(&vsession->session,
|
|
+ "v4l2: Failed to stream off. dir=%u.", dir);
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session,
|
|
+ "v4l2: Stream off exit. dir=%u, ret=%d.",
|
|
+ dir, ret);
|
|
+
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_encoder_cmd(struct file *file,
|
|
+ void *priv,
|
|
+ struct v4l2_encoder_cmd *cmd)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session, "v4l2: encoder cmd: %u.",
|
|
+ cmd->cmd);
|
|
+
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ switch (cmd->cmd) {
|
|
+ case V4L2_ENC_CMD_STOP:
|
|
+ ret = mvx_session_send_eos(&vsession->session);
|
|
+ break;
|
|
+ case V4L2_ENC_CMD_START:
|
|
+ /* Reset the flag in the V4L2 core so that buffers can be queued normally. */
|
|
+ vsession->port[1].vb2_queue.last_buffer_dequeued = false;
|
|
+ ret = 0;
|
|
+ break;
|
|
+ default:
|
|
+ MVX_SESSION_WARN(&vsession->session,
|
|
+ "Unsupported command. cmd: %u.", cmd->cmd);
|
|
+ ret = -EINVAL;
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_try_encoder_cmd(struct file *file,
|
|
+ void *priv,
|
|
+ struct v4l2_encoder_cmd *cmd)
|
|
+{
|
|
+ switch (cmd->cmd) {
|
|
+ case V4L2_ENC_CMD_STOP:
|
|
+ case V4L2_ENC_CMD_START:
|
|
+ return 0;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_decoder_cmd(struct file *file,
|
|
+ void *priv,
|
|
+ struct v4l2_decoder_cmd *cmd)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session, "v4l2: decoder cmd: %u.",
|
|
+ cmd->cmd);
|
|
+
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ switch (cmd->cmd) {
|
|
+ case V4L2_DEC_CMD_STOP:
|
|
+ ret = mvx_session_send_eos(&vsession->session);
|
|
+ break;
|
|
+ case V4L2_DEC_CMD_START:
|
|
+ /* Reset the flag in the V4L2 core so that buffers can be queued normally. */
|
|
+ vsession->port[1].vb2_queue.last_buffer_dequeued = false;
|
|
+ ret = 0;
|
|
+ break;
|
|
+ default:
|
|
+ MVX_SESSION_WARN(&vsession->session,
|
|
+ "Unsupported command. cmd: %u.", cmd->cmd);
|
|
+ ret = -EINVAL;
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_try_decoder_cmd(struct file *file,
|
|
+ void *priv,
|
|
+ struct v4l2_decoder_cmd *cmd)
|
|
+{
|
|
+ switch (cmd->cmd) {
|
|
+ case V4L2_DEC_CMD_STOP:
|
|
+ case V4L2_DEC_CMD_START:
|
|
+ return 0;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_reqbufs(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_requestbuffers *b)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(b->type) ?
|
|
+ MVX_DIR_INPUT : MVX_DIR_OUTPUT;
|
|
+ struct mvx_v4l2_port *vport = &vsession->port[dir];
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session,
|
|
+ "v4l2: Request buffers. dir=%d, type=%u, memory=%u, count=%u.",
|
|
+ dir, b->type, b->memory, b->count);
|
|
+
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ if (b->count == 0) {
|
|
+ if (vport->q_set != false) {
|
|
+ vb2_queue_release(&vport->vb2_queue);
|
|
+ vport->q_set = false;
|
|
+ }
|
|
+ } else {
|
|
+ if (vport->q_set == false) {
|
|
+ ret = setup_vb2_queue(vport);
|
|
+ if (ret != 0)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ vport->q_set = true;
|
|
+ }
|
|
+
|
|
+ ret = vb2_reqbufs(&vport->vb2_queue, b);
|
|
+ }
|
|
+ vport->port->buffer_allocated = b->count;
|
|
+unlock_mutex:
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_create_bufs(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_create_buffers *b)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(b->format.type) ?
|
|
+ MVX_DIR_INPUT : MVX_DIR_OUTPUT;
|
|
+ struct mvx_v4l2_port *vport = &vsession->port[dir];
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session,
|
|
+ "v4l2: Create buffers. dir=%d, type=%u, memory=%u, count=%u.",
|
|
+ dir, b->format.type, b->memory, b->count);
|
|
+
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ if (vport->q_set == false)
|
|
+ ret = setup_vb2_queue(vport);
|
|
+
|
|
+ if (ret != 0)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ vport->q_set = true;
|
|
+#ifndef MODULE
|
|
+ ret = vb2_create_bufs(&vport->vb2_queue, b);
|
|
+#endif
|
|
+ vport->port->buffer_allocated = b->count;
|
|
+unlock_mutex:
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_querybuf(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_buffer *b)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(b->type) ?
|
|
+ MVX_DIR_INPUT : MVX_DIR_OUTPUT;
|
|
+ struct mvx_v4l2_port *vport = &vsession->port[dir];
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session,
|
|
+ "v4l2: Query buffer. dir=%d, type=%u, memory=%u, index=%u.",
|
|
+ dir, b->type, b->memory, b->index);
|
|
+
|
|
+ ret = mutex_lock_interruptible(&vsession->mutex);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = vb2_querybuf(&vport->vb2_queue, b);
|
|
+ if (ret != 0)
|
|
+ goto unlock_mutex;
|
|
+
|
|
+ /*
|
|
+ * When user space wants to mmap() a buffer, we have to be able to
|
|
+ * determine the direction of the corresponding port. To make it easier we
|
|
+ * adjust mem_offset on output port by DST_QUEUE_OFF_BASE for all
|
|
+ * buffers.
|
|
+ */
|
|
+ if (dir == MVX_DIR_OUTPUT) {
|
|
+ if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < b->length; ++i)
|
|
+ b->m.planes[i].m.mem_offset +=
|
|
+ DST_QUEUE_OFF_BASE;
|
|
+ } else {
|
|
+ b->m.offset += DST_QUEUE_OFF_BASE;
|
|
+ }
|
|
+ }
|
|
+
|
|
+unlock_mutex:
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_qbuf(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_buffer *b)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(b->type) ?
|
|
+ MVX_DIR_INPUT : MVX_DIR_OUTPUT;
|
|
+ struct mvx_v4l2_port *vport = &vsession->port[dir];
|
|
+ struct mvx_v4l2_buffer *vbuf;
|
|
+ struct mvx_buffer *buf;
|
|
+ struct vb2_buffer *vb;
|
|
+ struct v4l2_core_buffer_header_general *v4l2_general;
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session,
|
|
+ "v4l2: Queue buffer. dir=%d, type=%u, index=%u, flags=0x%x.",
|
|
+ dir, b->type, b->index, b->flags);
|
|
+
|
|
+ mutex_lock(&vsession->mutex);
|
|
+
|
|
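+ /* For EPR buffers the general buffer header is passed in plane 0 reserved words and copied into the MVX buffer. */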
+ if ((b->flags & V4L2_BUF_FLAG_MVX_BUFFER_EPR) == V4L2_BUF_FLAG_MVX_BUFFER_EPR ){
|
|
+ vb = vport->vb2_queue.bufs[b->index];
|
|
+ vbuf = vb2_to_mvx_v4l2_buffer(vb);
|
|
+ buf = &vbuf->buf;
|
|
+ v4l2_general = (struct v4l2_core_buffer_header_general *)&b->m.planes[0].reserved[0];
|
|
+ buf->general.header.buffer_size = v4l2_general->buffer_size;
|
|
+ buf->general.header.config_size = v4l2_general->config_size;
|
|
+ buf->general.header.type = v4l2_general->type;
|
|
+
|
|
+ memcpy(&buf->general.config.block_configs, &v4l2_general->config, sizeof(v4l2_general->config));
|
|
+ MVX_SESSION_INFO(&vsession->session,
|
|
+ "v4l2: Queue buffer. type:%d, config size:%d, buffer size:%d, cfg_type:0x%x, cols and rows:%d, %d",
|
|
+ v4l2_general->type ,v4l2_general->config_size, v4l2_general->buffer_size,
|
|
+ v4l2_general->config.blk_cfg_type,v4l2_general->config.blk_cfgs.rows_uncomp.n_cols_minus1,
|
|
+ v4l2_general->config.blk_cfgs.rows_uncomp.n_rows_minus1);
|
|
+ }
|
|
+
|
|
+ if (dir == MVX_DIR_INPUT && V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
|
|
+ vb = vport->vb2_queue.bufs[b->index];
|
|
+ vbuf = vb2_to_mvx_v4l2_buffer(vb);
|
|
+ buf = &vbuf->buf;
|
|
+
|
|
+ buf->flags = 0;
|
|
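+ /* Translate the rotation flags passed in reserved2 into MVX frame buffer flags. */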
+ if ((b->reserved2 & V4L2_BUF_FRAME_FLAG_ROTATION_90) == V4L2_BUF_FRAME_FLAG_ROTATION_90) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_ROTATION_90;
|
|
+ }
|
|
+ if ((b->reserved2 & V4L2_BUF_FRAME_FLAG_ROTATION_180) == V4L2_BUF_FRAME_FLAG_ROTATION_180) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_ROTATION_180;
|
|
+ }
|
|
+ if ((b->reserved2 & V4L2_BUF_FRAME_FLAG_ROTATION_270) == V4L2_BUF_FRAME_FLAG_ROTATION_270) {
|
|
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_ROTATION_270;
|
|
+ }
|
|
+ }
|
|
+ ret = vb2_qbuf(&vport->vb2_queue, NULL, b);
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_dqbuf(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_buffer *b)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ struct mvx_ext_if *ctx = vsession->ext;
|
|
+ enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(b->type) ?
|
|
+ MVX_DIR_INPUT : MVX_DIR_OUTPUT;
|
|
+ struct mvx_v4l2_port *vport = &vsession->port[dir];
|
|
+ struct vb2_buffer *vb;
|
|
+ struct mvx_v4l2_buffer *vbuf;
|
|
+ struct mvx_buffer *buf;
|
|
+ int ret;
|
|
+
|
|
+ MVX_SESSION_INFO(&vsession->session,
|
|
+ "v4l2: Dequeue buffer. dir=%d, type=%u.",
|
|
+ dir, b->type);
|
|
+
|
|
+ mutex_lock(&vsession->mutex);
|
|
+
|
|
+ ret = vb2_dqbuf(&vport->vb2_queue, b, file->f_flags & O_NONBLOCK);
|
|
+ if (ret != 0)
|
|
+ goto unlock_mutex;
|
|
+
|
|
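+ /* Dequeuing the last capture buffer also signals V4L2_EVENT_EOS to user space. */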
+ if ((dir == MVX_DIR_OUTPUT) && (b->flags & V4L2_BUF_FLAG_LAST)) {
|
|
+ const struct v4l2_event event = {
|
|
+ .type = V4L2_EVENT_EOS
|
|
+ };
|
|
+ v4l2_event_queue(&ctx->vdev, &event);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * For single planar buffers there is no data offset. Instead the
|
|
+ * offset is added to the memory pointer and subtracted from the
|
|
+ * bytesused.
|
|
+ */
|
|
+ vb = vport->vb2_queue.bufs[b->index];
|
|
+ if (V4L2_TYPE_IS_MULTIPLANAR(vb->type) == false) {
|
|
+ b->bytesused -= vb->planes[0].data_offset;
|
|
+
|
|
+ switch (b->memory) {
|
|
+ case V4L2_MEMORY_MMAP:
|
|
+ b->m.offset += vb->planes[0].data_offset;
|
|
+ break;
|
|
+ case V4L2_MEMORY_USERPTR:
|
|
+ b->m.userptr += vb->planes[0].data_offset;
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (vsession->port[MVX_DIR_INPUT].port->format <= MVX_FORMAT_BITSTREAM_LAST &&
|
|
+ dir == MVX_DIR_OUTPUT && V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
|
|
+ vbuf = vb2_to_mvx_v4l2_buffer(vb);
|
|
+ buf = &vbuf->buf;
|
|
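+ /* For decode sessions, pack the frame dimensions into reserved2: width in the upper 16 bits, height in the lower 16. */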
+ b->reserved2 = (buf->width << 16) | (buf->height);
|
|
+ }
|
|
+
|
|
+unlock_mutex:
|
|
+ mutex_unlock(&vsession->mutex);
|
|
+
|
|
+#ifndef MODULE
|
|
+ MVX_SESSION_INFO(&vsession->session,
|
|
+ "v4l2: Dequeued buffer. dir=%d, type=%u, index=%u, flags=0x%x, nevents=%u, fh=%p.",
|
|
+ dir, b->type, b->index, b->flags,
|
|
+ v4l2_event_pending(&vsession->fh), fh);
|
|
+#else
|
|
+ MVX_SESSION_INFO(&vsession->session,
|
|
+ "v4l2: Dequeued buffer. dir=%d, type=%u, index=%u, flags=0x%x, fh=%p.",
|
|
+ dir, b->type, b->index, b->flags,
|
|
+ fh);
|
|
+#endif
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int mvx_v4l2_vidioc_subscribe_event(struct v4l2_fh *fh,
|
|
+ const struct v4l2_event_subscription *sub)
|
|
+{
|
|
+ struct mvx_v4l2_session *session = v4l2_fh_to_session(fh);
|
|
+
|
|
+ MVX_SESSION_INFO(&session->session,
|
|
+ "v4l2: Subscribe event. fh=%p, type=%u.", fh,
|
|
+ sub->type);
|
|
+
|
|
+ switch (sub->type) {
|
|
+ case V4L2_EVENT_CTRL:
|
|
+ return v4l2_ctrl_subscribe_event(fh, sub);
|
|
+ case V4L2_EVENT_EOS:
|
|
+ case V4L2_EVENT_SOURCE_CHANGE:
|
|
+ case V4L2_EVENT_MVX_COLOR_DESC:
|
|
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
|
|
+ default:
|
|
+ MVX_SESSION_WARN(&session->session,
|
|
+ "Can't register for unsupported event. type=%u.",
|
|
+ sub->type);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+long mvx_v4l2_vidioc_default(struct file *file,
|
|
+ void *fh,
|
|
+ bool valid_prio,
|
|
+ unsigned int cmd,
|
|
+ void *arg)
|
|
+{
|
|
+ struct mvx_v4l2_session *vsession = file_to_session(file);
|
|
+ int ret;
|
|
+ MVX_SESSION_INFO(&vsession->session,
|
|
+ "Custom ioctl. cmd=0x%x, arg=0x%p.", cmd, arg);
|
|
+
|
|
+ switch (cmd) {
|
|
+ case VIDIOC_G_MVX_COLORDESC: {
|
|
+ ret = mvx_v4l2_session_get_color_desc(vsession, arg);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_S_MVX_ROI_REGIONS: {
|
|
+ ret = mvx_v4l2_session_set_roi_regions(vsession, arg);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_S_MVX_QP_EPR: {
|
|
+ ret = mvx_v4l2_session_set_qp_epr(vsession, arg);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_S_MVX_COLORDESC: {
|
|
+ ret = mvx_v4l2_session_set_color_desc(vsession, arg);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_S_MVX_SEI_USERDATA: {
|
|
+ ret = mvx_v4l2_session_set_sei_userdata(vsession, arg);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_S_MVX_RATE_CONTROL: {
|
|
+ ret = mvx_v4l2_session_set_rate_control(vsession, arg);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_S_MVX_DSL_FRAME: {
|
|
+ ret = mvx_v4l2_session_set_dsl_frame(vsession, arg);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_S_MVX_DSL_RATIO: {
|
|
+ ret = mvx_v4l2_session_set_dsl_ratio(vsession, arg);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_S_MVX_LONG_TERM_REF: {
|
|
+ ret = mvx_v4l2_session_set_long_term_ref(vsession, arg);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_S_MVX_DSL_MODE: {
|
|
+ ret = mvx_v4l2_session_set_dsl_mode(vsession, arg);
|
|
+ break;
|
|
+ }
|
|
+ default:
|
|
+ MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
|
|
+ "Unsupported IOCTL. cmd=0x%x", cmd);
|
|
+ return -ENOTTY;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_vidioc.h b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_vidioc.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/if/v4l2/mvx_v4l2_vidioc.h
|
|
@@ -0,0 +1,147 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_V4L2_VIDIOC_H_
|
|
+#define _MVX_V4L2_VIDIOC_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ *
|
|
+ * Callbacks for struct v4l2_ioctl_ops.
|
|
+ *
|
|
+ * Prototypes declared below implement certain v4l2 ioctls and are used to
|
|
+ * initialize members of v4l2_ioctl_ops structure.
|
|
+ ****************************************************************************/
|
|
+
|
|
+int mvx_v4l2_vidioc_querycap(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_capability *cap);
|
|
+
|
|
+int mvx_v4l2_vidioc_enum_fmt_vid_cap(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_fmtdesc *f);
|
|
+
|
|
+int mvx_v4l2_vidioc_enum_fmt_vid_out(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_fmtdesc *f);
|
|
+
|
|
+int mvx_v4l2_vidioc_enum_framesizes(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_frmsizeenum *fsize);
|
|
+
|
|
+int mvx_v4l2_vidioc_g_fmt_vid_cap(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_format *f);
|
|
+
|
|
+int mvx_v4l2_vidioc_g_fmt_vid_out(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_format *f);
|
|
+
|
|
+int mvx_v4l2_vidioc_s_fmt_vid_cap(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_format *f);
|
|
+
|
|
+int mvx_v4l2_vidioc_s_fmt_vid_out(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_format *f);
|
|
+
|
|
+int mvx_v4l2_vidioc_try_fmt_vid_cap(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_format *f);
|
|
+
|
|
+int mvx_v4l2_vidioc_try_fmt_vid_out(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_format *f);
|
|
+
|
|
+int mvx_v4l2_vidioc_g_crop(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_crop *a);
|
|
+int mvx_v4l2_vidioc_g_selection(struct file *file, void *fh,
|
|
+ struct v4l2_selection *s);
|
|
+int mvx_v4l2_vidioc_streamon(struct file *file,
|
|
+ void *priv,
|
|
+ enum v4l2_buf_type type);
|
|
+
|
|
+int mvx_v4l2_vidioc_streamoff(struct file *file,
|
|
+ void *priv,
|
|
+ enum v4l2_buf_type type);
|
|
+
|
|
+int mvx_v4l2_vidioc_encoder_cmd(struct file *file,
|
|
+ void *priv,
|
|
+ struct v4l2_encoder_cmd *cmd);
|
|
+
|
|
+int mvx_v4l2_vidioc_try_encoder_cmd(struct file *file,
|
|
+ void *priv,
|
|
+ struct v4l2_encoder_cmd *cmd);
|
|
+
|
|
+int mvx_v4l2_vidioc_decoder_cmd(struct file *file,
|
|
+ void *priv,
|
|
+ struct v4l2_decoder_cmd *cmd);
|
|
+
|
|
+int mvx_v4l2_vidioc_try_decoder_cmd(struct file *file,
|
|
+ void *priv,
|
|
+ struct v4l2_decoder_cmd *cmd);
|
|
+
|
|
+int mvx_v4l2_vidioc_reqbufs(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_requestbuffers *b);
|
|
+
|
|
+int mvx_v4l2_vidioc_create_bufs(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_create_buffers *b);
|
|
+
|
|
+int mvx_v4l2_vidioc_querybuf(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_buffer *b);
|
|
+
|
|
+int mvx_v4l2_vidioc_qbuf(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_buffer *b);
|
|
+
|
|
+int mvx_v4l2_vidioc_dqbuf(struct file *file,
|
|
+ void *fh,
|
|
+ struct v4l2_buffer *b);
|
|
+
|
|
+int mvx_v4l2_vidioc_subscribe_event(struct v4l2_fh *fh,
|
|
+ const struct v4l2_event_subscription *sub);
|
|
+
|
|
+long mvx_v4l2_vidioc_default(struct file *file,
|
|
+ void *fh,
|
|
+ bool valid_prio,
|
|
+ unsigned int cmd,
|
|
+ void *arg);
|
|
+int mvx_v4l2_vidioc_s_selection(struct file *file, void *fh,
|
|
+ struct v4l2_selection *s);
|
|
+int mvx_v4l2_vidioc_g_parm(struct file *file, void *fh,
|
|
+ struct v4l2_streamparm *a);
|
|
+int mvx_v4l2_vidioc_s_parm(struct file *file, void *fh,
|
|
+ struct v4l2_streamparm *a);
|
|
+#endif /* _MVX_V4L2_VIDIOC_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/mvx_driver.c b/drivers/media/platform/spacemit/vpu_k1x/mvx_driver.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/mvx_driver.c
|
|
@@ -0,0 +1,70 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/printk.h>
|
|
+#include "mvx_if.h"
|
|
+#include "mvx_dev.h"
|
|
+#include "mvx_log_group.h"
|
|
+
|
|
+MODULE_LICENSE("GPL");
|
|
+MODULE_AUTHOR("ARMChina");
|
|
+MODULE_DESCRIPTION("Tiube VPU Driver.");
|
|
+
|
|
+static int __init mvx_init(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = mvx_log_group_init("amvx");
|
|
+ if (ret != 0) {
|
|
+ pr_err("Failed to create MVx driver logging.\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = mvx_dev_init();
|
|
+ if (ret != 0) {
|
|
+ pr_err("Failed to register MVx dev driver.\n");
|
|
+ mvx_log_group_deinit();
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void __exit mvx_exit(void)
|
|
+{
|
|
+ mvx_dev_exit();
|
|
+ mvx_log_group_deinit();
|
|
+}
|
|
+
|
|
+module_init(mvx_init);
|
|
+module_exit(mvx_exit);
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/mvx_dvfs.c b/drivers/media/platform/spacemit/vpu_k1x/mvx_dvfs.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/mvx_dvfs.c
|
|
@@ -0,0 +1,1341 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+#include <linux/list.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/pm_qos.h>
|
|
+#include <linux/kthread.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/stat.h>
|
|
+#include <linux/sysfs.h>
|
|
+#include <linux/thermal.h>
|
|
+
|
|
+#include "mvx_dvfs.h"
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx_session.h"
|
|
+
|
|
+#define DVFS_INTERNAL_DEBUG
|
|
+/** Default value for an interval between frequency updates in milliseconds.
|
|
+ * It could be overwritten by user in debug build when sysfs is enabled.
|
|
+ */
|
|
+#define POLL_INTERVAL_MS 100
|
|
+
|
|
+/* Adjustment step in percents of maximum supported frequency */
|
|
+#define UP_STEP_PERCENT 25
|
|
+#define DOWN_STEP_PERCENT 13
|
|
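+/* Maximum VPU clock frequency in Hz (819.2 MHz), matching the top entry of vpufclk_freqtable. */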
+#define DVFS_FREQ_MAX 819200000
|
|
+
|
|
+#if defined(CONFIG_SYSFS) && defined(DVFS_INTERNAL_DEBUG)
|
|
+#define DVFS_DEBUG_MODE 1
|
|
+#else
|
|
+#define DVFS_DEBUG_MODE 0
|
|
+#endif
|
|
+
|
|
+#define VPU_DDR_QOS_ENABLE 0
|
|
+#define VPU_DDR_QOS_MIN 50000 /* 50MB/s */
|
|
+#define VPU_DDR_QOS_MAX 4096000 /* 4GB/s */
|
|
+#define VPU_DDR_QOS_PREDEFINED_FPS 30
|
|
+
|
|
+#define NELEMS(a) (sizeof(a) / sizeof((a)[0]))
|
|
+
|
|
+extern int session_wait_pending_timeout;
|
|
+extern int session_watchdog_timeout;
|
|
+
|
|
+/**
|
|
+ * Structure used by DVFS module to keep track of session usage and to
|
|
+ * take decisions about power management.
|
|
+ *
|
|
+ * Currently the only parameter taken into consideration is the number of
|
|
+ * output buffers enqueued in FW for each session. DVFS tries to keep this
|
|
+ * parameter equal to 1 for all sessions. If some session has more than one
|
|
+ * enqueued buffer, it means that a client is waiting for more than one
|
|
+ * frame and the clock frequency should be increased. If some session has
|
|
+ * no buffers enqueued, it means that the client is not waiting for
|
|
+ * anything and the clock frequency could be decreased. Priority is given
|
|
+ * to frequency increasing (when more than one session is registered).
|
|
+ */
|
|
+struct session
|
|
+{
|
|
+ mvx_session_id session_id;
|
|
+ struct list_head list;
|
|
+ bool is_encoder;
|
|
+ uint32_t ddr_qos_read;
|
|
+ uint32_t ddr_qos_write;
|
|
+ int restrict_buffer_count;
|
|
+};
|
|
+
|
|
+/* Available VPU frequency levels. */
|
|
+enum {
|
|
+ VPU_VMIN_LEVEL_0 = 0,
|
|
+ VPU_VMIN_LEVEL_1,
|
|
+ VPU_VMIN_LEVEL_2,
|
|
+ VPU_VMIN_LEVEL_3,
|
|
+ VPU_VMIN_LEVEL_4,
|
|
+ VPU_VMIN_LEVEL_5,
|
|
+ VPU_VMIN_LEVEL_6,
|
|
+};
|
|
+struct vpu_freq_vmin_info
|
|
+{
|
|
+ uint32_t freq;
|
|
+ uint32_t vmin_level;
|
|
+};
|
|
+
|
|
+/* Context used for DVFS and DDR QoS adjustment. */
|
|
+struct mvx_dvfs_ctx_t
|
|
+{
|
|
+ /* ddr qos params */
|
|
+ uint32_t ddr_qos_rsum;
|
|
+ uint32_t ddr_qos_wsum;
|
|
+
|
|
+ /* Frequency limits */
|
|
+ struct clk* clock;
|
|
+ uint32_t max_freq;
|
|
+ uint32_t min_freq;
|
|
+ uint32_t up_step_freq;
|
|
+ uint32_t down_step_freq;
|
|
+ /**
|
|
+ * DVFS polling interval - an interval between frequency updates in milliseconds.
|
|
+ * It is a constant value for non-debug and non-sysfs builds.
|
|
+ */
|
|
+ uint32_t poll_interval_ms;
|
|
+
|
|
+#ifdef CONFIG_THERMAL
|
|
+ /* thermal restriction */
|
|
+ unsigned long max_state;
|
|
+ unsigned long cur_state;
|
|
+ struct thermal_cooling_device *cdev;
|
|
+#endif
|
|
+
|
|
+ bool sched_suspend;
|
|
+};
|
|
+
|
|
+/* A list containing all registered sessions */
|
|
+static LIST_HEAD(sessions);
|
|
+
|
|
+struct device *mvx_device;
|
|
+
|
|
+/* Flag used to prevent usage of DVFS module when it was not initialized */
|
|
+static bool initialized = false;
|
|
+
|
|
+/* Flag used to indicate that DVFS module is going to shut itself down */
|
|
+static bool shutdown = false;
|
|
+
|
|
+/* Semaphore used to prevent concurrent access to DVFS internal structures */
|
|
+static struct semaphore dvfs_sem;
|
|
+
|
|
+/* DVFS polling task */
|
|
+static struct task_struct *dvfs_task = NULL;
|
|
+static wait_queue_head_t dvfs_wq;
|
|
+
|
|
+#if (1 == DVFS_DEBUG_MODE)
|
|
+/**
|
|
+ * Counters used for debugging/verification purposes.
|
|
+ */
|
|
+
|
|
+/* Flag used to enable/disable DVFS in debug builds */
|
|
+static atomic_t dvfs_enabled = ATOMIC_INIT(1);
|
|
+
|
|
+#if (1 == VPU_DDR_QOS_ENABLE)
|
|
+static atomic_t ddr_qos_enabled = ATOMIC_INIT(1);
|
|
+#endif
|
|
+
|
|
+/* Amount of times clock frequency was changed by DVFS */
|
|
+static atomic_long_t changes_cnt = ATOMIC_LONG_INIT(0);
|
|
+
|
|
+/* Amount of times burst mode was used by DVFS */
|
|
+static atomic_long_t burst_cnt = ATOMIC_LONG_INIT(0);
|
|
+#endif
|
|
+
|
|
+static const struct vpu_freq_vmin_info vpufclk_freqtable[] =
|
|
+{
|
|
+ {307200000, VPU_VMIN_LEVEL_0},
|
|
+ {409600000, VPU_VMIN_LEVEL_1},
|
|
+ {491520000, VPU_VMIN_LEVEL_2},
|
|
+ {600000000, VPU_VMIN_LEVEL_3},
|
|
+ {614400000, VPU_VMIN_LEVEL_4},
|
|
+ {750000000, VPU_VMIN_LEVEL_5},
|
|
+ {819200000, VPU_VMIN_LEVEL_6}
|
|
+};
|
|
+
|
|
+#define FREQ_TABLE_SIZE (sizeof(vpufclk_freqtable)/sizeof(struct vpu_freq_vmin_info))
|
|
+
|
|
+static struct mvx_dvfs_ctx_t mvx_dvfs_ctx;
|
|
+
|
|
+static void set_clock_rate(uint32_t clk_rate)
|
|
+{
|
|
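+ /* Clock control is currently stubbed out; the requested rate is ignored. */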
+ //clk_set_rate(mvx_dvfs_ctx.clock, clk_rate);
|
|
+}
|
|
+
|
|
+static uint32_t get_clock_rate(void)
|
|
+{
|
|
+ return 0;
|
|
+ //return clk_get_rate(mvx_dvfs_ctx.clock);
|
|
+}
|
|
+
|
|
+static uint32_t get_max_clock_rate(void)
|
|
+{
|
|
+ return DVFS_FREQ_MAX;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Allocate and register a session in DVFS module.
|
|
+ *
|
|
+ * This function allocates needed resources for the session and registers
|
|
+ * it in the module.
|
|
+ *
|
|
+ * This function must be called when dvfs_sem semaphore IS locked.
|
|
+ *
|
|
+ * @param session_id Session id
|
|
+ * @return True when registration was successful,
|
|
+ * False otherwise.
|
|
+ */
|
|
+static bool allocate_session(const mvx_session_id session_id, bool is_encoder)
|
|
+{
|
|
+ struct session *session;
|
|
+
|
|
+ session = devm_kzalloc(mvx_device, sizeof(*session), GFP_KERNEL);
|
|
+ if (NULL == session)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "DVFS is unable to allocate memory for a new session. session=%p",
|
|
+ session_id);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ session->session_id = session_id;
|
|
+ session->ddr_qos_read = session->ddr_qos_write = 0;
|
|
+ session->is_encoder = is_encoder;
|
|
+
|
|
+ INIT_LIST_HEAD(&session->list);
|
|
+ list_add(&session->list, &sessions);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Unregister a session from DVFS module.
|
|
+ *
|
|
+ * When session is not NULL, the function releases all previously allocated
|
|
+ * resources for the session and unregisters it from DVFS.
|
|
+ *
|
|
+ * This function must be called when dvfs_sem semaphore IS locked.
|
|
+ *
|
|
+ * @param session Session or NULL
|
|
+ */
|
|
+static void free_session(struct session *session)
|
|
+{
|
|
+ if (NULL == session)
|
|
+ {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ list_del(&session->list);
|
|
+ devm_kfree(mvx_device, session);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Find a session with provided session_id.
|
|
+ *
|
|
+ * This function tries to find previously registered session with provided
|
|
+ * session_id.
|
|
+ *
|
|
+ * This function must be called when dvfs_sem semaphore IS locked.
|
|
+ *
|
|
+ * @param session_id Session id
|
|
+ * @return pointer to session structure when a session was found,
|
|
+ * NULL when a session was not found.
|
|
+ */
|
|
+static struct session *get_session(const mvx_session_id session_id)
|
|
+{
|
|
+ struct list_head *entry;
|
|
+ struct session *session;
|
|
+ list_for_each(entry, &sessions)
|
|
+ {
|
|
+ session = list_entry(entry, struct session, list);
|
|
+ if (session->session_id == session_id)
|
|
+ {
|
|
+ return session;
|
|
+ }
|
|
+ }
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Warm up VPU.
|
|
+ *
|
|
+ * This function raises the VPU clock frequency by the requested number
|
|
+ * of steps when possible.
|
|
+ *
|
|
+ * @param steps Requested number of steps.
|
|
+ */
|
|
+static void warm_up(const int steps)
|
|
+{
|
|
+ uint32_t old_freq = get_clock_rate();
|
|
+ uint32_t new_freq;
|
|
+#if (1 == DVFS_DEBUG_MODE)
|
|
+ bool do_burst = false;
|
|
+#endif
|
|
+
|
|
+ /**
|
|
+ * If 3 or more steps are requested, we are far behind required
|
|
+ * performance level.
|
|
+ */
|
|
+ if (steps > 2)
|
|
+ {
|
|
+ new_freq = mvx_dvfs_ctx.max_freq;
|
|
+#if (1 == DVFS_DEBUG_MODE)
|
|
+ do_burst = true;
|
|
+#endif
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ new_freq = min(old_freq + steps * mvx_dvfs_ctx.up_step_freq, mvx_dvfs_ctx.max_freq);
|
|
+ }
|
|
+
|
|
+ if (old_freq != new_freq)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "warm_up. buffer count: %d, old_freq: %d, new_freq: %d", steps, old_freq, new_freq);
|
|
+ set_clock_rate(new_freq);
|
|
+#if (1 == DVFS_DEBUG_MODE)
|
|
+ atomic_long_inc(&changes_cnt);
|
|
+ if (do_burst)
|
|
+ {
|
|
+ atomic_long_inc(&burst_cnt);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ }
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Cool down VPU.
|
|
+ *
|
|
+ * This function decreases the VPU clock frequency when possible.
|
|
+ */
|
|
+static void cool_down(void)
|
|
+{
|
|
+ uint32_t old_freq = get_clock_rate();
|
|
+ uint32_t new_freq;
|
|
+ if (old_freq == mvx_dvfs_ctx.min_freq)
|
|
+ {
|
|
+ return;
|
|
+ }
|
|
+ new_freq = max(mvx_dvfs_ctx.min_freq, max(mvx_dvfs_ctx.down_step_freq, old_freq - mvx_dvfs_ctx.down_step_freq));
|
|
+ if (old_freq != new_freq)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "cool_down. old_freq: %d, new_freq: %d", old_freq, new_freq);
|
|
+ set_clock_rate(new_freq);
|
|
+#if (1 == DVFS_DEBUG_MODE)
|
|
+ atomic_long_inc(&changes_cnt);
|
|
+#endif
|
|
+ }
|
|
+}
|
|
+
|
|
+static int get_restrict_buffer_count(mvx_session_id session_id)
|
|
+{
|
|
+ struct mvx_session *session = (struct mvx_session *)session_id;
|
|
+ int buffers_cnt;
|
|
+ struct session* dvfs_session = get_session(session_id);
|
|
+ if (dvfs_session == NULL)
|
|
+ {
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ /* Don't have to lock the session since we just want to get the number of buffers */
|
|
+ if (!dvfs_session->is_encoder)
|
|
+ {
|
|
+ buffers_cnt = session->port[MVX_DIR_OUTPUT].buffer_count;
|
|
+ buffers_cnt -= session->port[MVX_DIR_OUTPUT].buffer_on_hold_count;
|
|
+
|
|
+ /* There are not enough input buffers, so there is no need to boost the VPU. */
|
|
+ if (session->port[MVX_DIR_INPUT].buffer_count <= 1)
|
|
+ {
|
|
+ buffers_cnt = session->port[MVX_DIR_INPUT].buffer_count;
|
|
+ }
|
|
+ //MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "get_restrict_buffer_count. out buffer_count: %d, buffer_on_hold_count: %d, in buffer_count: %d", session->port[MVX_DIR_OUTPUT].buffer_count, session->port[MVX_DIR_OUTPUT].buffer_on_hold_count, session->port[MVX_DIR_INPUT].buffer_count);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ buffers_cnt = session->port[MVX_DIR_INPUT].buffer_count;
|
|
+ if (false != session->eos_queued && buffers_cnt < 2)
|
|
+ {
|
|
+ buffers_cnt = 2;
|
|
+ }
|
|
+ //MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "get_restrict_buffer_count. buffer_count: %d, eos_queued: %d", session->port[MVX_DIR_INPUT].buffer_count, session->eos_queued);
|
|
+ }
|
|
+
|
|
+ if (session->keep_freq_high) {
|
|
+ buffers_cnt += 2;
|
|
+ }
|
|
+
|
|
+ return buffers_cnt;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Update sessions list and VPU clock frequency.
|
|
+ *
|
|
+ * This function queries the state of all registered sessions and adjusts
|
|
+ * VPU clock frequency to meet their needs when dvfs_control is enabled.
|
|
+ * When SYSFS is enabled, the function also stores the status of all sessions
|
|
+ * so it could be retrieved by the user.
|
|
+ *
|
|
+ * This function must be called when dvfs_sem semaphore IS NOT locked.
|
|
+ */
|
|
+static void update_sessions(void)
|
|
+{
|
|
+ struct list_head *entry;
|
|
+ struct list_head *safe;
|
|
+ struct session *session;
|
|
+ int restrict_buffer_count;
|
|
+ unsigned int buf_max = 0;
|
|
+ unsigned int buf_min = UINT_MAX;
|
|
+ int sem_failed;
|
|
+
|
|
+ sem_failed = down_interruptible(&dvfs_sem);
|
|
+ if (sem_failed)
|
|
+ {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (mvx_dvfs_ctx.sched_suspend == true) {
|
|
+ up(&dvfs_sem);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ list_for_each_safe(entry, safe, &sessions)
|
|
+ {
|
|
+ session = list_entry(entry, struct session, list);
|
|
+
|
|
+ /**
|
|
+ * To avoid potential dead lock we release dvfs_sem before a call to
|
|
+ * get_session_status() callback. After a return from the callback
|
|
+ * we have to take dvfs_sem again and to verify that current session
|
|
+ * was not unregistered by the scheduler while we were sleeping.
|
|
+ */
|
|
+ restrict_buffer_count = get_restrict_buffer_count(session->session_id);
|
|
+ session->restrict_buffer_count = restrict_buffer_count;
|
|
+
|
|
+ if (shutdown)
|
|
+ {
|
|
+ up(&dvfs_sem);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (restrict_buffer_count < 0)
|
|
+ {
|
|
+ //MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ // "DVFS failed to retrieve status for the session. Session was removed? session=%p, restrict_buffer_count=%d",
|
|
+ // session->session_id, restrict_buffer_count);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (restrict_buffer_count > buf_max)
|
|
+ {
|
|
+ buf_max = restrict_buffer_count;
|
|
+ }
|
|
+ if (restrict_buffer_count < buf_min)
|
|
+ {
|
|
+ buf_min = restrict_buffer_count;
|
|
+ }
|
|
+ }
|
|
+
|
|
+#if (1 == DVFS_DEBUG_MODE)
|
|
+ if (0 == atomic_read(&dvfs_enabled))
|
|
+ {
|
|
+ up(&dvfs_sem);
|
|
+ return;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ if (buf_max > 1)
|
|
+ {
|
|
+ warm_up(buf_max);
|
|
+ }
|
|
+ else if (buf_min < 1)
|
|
+ {
|
|
+ cool_down();
|
|
+ }
|
|
+ up(&dvfs_sem);
|
|
+}
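+
+/*
+ * Reviewer note (not part of the original driver): the policy above is a
+ * simple hysteresis. If any session has more than one buffer queued
+ * (buf_max > 1), the clock is raised by warm_up(buf_max); only when every
+ * session has no queued buffers (buf_min < 1) is the clock lowered one step
+ * by cool_down(). Anything in between keeps the current rate.
+ */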
|
|
+
|
|
+/**
|
|
+ * DVFS polling thread.
|
|
+ *
|
|
+ * This function is executed in a separate kernel thread. It updates clock
|
|
+ * frequency every poll_interval_ms milliseconds.
|
|
+ */
|
|
+static int dvfs_thread(void *v)
|
|
+{
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "DVFS polling thread started");
|
|
+ while (!kthread_should_stop())
|
|
+ {
|
|
+ wait_event_interruptible(dvfs_wq, list_empty(&sessions) == 0 || shutdown);
|
|
+ update_sessions();
|
|
+ msleep_interruptible(mvx_dvfs_ctx.poll_interval_ms);
|
|
+ }
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "DVFS polling thread finished");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Return the given percentage (percent) of the value val.
|
|
+ */
|
|
+static uint32_t ratio(const uint32_t val, const uint32_t percent)
|
|
+{
|
|
+ return (uint32_t)(((uint64_t)val * percent) / 100);
|
|
+}
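+
+/*
+ * Illustrative sketch added for review, not part of the original driver:
+ * shows how the DVFS step sizes are derived from the maximum clock rate via
+ * ratio(). The real percentages come from UP_STEP_PERCENT/DOWN_STEP_PERCENT
+ * defined earlier in this file; the value 10 below is only an example.
+ */
+static __maybe_unused void dvfs_example_step_size(void)
+{
+    /* ratio(819200000, 10) == 81920000, i.e. each warm_up()/cool_down()
+     * step would move the clock by roughly a tenth of the maximum rate.
+     */
+    uint32_t step = ratio(819200000, 10);
+
+    (void)step;
+}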
|
|
+
|
|
+#if (1 == DVFS_DEBUG_MODE)
|
|
+/**
|
|
+ * Print DVFS statistics to sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+static ssize_t sysfs_print_stats(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ ssize_t num = 0;
|
|
+ struct list_head *entry;
|
|
+ struct session *session;
|
|
+ uint32_t freq = get_clock_rate();
|
|
+
|
|
+ num += scnprintf(buf + num, PAGE_SIZE - num,
|
|
+ "freq: %4u, max_freq: %4u, up_step_freq: %3u, down_step_freq: %3u",
|
|
+ freq, mvx_dvfs_ctx.max_freq, mvx_dvfs_ctx.up_step_freq, mvx_dvfs_ctx.down_step_freq);
|
|
+#if (1 == DVFS_DEBUG_MODE)
|
|
+ num += scnprintf(buf + num, PAGE_SIZE - num,
|
|
+ ", enabled: %1u, poll_interval_ms: %3u, changes_cnt: %10lu, burst_cnt: %10lu",
|
|
+ atomic_read(&dvfs_enabled), mvx_dvfs_ctx.poll_interval_ms,
|
|
+ atomic_long_read(&changes_cnt), atomic_long_read(&burst_cnt));
|
|
+#endif
|
|
+ num += scnprintf(buf + num, PAGE_SIZE - num, "\n");
|
|
+ list_for_each(entry, &sessions)
|
|
+ {
|
|
+ session = list_entry(entry, struct session, list);
|
|
+ num += scnprintf(buf + num, PAGE_SIZE - num,
|
|
+ "%p: out_buf: %02u\n",
|
|
+ session->session_id, session->restrict_buffer_count);
|
|
+ }
|
|
+
|
|
+ return num;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Print DVFS enabling status to sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+static ssize_t sysfs_print_enabled(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ ssize_t num = 0;
|
|
+ num += scnprintf(buf, PAGE_SIZE, "%u\n", atomic_read(&dvfs_enabled) ? 1 : 0);
|
|
+ return num;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Set DVFS enabling status from sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+ssize_t sysfs_set_enabled(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ int failed;
|
|
+ unsigned int enabled;
|
|
+ failed = kstrtouint(buf, 10, &enabled);
|
|
+ if (!failed)
|
|
+ {
|
|
+ atomic_set(&dvfs_enabled, enabled);
|
|
+ }
|
|
+ return (failed) ? failed : count;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Print current clock frequency to sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+static ssize_t sysfs_print_freq(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ ssize_t num = 0;
|
|
+ uint32_t freq = get_clock_rate();
|
|
+ num += scnprintf(buf, PAGE_SIZE, "%u\n", freq);
|
|
+ return num;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Set current clock frequency from sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+ssize_t sysfs_set_freq(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ int failed;
|
|
+ unsigned int freq;
|
|
+ failed = kstrtouint(buf, 10, &freq);
|
|
+ if (!failed)
|
|
+ {
|
|
+ set_clock_rate((uint32_t)freq);
|
|
+ }
|
|
+ return (failed) ? failed : count;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Print min clock frequency to sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+static ssize_t sysfs_print_min_freq(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ ssize_t num = 0;
|
|
+ uint32_t freq = mvx_dvfs_ctx.min_freq;
|
|
+ num += scnprintf(buf, PAGE_SIZE, "%u\n", freq);
|
|
+ return num;
|
|
+}
|
|
+
|
|
+uint32_t clip_min_max_rate(uint32_t freq, bool is_min_freq)
|
|
+{
|
|
+ int i = 0;
|
|
+ bool clip = false;
|
|
+ uint32_t clip_freq;
|
|
+ if (is_min_freq) {
|
|
+ for (i = 0; i < FREQ_TABLE_SIZE; i++)
|
|
+ {
|
|
+ if (freq <= vpufclk_freqtable[i].freq)
|
|
+ {
|
|
+ clip = true;
|
|
+ clip_freq = vpufclk_freqtable[i].freq;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (!clip) clip_freq = vpufclk_freqtable[FREQ_TABLE_SIZE-1].freq;
|
|
+ } else {
|
|
+ for (i = FREQ_TABLE_SIZE-1; i >= 0; i--)
|
|
+ {
|
|
+ if (freq >= vpufclk_freqtable[i].freq)
|
|
+ {
|
|
+ clip = true;
|
|
+ clip_freq = vpufclk_freqtable[i].freq;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (!clip) clip_freq = vpufclk_freqtable[0].freq;
|
|
+ }
|
|
+
|
|
+ return clip_freq;
|
|
+}
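+
+/*
+ * Illustrative sketch added for review, not part of the original driver:
+ * worked example of clip_min_max_rate(). A requested minimum of 500 MHz is
+ * rounded up to the next entry of vpufclk_freqtable, while the same value
+ * requested as a maximum is rounded down to the previous entry.
+ */
+static __maybe_unused void dvfs_example_clip_rates(void)
+{
+    uint32_t min_rate = clip_min_max_rate(500000000, true);  /* 600000000 */
+    uint32_t max_rate = clip_min_max_rate(500000000, false); /* 491520000 */
+
+    (void)min_rate;
+    (void)max_rate;
+}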
|
|
+
|
|
+/**
|
|
+ * Set min clock frequency from sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+ssize_t sysfs_set_min_freq(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ int failed;
|
|
+ unsigned int freq;
|
|
+ failed = kstrtouint(buf, 10, &freq);
|
|
+ freq = clip_min_max_rate(freq, true);
|
|
+ if (!failed)
|
|
+ {
|
|
+ mvx_dvfs_ctx.min_freq = (uint32_t)freq;
|
|
+ }
|
|
+ return (failed) ? failed : count;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Print max clock frequency to sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+static ssize_t sysfs_print_max_freq(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ ssize_t num = 0;
|
|
+ uint32_t freq = mvx_dvfs_ctx.max_freq;
|
|
+ num += scnprintf(buf, PAGE_SIZE, "%u\n", freq);
|
|
+ return num;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Set max clock frequency from sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+ssize_t sysfs_set_max_freq(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ int failed;
|
|
+ unsigned int freq;
|
|
+ failed = kstrtouint(buf, 10, &freq);
|
|
+ freq = clip_min_max_rate(freq, false);
|
|
+ if (!failed)
|
|
+ {
|
|
+ mvx_dvfs_ctx.max_freq = (uint32_t)freq;
|
|
+ mvx_dvfs_ctx.up_step_freq = ratio(mvx_dvfs_ctx.max_freq, UP_STEP_PERCENT);
|
|
+ mvx_dvfs_ctx.down_step_freq = ratio(mvx_dvfs_ctx.max_freq, DOWN_STEP_PERCENT);
|
|
+ }
|
|
+ return (failed) ? failed : count;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Set polling interval from sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+ssize_t sysfs_set_poll_interval_ms(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ int failed;
|
|
+ failed = kstrtouint(buf, 10, &mvx_dvfs_ctx.poll_interval_ms);
|
|
+ return (failed) ? failed : count;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Set up_step value from sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+ssize_t sysfs_set_up_step_percent(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ int failed;
|
|
+ unsigned int up_step_percent;
|
|
+ failed = kstrtouint(buf, 10, &up_step_percent);
|
|
+ if (!failed)
|
|
+ {
|
|
+ mvx_dvfs_ctx.up_step_freq = ratio(mvx_dvfs_ctx.max_freq, up_step_percent);
|
|
+ }
|
|
+ return (failed) ? failed : count;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Set down_step value from sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+ssize_t sysfs_set_down_step_percent(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ int failed;
|
|
+ unsigned int down_step_percent;
|
|
+ failed = kstrtouint(buf, 10, &down_step_percent);
|
|
+ if (!failed)
|
|
+ {
|
|
+ mvx_dvfs_ctx.down_step_freq = ratio(mvx_dvfs_ctx.max_freq, down_step_percent);
|
|
+ }
|
|
+ return (failed) ? failed : count;
|
|
+}
|
|
+/**
|
|
+ * Print available clock frequency to sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+static ssize_t sysfs_print_available_freq(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ ssize_t num = 0;
|
|
+ int32_t i;
|
|
+ for(i=0; i<FREQ_TABLE_SIZE; i++)
|
|
+ {
|
|
+ num += scnprintf(buf + num, PAGE_SIZE - num, "%u ", vpufclk_freqtable[i].freq);
|
|
+ }
|
|
+ num += scnprintf(buf + num, PAGE_SIZE - num, "\n");
|
|
+ return num;
|
|
+}
|
|
+
|
|
+#if (1 == VPU_DDR_QOS_ENABLE)
|
|
+static ssize_t sysfs_print_ddr_qos_enable(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ ssize_t num = 0;
|
|
+ num += snprintf(buf, PAGE_SIZE, "%u\n", atomic_read(&ddr_qos_enabled) ? 1 : 0);
|
|
+ return num;
|
|
+}
|
|
+
|
|
+static ssize_t sysfs_set_ddr_qos_enable(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ const char *buf,
|
|
+ size_t count)
|
|
+{
|
|
+ int failed;
|
|
+ unsigned int enable;
|
|
+ failed = kstrtouint(buf, 10, &enable);
|
|
+ if (!failed)
|
|
+ {
|
|
+ atomic_set(&ddr_qos_enabled, enable);
|
|
+ }
|
|
+ return (failed) ? failed : count;
|
|
+}
|
|
+#endif
|
|
+
|
|
+/**
|
|
+ * Print watchdog timeout value to sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+static ssize_t sysfs_print_watchdog_timeout(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ ssize_t num = 0;
|
|
+ uint32_t watchdog_timeout = session_watchdog_timeout;
|
|
+ num += scnprintf(buf, PAGE_SIZE, "%u\n", watchdog_timeout);
|
|
+ return num;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Set watchdog timeout value from sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+ssize_t sysfs_set_watchdog_timeout(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ int failed;
|
|
+ unsigned int watchdog_timeout;
|
|
+ failed = kstrtouint(buf, 10, &watchdog_timeout);
|
|
+ if (!failed)
|
|
+ {
|
|
+ session_watchdog_timeout = watchdog_timeout;
|
|
+ }
|
|
+ return (failed) ? failed : count;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Print wait pending timeout value to sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+static ssize_t sysfs_print_wait_pending_timeout(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ ssize_t num = 0;
|
|
+ uint32_t wait_pending_timeout = session_wait_pending_timeout;
|
|
+ num += scnprintf(buf, PAGE_SIZE, "%u\n", wait_pending_timeout);
|
|
+ return num;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Set wait pending timeout value from sysfs attribute.
|
|
+ *
|
|
+ * Used for debugging/verification purposes.
|
|
+ */
|
|
+ssize_t sysfs_set_wait_pending_timeout(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t count)
|
|
+{
|
|
+ int failed;
|
|
+ unsigned int wait_pending_timeout;
|
|
+ failed = kstrtouint(buf, 10, &wait_pending_timeout);
|
|
+ if (!failed)
|
|
+ {
|
|
+ session_wait_pending_timeout = wait_pending_timeout;
|
|
+ }
|
|
+ return (failed) ? failed : count;
|
|
+}
|
|
+
|
|
+/* Sysfs attributes used to debug/verify DVFS module */
|
|
+static struct device_attribute sysfs_files[] =
|
|
+{
|
|
+ __ATTR(dvfs_stats, S_IRUGO, sysfs_print_stats, NULL),
|
|
+ __ATTR(dvfs_enable, (S_IRUGO | S_IWUSR), sysfs_print_enabled, sysfs_set_enabled),
|
|
+ __ATTR(dvfs_freq, (S_IRUGO | S_IWUSR), sysfs_print_freq, sysfs_set_freq),
|
|
+ __ATTR(dvfs_poll_interval_ms, S_IWUSR, NULL, sysfs_set_poll_interval_ms),
|
|
+ __ATTR(dvfs_up_step_percent, S_IWUSR, NULL, sysfs_set_up_step_percent),
|
|
+ __ATTR(dvfs_down_step_percent, S_IWUSR, NULL, sysfs_set_down_step_percent),
|
|
+ __ATTR(dvfs_available_freqency, S_IRUGO, sysfs_print_available_freq, NULL),
|
|
+ __ATTR(dvfs_min_freq, (S_IRUGO | S_IWUSR), sysfs_print_min_freq, sysfs_set_min_freq),
|
|
+ __ATTR(dvfs_max_freq, (S_IRUGO | S_IWUSR), sysfs_print_max_freq, sysfs_set_max_freq),
|
|
+#if (1 == VPU_DDR_QOS_ENABLE)
|
|
+ __ATTR(ddr_qos_enable, (S_IRUGO | S_IWUSR), sysfs_print_ddr_qos_enable, sysfs_set_ddr_qos_enable),
|
|
+#endif
|
|
+ __ATTR(watchdog_timeout, (S_IRUGO | S_IWUSR), sysfs_print_watchdog_timeout, sysfs_set_watchdog_timeout),
|
|
+ __ATTR(wait_pending_timeout, (S_IRUGO | S_IWUSR), sysfs_print_wait_pending_timeout, sysfs_set_wait_pending_timeout),
|
|
+};
|
|
+
|
|
+/**
|
|
+ * Register all DVFS attributes in sysfs subsystem
|
|
+ */
|
|
+static void sysfs_register_devices(struct device *dev)
|
|
+{
|
|
+ int err;
|
|
+ int i = NELEMS(sysfs_files);
|
|
+
|
|
+ while (i--)
|
|
+ {
|
|
+ err = device_create_file(dev, &sysfs_files[i]);
|
|
+ if (err < 0)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR,
|
|
+ "DVFS is unable to create sysfs file. name=%s",
|
|
+ sysfs_files[i].attr.name);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Remove DVFS attributes from sysfs subsystem
|
|
+ */
|
|
+static void sysfs_unregister_devices(struct device *dev)
|
|
+{
|
|
+ int i = NELEMS(sysfs_files);
|
|
+
|
|
+ while (i--)
|
|
+ {
|
|
+ device_remove_file(dev, &sysfs_files[i]);
|
|
+ }
|
|
+}
|
|
+#endif /* DVFS_DEBUG_MODE */
|
|
+
|
|
+#ifdef CONFIG_THERMAL
|
|
+static int vpu_get_max_state(struct thermal_cooling_device *cdev,
|
|
+ unsigned long *state)
|
|
+{
|
|
+ struct mvx_dvfs_ctx_t *ctx = cdev->devdata;
|
|
+
|
|
+ *state = ctx->max_state;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int vpu_get_cur_state(struct thermal_cooling_device *cdev,
|
|
+ unsigned long *state)
|
|
+{
|
|
+ struct mvx_dvfs_ctx_t *ctx = cdev->devdata;
|
|
+
|
|
+ *state = ctx->cur_state;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int vpu_set_cur_state(struct thermal_cooling_device *cdev,
|
|
+ unsigned long state)
|
|
+{
|
|
+ struct mvx_dvfs_ctx_t *ctx = cdev->devdata;
|
|
+
|
|
+ if (state > ctx->max_state)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (ctx->cur_state == state)
|
|
+ return 0;
|
|
+
|
|
+ ctx->max_freq = vpufclk_freqtable[FREQ_TABLE_SIZE - state - 1].freq;
|
|
+ ctx->cur_state = state;
|
|
+ ctx->up_step_freq = ratio(mvx_dvfs_ctx.max_freq, UP_STEP_PERCENT);
|
|
+ ctx->down_step_freq = ratio(mvx_dvfs_ctx.max_freq, DOWN_STEP_PERCENT);
|
|
+ return 0;
|
|
+}
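+
+/*
+ * Reviewer note (not part of the original driver): cooling state N selects
+ * the (N+1)-th highest entry of vpufclk_freqtable as the new DVFS ceiling,
+ * so state 0 keeps the full 819200000 Hz available while the deepest state
+ * caps the VPU at 307200000 Hz; the up/down step frequencies are then
+ * rescaled from that new ceiling.
+ */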
|
|
+
|
|
+__maybe_unused static struct thermal_cooling_device_ops vpu_cooling_ops = {
|
|
+ .get_max_state = vpu_get_max_state,
|
|
+ .get_cur_state = vpu_get_cur_state,
|
|
+ .set_cur_state = vpu_set_cur_state,
|
|
+};
|
|
+#endif
|
|
+
|
|
+/**
|
|
+ * Initialize the DVFS module.
|
|
+ *
|
|
+ * Must be called before any other function in this module.
|
|
+ *
|
|
+ * @param dev Device
|
|
+ */
|
|
+void mvx_dvfs_init(struct device *dev)
|
|
+{
|
|
+ if (!initialized)
|
|
+ {
|
|
+ int i;
|
|
+ int min_vmin_level;
|
|
+ sema_init(&dvfs_sem, 1);
|
|
+
|
|
+ mvx_dvfs_ctx.max_freq = get_max_clock_rate();
|
|
+ mvx_dvfs_ctx.up_step_freq = ratio(mvx_dvfs_ctx.max_freq, UP_STEP_PERCENT);
|
|
+ mvx_dvfs_ctx.down_step_freq = ratio(mvx_dvfs_ctx.max_freq, DOWN_STEP_PERCENT);
|
|
+ mvx_dvfs_ctx.min_freq = vpufclk_freqtable[0].freq;
|
|
+ min_vmin_level = vpufclk_freqtable[0].vmin_level;
|
|
+ mvx_dvfs_ctx.sched_suspend = false;
|
|
+
|
|
+ /* Use the maximum clock frequency that still has the minimum vmin level as the bottom frequency for DVFS. */
|
|
+ for (i=1; i<FREQ_TABLE_SIZE; i++)
|
|
+ {
|
|
+ if (min_vmin_level < vpufclk_freqtable[i].vmin_level)
|
|
+ {
|
|
+ break;
|
|
+ }
|
|
+ mvx_dvfs_ctx.min_freq = vpufclk_freqtable[i].freq;
|
|
+ }
|
|
+
|
|
+ //mvx_dvfs_ctx.clock = devm_clk_get(dev, NULL);
|
|
+ mvx_dvfs_ctx.poll_interval_ms = POLL_INTERVAL_MS;
|
|
+
|
|
+ init_waitqueue_head(&dvfs_wq);
|
|
+ dvfs_task = kthread_run(dvfs_thread, NULL, "dvfs");
|
|
+
|
|
+#if (1 == DVFS_DEBUG_MODE)
|
|
+ if (NULL != dev && IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ {
|
|
+ sysfs_register_devices(dev);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ initialized = true;
|
|
+ shutdown = false;
|
|
+ mvx_device = dev;
|
|
+
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "Attempt to initialize DVFS twice");
|
|
+ }
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Deinitialize the DVFS module.
|
|
+ *
|
|
+ * All remaining sessions will be unregistered.
|
|
+ *
|
|
+ * @param dev Device
|
|
+ */
|
|
+void mvx_dvfs_deinit(struct device *dev)
|
|
+{
|
|
+ int sem_failed;
|
|
+ struct list_head *entry;
|
|
+ struct list_head *safe;
|
|
+ struct session *session;
|
|
+
|
|
+ if (!initialized)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "Attempt to deinitialize DVFS when it was not initialized");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_THERMAL
|
|
+ if (mvx_dvfs_ctx.cdev) {
|
|
+ thermal_cooling_device_unregister(mvx_dvfs_ctx.cdev);
|
|
+ mvx_dvfs_ctx.cdev = NULL;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ sem_failed = down_interruptible(&dvfs_sem);
|
|
+ shutdown = true;
|
|
+ if (!sem_failed)
|
|
+ {
|
|
+ up(&dvfs_sem);
|
|
+ }
|
|
+
|
|
+ wake_up_interruptible(&dvfs_wq);
|
|
+ if (!IS_ERR_OR_NULL(dvfs_task))
|
|
+ {
|
|
+ kthread_stop(dvfs_task);
|
|
+ }
|
|
+
|
|
+ sem_failed = down_interruptible(&dvfs_sem);
|
|
+ list_for_each_safe(entry, safe, &sessions)
|
|
+ {
|
|
+ session = list_entry(entry, struct session, list);
|
|
+ free_session(session);
|
|
+ }
|
|
+
|
|
+#if (1 == DVFS_DEBUG_MODE)
|
|
+ if (NULL != dev && IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ {
|
|
+ sysfs_unregister_devices(dev);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ //devm_clk_put(mvx_device, mvx_dvfs_ctx.clock);
|
|
+ initialized = false;
|
|
+ mvx_device = NULL;
|
|
+ if (!sem_failed)
|
|
+ {
|
|
+ up(&dvfs_sem);
|
|
+ }
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Register a session in the DVFS module.
|
|
+ *
|
|
+ * @param session_id Session id
|
|
+ * @return True when registration was successful,
|
|
+ * False, otherwise
|
|
+ */
|
|
+bool mvx_dvfs_register_session(const mvx_session_id session_id, bool is_encoder)
|
|
+{
|
|
+ bool success = false;
|
|
+ int sem_failed;
|
|
+
|
|
+ if (!initialized)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "DVFS module was not initialized");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ sem_failed = down_interruptible(&dvfs_sem);
|
|
+ if (sem_failed)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "DVFS semaphore was not obtained, sem_failed=%d", sem_failed);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (shutdown)
|
|
+ {
|
|
+ up(&dvfs_sem);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (get_session(session_id) != NULL)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO,
|
|
+ "this session is already registered. session=%p",
|
|
+ session_id);
|
|
+ up(&dvfs_sem);
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO,
|
|
+ "mvx_dvfs_register_session. session=%p", session_id);
|
|
+
|
|
+ success = allocate_session(session_id, is_encoder);
|
|
+ up(&dvfs_sem);
|
|
+
|
|
+ if (success)
|
|
+ {
|
|
+ bool adjust = true;
|
|
+#if (1 == DVFS_DEBUG_MODE)
|
|
+ /* Has DVFS been disabled through the sysfs interface? */
|
|
+ adjust = atomic_read(&dvfs_enabled);
|
|
+#endif
|
|
+ if (adjust) {
|
|
+ set_clock_rate(mvx_dvfs_ctx.max_freq);
|
|
+ }
|
|
+ }
|
|
+ wake_up_interruptible(&dvfs_wq);
|
|
+
|
|
+ return success;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Unregister a session from the DVFS module.
|
|
+ *
|
|
+ * Usage of corresponding session is not permitted after this call.
|
|
+ * @param session_id Session id
|
|
+ */
|
|
+void mvx_dvfs_unregister_session(const mvx_session_id session_id)
|
|
+{
|
|
+ struct session *session;
|
|
+ int sem_failed;
|
|
+
|
|
+ if (!initialized)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "DVFS module was not initialized");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ sem_failed = down_interruptible(&dvfs_sem);
|
|
+ if (sem_failed)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "DVFS semaphore was not obtained, %d",
|
|
+ sem_failed);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ session = get_session(session_id);
|
|
+ if (NULL != session)
|
|
+ {
|
|
+#if (1 == VPU_DDR_QOS_ENABLE)
|
|
+ if ((session->ddr_qos_read + session->ddr_qos_write) &&
|
|
+ mvx_dvfs_ctx.ddr_qos_rsum >= session->ddr_qos_read &&
|
|
+ mvx_dvfs_ctx.ddr_qos_wsum >= session->ddr_qos_write)
|
|
+ {
|
|
+ mvx_dvfs_ctx.ddr_qos_rsum -= session->ddr_qos_read;
|
|
+ mvx_dvfs_ctx.ddr_qos_wsum -= session->ddr_qos_write;
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO,
|
|
+ "DVFS remove session[%p] ddr qos: [%d, %d]/[%d, %d]", session_id, session->ddr_qos_read, session->ddr_qos_write, mvx_dvfs_ctx.ddr_qos_rsum, mvx_dvfs_ctx.ddr_qos_wsum);
|
|
+ session->ddr_qos_read = session->ddr_qos_write = 0;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "mvx_dvfs_unregister_session. session=%p", session_id);
|
|
+ free_session(session);
|
|
+
|
|
+ } else {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "session[%p] is already removed.", session_id);
|
|
+ }
|
|
+
|
|
+ up(&dvfs_sem);
|
|
+}
|
|
+
|
|
+void mvx_dvfs_estimate_ddr_bandwidth(struct estimate_ddr_input* input, struct estimate_ddr_output* output)
|
|
+{
|
|
+ /* predefined DDR throughput requirement for 1080p@30fps */
|
|
+ const int defined_width = 1920;
|
|
+ const int defined_height = 1080;
|
|
+ const uint32_t defined_bandwidth_tbl[2][2][2]={
|
|
+ {
|
|
+ /*decoder*/
|
|
+ {45000, 105000}, /*non-afbc[r, w]*/
|
|
+ {45000, 20000} /*afbc*/
|
|
+ },
|
|
+ { /*encoder*/
|
|
+ {162000, 54000}, /*non-afbc[r, w]*/
|
|
+ {162000, 54000} /*afbc*/
|
|
+ }
|
|
+ };
|
|
+ uint64_t estimated_read;
|
|
+ uint64_t estimated_write;
|
|
+
|
|
+ if (input->width == 0 || input->height == 0) {
|
|
+ input->width = defined_width;
|
|
+ input->height = defined_height;
|
|
+ }
|
|
+ if (input->fps <= 0) {
|
|
+ input->fps = VPU_DDR_QOS_PREDEFINED_FPS;
|
|
+ }
|
|
+
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "estimate_ddr_bandwidth. isEnc=%d, isAFBC=%d, size=(%d, %d), framerate=%d.", input->isEnc, input->isAFBC, input->width, input->height, input->fps);
|
|
+
|
|
+ estimated_read = ((uint64_t)defined_bandwidth_tbl[input->isEnc][input->isAFBC][0] * ( input->width * input->height) * input->fps /(defined_width * defined_height*VPU_DDR_QOS_PREDEFINED_FPS));
|
|
+ estimated_write = ((uint64_t)defined_bandwidth_tbl[input->isEnc][input->isAFBC][1] * ( input->width * input->height) * input->fps /(defined_width * defined_height*VPU_DDR_QOS_PREDEFINED_FPS));
|
|
+ if ((estimated_read + estimated_write) < VPU_DDR_QOS_MIN)
|
|
+ {
|
|
+ estimated_read = VPU_DDR_QOS_MIN/2;
|
|
+ estimated_write = VPU_DDR_QOS_MIN/2;
|
|
+ }
|
|
+
|
|
+ if ((estimated_read + estimated_write) > VPU_DDR_QOS_MAX)
|
|
+ {
|
|
+ estimated_read = VPU_DDR_QOS_MAX/2;
|
|
+ estimated_write = VPU_DDR_QOS_MAX/2;
|
|
+ }
|
|
+
|
|
+ output->estimated_read = estimated_read;
|
|
+ output->estimated_write = estimated_write;
|
|
+}
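+
+/*
+ * Illustrative sketch added for review, not part of the original driver:
+ * an AFBC 3840x2160 decode at 60 fps scales the 1080p table entries by
+ * (3840*2160*60)/(1920*1080*30) = 8, assuming the predefined baseline frame
+ * rate is the 30 fps of the table above, giving read = 45000 * 8 = 360000
+ * and write = 20000 * 8 = 160000 before the VPU_DDR_QOS_MIN/MAX clamping
+ * (those limits are defined elsewhere in this driver).
+ */
+static __maybe_unused void dvfs_example_estimate_4k60_decode(void)
+{
+    struct estimate_ddr_input in = {
+        .width = 3840,
+        .height = 2160,
+        .isAFBC = 1,
+        .fps = 60,
+        .isEnc = 0,
+    };
+    struct estimate_ddr_output out;
+
+    mvx_dvfs_estimate_ddr_bandwidth(&in, &out);
+}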
|
|
+
|
|
+void mvx_dvfs_session_update_ddr_qos(const mvx_session_id session_id, uint32_t read_value, uint32_t write_value)
|
|
+{
|
|
+#if (1 == VPU_DDR_QOS_ENABLE)
|
|
+ struct session *session;
|
|
+ int sem_failed;
|
|
+
|
|
+#if (1 == DVFS_DEBUG_MODE)
|
|
+ if (0 == atomic_read(&ddr_qos_enabled))
|
|
+ {
|
|
+ return;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ sem_failed = down_interruptible(&dvfs_sem);
|
|
+ if (sem_failed)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR,
|
|
+ "DVFS semaphore was not obtained, %d",
|
|
+ sem_failed);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ session = get_session(session_id);
|
|
+ if (NULL != session && (session->ddr_qos_read != read_value || session->ddr_qos_write != write_value)
|
|
+ && mvx_dvfs_ctx.ddr_qos_rsum >= session->ddr_qos_read
|
|
+ && mvx_dvfs_ctx.ddr_qos_wsum >= session->ddr_qos_write)
|
|
+ {
|
|
+ mvx_dvfs_ctx.ddr_qos_rsum -= session->ddr_qos_read;
|
|
+ mvx_dvfs_ctx.ddr_qos_rsum += read_value;
|
|
+ session->ddr_qos_read = read_value;
|
|
+ mvx_dvfs_ctx.ddr_qos_wsum -= session->ddr_qos_write;
|
|
+ mvx_dvfs_ctx.ddr_qos_wsum += write_value;
|
|
+ session->ddr_qos_write = write_value;
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO,
|
|
+ "DVFS update session[%p] ddr qos: [%d, %d]/[%d, %d]", session_id, read_value, write_value, mvx_dvfs_ctx.ddr_qos_rsum, mvx_dvfs_ctx.ddr_qos_wsum);
|
|
+ }
|
|
+
|
|
+ up(&dvfs_sem);
|
|
+#endif
|
|
+}
|
|
+
|
|
+void mvx_dvfs_suspend_session(void)
|
|
+{
|
|
+ int sem_failed;
|
|
+
|
|
+ sem_failed = down_interruptible(&dvfs_sem);
|
|
+ if (sem_failed)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "DVFS semaphore was not obtained, sem_failed=%d", sem_failed);
|
|
+ }
|
|
+
|
|
+ mvx_dvfs_ctx.sched_suspend = true;
|
|
+
|
|
+ if (!sem_failed)
|
|
+ {
|
|
+ up(&dvfs_sem);
|
|
+ }
|
|
+}
|
|
+
|
|
+void mvx_dvfs_resume_session(void)
|
|
+{
|
|
+ int sem_failed;
|
|
+
|
|
+ sem_failed = down_interruptible(&dvfs_sem);
|
|
+ if (sem_failed)
|
|
+ {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "DVFS semaphore was not obtained, sem_failed=%d", sem_failed);
|
|
+ }
|
|
+
|
|
+ mvx_dvfs_ctx.sched_suspend = false;
|
|
+
|
|
+ if (!sem_failed)
|
|
+ {
|
|
+ up(&dvfs_sem);
|
|
+ }
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/mvx_dvfs.h b/drivers/media/platform/spacemit/vpu_k1x/mvx_dvfs.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/mvx_dvfs.h
|
|
@@ -0,0 +1,101 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_DVFS_H_
|
|
+#define _MVX_DVFS_H_
|
|
+
|
|
+typedef void *mvx_session_id;
|
|
+
|
|
+struct estimate_ddr_input
|
|
+{
|
|
+ int width;
|
|
+ int height;
|
|
+ int isAFBC;
|
|
+ int fps;
|
|
+ int isEnc;
|
|
+};
|
|
+
|
|
+struct estimate_ddr_output
|
|
+{
|
|
+ uint64_t estimated_read;
|
|
+ uint64_t estimated_write;
|
|
+};
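+
+/*
+ * Usage sketch added for review, not part of the original header: a typical
+ * caller initializes the module once, registers each session, feeds the DDR
+ * bandwidth estimate back as a QoS hint, and unregisters on teardown.
+ *
+ *    mvx_dvfs_init(dev);
+ *    mvx_dvfs_register_session(session_id, false);
+ *    mvx_dvfs_estimate_ddr_bandwidth(&in, &out);
+ *    mvx_dvfs_session_update_ddr_qos(session_id, out.estimated_read,
+ *                                    out.estimated_write);
+ *    ...
+ *    mvx_dvfs_unregister_session(session_id);
+ *    mvx_dvfs_deinit(dev);
+ */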
|
|
+
|
|
+/**
|
|
+ * Initialize the DVFS module.
|
|
+ *
|
|
+ * Must be called before any other function in this module.
|
|
+ *
|
|
+ * @param dev Device
|
|
+ */
|
|
+void mvx_dvfs_init(struct device *dev);
|
|
+
|
|
+/**
|
|
+ * Deinitialize the DVFS module.
|
|
+ *
|
|
+ * All remaining sessions will be unregistered.
|
|
+ *
|
|
+ * @param dev Device
|
|
+ */
|
|
+void mvx_dvfs_deinit(struct device *dev);
|
|
+
|
|
+/**
|
|
+ * Register a session in the DVFS module.
|
|
+ *
|
|
+ * @param session_id Session id
|
|
+ * @return True when registration was successful,
|
|
+ * False, otherwise
|
|
+ */
|
|
+bool mvx_dvfs_register_session(const mvx_session_id session_id, bool is_encoder);
|
|
+
|
|
+/**
|
|
+ * Unregister a session from the DVFS module.
|
|
+ *
|
|
+ * Usage of corresponding session is not permitted after this call.
|
|
+ * @param session_id Session id
|
|
+ */
|
|
+void mvx_dvfs_unregister_session(const mvx_session_id session_id);
|
|
+
|
|
+void mvx_dvfs_estimate_ddr_bandwidth(struct estimate_ddr_input* input, struct estimate_ddr_output* output);
|
|
+
|
|
+void mvx_dvfs_session_update_ddr_qos(const mvx_session_id session_id, uint32_t read_value, uint32_t write_value);
|
|
+
|
|
+/**
|
|
+ * Suspend the DVFS thread so it stops adjusting the VPU clock while the device is in the suspend state.
|
|
+ */
|
|
+void mvx_dvfs_suspend_session(void);
|
|
+
|
|
+/**
|
|
+ * Resume the DVFS thread so it adjusts the VPU clock again after the device returns from the suspend state.
|
|
+ */
|
|
+void mvx_dvfs_resume_session(void);
|
|
+
|
|
+#endif /* _MVX_DVFS_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/mvx_log.c b/drivers/media/platform/spacemit/vpu_k1x/mvx_log.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/mvx_log.c
|
|
@@ -0,0 +1,931 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/******************************************************************************
|
|
+ * Includes
|
|
+ ******************************************************************************/
|
|
+
|
|
+#include "mvx_log.h"
|
|
+#include "mvx_log_ram.h"
|
|
+
|
|
+#include <linux/uaccess.h>
|
|
+#include <linux/aio.h>
|
|
+#include <linux/debugfs.h>
|
|
+#include <linux/dcache.h>
|
|
+#include <linux/export.h>
|
|
+#include <linux/fs.h>
|
|
+#include <linux/poll.h>
|
|
+#include <linux/namei.h>
|
|
+#include <linux/sched.h>
|
|
+#include <linux/time.h>
|
|
+#include <linux/un.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/vmalloc.h>
|
|
+
|
|
+/******************************************************************************
|
|
+ * Defines
|
|
+ ******************************************************************************/
|
|
+
|
|
+#ifndef UNUSED
|
|
+#define UNUSED(x) (void)(x)
|
|
+#endif /* UNUSED */
|
|
+
|
|
+/******************************************************************************
|
|
+ * Types
|
|
+ ******************************************************************************/
|
|
+
|
|
+/******************************************************************************
|
|
+ * Variables
|
|
+ ******************************************************************************/
|
|
+
|
|
+#ifdef MVX_LOG_FTRACE_ENABLE
|
|
+
|
|
+/**
|
|
+ * Map severity to string.
|
|
+ */
|
|
+static const char *const severity_to_name[] = {
|
|
+ "Panic",
|
|
+ "Error",
|
|
+ "Warning",
|
|
+ "Info",
|
|
+ "Debug",
|
|
+ "Verbose"
|
|
+};
|
|
+#endif /* MVX_LOG_FTRACE_ENABLE */
|
|
+
|
|
+/**
|
|
+ * Map severity to kernel log level.
|
|
+ */
|
|
+static const char *const severity_to_kern_level[] = {
|
|
+ KERN_EMERG,
|
|
+ KERN_ERR,
|
|
+ KERN_WARNING,
|
|
+ KERN_NOTICE,
|
|
+ KERN_INFO,
|
|
+ KERN_DEBUG
|
|
+};
|
|
+
|
|
+/******************************************************************************
|
|
+ * Static functions
|
|
+ ******************************************************************************/
|
|
+
|
|
+/******************************************************************************
|
|
+ * Log
|
|
+ *
|
|
+ * Directory i_node->i_private
|
|
+ * --------------------------------------------------------
|
|
+ * mvx struct mvx_log *
|
|
+ * +-- group
|
|
+ * | +-- <group> struct mvx_log_group *
|
|
+ * | +-- severity
|
|
+ * | +-- drain
|
|
+ * +-- drain
|
|
+ * +-- <drain> struct mvx_log_drain *
|
|
+ *
|
|
+ ******************************************************************************/
|
|
+
|
|
+/**
|
|
+ * trim() - Trim of trailing new line.
|
|
+ * @str: Pointer to string.
|
|
+ */
|
|
+static void trim(char *str)
|
|
+{
|
|
+ size_t len = strlen(str);
|
|
+
|
|
+ while (len-- > 0) {
|
|
+ if (str[len] != '\n')
|
|
+ break;
|
|
+
|
|
+ str[len] = '\0';
|
|
+ }
|
|
+}
|
|
+
|
|
+/**
|
|
+ * lookup() - Search for child dentry with matching name.
|
|
+ * @parent: Pointer to parent dentry.
|
|
+ * @name: Name of dentry to look for.
|
|
+ *
|
|
+ * Return: Pointer to dentry, NULL if not found.
|
|
+ */
|
|
+static struct dentry *lookup(struct dentry *parent,
|
|
+ const char *name)
|
|
+{
|
|
+ struct dentry *child;
|
|
+
|
|
+ /* Loop over directory entries in mvx/drain/. */
|
|
+#if (KERNEL_VERSION(3, 18, 0) <= LINUX_VERSION_CODE)
|
|
+ list_for_each_entry(child, &parent->d_subdirs, d_child)
|
|
+#else
|
|
+ list_for_each_entry(child, &parent->d_subdirs, d_u.d_child)
|
|
+#endif
|
|
+ {
|
|
+ if (strcmp(name, child->d_name.name) == 0)
|
|
+ return child;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * get_inode_private() - Get inode private member of parent directory.
|
|
+ * @file: File pointer.
|
|
+ * @parent: Number of parent directories.
|
|
+ *
|
|
+ * Return: Inode private member, or NULL on error.
|
|
+ */
|
|
+static void *get_inode_private(struct file *file,
|
|
+ int parent)
|
|
+{
|
|
+ struct dentry *d = file->f_path.dentry;
|
|
+
|
|
+ while (d != NULL && parent-- > 0)
|
|
+ d = d->d_parent;
|
|
+
|
|
+ if (d == NULL || d->d_inode == NULL)
|
|
+ return NULL;
|
|
+
|
|
+ return d->d_inode->i_private;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * readme_read() - Read handle function for mvx/group/<group>/drain. The
|
|
+ * function returns the usage instruction message.
|
|
+ * @file: File pointer.
|
|
+ * @user_buffer: The user space buffer that is read to.
|
|
+ * @count: The maximum number of bytes to read.
|
|
+ * @position: The current position in the buffer.
|
|
+ */
|
|
+static ssize_t readme_read(struct file *file,
|
|
+ char __user *user_buffer,
|
|
+ size_t count,
|
|
+ loff_t *position)
|
|
+{
|
|
+ static const char msg[] =
|
|
+ "LOG GROUPS\n"
|
|
+ "\n"
|
|
+ "The avaible log groups can be found under 'group'.\n"
|
|
+ "$ ls group\n"
|
|
+ "\n"
|
|
+ "SEVERITY LEVELS\n"
|
|
+ " 0 - Panic\n"
|
|
+ " 1 - Error\n"
|
|
+ " 2 - Warning\n"
|
|
+ " 3 - Info\n"
|
|
+ " 4 - Debug\n"
|
|
+ " 5 - Verbose\n"
|
|
+ "\n"
|
|
+ "The severity level for a log group can be read and set at runtime.\n"
|
|
+ "$ cat group/general/severity\n"
|
|
+ "$ echo 3 > group/general/severity\n";
|
|
+
|
|
+ return simple_read_from_buffer(user_buffer, count, position, msg,
|
|
+ sizeof(msg));
|
|
+}
|
|
+
|
|
+/**
|
|
+ * group_drain_read() - Read handle function for mvx/group/<group>/drain. The
|
|
+ * function returns the name of the currently configured
|
|
+ * drain.
|
|
+ * @file: File pointer.
|
|
+ * @user_buffer: The user space buffer that is read to.
|
|
+ * @count: The maximum number of bytes to read.
|
|
+ * @position: The current position in the buffer.
|
|
+ */
|
|
+static ssize_t group_drain_read(struct file *file,
|
|
+ char __user *user_buffer,
|
|
+ size_t count,
|
|
+ loff_t *position)
|
|
+{
|
|
+ /* File path mvx/group/<group>/drain. */
|
|
+ struct mvx_log_group *group = get_inode_private(file, 1);
|
|
+ struct mvx_log_drain *drain = group->drain;
|
|
+ char name[100];
|
|
+ size_t len;
|
|
+
|
|
+ if (drain == NULL || drain->dentry == NULL) {
|
|
+ pr_err("MVX: No drain assigned to log group.\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ len = scnprintf(name, sizeof(name), "%s\n", drain->dentry->d_name.name);
|
|
+
|
|
+ return simple_read_from_buffer(user_buffer, count, position, name, len);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * group_drain_write() - Write handle function for mvx/group/<group>/drain. The
|
|
+ * function sets the drain for the group. If the drain
|
|
+ * does not match any registered drain, then error is
|
|
+ * returned to user space.
|
|
+ * @file: File pointer.
|
|
+ * @user_buffer: The user space buffer that is written to.
|
|
+ * @count: The maximum number of bytes to write.
|
|
+ * @position: The current position in the buffer.
|
|
+ */
|
|
+static ssize_t group_drain_write(struct file *file,
|
|
+ const char __user *user_buffer,
|
|
+ size_t count,
|
|
+ loff_t *position)
|
|
+{
|
|
+ /* File path mvx/group/<group>/drain. */
|
|
+ struct mvx_log_group *group = get_inode_private(file, 1);
|
|
+ struct mvx_log *log = get_inode_private(file, 3);
|
|
+ struct dentry *dentry;
|
|
+ char drain_str[100];
|
|
+ ssize_t size;
|
|
+
|
|
+ /* Check that input is not larger that path buffer. */
|
|
+ if (count > (sizeof(drain_str) - 1)) {
|
|
+ pr_err("MVX: Input overflow.\n");
|
|
+
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* Append input to path. */
|
|
+ size = simple_write_to_buffer(drain_str, sizeof(drain_str) - 1,
|
|
+ position, user_buffer, count);
|
|
+ drain_str[count] = '\0';
|
|
+ trim(drain_str);
|
|
+
|
|
+ dentry = lookup(log->drain_dir, drain_str);
|
|
+
|
|
+ if (IS_ERR_OR_NULL(dentry)) {
|
|
+ pr_warn("MVX: No drain matching '%s'.\n", drain_str);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* Assign drain to log group. */
|
|
+ group->drain = dentry->d_inode->i_private;
|
|
+
|
|
+ return size;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * drain_ram_read() - Read the RAM buffer.
|
|
+ * @drain: The RAM buffer drain.
|
|
+ * @user_buffer: The user space buffer that is read to.
|
|
+ * @count: The maximum number of bytes to read.
|
|
+ * @position: The current position in the buffer.
|
|
+ * @pos: The last used position of the drain buffer
|
|
+ */
|
|
+static ssize_t drain_ram_read(struct mvx_log_drain_ram *drain,
|
|
+ char __user *user_buffer,
|
|
+ size_t count,
|
|
+ loff_t *position,
|
|
+ size_t pos)
|
|
+{
|
|
+ ssize_t n = 0;
|
|
+
|
|
+ /* Make sure position is not beyond end of file. */
|
|
+ if (*position > pos)
|
|
+ return -EINVAL;
|
|
+
|
|
+ /* If position is more than BUFFER_SIZE bytes behind, then fast forward
|
|
+ * to current position minus BUFFER_SIZE.
|
|
+ */
|
|
+ if ((pos - *position) > drain->buffer_size)
|
|
+ *position = pos - drain->buffer_size;
|
|
+
|
|
+ /* Copy data to user space. */
|
|
+ while ((n < count) && (*position < pos)) {
|
|
+ size_t offset;
|
|
+ size_t length;
|
|
+
|
|
+ /* Offset in circular buffer. */
|
|
+ offset = *position & (drain->buffer_size - 1);
|
|
+
|
|
+ /* Available number of bytes. */
|
|
+ length = min((size_t)(pos - *position), count - n);
|
|
+
|
|
+ /* Make sure length does not go beyond end of circular buffer.
|
|
+ */
|
|
+ length = min(length, drain->buffer_size - offset);
|
|
+
|
|
+ /* Copy data from kernel- to user space. */
|
|
+ length -= copy_to_user(&user_buffer[n], &drain->buf[offset],
|
|
+ length);
|
|
+
|
|
+ /* No bytes were copied. Return error. */
|
|
+ if (length == 0)
|
|
+ return -EINVAL;
|
|
+
|
|
+ *position += length;
|
|
+ n += length;
|
|
+ }
|
|
+
|
|
+ return n;
|
|
+}
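+
+/*
+ * Reviewer note (not part of the original driver): the masking above relies
+ * on the RAM drain buffer size being a power of two, so that
+ * "pos & (buffer_size - 1)" is equivalent to "pos % buffer_size". For
+ * example, with buffer_size = 0x10000 (64 KiB) a file position of 0x12345
+ * maps to offset 0x2345 in the circular buffer.
+ */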
|
|
+
|
|
+/**
|
|
+ * drain_ram_read_msg() - Read of the RAM file.
|
|
+ * @file: File pointer.
|
|
+ * @user_buffer: The user space buffer that is read to.
|
|
+ * @count: The maximum number of bytes to read.
|
|
+ * @position: The current position in the buffer.
|
|
+ */
|
|
+static ssize_t drain_ram_read_msg(struct file *file,
|
|
+ char __user *user_buffer,
|
|
+ size_t count,
|
|
+ loff_t *position)
|
|
+{
|
|
+ struct mvx_log_drain_ram *drain = get_inode_private(file, 1);
|
|
+
|
|
+ while (*position == drain->write_pos) {
|
|
+ int ret;
|
|
+
|
|
+ if (file->f_flags & O_NONBLOCK)
|
|
+ return -EAGAIN;
|
|
+
|
|
+ /* Block until there is data available. */
|
|
+ ret = wait_event_interruptible(drain->queue,
|
|
+ *position < drain->write_pos);
|
|
+ if (ret != 0)
|
|
+ return -EINTR;
|
|
+ }
|
|
+
|
|
+ return drain_ram_read(drain, user_buffer, count, position,
|
|
+ drain->write_pos);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * drain_ram_msg_poll() - Handle poll.
|
|
+ * @file: File pointer.
|
|
+ * @wait: The poll table to which the wait queue is added.
|
|
+ */
|
|
+static unsigned int drain_ram_msg_poll(struct file *file,
|
|
+ poll_table *wait)
|
|
+{
|
|
+ unsigned int mask = 0;
|
|
+ struct mvx_log_drain_ram *drain = get_inode_private(file, 1);
|
|
+
|
|
+ poll_wait(file, &drain->queue, wait);
|
|
+
|
|
+ if (file->f_pos < drain->write_pos)
|
|
+ mask |= POLLIN | POLLRDNORM;
|
|
+ else if (file->f_pos > drain->write_pos)
|
|
+ mask |= POLLERR;
|
|
+
|
|
+ return mask;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * drain_ram_ioctl() - Handle IOCTL.
|
|
+ * @file: File pointer.
|
|
+ * @cmd: The value of the command to be handled.
|
|
+ * @arg: Extra argument.
|
|
+ */
|
|
+static long drain_ram_ioctl(struct file *file,
|
|
+ unsigned int cmd,
|
|
+ unsigned long arg)
|
|
+{
|
|
+ struct mvx_log_drain_ram *drain_ram = get_inode_private(file, 1);
|
|
+
|
|
+ switch (cmd) {
|
|
+ case MVX_LOG_IOCTL_CLEAR:
|
|
+ drain_ram->read_pos = drain_ram->write_pos;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * drain_ram_open() - Open file handle function.
|
|
+ * @inode: The inode associated with the file.
|
|
+ * @file: Pointer to the opened file.
|
|
+ *
|
|
+ * Return: 0 Always succeeds.
|
|
+ */
|
|
+static int drain_ram_open(struct inode *inode,
|
|
+ struct file *file)
|
|
+{
|
|
+ struct mvx_log_drain_ram *drain_ram = get_inode_private(file, 1);
|
|
+
|
|
+ file->f_pos = drain_ram->read_pos;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/******************************************************************************
|
|
+ * External interface
|
|
+ ******************************************************************************/
|
|
+
|
|
+int mvx_log_construct(struct mvx_log *log,
|
|
+ const char *entry_name)
|
|
+{
|
|
+ int ret;
|
|
+ static const struct file_operations readme_fops = {
|
|
+ .read = readme_read
|
|
+ };
|
|
+ struct dentry *dentry;
|
|
+
|
|
+ if (!IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ pr_info(
|
|
+ "MVX: Debugfs is not enabled. '%s' dir is not created.\n",
|
|
+ entry_name);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ log->mvx_dir = debugfs_create_dir(entry_name, NULL);
|
|
+ if (IS_ERR_OR_NULL(log->mvx_dir)) {
|
|
+ pr_err("MVX: Failed to create '%s' dir.\n", entry_name);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ log->log_dir = debugfs_create_dir("log", log->mvx_dir);
|
|
+ if (IS_ERR_OR_NULL(log->log_dir)) {
|
|
+ pr_err("MVX: Failed to create 'log' dir.\n");
|
|
+ ret = -ENOMEM;
|
|
+ goto error;
|
|
+ }
|
|
+
|
|
+ log->log_dir->d_inode->i_private = log;
|
|
+
|
|
+ log->drain_dir = debugfs_create_dir("drain", log->log_dir);
|
|
+ if (IS_ERR_OR_NULL(log->drain_dir)) {
|
|
+ pr_err("MVX: Failed to create 'drain' dir.\n");
|
|
+ ret = -ENOMEM;
|
|
+ goto error;
|
|
+ }
|
|
+
|
|
+ log->group_dir = debugfs_create_dir("group", log->log_dir);
|
|
+ if (IS_ERR_OR_NULL(log->group_dir)) {
|
|
+ pr_err("MVX: Failed to create 'group' dir.\n");
|
|
+ ret = -ENOMEM;
|
|
+ goto error;
|
|
+ }
|
|
+
|
|
+ /* Create <group>/drain. */
|
|
+ dentry = debugfs_create_file("README", 0400, log->log_dir, NULL,
|
|
+ &readme_fops);
|
|
+ if (IS_ERR_OR_NULL(dentry)) {
|
|
+ pr_err("MVX: Failed to create 'README'.\n");
|
|
+ ret = -ENOMEM;
|
|
+ goto error;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+error:
|
|
+ debugfs_remove_recursive(log->mvx_dir);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void mvx_log_destruct(struct mvx_log *log)
|
|
+{
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ debugfs_remove_recursive(log->mvx_dir);
|
|
+}
|
|
+
|
|
+/******************************************************************************
|
|
+ * Log Drain
|
|
+ ******************************************************************************/
|
|
+
|
|
+static int drain_construct(struct mvx_log_drain *drain,
|
|
+ mvx_print_fptr print,
|
|
+ mvx_data_fptr data)
|
|
+{
|
|
+ drain->print = print;
|
|
+ drain->data = data;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void drain_destruct(struct mvx_log_drain *drain)
|
|
+{
|
|
+ UNUSED(drain);
|
|
+}
|
|
+
|
|
+static void drain_dmesg_print(struct mvx_log_drain *drain,
|
|
+ enum mvx_log_severity severity,
|
|
+ const char *tag,
|
|
+ const char *msg,
|
|
+ const unsigned int n_args,
|
|
+ ...)
|
|
+{
|
|
+ va_list args;
|
|
+ char fmt[500];
|
|
+
|
|
+ severity = min_t(int, severity, MVX_LOG_VERBOSE);
|
|
+
|
|
+ snprintf(fmt, sizeof(fmt), "%s%s: %s\n",
|
|
+ severity_to_kern_level[severity], tag, msg);
|
|
+ fmt[sizeof(fmt) - 1] = '\0';
|
|
+
|
|
+ va_start(args, n_args);
|
|
+ vprintk(fmt, args);
|
|
+ va_end(args);
|
|
+}
|
|
+
|
|
+static void drain_dmesg_data(struct mvx_log_drain *drain,
|
|
+ enum mvx_log_severity severity,
|
|
+ struct iovec *vec,
|
|
+ size_t count)
|
|
+{
|
|
+ size_t i;
|
|
+
|
|
+ pr_info("count=%zu\n", count);
|
|
+
|
|
+ for (i = 0; i < count; ++i) {
|
|
+ const char *p = vec[i].iov_base;
|
|
+ size_t length = vec[i].iov_len;
|
|
+
|
|
+ pr_info(" length=%zu\n", length);
|
|
+
|
|
+ while (length > 0) {
|
|
+ size_t j = min_t(size_t, length, 32);
|
|
+ size_t buf_size = 3 + j * 3 + 1;
|
|
+ char buf[3 + 32 * 3 + 1];
|
|
+ size_t n = 0;
|
|
+
|
|
+ length -= j;
|
|
+
|
|
+ n += scnprintf(&buf[n], (buf_size) - n, " ");
|
|
+
|
|
+ while (j-- > 0)
|
|
+ n += scnprintf(&buf[n], (buf_size) - n,
|
|
+ " %02x", *p++);
|
|
+
|
|
+ pr_info("%s\n", buf);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+int mvx_log_drain_dmesg_construct(struct mvx_log_drain *drain)
|
|
+{
|
|
+ return drain_construct(drain, drain_dmesg_print, drain_dmesg_data);
|
|
+}
|
|
+
|
|
+void mvx_log_drain_dmesg_destruct(struct mvx_log_drain *drain)
|
|
+{
|
|
+ drain_destruct(drain);
|
|
+}
|
|
+
|
|
+int mvx_log_drain_add(struct mvx_log *log,
|
|
+ const char *name,
|
|
+ struct mvx_log_drain *drain)
|
|
+{
|
|
+ if (!IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ pr_info(
|
|
+ "MVX: Debugfs is not enabled. '%s' dir is not created.\n",
|
|
+ name);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* Create <drain> directory. */
|
|
+ drain->dentry = debugfs_create_dir(name, log->drain_dir);
|
|
+ if (IS_ERR_OR_NULL(drain->dentry)) {
|
|
+ pr_err("MVX: Failed to create '%s' dir.\n", name);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ /* Store pointer to drain object in inode private data. */
|
|
+ drain->dentry->d_inode->i_private = drain;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void drain_ram_data(struct mvx_log_drain *drain,
|
|
+ enum mvx_log_severity severity,
|
|
+ struct iovec *vec,
|
|
+ size_t count)
|
|
+{
|
|
+ struct mvx_log_drain_ram *drain_ram =
|
|
+ (struct mvx_log_drain_ram *)drain;
|
|
+ size_t i;
|
|
+ size_t length;
|
|
+ size_t pos;
|
|
+ int sem_taken;
|
|
+
|
|
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ return;
|
|
+
|
|
+ /* Calculate the total length of the output. */
|
|
+ for (i = 0, length = 0; i < count; ++i)
|
|
+ length += vec[i].iov_len;
|
|
+
|
|
+ /* Round up to next 32-bit boundary. */
|
|
+ length = (length + 3) & ~3;
|
|
+
|
|
+ if (length > drain_ram->buffer_size) {
|
|
+ pr_err(
|
|
+ "MVX: Logged data larger than output buffer. length=%zu, buffer_length=%zu.\n",
|
|
+ length,
|
|
+ (size_t)drain_ram->buffer_size);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ sem_taken = down_interruptible(&drain_ram->sem);
|
|
+
|
|
+ pos = drain_ram->write_pos & (drain_ram->buffer_size - 1);
|
|
+
|
|
+ /* Loop over scatter input. */
|
|
+ for (i = 0; i < count; ++i) {
|
|
+ const char *buf = vec[i].iov_base;
|
|
+ size_t len = vec[i].iov_len;
|
|
+
|
|
+ /* Copy log message to output buffer. */
|
|
+ while (len > 0) {
|
|
+ size_t n = min(len, drain_ram->buffer_size - pos);
|
|
+
|
|
+ memcpy(&drain_ram->buf[pos], buf, n);
|
|
+
|
|
+ len -= n;
|
|
+ buf += n;
|
|
+ pos = (pos + n) & (drain_ram->buffer_size - 1);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Update write_pos. Length has already been 4 byte aligned */
|
|
+ drain_ram->write_pos += length;
|
|
+
|
|
+ if (sem_taken == 0)
|
|
+ up(&drain_ram->sem);
|
|
+
|
|
+ wake_up_interruptible(&drain_ram->queue);
|
|
+}
|
|
+
|
|
+static void drain_ram_print(struct mvx_log_drain *drain,
|
|
+ enum mvx_log_severity severity,
|
|
+ const char *tag,
|
|
+ const char *msg,
|
|
+ const unsigned int n_args,
|
|
+ ...)
|
|
+{
|
|
+ char buf[500];
|
|
+ va_list args;
|
|
+ size_t n = 0;
|
|
+ struct mvx_log_header header;
|
|
+ struct iovec vec[2];
|
|
+ struct timespec64 timespec;
|
|
+
|
|
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ return;
|
|
+
|
|
+ /* Write the log message. */
|
|
+ va_start(args, n_args);
|
|
+ n += vscnprintf(buf, sizeof(buf), msg, args);
|
|
+ va_end(args);
|
|
+
|
|
+ ktime_get_real_ts64(×pec);
|
|
+
|
|
+ header.magic = MVX_LOG_MAGIC;
|
|
+ header.length = n;
|
|
+ header.type = MVX_LOG_TYPE_TEXT;
|
|
+ header.severity = severity;
|
|
+ header.timestamp.sec = timespec.tv_sec;
|
|
+ header.timestamp.nsec = timespec.tv_nsec;
|
|
+
|
|
+ vec[0].iov_base = &header;
|
|
+ vec[0].iov_len = sizeof(header);
|
|
+
|
|
+ vec[1].iov_base = buf;
|
|
+ vec[1].iov_len = n;
|
|
+
|
|
+ drain_ram_data(drain, severity, vec, 2);
|
|
+}
|
|
+
|
|
+int mvx_log_drain_ram_construct(struct mvx_log_drain_ram *drain,
|
|
+ size_t buffer_size)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = drain_construct(&drain->base, drain_ram_print, drain_ram_data);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ if (!IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ pr_info("MVX: Debugfs is not enabled. RAM drain buffer is not allocated.\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ drain->buf = vmalloc(buffer_size);
|
|
+ if (drain->buf == NULL)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ *(size_t *) &drain->buffer_size = buffer_size;
|
|
+ drain->read_pos = 0;
|
|
+ drain->write_pos = 0;
|
|
+ init_waitqueue_head(&drain->queue);
|
|
+ sema_init(&drain->sem, 1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mvx_log_drain_ram_destruct(struct mvx_log_drain_ram *drain)
|
|
+{
|
|
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
|
|
+ vfree(drain->buf);
|
|
+
|
|
+ drain_destruct(&drain->base);
|
|
+}
|
|
+
|
|
+int mvx_log_drain_ram_add(struct mvx_log *log,
|
|
+ const char *name,
|
|
+ struct mvx_log_drain_ram *drain)
|
|
+{
|
|
+ static const struct file_operations drain_ram_msg = {
|
|
+ .read = drain_ram_read_msg,
|
|
+ .poll = drain_ram_msg_poll,
|
|
+ .open = drain_ram_open,
|
|
+ .unlocked_ioctl = drain_ram_ioctl
|
|
+ };
|
|
+ struct dentry *dentry;
|
|
+ int ret;
|
|
+
|
|
+ if (!IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ pr_info(
|
|
+ "MVX: Debugfs is not enabled. RAM drain dirs are not created.\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ ret = mvx_log_drain_add(log, name, &drain->base);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ /* Create dentry. */
|
|
+ dentry = debugfs_create_file("msg", 0600, drain->base.dentry, NULL,
|
|
+ &drain_ram_msg);
|
|
+ if (IS_ERR_OR_NULL(dentry)) {
|
|
+ pr_err("MVX: Failed to create '%s/msg'.\n", name);
|
|
+ ret = -ENOMEM;
|
|
+ goto error;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+error:
|
|
+ debugfs_remove_recursive(drain->base.dentry);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+#ifdef MVX_LOG_FTRACE_ENABLE
|
|
+static void drain_ftrace_print(struct mvx_log_drain *drain,
|
|
+ enum mvx_log_severity severity,
|
|
+ const char *tag,
|
|
+ const char *msg,
|
|
+ const unsigned int n_args,
|
|
+ ...)
|
|
+{
|
|
+ va_list args;
|
|
+ char fmt[500];
|
|
+
|
|
+ severity = min_t(int, severity, MVX_LOG_VERBOSE);
|
|
+
|
|
+ snprintf(fmt, sizeof(fmt), "%s %s: %s\n", severity_to_name[severity],
|
|
+ tag, msg);
|
|
+ fmt[sizeof(fmt) - 1] = '\0';
|
|
+
|
|
+ va_start(args, n_args);
|
|
+ ftrace_vprintk(fmt, args);
|
|
+ va_end(args);
|
|
+}
|
|
+
|
|
+static void drain_ftrace_data(struct mvx_log_drain *drain,
|
|
+ enum mvx_log_severity severity,
|
|
+ struct iovec *vec,
|
|
+ size_t count)
|
|
+{
|
|
+ size_t i;
|
|
+
|
|
+ trace_printk("count=%zu\n", count);
|
|
+
|
|
+ for (i = 0; i < count; ++i) {
|
|
+ const char *p = vec[i].iov_base;
|
|
+ size_t length = vec[i].iov_len;
|
|
+
|
|
+ trace_printk(" length=%zu\n", length);
|
|
+
|
|
+ while (length > 0) {
|
|
+ size_t j = min_t(size_t, length, 32);
|
|
+ char buf[3 + j * 3 + 1];
|
|
+ size_t n = 0;
|
|
+
|
|
+ length -= j;
|
|
+
|
|
+ n += scnprintf(&buf[n], sizeof(buf) - n, " ");
|
|
+
|
|
+ while (j-- > 0)
|
|
+ n += scnprintf(&buf[n], sizeof(buf) - n,
|
|
+ " %02x", *p++);
|
|
+
|
|
+ trace_printk("%s\n", buf);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+int mvx_log_drain_ftrace_construct(struct mvx_log_drain *drain)
|
|
+{
|
|
+ return drain_construct(drain, drain_ftrace_print, drain_ftrace_data);
|
|
+}
|
|
+
|
|
+void mvx_log_drain_ftrace_destruct(struct mvx_log_drain *drain)
|
|
+{
|
|
+ drain_destruct(drain);
|
|
+}
|
|
+
|
|
+#endif /* MVX_LOG_FTRACE_ENABLE */
|
|
+
|
|
+/******************************************************************************
|
|
+ * Log Group
|
|
+ ******************************************************************************/
|
|
+
|
|
+void mvx_log_group_construct(struct mvx_log_group *group,
|
|
+ const char *tag,
|
|
+ const enum mvx_log_severity severity,
|
|
+ struct mvx_log_drain *drain)
|
|
+{
|
|
+ group->tag = tag;
|
|
+ group->severity = severity;
|
|
+ group->drain = drain;
|
|
+}
|
|
+
|
|
+int mvx_log_group_add(struct mvx_log *log,
|
|
+ const char *name,
|
|
+ struct mvx_log_group *group)
|
|
+{
|
|
+ static const struct file_operations group_drain_fops = {
|
|
+ .read = group_drain_read,
|
|
+ .write = group_drain_write
|
|
+ };
|
|
+ struct dentry *dentry;
|
|
+ int ret;
|
|
+
|
|
+ if (!IS_ENABLED(CONFIG_DEBUG_FS)) {
|
|
+ pr_info(
|
|
+ "MVX: Debugfs is not enabled. '%s' dir is not created.\n",
|
|
+ name);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* Create <group> directory. */
|
|
+ group->dentry = debugfs_create_dir(name, log->group_dir);
|
|
+ if (IS_ERR_OR_NULL(group->dentry)) {
|
|
+ pr_err("MVX: Failed to create '%s' dir.\n", name);
|
|
+ ret = -ENOMEM;
|
|
+ goto error;
|
|
+ }
|
|
+
|
|
+ /* Store reference to group object in inode private data. */
|
|
+ group->dentry->d_inode->i_private = group;
|
|
+
|
|
+ /* Create <group>/severity. */
|
|
+ debugfs_create_u32("severity", 0600, group->dentry, &group->severity);
|
|
+
|
|
+ /* Create <group>/drain. */
|
|
+ dentry = debugfs_create_file("drain", 0600, group->dentry, NULL, &group_drain_fops);
|
|
+ if (IS_ERR_OR_NULL(dentry)) {
|
|
+ pr_err("MVX: Failed to create '%s/drain' file.\n", name);
|
|
+ ret = -ENOMEM;
|
|
+ goto error;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+error:
|
|
+ mvx_log_group_destruct(group);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void mvx_log_group_destruct(struct mvx_log_group *group)
|
|
+{
|
|
+ UNUSED(group);
|
|
+}
|
|
+
|
|
+const char *mvx_log_strrchr(const char *s)
|
|
+{
|
|
+ const char *p = strrchr(s, '/');
|
|
+
|
|
+ return (p == NULL) ? s : p + 1;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/mvx_log.h b/drivers/media/platform/spacemit/vpu_k1x/mvx_log.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/mvx_log.h
|
|
@@ -0,0 +1,386 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef MVX_LOG_H
|
|
+#define MVX_LOG_H
|
|
+
|
|
+/******************************************************************************
|
|
+ * Includes
|
|
+ ******************************************************************************/
|
|
+
|
|
+#include <linux/net.h>
|
|
+#include <linux/semaphore.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/uio.h>
|
|
+#include <linux/wait.h>
|
|
+
|
|
+/******************************************************************************
|
|
+ * Defines
|
|
+ ******************************************************************************/
|
|
+
|
|
+/**
|
|
+ * Print a log message.
|
|
+ *
|
|
+ * @_lg: Pointer to log group.
|
|
+ * @_severity: Severity.
|
|
+ * @_fmt: Format string.
|
|
+ */
|
|
+#define MVX_LOG_PRINT(_lg, _severity, _fmt, ...) \
|
|
+ do { \
|
|
+ if ((_severity) <= (_lg)->severity) { \
|
|
+ __MVX_LOG_PRINT(_lg, _severity, _fmt, ## __VA_ARGS__); \
|
|
+ } \
|
|
+ } while (0)
|
|
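+
+/*
+ * Usage sketch (illustrative only; the message and its argument are
+ * hypothetical, the group is one of those declared in mvx_log_group.h):
+ *
+ *   MVX_LOG_PRINT(&mvx_log_session_if, MVX_LOG_INFO,
+ *                 "buffer queued. index=%u", index);
+ *
+ * The severity test is performed by the macro itself, so the drain's print
+ * callback is only invoked when the group's severity level allows it.
+ */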
+
|
|
+/**
|
|
+ * Print a log message for a session.
|
|
+ *
|
|
+ * @_lg: Pointer to log group.
|
|
+ * @_severity: Severity.
|
|
+ * @_session: Pointer to session.
|
|
+ * @_fmt: Format string.
|
|
+ */
|
|
+#define MVX_LOG_PRINT_SESSION(_lg, _severity, _session, _fmt, ...) \
|
|
+ do { \
|
|
+ if ((_severity) <= (_lg)->severity) { \
|
|
+ __MVX_LOG_PRINT(_lg, _severity, "%px " _fmt, _session, \
|
|
+ ## __VA_ARGS__); \
|
|
+ } \
|
|
+ } while (0)
|
|
+
|
|
+/**
|
|
+ * Print binary data.
|
|
+ *
|
|
+ * @_lg: Pointer to log group.
|
|
+ * @_severity: Severity.
|
|
+ * @_vec: Scatter input vector data.
|
|
+ * @_count: _vec array size.
|
|
+ */
|
|
+#define MVX_LOG_DATA(_lg, _severity, _vec, _count) \
|
|
+ do { \
|
|
+ if ((_severity) <= (_lg)->severity) { \
|
|
+ (_lg)->drain->data((_lg)->drain, _severity, _vec, \
|
|
+ _count); \
|
|
+ } \
|
|
+ } while (0)
|
|
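+
+/*
+ * Usage sketch (illustrative only; the payload pointer and size are
+ * hypothetical):
+ *
+ *   struct iovec vec[1] = { { .iov_base = payload, .iov_len = size } };
+ *
+ *   MVX_LOG_DATA(&mvx_log_fwif_if, MVX_LOG_INFO, vec, 1);
+ */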
+
|
|
+/**
|
|
+ * Check if severity level for log group is enabled.
|
|
+ *
|
|
+ * @_lg: Pointer to log group.
|
|
+ * @_severity: Severity.
|
|
+ */
|
|
+#define MVX_LOG_ENABLED(_lg, _severity) \
|
|
+ ((_severity) <= (_lg)->severity)
|
|
+
|
|
+/**
|
|
+ * Execute function if log group is enabled.
|
|
+ *
|
|
+ * @_lg: Pointer to log group.
|
|
+ * @_severity: Severity.
|
|
+ * @_exec: The function to be executed.
|
|
+ */
|
|
+#define MVX_LOG_EXECUTE(_lg, _severity, _exec) \
|
|
+ do { \
|
|
+ if (MVX_LOG_ENABLED(_lg, _severity)) { \
|
|
+ _exec; \
|
|
+ } \
|
|
+ } while (0)
|
|
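+
+/*
+ * Usage sketch (illustrative only; dump_state() is a hypothetical helper):
+ *
+ *   MVX_LOG_EXECUTE(&mvx_log_if, MVX_LOG_DEBUG, dump_state(session));
+ *
+ * The expression is only evaluated when the group's severity permits it.
+ */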
+
|
|
+#ifdef MVX_LOG_PRINT_FILE_ENABLE
|
|
+#define __MVX_LOG_PRINT(_lg, _severity, _fmt, ...) \
|
|
+ ((_lg)->drain->print((_lg)->drain, _severity, (_lg)->tag, \
|
|
+ _fmt " (%s:%d)", \
|
|
+ __MVX_LOG_N_ARGS(__VA_ARGS__), \
|
|
+ ## __VA_ARGS__, \
|
|
+ mvx_log_strrchr(__FILE__), __LINE__))
|
|
+#else
|
|
+#define __MVX_LOG_PRINT(_lg, _severity, _fmt, ...) \
|
|
+ ((_lg)->drain->print((_lg)->drain, _severity, (_lg)->tag, _fmt, \
|
|
+ __MVX_LOG_N_ARGS(__VA_ARGS__), \
|
|
+ ## __VA_ARGS__))
|
|
+#endif /* MVX_LOG_PRINT_FILE_ENABLE */
|
|
+
|
|
+#define __MVX_LOG_N_ARGS(...) \
|
|
+ __MVX_LOG_COUNT(dummy, ## __VA_ARGS__, 8, 7, 6, 5, 4, 3, 2, 1, 0)
|
|
+
|
|
+#define __MVX_LOG_COUNT(_0, _1, _2, _3, _4, _5, _6, _7, _8, N, ...) N
|
|
+
|
|
+/******************************************************************************
|
|
+ * Types
|
|
+ ******************************************************************************/
|
|
+
|
|
+/**
|
|
+ * enum mvx_log_severity - Severity levels.
|
|
+ */
|
|
+enum mvx_log_severity {
|
|
+ MVX_LOG_PANIC,
|
|
+ MVX_LOG_ERROR,
|
|
+ MVX_LOG_WARNING,
|
|
+ MVX_LOG_INFO,
|
|
+ MVX_LOG_DEBUG,
|
|
+ MVX_LOG_VERBOSE,
|
|
+ MVX_LOG_MAX
|
|
+};
|
|
+
|
|
+struct mvx_log_drain;
|
|
+
|
|
+/**
|
|
+ * mvx_print_fptr() - Function pointer to output text messages.
|
|
+ *
|
|
+ * @drain: Pointer to drain.
|
|
+ * @severity: Severity level.
|
|
+ * @tag: Log group tag.
|
|
+ * @fmt: Format string.
|
|
+ * @n_args: Number of arguments to format string.
|
|
+ */
|
|
+typedef void (*mvx_print_fptr)(struct mvx_log_drain *drain,
|
|
+ enum mvx_log_severity severity,
|
|
+ const char *tag,
|
|
+ const char *fmt,
|
|
+ const unsigned int n_args,
|
|
+ ...);
|
|
+
|
|
+/**
|
|
+ * mvx_data_fptr() - Function pointer to output binary data.
|
|
+ *
|
|
+ * @drain: Pointer to drain.
|
|
+ * @severity: Severity level.
|
|
+ * @vec: Pointer to the buffers that are copied.
|
|
+ * @count: The number of vec buffers.
|
|
+ */
|
|
+typedef void (*mvx_data_fptr)(struct mvx_log_drain *drain,
|
|
+ enum mvx_log_severity severity,
|
|
+ struct iovec *vec,
|
|
+ size_t count);
|
|
+
|
|
+/**
|
|
+ * struct mvx_log_drain - Structure with information about the drain. The drain
|
|
+ * handles the formatting and redirection of the log
|
|
+ * messages.
|
|
+ * @print: Print function pointer.
|
|
+ * @data: Data function pointer.
|
|
+ * @dentry: Debugfs dentry.
|
|
+ */
|
|
+struct mvx_log_drain {
|
|
+ mvx_print_fptr print;
|
|
+ mvx_data_fptr data;
|
|
+ struct dentry *dentry;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_log_drain_ram - Structure describing a specialized RAM drain.
|
|
+ * @base: Base class.
|
|
+ * @buf: Pointer to output buffer.
|
|
+ * @buffer_size: Size of the buffer. Must be power of 2.
|
|
+ * @read_pos: Read position when a new file handle is opened. Is
|
|
+ * updated when the buffer is cleared.
|
|
+ * @write_pos: Current write position in RAM buffer.
|
|
+ * @queue: Wait queue for blocking IO.
|
|
+ * @sem: Semaphore to prevent concurrent writes.
|
|
+ */
|
|
+struct mvx_log_drain_ram {
|
|
+ struct mvx_log_drain base;
|
|
+ char *buf;
|
|
+ const size_t buffer_size;
|
|
+ size_t read_pos;
|
|
+ size_t write_pos;
|
|
+ wait_queue_head_t queue;
|
|
+ struct semaphore sem;
|
|
+};
|
|
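+
+/*
+ * write_pos is a monotonically increasing byte counter; the write path maps
+ * it to a buffer index by masking with (buffer_size - 1), which is why
+ * buffer_size must be a power of two. A minimal sketch of that mapping:
+ *
+ *   size_t index = drain_ram->write_pos & (drain_ram->buffer_size - 1);
+ */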
+
|
|
+/**
|
|
+ * struct mvx_log_group - Structure describing log group. The log group filters
|
|
+ * which log messages that shall be forwarded to the
|
|
+ * drain.
|
|
+ * @tag: Name of log group.
|
|
+ * @severity: Severity level.
|
|
+ * @drain: Drain.
|
|
+ * @dentry: Debugfs dentry.
|
|
+ */
|
|
+struct mvx_log_group {
|
|
+ const char *tag;
|
|
+ enum mvx_log_severity severity;
|
|
+ struct mvx_log_drain *drain;
|
|
+ struct dentry *dentry;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_log - Log class that keeps track of registered groups and drains.
|
|
+ */
|
|
+struct mvx_log {
|
|
+ struct dentry *mvx_dir;
|
|
+ struct dentry *log_dir;
|
|
+ struct dentry *drain_dir;
|
|
+ struct dentry *group_dir;
|
|
+};
|
|
+
|
|
+/****************************************************************************
|
|
+ * Log
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_log_construct() - Log constructor.
|
|
+ * @log: Pointer to log.
|
|
+ * @entry_name: The name of the directory
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_log_construct(struct mvx_log *log,
|
|
+ const char *entry_name);
|
|
+
|
|
+/**
|
|
+ * mvx_log_destruct() - Log destructor.
|
|
+ * @log: Pointer to log.
|
|
+ */
|
|
+void mvx_log_destruct(struct mvx_log *log);
|
|
+
|
|
+/****************************************************************************
|
|
+ * Drain
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_log_drain_dmesg_construct() - Dmesg drain constructor.
|
|
+ * @drain: Pointer to drain.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_log_drain_dmesg_construct(struct mvx_log_drain *drain);
|
|
+
|
|
+/**
|
|
+ * mvx_log_drain_dmesg_destruct() - Dmesg drain destructor.
|
|
+ * @drain: Pointer to drain.
|
|
+ */
|
|
+void mvx_log_drain_dmesg_destruct(struct mvx_log_drain *drain);
|
|
+
|
|
+/**
|
|
+ * mvx_log_drain_add() - Add drain to log.
|
|
+ * @log: Pointer to log.
|
|
+ * @name: Name of drain.
|
|
+ * @drain: Pointer to drain.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_log_drain_add(struct mvx_log *log,
|
|
+ const char *name,
|
|
+ struct mvx_log_drain *drain);
|
|
+
|
|
+/**
|
|
+ * mvx_log_drain_ram_construct() - RAM drain constructor.
|
|
+ * @drain: Pointer to drain.
|
|
+ * @buffer_size: The size of the RAM drain buffer.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_log_drain_ram_construct(struct mvx_log_drain_ram *drain,
|
|
+ size_t buffer_size);
|
|
+
|
|
+/**
|
|
+ * mvx_log_drain_ram_destruct() - RAM drain destructor.
|
|
+ * @drain: Pointer to drain.
|
|
+ */
|
|
+void mvx_log_drain_ram_destruct(struct mvx_log_drain_ram *drain);
|
|
+
|
|
+/**
|
|
+ * mvx_log_drain_ram_add() - Derived function to add RAM drain to log.
|
|
+ * @log: Pointer to log.
|
|
+ * @name: Name of drain.
|
|
+ * @drain: Pointer to drain.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_log_drain_ram_add(struct mvx_log *log,
|
|
+ const char *name,
|
|
+ struct mvx_log_drain_ram *drain);
|
|
+
|
|
+#ifdef MVX_LOG_FTRACE_ENABLE
|
|
+
|
|
+/**
|
|
+ * mvx_log_drain_ftrace_construct() - Ftrace drain constructor.
|
|
+ * @drain: Pointer to drain.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_log_drain_ftrace_construct(struct mvx_log_drain *drain);
|
|
+
|
|
+/**
|
|
+ * mvx_log_drain_ftrace_destruct() - Ftrace drain destructor.
|
|
+ * @drain: Pointer to drain.
|
|
+ */
|
|
+void mvx_log_drain_ftrace_destruct(struct mvx_log_drain *drain);
|
|
+
|
|
+#endif /* MVX_LOG_FTRACE_ENABLE */
|
|
+
|
|
+/****************************************************************************
|
|
+ * Group
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_log_group_construct() - Group constructor.
|
|
+ * @group: Pointer to group.
|
|
+ * @tag: Name of the group, to be used in log messages.
|
|
+ * @severity: Minimum severity to output log message.
|
|
+ * @drain: Pointer to drain.
|
|
+ */
|
|
+void mvx_log_group_construct(struct mvx_log_group *group,
|
|
+ const char *tag,
|
|
+ const enum mvx_log_severity severity,
|
|
+ struct mvx_log_drain *drain);
|
|
+
|
|
+/**
|
|
+ * mvx_log_group_add() - Add a group with given name to log.
|
|
+ * @log: Pointer to log.
|
|
+ * @name: Name of group.
|
|
+ * @group: Pointer to group.
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_log_group_add(struct mvx_log *log,
|
|
+ const char *name,
|
|
+ struct mvx_log_group *group);
|
|
+
|
|
+/**
|
|
+ * mvx_log_group_destruct() - Group destructor.
|
|
+ * @group: Pointer to group.
|
|
+ */
|
|
+void mvx_log_group_destruct(struct mvx_log_group *group);
|
|
+
|
|
+/**
|
|
+ * mvx_log_strrchr() - Find last occurrence of '/' in string.
|
|
+ * @s: Pointer to string.
|
|
+ *
|
|
+ * Return: Pointer to the character after the last '/', or the start of the string.
|
|
+ */
|
|
+const char *mvx_log_strrchr(const char *s);
|
|
+
|
|
+#endif /* MVX_LOG_H */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/mvx_log_group.c b/drivers/media/platform/spacemit/vpu_k1x/mvx_log_group.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/mvx_log_group.c
|
|
@@ -0,0 +1,168 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/******************************************************************************
|
|
+ * Includes
|
|
+ ******************************************************************************/
|
|
+
|
|
+#include "mvx_log.h"
|
|
+
|
|
+/******************************************************************************
|
|
+ * Private variables
|
|
+ ******************************************************************************/
|
|
+
|
|
+static struct mvx_log log;
|
|
+
|
|
+static struct mvx_log_drain drain_dmesg_if;
|
|
+static struct mvx_log_drain_ram drain_ram0_if;
|
|
+
|
|
+#ifdef MVX_LOG_FTRACE_ENABLE
|
|
+static struct mvx_log_drain drain_ftrace_if;
|
|
+#endif /* MVX_LOG_FTRACE_ENABLE */
|
|
+
|
|
+struct mvx_log_group mvx_log_if;
|
|
+struct mvx_log_group mvx_log_fwif_if;
|
|
+struct mvx_log_group mvx_log_session_if;
|
|
+struct mvx_log_group mvx_log_dev;
|
|
+
|
|
+/******************************************************************************
|
|
+ * External interface
|
|
+ ******************************************************************************/
|
|
+
|
|
+int mvx_log_group_init(const char *entry_name)
|
|
+{
|
|
+ int ret;
|
|
+ struct mvx_log_drain *drain_default = &drain_dmesg_if;
|
|
+ struct mvx_log_drain *drain_ram = &drain_ram0_if.base;
|
|
+
|
|
+#ifdef MVX_LOG_FTRACE_ENABLE
|
|
+ drain_default = &drain_ftrace_if;
|
|
+#endif /* MVX_LOG_FTRACE_ENABLE */
|
|
+
|
|
+ /* Construct log object. */
|
|
+ ret = mvx_log_construct(&log, entry_name);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ /* Construct drain objects and add them to log. */
|
|
+ mvx_log_drain_dmesg_construct(&drain_dmesg_if);
|
|
+ ret = mvx_log_drain_add(&log, "dmesg", &drain_dmesg_if);
|
|
+ if (ret != 0)
|
|
+ goto delete_log_entry;
|
|
+
|
|
+ mvx_log_drain_ram_construct(&drain_ram0_if, 256 * 1024);
|
|
+ ret = mvx_log_drain_ram_add(&log, "ram0", &drain_ram0_if);
|
|
+ if (ret != 0)
|
|
+ goto delete_dmesg_drain;
|
|
+
|
|
+#ifdef MVX_LOG_FTRACE_ENABLE
|
|
+ ret = mvx_log_drain_add(&log, "ftrace", &drain_ftrace_if);
|
|
+ mvx_log_drain_add(&log, "ftrace", &drain_ftrace_if);
|
|
+ if (ret != 0)
|
|
+ goto delete_ram_drain;
|
|
+
|
|
+#endif /* MVX_LOG_FTRACE_ENABLE */
|
|
+
|
|
+ /* Construct group objects. */
|
|
+ mvx_log_group_construct(&mvx_log_if, "MVX if", MVX_LOG_WARNING,
|
|
+ drain_default);
|
|
+ ret = mvx_log_group_add(&log, "generic", &mvx_log_if);
|
|
+ if (ret != 0)
|
|
+ goto delete_ftrace_drain;
|
|
+
|
|
+ mvx_log_group_construct(&mvx_log_fwif_if, "MVX fwif", MVX_LOG_INFO,
|
|
+ drain_ram);
|
|
+ ret = mvx_log_group_add(&log, "firmware_interface",
|
|
+ &mvx_log_fwif_if);
|
|
+ if (ret != 0)
|
|
+ goto delete_generic_group;
|
|
+
|
|
+ mvx_log_group_construct(&mvx_log_session_if, "MVX session",
|
|
+ MVX_LOG_WARNING,
|
|
+ drain_default);
|
|
+ ret = mvx_log_group_add(&log, "session",
|
|
+ &mvx_log_session_if);
|
|
+ if (ret != 0)
|
|
+ goto delete_fwif_group;
|
|
+
|
|
+ mvx_log_group_construct(&mvx_log_dev, "MVX dev", MVX_LOG_WARNING,
|
|
+ drain_default);
|
|
+ ret = mvx_log_group_add(&log, "dev", &mvx_log_dev);
|
|
+ if (ret != 0)
|
|
+ goto delete_session_group;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+delete_session_group:
|
|
+ mvx_log_group_destruct(&mvx_log_session_if);
|
|
+
|
|
+delete_fwif_group:
|
|
+ mvx_log_group_destruct(&mvx_log_fwif_if);
|
|
+
|
|
+delete_generic_group:
|
|
+ mvx_log_group_destruct(&mvx_log_if);
|
|
+
|
|
+delete_ftrace_drain:
|
|
+
|
|
+#ifdef MVX_LOG_FTRACE_ENABLE
|
|
+ mvx_log_drain_ftrace_destruct(&drain_ftrace_if);
|
|
+
|
|
+delete_ram_drain:
|
|
+#endif /* MVX_LOG_FTRACE_ENABLE */
|
|
+
|
|
+ mvx_log_drain_ram_destruct(&drain_ram0_if);
|
|
+
|
|
+delete_dmesg_drain:
|
|
+ mvx_log_drain_dmesg_destruct(&drain_dmesg_if);
|
|
+
|
|
+delete_log_entry:
|
|
+ mvx_log_destruct(&log);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void mvx_log_group_deinit(void)
|
|
+{
|
|
+ /* Destroy objects in reverse order. */
|
|
+ mvx_log_group_destruct(&mvx_log_dev);
|
|
+ mvx_log_group_destruct(&mvx_log_session_if);
|
|
+ mvx_log_group_destruct(&mvx_log_fwif_if);
|
|
+ mvx_log_group_destruct(&mvx_log_if);
|
|
+
|
|
+#ifdef MVX_LOG_FTRACE_ENABLE
|
|
+ mvx_log_drain_ftrace_destruct(&drain_ftrace_if);
|
|
+#endif /* MVX_LOG_FTRACE_ENABLE */
|
|
+
|
|
+ mvx_log_drain_ram_destruct(&drain_ram0_if);
|
|
+ mvx_log_drain_dmesg_destruct(&drain_dmesg_if);
|
|
+
|
|
+ mvx_log_destruct(&log);
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/mvx_log_group.h b/drivers/media/platform/spacemit/vpu_k1x/mvx_log_group.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/mvx_log_group.h
|
|
@@ -0,0 +1,68 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_LOG_GROUP_H_
|
|
+#define _MVX_LOG_GROUP_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Includes
|
|
+ ****************************************************************************/
|
|
+
|
|
+#include "mvx_log.h"
|
|
+
|
|
+/******************************************************************************
|
|
+ * Prototypes
|
|
+ ******************************************************************************/
|
|
+
|
|
+extern struct mvx_log_group mvx_log_if;
|
|
+extern struct mvx_log_group mvx_log_fwif_if;
|
|
+extern struct mvx_log_group mvx_log_session_if;
|
|
+extern struct mvx_log_group mvx_log_dev;
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_log_group_init() - Initialize log module. This function must be called
|
|
+ * before any of the log groups is used.
|
|
+ * @entry_name: The name of the directory
|
|
+ *
|
|
+ * Return: 0 on success, else error code.
|
|
+ */
|
|
+int mvx_log_group_init(const char *entry_name);
|
|
+
|
|
+/**
|
|
+ * mvx_log_group_deinit() - Destroy log module.
|
|
+ */
|
|
+void mvx_log_group_deinit(void);
|
|
+
|
|
+#endif /* _MVX_LOG_GROUP_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/mvx_log_ram.h b/drivers/media/platform/spacemit/vpu_k1x/mvx_log_ram.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/mvx_log_ram.h
|
|
@@ -0,0 +1,212 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef MVX_LOG_RAM_H
|
|
+#define MVX_LOG_RAM_H
|
|
+
|
|
+/******************************************************************************
|
|
+ * Includes
|
|
+ ******************************************************************************/
|
|
+
|
|
+#ifndef __KERNEL__
|
|
+#include <stdint.h>
|
|
+#include <time.h>
|
|
+#include <sys/time.h>
|
|
+#else
|
|
+#include <linux/types.h>
|
|
+#include <linux/time.h>
|
|
+#endif
|
|
+
|
|
+/******************************************************************************
|
|
+ * Defines
|
|
+ ******************************************************************************/
|
|
+
|
|
+/**
|
|
+ * Magic word "MVXL" that prefix all messages.
|
|
+ *
|
|
+ * Messages are stored in native byte order. The magic word can be used to
|
|
+ * detect whether the log was stored in the same byte order as the one used by
+ * the application that unpacks it.
|
|
+ */
|
|
+#define MVX_LOG_MAGIC 0x4d56584c
|
|
+
|
|
+/**
|
|
+ * The maximum message length.
|
|
+ */
|
|
+#define MVX_LOG_MESSAGE_LENGTH_MAX 4096
|
|
+
|
|
+/******************************************************************************
|
|
+ * Types
|
|
+ ******************************************************************************/
|
|
+
|
|
+/**
|
|
+ * enum mvx_log_ioctl - IOCTL commands.
|
|
+ * @MVX_LOG_IOCTL_CLEAR: Clear the log.
|
|
+ */
|
|
+enum mvx_log_ioctl {
|
|
+ MVX_LOG_IOCTL_CLEAR
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_log_type - Message type. The definitions are assigned values that
|
|
+ * are not allowed to change.
|
|
+ */
|
|
+enum mvx_log_type {
|
|
+ MVX_LOG_TYPE_TEXT = 0,
|
|
+ MVX_LOG_TYPE_FWIF = 1,
|
|
+ MVX_LOG_TYPE_FW_BINARY = 2,
|
|
+ MVX_LOG_TYPE_MAX
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_log_timeval - Portable time value format.
|
|
+ * @sec: Seconds since 1970-01-01, Unix time epoch.
|
|
+ * @nsec: Nano seconds.
|
|
+ */
|
|
+struct mvx_log_timeval {
|
|
+ uint64_t sec;
|
|
+ uint64_t nsec;
|
|
+}
|
|
+__attribute__((packed));
|
|
+
|
|
+/**
|
|
+ * struct mvx_log_header - Common header for all messages stored in RAM buffer.
|
|
+ * @magic: Magic word.
|
|
+ * @length: Length of message, excluding this header.
|
|
+ * @type: Message type.
|
|
+ * @severity: Message severity.
|
|
+ * @timestamp: Time stamp.
|
|
+ */
|
|
+struct mvx_log_header {
|
|
+ uint32_t magic;
|
|
+ uint16_t length;
|
|
+ uint8_t type;
|
|
+ uint8_t severity;
|
|
+ struct mvx_log_timeval timestamp;
|
|
+}
|
|
+__attribute__((packed));
|
|
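+
+/*
+ * Records are written back to back, each padded to a 32-bit boundary. A
+ * reader can walk the buffer roughly as follows (sketch only; bounds and
+ * wrap handling are omitted):
+ *
+ *   const struct mvx_log_header *hdr = (const struct mvx_log_header *)pos;
+ *   if (hdr->magic == MVX_LOG_MAGIC)
+ *       pos += (sizeof(*hdr) + hdr->length + 3) & ~3;
+ */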
+
|
|
+/******************************************************************************
|
|
+ * Text message
|
|
+ ******************************************************************************/
|
|
+
|
|
+/**
|
|
+ * struct mvx_log_text - ASCII text message.
|
|
+ * @message[0]: ASCII text message.
|
|
+ *
|
|
+ * The message shall be header.length long and should end with a standard ASCII
|
|
+ * character. The parser of the log will add a newline and null-terminate
|
|
+ * the string.
|
|
+ */
|
|
+struct mvx_log_text {
|
|
+ char message[0];
|
|
+}
|
|
+__attribute__((packed));
|
|
+
|
|
+/******************************************************************************
|
|
+ * Firmware interface
|
|
+ ******************************************************************************/
|
|
+
|
|
+/**
|
|
+ * enum mvx_log_fwif_channel - Firmware interface message types.
|
|
+ */
|
|
+enum mvx_log_fwif_channel {
|
|
+ MVX_LOG_FWIF_CHANNEL_MESSAGE,
|
|
+ MVX_LOG_FWIF_CHANNEL_INPUT_BUFFER,
|
|
+ MVX_LOG_FWIF_CHANNEL_OUTPUT_BUFFER,
|
|
+ MVX_LOG_FWIF_CHANNEL_RPC
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_log_fwif_direction - Firmware interface message types.
|
|
+ */
|
|
+enum mvx_log_fwif_direction {
|
|
+ MVX_LOG_FWIF_DIRECTION_HOST_TO_FIRMWARE,
|
|
+ MVX_LOG_FWIF_DIRECTION_FIRMWARE_TO_HOST
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum mvx_log_fwif_code - Special message codes for message types not defined
|
|
+ * by the firmware interface.
|
|
+ */
|
|
+enum mvx_log_fwif_code {
|
|
+ MVX_LOG_FWIF_CODE_STAT = 16000
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct mvx_log_fwif - Firmware interface header type.
|
|
+ * @version_minor: Protocol version.
|
|
+ * @version_major: Protocol version.
|
|
+ * @channel: @see enum mvx_log_fwif_channel.
|
|
+ * @direction: @see enum mvx_log_fwif_direction.
|
|
+ * @session: Session id.
|
|
+ * @data[0]: Data following the firmware interface message
|
|
+ * header.
|
|
+ */
|
|
+struct mvx_log_fwif {
|
|
+ uint8_t version_minor;
|
|
+ uint8_t version_major;
|
|
+ uint8_t channel;
|
|
+ uint8_t direction;
|
|
+ uint64_t session;
|
|
+ uint8_t data[0];
|
|
+}
|
|
+__attribute__((packed));
|
|
+
|
|
+/**
|
|
+ * struct mvx_log_fwif_stat - Firmware interface statistics.
|
|
+ * @handle: Buffer handle.
|
|
+ * @queued: Number of buffers currently queued to the firmware.
|
|
+ */
|
|
+struct mvx_log_fwif_stat {
|
|
+ uint64_t handle;
|
|
+ uint32_t queued;
|
|
+}
|
|
+__attribute__((packed));
|
|
+
|
|
+/******************************************************************************
|
|
+ * Firmware binary header
|
|
+ ******************************************************************************/
|
|
+
|
|
+/**
|
|
+ * struct mvx_log_fw_binary - Firmware binary header.
|
|
+ * @session: Session id.
|
|
+ * @data[0]: Firmware binary, byte 0..length.
|
|
+ *
|
|
+ * The first ~100 bytes of the firmware binary contain information describing
|
|
+ * the codec.
|
|
+ */
|
|
+struct mvx_log_fw_binary {
|
|
+ uint64_t session;
|
|
+ uint8_t data[0];
|
|
+};
|
|
+
|
|
+#endif /* MVX_LOG_RAM_H */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/mvx_pm_runtime.c b/drivers/media/platform/spacemit/vpu_k1x/mvx_pm_runtime.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/mvx_pm_runtime.c
|
|
@@ -0,0 +1,85 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include <linux/pm_runtime.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/delay.h>
|
|
+#include "mvx_log_group.h"
|
|
+#include "mvx_dev.h"
|
|
+
|
|
+int mvx_pm_runtime_get_sync(struct device *dev)
|
|
+{
|
|
+#ifdef CONFIG_PM
|
|
+ int ret;
|
|
+ struct mvx_dev_ctx *ctx;
|
|
+ ctx = dev_get_drvdata(dev);
|
|
+
|
|
+ mutex_lock(&ctx->pm_mutex);
|
|
+ ret = pm_runtime_get_sync(dev);
|
|
+ if (ret < 0)
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "PM runtime get sync failed! ret=%d", ret);
|
|
+
|
|
+ mutex_unlock(&ctx->pm_mutex);
|
|
+ return ret;
|
|
+#else /* !CONFIG_PM */
|
|
+ return 1;
|
|
+#endif /* CONFIG_PM */
|
|
+}
|
|
+
|
|
+int mvx_pm_runtime_put_sync(struct device *dev)
|
|
+{
|
|
+#ifdef CONFIG_PM
|
|
+ int ret;
|
|
+ int retry_count = 10;
|
|
+ struct mvx_dev_ctx *ctx;
|
|
+ ctx = dev_get_drvdata(dev);
|
|
+
|
|
+ mutex_lock(&ctx->pm_mutex);
|
|
+ ret = pm_runtime_put_sync(dev);
|
|
+ if (ret < 0) {
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
|
|
+ "PM runtime put sync failed! ret=%d", ret);
|
|
+ while (ret == -EAGAIN && retry_count > 0) {
|
|
+ msleep(20);
|
|
+ pm_runtime_get_noresume(dev);
|
|
+ ret = pm_runtime_put_sync(dev);
|
|
+ MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING,
+ "PM runtime put sync returned -EAGAIN. Retrying put sync. ret=%d, retry_count=%d",
+ ret, retry_count);
|
|
+ retry_count--;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&ctx->pm_mutex);
|
|
+ return ret;
|
|
+#else /* !CONFIG_PM */
|
|
+ return 0;
|
|
+#endif /* CONFIG_PM */
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/mvx_pm_runtime.h b/drivers/media/platform/spacemit/vpu_k1x/mvx_pm_runtime.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/mvx_pm_runtime.h
|
|
@@ -0,0 +1,67 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_PM_RUNTIME_H_
|
|
+#define _MVX_PM_RUNTIME_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+struct device;
|
|
+
|
|
+/****************************************************************************
|
|
+ * Exported functions
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * mvx_pm_runtime_get_sync() - The same function as pm_runtime_get_sync(), but
|
|
+ * with the addition that it prints a log line when
+ * an error occurs.
|
|
+ * @dev: Pointer to device.
|
|
+ *
|
|
+ * Return: 0 on success, 1 if already 'active', else error code.
|
|
+ */
|
|
+int mvx_pm_runtime_get_sync(struct device *dev);
|
|
+
|
|
+/**
|
|
+ * mvx_pm_runtime_put_sync() - The same function as pm_runtime_put_sync(), but
|
|
+ * with the addition that it prints a log line when
+ * an error occurs.
+ * It does not return an error if CONFIG_PM is
+ * undefined.
|
|
+ * @dev: Pointer to device.
|
|
+ *
|
|
+ * Return: 0 on success, 1 if already 'suspended', else error code.
|
|
+ */
|
|
+int mvx_pm_runtime_put_sync(struct device *dev);
|
|
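+
+/*
+ * Typical pairing (sketch only; the callers in this driver decide where the
+ * actual get/put boundaries are):
+ *
+ *   ret = mvx_pm_runtime_get_sync(dev);
+ *   if (ret < 0)
+ *           return ret;
+ *   ...access the hardware...
+ *   mvx_pm_runtime_put_sync(dev);
+ */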
+
|
|
+#endif /* _MVX_PM_RUNTIME_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/mvx_seq.c b/drivers/media/platform/spacemit/vpu_k1x/mvx_seq.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/mvx_seq.c
|
|
@@ -0,0 +1,95 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include <linux/device.h>
|
|
+#include <linux/seq_file.h>
|
|
+#include "mvx_seq.h"
|
|
+
|
|
+struct mvx_seq_hash_it *mvx_seq_hash_start(struct device *dev,
|
|
+ struct hlist_head *head,
|
|
+ size_t size,
|
|
+ loff_t pos)
|
|
+{
|
|
+ struct mvx_seq_hash_it *it;
|
|
+ size_t i;
|
|
+
|
|
+ it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL);
|
|
+ if (it == NULL)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ it->dev = dev;
|
|
+ for (i = 0; i < size; ++i) {
|
|
+ it->i = i;
|
|
+ hlist_for_each(it->node, &head[i]) {
|
|
+ if (pos-- == 0)
|
|
+ return it;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ devm_kfree(dev, it);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+struct mvx_seq_hash_it *mvx_seq_hash_next(void *v,
|
|
+ struct hlist_head *head,
|
|
+ size_t size,
|
|
+ loff_t *pos)
|
|
+{
|
|
+ struct mvx_seq_hash_it *it = v;
|
|
+
|
|
+ ++*pos;
|
|
+ it->node = it->node->next;
|
|
+
|
|
+ if (it->node != NULL)
|
|
+ return it;
|
|
+
|
|
+ do {
|
|
+ ++it->i;
|
|
+ } while ((it->i < size) && hlist_empty(&head[it->i]));
|
|
+
|
|
+ if (it->i == size) {
|
|
+ devm_kfree(it->dev, it);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ it->node = head[it->i].first;
|
|
+ return it;
|
|
+}
|
|
+
|
|
+void mvx_seq_hash_stop(void *v)
|
|
+{
|
|
+ struct mvx_seq_hash_it *it = v;
|
|
+
|
|
+ if (it == NULL)
|
|
+ return;
|
|
+
|
|
+ devm_kfree(it->dev, it);
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/mvx_seq.h b/drivers/media/platform/spacemit/vpu_k1x/mvx_seq.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/mvx_seq.h
|
|
@@ -0,0 +1,94 @@
|
|
+/*
|
|
+ * The confidential and proprietary information contained in this file may
|
|
+ * only be used by a person authorised under and to the extent permitted
|
|
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
|
|
+ * ALL RIGHTS RESERVED
|
|
+ *
|
|
+ * This entire notice must be reproduced on all copies of this file
|
|
+ * and copies of this file may only be made by a person if such person is
|
|
+ * permitted to do so under the terms of a subsisting license agreement
|
|
+ * from Arm Technology (China) Co., Ltd.
|
|
+ *
|
|
+ * SPDX-License-Identifier: GPL-2.0-only
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version 2
|
|
+ * of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _MVX_SEQ_H_
|
|
+#define _MVX_SEQ_H_
|
|
+
|
|
+/****************************************************************************
|
|
+ * Defines
|
|
+ ****************************************************************************/
|
|
+
|
|
+#define mvx_seq_printf(s, tag, ind, fmt, ...) \
|
|
+ seq_printf(s, "%-*s%-*s: " fmt, (3 * (ind)), "", 30 - (3 * (ind)), \
|
|
+ tag, ## __VA_ARGS__)
|
|
+
|
|
+/****************************************************************************
|
|
+ * Types
|
|
+ ****************************************************************************/
|
|
+
|
|
+/**
|
|
+ * struct mvx_seq_hash_it - Iterator over hash table.
|
|
+ */
|
|
+struct mvx_seq_hash_it {
|
|
+ struct hlist_node *node;
|
|
+ size_t i;
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * mvx_seq_hash_start() - Initialize iterator.
|
|
+ * @dev: Pointer to device.
|
|
+ * @head: Pointer to a head of a hash table.
|
|
+ * @size: Size of a hash table.
|
|
+ * @pos: Position to start.
|
|
+ *
|
|
+ * The iterator created by this function should be passed to
+ * mvx_seq_hash_next() and mvx_seq_hash_stop() as the first parameter.
|
|
+ *
|
|
+ * Return: Pointer to an iterator on success or ERR_PTR().
|
|
+ */
|
|
+struct mvx_seq_hash_it *mvx_seq_hash_start(struct device *dev,
|
|
+ struct hlist_head *head,
|
|
+ size_t size,
|
|
+ loff_t pos);
|
|
+
|
|
+/**
|
|
+ * mvx_seq_hash_next() - Move iterator to the next element.
|
|
+ * @v: Pointer to an iterator.
|
|
+ * @head: Pointer to a head of a hash table.
|
|
+ * @size: Size of a hash table.
|
|
+ * @pos: Position.
|
|
+ *
|
|
+ * Return: Iterator pointing to the next element, or NULL when the end of
+ * the table has been reached.
|
|
+ */
|
|
+struct mvx_seq_hash_it *mvx_seq_hash_next(void *v,
|
|
+ struct hlist_head *head,
|
|
+ size_t size,
|
|
+ loff_t *pos);
|
|
+
|
|
+/**
|
|
+ * mvx_seq_hash_stop() - Close an iterator.
|
|
+ * @v: Pointer to an iterator.
|
|
+ */
|
|
+void mvx_seq_hash_stop(void *v);
|
|
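+
+/*
+ * These helpers are meant to back the start/next/stop callbacks of a
+ * struct seq_operations. A minimal sketch, assuming a hypothetical hash
+ * table 'table' with HASH_SIZE buckets owned by 'dev':
+ *
+ *   static void *my_start(struct seq_file *s, loff_t *pos)
+ *   {
+ *           return mvx_seq_hash_start(dev, table, HASH_SIZE, *pos);
+ *   }
+ *
+ *   static void *my_next(struct seq_file *s, void *v, loff_t *pos)
+ *   {
+ *           return mvx_seq_hash_next(v, table, HASH_SIZE, pos);
+ *   }
+ *
+ *   static void my_stop(struct seq_file *s, void *v)
+ *   {
+ *           mvx_seq_hash_stop(v);
+ *   }
+ */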
+
|
|
+#endif /* _MVX_SEQ_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/vpu_k1x/sconscript b/drivers/media/platform/spacemit/vpu_k1x/sconscript
|
|
new file mode 100755
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/vpu_k1x/sconscript
|
|
@@ -0,0 +1,36 @@
|
|
+import os
|
|
+
|
|
+Import('env')
|
|
+
|
|
+# Get source path to current directory.
|
|
+path = env.Dir('.').srcnode().path
|
|
+
|
|
+prints = ""
|
|
+if env['log_ftrace'] == '1':
|
|
+ prints += " CONFIG_VIDEO_LINLON_FTRACE=y"
|
|
+if env['log_print_file'] == '1':
|
|
+ prints += " CONFIG_VIDEO_LINLON_PRINT_FILE=y"
|
|
+
|
|
+if env['mode'] == 'mono':
|
|
+ targets = [os.path.join('#', path, 'amvx.ko')]
|
|
+else:
|
|
+ targets = [os.path.join('#', path, 'amvx_if.ko'), os.path.join('#', path, 'amvx_dev.ko')]
|
|
+
|
|
+extra_ccflags = ""
|
|
+if 'EXTRA_CCFLAGS' in os.environ:
|
|
+ extra_ccflags = os.environ['EXTRA_CCFLAGS']
|
|
+if env['coverage']:
|
|
+ extra_ccflags += ' -fprofile-arcs -ftest-coverage'
|
|
+
|
|
+amvx = env.Command(targets, [],
|
|
+ 'make -C %s %s_%s KDIR=%s EXTRA_CCFLAGS="%s" %s' %
|
|
+ (path, env['mode'], env['interface'], env['KDIR'], extra_ccflags, prints))
|
|
+
|
|
+# Flag to always build.
|
|
+env.AlwaysBuild(amvx)
|
|
+
|
|
+# Install kernel module in bin directory.
|
|
+env.Install(env['BIN_DIR'], [amvx])
|
|
+
|
|
+# Install user space header files
|
|
+env.Install(env['INCLUDE_DIR'], ['if/v4l2/mvx-v4l2-controls.h'])
|
|
--
|
|
Armbian
|
|
|