author     Michael Grzeschik <m.grzeschik@pengutronix.de>  2022-11-14 01:06:25 +0100
committer  Michael Grzeschik <m.grzeschik@pengutronix.de>  2023-02-22 15:43:46 +0100
commit     4b0c3ff265c781435cf4acb11f81ffcee54e1a91 (patch)
tree       8a7941759021655ce2f0382c5696a729d7161790
parent     c7c59a33acf44bad7c0d8a37f2e6ceffefc90eb8 (diff)
rkvdec: add vepu540 h264 rkvenc encoder support
 drivers/staging/media/rkvdec/Makefile         |    2
 drivers/staging/media/rkvdec/TODO             |   11
 drivers/staging/media/rkvdec/rkvenc-h264.c    | 1606
 drivers/staging/media/rkvdec/rkvenc-vepu540.h | 3381
 drivers/staging/media/rkvdec/rkvpu.c          |  940
 drivers/staging/media/rkvdec/rkvpu.h          |  109
 include/uapi/linux/v4l2-controls.h            |   13
 7 files changed, 5879 insertions(+), 183 deletions(-)
diff --git a/drivers/staging/media/rkvdec/Makefile b/drivers/staging/media/rkvdec/Makefile
index d09c3495b31aed..45588b81a166e9 100644
--- a/drivers/staging/media/rkvdec/Makefile
+++ b/drivers/staging/media/rkvdec/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_VIDEO_ROCKCHIP_VDEC) += rockchip-vpu.o
-rockchip-vpu-y += rkvpu.o rkvdec-h264.o rkvdec-vp9.o
+rockchip-vpu-y += rkvpu.o rkvdec-h264.o rkvdec-vp9.o rkvenc-h264.o
diff --git a/drivers/staging/media/rkvdec/TODO b/drivers/staging/media/rkvdec/TODO
index 2c0779383276ef..f1a79673026f96 100644
--- a/drivers/staging/media/rkvdec/TODO
+++ b/drivers/staging/media/rkvdec/TODO
@@ -9,3 +9,14 @@
code in rkvdec_request_validate and cedrus_request_validate.
The helper needs to get the driver private data associated with
the videobuf2 queue, from a media request.
+
+
+* h264-encoder:
+- where do sps and pps come from?
+- bootlin testapp is creating fixed bitstream_fd
+- allocate regmap fields
+- add encoder ctrls
+- add compatibles
+- mapping between V4L2_PIX_FMT and VEPU54x_HW_FMT
+- implement interrupt handling
+- add feedback parameters in irq handler
diff --git a/drivers/staging/media/rkvdec/rkvenc-h264.c b/drivers/staging/media/rkvdec/rkvenc-h264.c
new file mode 100644
index 00000000000000..a720e4d0ec4efd
--- /dev/null
+++ b/drivers/staging/media/rkvdec/rkvenc-h264.c
@@ -0,0 +1,1606 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Pengutronix
+ * Michael Grzeschik <m.grzeschik@pengutronix.de>
+ */
+
+#include <linux/of.h>
+
+#include <media/v4l2-h264.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "rkvenc-vepu540.h"
+#include "rkvpu.h"
+
+struct rkvenc_h264_run {
+ struct rkvpu_run base;
+ const struct v4l2_ctrl_h264_encode_params *encode_params;
+ const struct v4l2_ctrl_h264_encode_rc *encode_rc;
+ const struct v4l2_ctrl_h264_sps *sps;
+ const struct v4l2_ctrl_h264_pps *pps;
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix;
+ struct vb2_buffer *ref_buf[V4L2_H264_NUM_DPB_ENTRIES];
+};
+
+struct rkvenc_h264_ctx {
+ struct regmap *l1_regmap;
+ struct regmap *l2_regmap;
+ struct regmap_field *l1_fields[L1_MAX_FIELDS];
+ struct regmap_field *l2_fields[L2_MAX_FIELDS];
+};
+
+static const struct regmap_config rkvenc_l1_regmap_cfg = {
+ .name = "rkvenc_l1",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .reg_format_endian = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .cache_type = REGCACHE_NONE,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .max_register = RKVENC_VEPU540_L1_MMU_ACKG(1),
+};
+
+static const struct regmap_config rkvenc_l2_regmap_cfg = {
+ .name = "rkvenc_l2",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .reg_format_endian = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .cache_type = REGCACHE_NONE,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .max_register = RKVENC_VEPU540_L2_I32_SOBEL_E(9),
+};
+
+#define CHROMA_KLUT_TAB_SIZE 24
+
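+/*
+ * Chroma cost (klut) weight table; setup_vepu541_rdo_pred() loads a 24-word
+ * window from it into the KLUT_WGT registers, selected by slice type.
+ */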
+static unsigned int h264e_klut_weight[30] = {
+ 0x0a000010, 0x00064000, 0x14000020, 0x000c8000,
+ 0x28000040, 0x00194000, 0x50800080, 0x0032c000,
+ 0xa1000100, 0x00658000, 0x42800200, 0x00cb0001,
+ 0x85000400, 0x01964002, 0x0a000800, 0x032c8005,
+ 0x14001000, 0x0659400a, 0x28802000, 0x0cb2c014,
+ 0x51004000, 0x1965c028, 0xa2808000, 0x32cbc050,
+	0x4500ffff, 0x659780a1, 0x8a81fffe, 0xcc000142,
+	0xff83ffff, 0x000001ff,
+};
+
+static int h264_aq_tthd_default[16] = {
+ 0, 0, 0, 0,
+ 3, 3, 5, 5,
+ 8, 8, 8, 15,
+ 15, 20, 25, 25,
+};
+
+static int h264_P_aq_step_default[16] = {
+ -8, -7, -6, -5,
+ -4, -3, -2, -1,
+ 0, 1, 2, 3,
+ 4, 5, 7, 8,
+};
+
+static int h264_I_aq_step_default[16] = {
+ -8, -7, -6, -5,
+ -4, -3, -2, -1,
+ 0, 1, 3, 3,
+ 4, 5, 8, 8,
+};
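+
+/*
+ * Default adaptive-quantization tables: h264_aq_tthd_default holds the 16
+ * activity thresholds, the two *_aq_step tables hold the signed QP
+ * adjustment per threshold bin for P and I slices. They are programmed
+ * into the L2 AQ registers by setup_vepu541_l2().
+ */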
+
+static void setup_vepu541_normal(struct rkvpu_ctx *ctx)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+
+ regmap_field_write(h264_ctx->l1_fields[SAFE_CLR], 0);
+ regmap_field_write(h264_ctx->l1_fields[FORCE_CLR], 0);
+
+ regmap_field_write(h264_ctx->l1_fields[LKT_ADDR], 0);
+
+ regmap_field_write(h264_ctx->l1_fields[ENC_DONE_EN], 1);
+ regmap_field_write(h264_ctx->l1_fields[LKT_DONE_EN], 1);
+ regmap_field_write(h264_ctx->l1_fields[SCLR_DONE_EN], 1);
+ regmap_field_write(h264_ctx->l1_fields[ENC_SLICE_DONE_EN], 1);
+ regmap_field_write(h264_ctx->l1_fields[OFLW_DONE_EN], 1);
+ regmap_field_write(h264_ctx->l1_fields[BRSP_DONE_EN], 1);
+ regmap_field_write(h264_ctx->l1_fields[BERR_DONE_EN], 1);
+ regmap_field_write(h264_ctx->l1_fields[RERR_DONE_EN], 1);
+ regmap_field_write(h264_ctx->l1_fields[WDG_DONE_EN], 0);
+
+ regmap_field_write(h264_ctx->l1_fields[ENC_DONE_MSK], 0);
+ regmap_field_write(h264_ctx->l1_fields[LKT_DONE_MSK], 0);
+ regmap_field_write(h264_ctx->l1_fields[SCLR_DONE_MSK], 0);
+ regmap_field_write(h264_ctx->l1_fields[ENC_SLICE_DONE_MSK], 0);
+ regmap_field_write(h264_ctx->l1_fields[OFLW_DONE_MSK], 0);
+ regmap_field_write(h264_ctx->l1_fields[BRSP_DONE_MSK], 0);
+ regmap_field_write(h264_ctx->l1_fields[BERR_DONE_MSK], 0);
+ regmap_field_write(h264_ctx->l1_fields[RERR_DONE_MSK], 0);
+ regmap_field_write(h264_ctx->l1_fields[WDG_DONE_MSK], 0);
+
+ regmap_field_write(h264_ctx->l1_fields[VS_LOAD_THD], 0);
+ regmap_field_write(h264_ctx->l1_fields[RFP_LOAD_THRD], 0);
+
+ regmap_field_write(h264_ctx->l1_fields[CMVW_BUS_ORDR], 0);
+ regmap_field_write(h264_ctx->l1_fields[DSPW_BUS_ORDR], 0);
+ regmap_field_write(h264_ctx->l1_fields[RFPW_BUS_ORDR], 0);
+ regmap_field_write(h264_ctx->l1_fields[SRC_BUS_EDIN], 0);
+ regmap_field_write(h264_ctx->l1_fields[MEIW_BUS_EDIN], 0);
+ regmap_field_write(h264_ctx->l1_fields[BSW_BUS_EDIN], 7);
+ regmap_field_write(h264_ctx->l1_fields[LKTR_BUS_EDIN], 0);
+ regmap_field_write(h264_ctx->l1_fields[ROIR_BUS_EDIN], 0);
+ regmap_field_write(h264_ctx->l1_fields[LKTW_BUS_EDIN], 0);
+ regmap_field_write(h264_ctx->l1_fields[AFBC_BSIZE], 1);
+
+ /* vepu540 */
+ regmap_field_write(h264_ctx->l1_fields[VPU540_AXI_BRSP_CKE], 0);
+ regmap_field_write(h264_ctx->l1_fields[VPU540_DSPR_OTSD], 1);
+}
+
+static bool rkvenc_alpha_swap(int format)
+{
+	switch (format) {
+	case V4L2_PIX_FMT_ARGB32:
+	case V4L2_PIX_FMT_ABGR32:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool rkvenc_rbuv_swap(int format)
+{
+	switch (format) {
+	case V4L2_PIX_FMT_ARGB32:
+	case V4L2_PIX_FMT_RGBA32:
+	case V4L2_PIX_FMT_BGR24:
+	case V4L2_PIX_FMT_NV61:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_YUV444M:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int rkvenc_src_endian(int format)
+{
+	switch (format) {
+	case V4L2_PIX_FMT_RGB565:
+		return 7;
+	default:
+		return 0;
+	}
+}
+
+static bool rkvenc_fmt_is_rgb(int format)
+{
+	switch (format) {
+	case V4L2_PIX_FMT_ARGB32:
+	case V4L2_PIX_FMT_RGBA32:
+	case V4L2_PIX_FMT_BGR32:
+	case V4L2_PIX_FMT_RGB565:
+		return true;
+	default:
+		return false;
+	}
+}
+
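+/*
+ * Translate a V4L2 pixel format into the VEPU54x SRC_CFMT hardware format
+ * code (see the "mapping between V4L2_PIX_FMT and VEPU54x_HW_FMT" TODO
+ * entry); unknown formats fall back to the RGBA32 code 0.
+ */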
+static int rkvenc_hw_fmt(int format)
+{
+ switch (format) {
+ case V4L2_PIX_FMT_UYVY:
+ return 9;
+ case V4L2_PIX_FMT_YUYV:
+ return 8;
+ case V4L2_PIX_FMT_YUV420M:
+ case V4L2_PIX_FMT_YUV420:
+ return 7;
+ case V4L2_PIX_FMT_NV21M:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV12:
+ return 6;
+ case V4L2_PIX_FMT_YUV422P:
+ return 5;
+ case V4L2_PIX_FMT_NV61M:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV16M:
+ case V4L2_PIX_FMT_NV16:
+ return 4;
+ case V4L2_PIX_FMT_RGB565:
+ return 2;
+ case V4L2_PIX_FMT_RGB24:
+ return 1;
+ case V4L2_PIX_FMT_RGBA32:
+ default:
+ return 0;
+	}
+}
+
+static bool rkvenc_yuvfmt_is_sp(int format)
+{
+	switch (format) {
+	case V4L2_PIX_FMT_NV61:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV12:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static void setup_vepu541_prep(struct rkvpu_ctx *ctx)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ const struct v4l2_pix_format_mplane *src_fmt;
+ const struct v4l2_format *f;
+ int y_stride;
+ int c_stride;
+
+ f = &ctx->src_fmt;
+ src_fmt = &f->fmt.pix_mp;
+
+ regmap_field_write(h264_ctx->l1_fields[PIC_WD8_M1],
+ ALIGN(src_fmt->width, 16) / 8 - 1);
+ regmap_field_write(h264_ctx->l1_fields[PIC_WFILL],
+ ALIGN(src_fmt->width, 16) - src_fmt->width);
+ regmap_field_write(h264_ctx->l1_fields[PIC_HD8_M1],
+ ALIGN(src_fmt->height, 16) / 8 - 1);
+ regmap_field_write(h264_ctx->l1_fields[PIC_HFILL],
+ ALIGN(src_fmt->height, 16) - src_fmt->height);
+
+ regmap_field_write(h264_ctx->l1_fields[SRC_BUS_EDIN], rkvenc_src_endian(src_fmt->pixelformat));
+
+ regmap_field_write(h264_ctx->l1_fields[SRC_CFMT], rkvenc_hw_fmt(src_fmt->pixelformat));
+ regmap_field_write(h264_ctx->l1_fields[ALPHA_SWAP], rkvenc_alpha_swap(src_fmt->pixelformat));
+ regmap_field_write(h264_ctx->l1_fields[RBUV_SWAP], rkvenc_rbuv_swap(src_fmt->pixelformat));
+ regmap_field_write(h264_ctx->l1_fields[SRC_RANGE], 0);
+ regmap_field_write(h264_ctx->l1_fields[OUT_FMT_CFG], 0);
+
+ //FIXME
+ /*if (MPP_FRAME_FMT_IS_FBC(src_fmt->pixelformat)) {
+ y_stride = mpp_frame_get_fbc_hdr_stride(task->frame);
+ if (!y_stride)
+ y_stride = ALIGN(prep->hor_stride, 16);
+ } else
+ */
+
+	y_stride = src_fmt->plane_fmt[0].bytesperline ?
+		   src_fmt->plane_fmt[0].bytesperline : src_fmt->width;
+
+	c_stride = rkvenc_yuvfmt_is_sp(src_fmt->pixelformat) ?
+		   y_stride : y_stride / 2;
+
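+	/*
+	 * For RGB input the colour-space conversion uses what look like the
+	 * usual BT.601 limited-range integer coefficients (Q8); for YUV
+	 * sources the CSC is neutralized by zeroing all weights and offsets.
+	 */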
+ if (rkvenc_fmt_is_rgb(src_fmt->pixelformat)) {
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_B2Y], 25);
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_G2Y], 129);
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_R2Y], 66);
+
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_B2U], 112);
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_G2U], -74);
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_R2U], -38);
+
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_B2V], -18);
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_G2V], -94);
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_R2V], 112);
+
+ regmap_field_write(h264_ctx->l1_fields[CSC_OFST_Y], 15);
+ regmap_field_write(h264_ctx->l1_fields[CSC_OFST_U], 128);
+ regmap_field_write(h264_ctx->l1_fields[CSC_OFST_V], 128);
+ } else {
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_B2Y], 0);
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_G2Y], 0);
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_R2Y], 0);
+
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_B2U], 0);
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_G2U], 0);
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_R2U], 0);
+
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_B2V], 0);
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_G2V], 0);
+ regmap_field_write(h264_ctx->l1_fields[CSC_WGT_R2V], 0);
+
+ regmap_field_write(h264_ctx->l1_fields[CSC_OFST_Y], 0);
+ regmap_field_write(h264_ctx->l1_fields[CSC_OFST_U], 0);
+ regmap_field_write(h264_ctx->l1_fields[CSC_OFST_V], 0);
+ }
+
+ regmap_field_write(h264_ctx->l1_fields[AFBCD_EN], 0); // No AFBC Format for now
+ regmap_field_write(h264_ctx->l1_fields[SRC_STRD0], y_stride);
+ regmap_field_write(h264_ctx->l1_fields[SRC_STRD1], c_stride);
+
+ regmap_field_write(h264_ctx->l1_fields[SRC_MIRR], 0); // disable for now
+ regmap_field_write(h264_ctx->l1_fields[SRC_ROT], 0); // disable for now
+ regmap_field_write(h264_ctx->l1_fields[TXA_EN], 1);
+
+ regmap_field_write(h264_ctx->l1_fields[SLI_CRS_EN], 1);
+
+ regmap_field_write(h264_ctx->l1_fields[PIC_OFST_Y], 0);
+ regmap_field_write(h264_ctx->l1_fields[PIC_OFST_X], 0);
+}
+
+static void setup_vepu541_codec(struct rkvpu_ctx *ctx, struct rkvenc_h264_run *run)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ const struct v4l2_ctrl_h264_encode_params *encode_params;
+ const struct v4l2_ctrl_h264_sps *sps;
+ const struct v4l2_ctrl_h264_pps *pps;
+
+ encode_params = run->encode_params;
+ sps = run->sps;
+ pps = run->pps;
+
+ regmap_field_write(h264_ctx->l1_fields[ENC_STND], 0);
+ regmap_field_write(h264_ctx->l1_fields[CUR_FRM_REF],
+ encode_params->nal_reference_idc > 0);
+ regmap_field_write(h264_ctx->l1_fields[BS_SCP], 1);
+ regmap_field_write(h264_ctx->l1_fields[LAMB_MOD_SEL],
+ (encode_params->slice_type == V4L2_H264_SLICE_TYPE_I) ? 0 : 1);
+ regmap_field_write(h264_ctx->l1_fields[ATR_THD_SEL], 0);
+ regmap_field_write(h264_ctx->l1_fields[NODE_INT], 0);
+
+ regmap_field_write(h264_ctx->l1_fields[NAL_REF_IDC],
+ encode_params->nal_reference_idc);
+ regmap_field_write(h264_ctx->l1_fields[NAL_UNIT_TYPE],
+ encode_params->nalu_type);
+
+ regmap_field_write(h264_ctx->l1_fields[MAX_FNUM],
+ sps->log2_max_frame_num_minus4);
+ regmap_field_write(h264_ctx->l1_fields[DRCT_8X8],
+ !!(sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE));
+ regmap_field_write(h264_ctx->l1_fields[MPOC_LM4],
+ sps->log2_max_pic_order_cnt_lsb_minus4);
+
+ regmap_field_write(h264_ctx->l1_fields[ETPY_MODE],
+ !!(pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE));
+ regmap_field_write(h264_ctx->l1_fields[TRNS_8X8],
+ !!(pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE));
+ regmap_field_write(h264_ctx->l1_fields[CSIP_FLAG],
+ !!(pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED));
+ regmap_field_write(h264_ctx->l1_fields[NUM_REF0_IDX],
+ pps->num_ref_idx_l0_default_active_minus1);
+ regmap_field_write(h264_ctx->l1_fields[NUM_REF1_IDX],
+ pps->num_ref_idx_l1_default_active_minus1);
+ regmap_field_write(h264_ctx->l1_fields[PIC_INIT_QP],
+ pps->pic_init_qp_minus26 + 26);
+ regmap_field_write(h264_ctx->l1_fields[CB_OFST],
+ pps->chroma_qp_index_offset);
+ regmap_field_write(h264_ctx->l1_fields[CR_OFST],
+ pps->second_chroma_qp_index_offset);
+ regmap_field_write(h264_ctx->l1_fields[WGHT_PRED],
+ !!(pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED));
+ regmap_field_write(h264_ctx->l1_fields[DBF_CP_FLG],
+ !!(pps->flags & V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT));
+
+ regmap_field_write(h264_ctx->l1_fields[SLI_TYPE],
+ (encode_params->slice_type == V4L2_H264_SLICE_TYPE_I) ? (2) : (0));
+ regmap_field_write(h264_ctx->l1_fields[PPS_ID],
+ encode_params->pic_parameter_set_id);
+ regmap_field_write(h264_ctx->l1_fields[DRCT_SMVP], 0);
+ regmap_field_write(h264_ctx->l1_fields[NUM_REF_OVRD],
+ encode_params->num_ref_idx_override);
+ regmap_field_write(h264_ctx->l1_fields[CBC_INIT_IDC],
+ encode_params->cabac_init_idc);
+ regmap_field_write(h264_ctx->l1_fields[FRM_NUM], encode_params->frame_num);
+
+ regmap_field_write(h264_ctx->l1_fields[IDR_PIC_ID],
+ (encode_params->slice_type == V4L2_H264_SLICE_TYPE_I) ?
+ encode_params->idr_pic_id : (unsigned int)(-1));
+ regmap_field_write(h264_ctx->l1_fields[POC_LSB], sps->pic_order_cnt_type);
+
+ regmap_field_write(h264_ctx->l1_fields[DIS_DBLK_IDC],
+ encode_params->disable_deblocking_filter_idc);
+ regmap_field_write(h264_ctx->l1_fields[SLI_ALPH_OFST],
+ encode_params->slice_alpha_c0_offset_div2);
+
+#if 0
+ {
+ H264eRplmo rplmo; // reference picture list modification operation
+ H264eMmco mmco; // memory management control operation
+ int ret;
+
+ h264e_reorder_rd_rewind(slice->reorder);
+ /* reorder process */
+ ret = h264e_reorder_rd_op(slice->reorder, &rplmo);
+ if (!ret) {
+ regmap_field_write(h264_ctx->l1_fields[REF_LIST0_RODR], 1);
+ regmap_field_write(h264_ctx->l1_fields[RODR_PIC_IDX],
+ rplmo.modification_of_pic_nums_idc);
+
+ switch (rplmo.modification_of_pic_nums_idc) {
+ case 0:
+ case 1:
+ regmap_field_write(h264_ctx->l1_fields[RODR_PIC_NUM],
+ rplmo.abs_diff_pic_num_minus1);
+ break;
+ case 2:
+ regmap_field_write(h264_ctx->l1_fields[RODR_PIC_NUM],
+ rplmo.long_term_pic_idx);
+ break;
+ default:
+ mpp_err_f("invalid modification_of_pic_nums_idc %d\n",
+ rplmo.modification_of_pic_nums_idc);
+ break;
+ }
+ } else {
+ // slice->ref_pic_list_modification_flag;
+ regmap_field_write(h264_ctx->l1_fields[REF_LIST0_RODR], 0);
+ regmap_field_write(h264_ctx->l1_fields[RODR_PIC_IDX], 0);
+ regmap_field_write(h264_ctx->l1_fields[RODR_PIC_NUM], 0);
+ }
+
+#endif
+ /* clear all mmco arg first */
+ regmap_field_write(h264_ctx->l1_fields[NOPP_FLG], 0);
+ regmap_field_write(h264_ctx->l1_fields[LTRF_FLG], 0);
+ regmap_field_write(h264_ctx->l1_fields[ARPM_FLG], 0);
+ regmap_field_write(h264_ctx->l1_fields[MMCO4_PRE], 0);
+ regmap_field_write(h264_ctx->l1_fields[MMCO_TYPE0], 0);
+ regmap_field_write(h264_ctx->l1_fields[MMCO_PARM0], 0);
+ regmap_field_write(h264_ctx->l1_fields[MMCO_TYPE1], 0);
+ regmap_field_write(h264_ctx->l1_fields[MMCO_PARM1], 0);
+ regmap_field_write(h264_ctx->l1_fields[MMCO_TYPE2], 0);
+ regmap_field_write(h264_ctx->l1_fields[MMCO_PARM2], 0);
+ regmap_field_write(h264_ctx->l1_fields[LONG_TERM_FRAME_IDX0], 0);
+ regmap_field_write(h264_ctx->l1_fields[LONG_TERM_FRAME_IDX1], 0);
+ regmap_field_write(h264_ctx->l1_fields[LONG_TERM_FRAME_IDX2], 0);
+
+#if 0
+ h264e_marking_rd_rewind(slice->marking);
+
+ /* only update used parameter */
+ if (encode_params->slice_type == V4L2_H264_SLICE_TYPE_I) {
+#endif
+ regmap_field_write(h264_ctx->l1_fields[NOPP_FLG],
+ encode_params->no_output_of_prior_pics);
+ regmap_field_write(h264_ctx->l1_fields[LTRF_FLG],
+ encode_params->long_term_reference_flag);
+#if 0
+ return 0;
+ }
+
+ if (h264e_marking_is_empty(slice->marking))
+ return 0;
+
+ regmap_field_write(h264_ctx->l1_fields[ARPM_FLG], 1);
+
+ /* max 3 mmco */
+ int type = 0;
+ int param_0 = 0;
+ int param_1 = 0;
+
+ h264e_marking_rd_op(slice->marking, &mmco);
+ type = mmco.mmco;
+ switch (type) {
+ case 1:
+ param_0 = mmco.difference_of_pic_nums_minus1;
+ break;
+ case 2:
+ param_0 = mmco.long_term_pic_num;
+ break;
+ case 3:
+ param_0 = mmco.difference_of_pic_nums_minus1;
+ param_1 = mmco.long_term_frame_idx;
+ break;
+ case 4:
+ param_0 = mmco.max_long_term_frame_idx_plus1;
+ break;
+ case 5:
+ break;
+ case 6:
+ param_0 = mmco.long_term_frame_idx;
+ break;
+ default:
+ mpp_err_f("unsupported mmco 0 %d\n", type);
+ type = 0;
+ break;
+ }
+
+ regmap_field_write(h264_ctx->l1_fields[MMCO_TYPE0], type);
+ regmap_field_write(h264_ctx->l1_fields[MMCO_PARM0], param_0);
+ regmap_field_write(h264_ctx->l1_fields[LONG_TERM_FRAME_IDX0], param_1);
+
+ if (h264e_marking_is_empty(slice->marking))
+ break;
+
+ h264e_marking_rd_op(slice->marking, &mmco);
+ type = mmco.mmco;
+ param_0 = 0;
+ param_1 = 0;
+ switch (type) {
+ case 1:
+ param_0 = mmco.difference_of_pic_nums_minus1;
+ break;
+ case 2:
+ param_0 = mmco.long_term_pic_num;
+ break;
+ case 3:
+ param_0 = mmco.difference_of_pic_nums_minus1;
+ param_1 = mmco.long_term_frame_idx;
+ break;
+ case 4:
+ param_0 = mmco.max_long_term_frame_idx_plus1;
+ break;
+ case 5:
+ break;
+ case 6:
+ param_0 = mmco.long_term_frame_idx;
+ break;
+ default:
+ mpp_err_f("unsupported mmco 0 %d\n", type);
+ type = 0;
+ break;
+ }
+
+ regmap_field_write(h264_ctx->l1_fields[MMCO_TYPE1], type);
+ regmap_field_write(h264_ctx->l1_fields[MMCO_PARM1], param_0);
+ regmap_field_write(h264_ctx->l1_fields[LONG_TERM_FRAME_IDX1], param_1);
+
+ if (h264e_marking_is_empty(slice->marking))
+ break;
+
+ h264e_marking_rd_op(slice->marking, &mmco);
+ type = mmco.mmco;
+ param_0 = 0;
+ param_1 = 0;
+ switch (type) {
+ case 1:
+ param_0 = mmco.difference_of_pic_nums_minus1;
+ break;
+	case 2:
+ param_0 = mmco.long_term_pic_num;
+ break;
+ case 3:
+ param_0 = mmco.difference_of_pic_nums_minus1;
+ param_1 = mmco.long_term_frame_idx;
+ break;
+ case 4:
+ param_0 = mmco.max_long_term_frame_idx_plus1;
+ break;
+ case 5:
+ break;
+ case 6:
+ param_0 = mmco.long_term_frame_idx;
+ break;
+ default:
+ mpp_err_f("unsupported mmco 0 %d\n", type);
+ type = 0;
+ break;
+ }
+
+ regmap_field_write(h264_ctx->l1_fields[MMCO_TYPE2], type);
+ regmap_field_write(h264_ctx->l1_fields[MMCO_PARM2], param_0);
+ regmap_field_write(h264_ctx->l1_fields[LONG_TERM_FRAME_IDX2], param_1);
+ }
+#endif
+}
+
+static void setup_vepu541_rdo_pred(struct rkvpu_ctx *ctx, struct rkvenc_h264_run *run)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ const struct v4l2_ctrl_h264_encode_params *encode_params;
+ const struct v4l2_ctrl_h264_sps *sps;
+ const struct v4l2_ctrl_h264_pps *pps;
+
+ encode_params = run->encode_params;
+ sps = run->sps;
+ pps = run->pps;
+
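+	/*
+	 * Chroma klut window: intra slices use words 0..23 of
+	 * h264e_klut_weight with no offset, inter slices words 4..27 with a
+	 * klut offset of 3.
+	 */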
+ if (encode_params->slice_type == V4L2_H264_SLICE_TYPE_I) {
+ regmap_field_write(h264_ctx->l1_fields[CHRM_KLUT_OFST], 0);
+ regmap_bulk_write(h264_ctx->l1_regmap, RKVENC_VEPU540_L1_KLUT_WGT(0),
+ &h264e_klut_weight[0], CHROMA_KLUT_TAB_SIZE);
+ } else {
+ regmap_field_write(h264_ctx->l1_fields[CHRM_KLUT_OFST], 3);
+ regmap_bulk_write(h264_ctx->l1_regmap, RKVENC_VEPU540_L1_KLUT_WGT(0),
+ &h264e_klut_weight[4], CHROMA_KLUT_TAB_SIZE);
+ }
+
+ regmap_field_write(h264_ctx->l1_fields[VTHD_Y], 9);
+ regmap_field_write(h264_ctx->l1_fields[VTHD_C], 63);
+
+	/* profile_idc/level_idc hold raw bitstream values: 66 = baseline, 77 = main, 30 = level 3.0 */
+	regmap_field_write(h264_ctx->l1_fields[RECT_SIZE],
+			   (sps->profile_idc == 66 &&
+			    sps->level_idc <= 30) ? 1 : 0);
+	regmap_field_write(h264_ctx->l1_fields[INTER_4X4], 0);
+	regmap_field_write(h264_ctx->l1_fields[VLC_LMT],
+			   sps->profile_idc < 77 &&
+			   !(encode_params->flags & V4L2_H264_ENCODE_FLAG_ENTROPY_CODING_MODE));
+ regmap_field_write(h264_ctx->l1_fields[CHRM_SPCL], 1);
+ regmap_field_write(h264_ctx->l1_fields[RDO_MASK], 24);
+ regmap_field_write(h264_ctx->l1_fields[CCWA_E], 1);
+	regmap_field_write(h264_ctx->l1_fields[SCL_LST_SEL],
+			   !!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT));
+	regmap_field_write(h264_ctx->l1_fields[SCL_LST_SEL_],
+			   !!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT));
+ regmap_field_write(h264_ctx->l1_fields[ATR_E], 1);
+ regmap_field_write(h264_ctx->l1_fields[ATF_EDG], 0);
+ regmap_field_write(h264_ctx->l1_fields[ATF_LVL_E], 0);
+ regmap_field_write(h264_ctx->l1_fields[ATF_INTRA_E], 1);
+ regmap_field_write(h264_ctx->l1_fields[SATD_BYPS_FLG], 0);
+}
+
+static void setup_vepu541_rc_base(struct rkvpu_ctx *ctx, struct rkvenc_h264_run *run)
+{
+ struct rkvpu_dev *rkvpu = ctx->dev;
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ const struct v4l2_ctrl_h264_encode_params *encode_params = run->encode_params;
+ const struct v4l2_ctrl_h264_encode_rc *encode_rc = run->encode_rc;
+ const struct v4l2_ctrl_h264_sps *sps = run->sps;
+ int mb_w = sps->pic_width_in_mbs_minus1 + 1;
+	int mb_h = sps->pic_height_in_map_units_minus1 + 1; /* frame MBs only: map units == MBs */
+ unsigned int qp_target = encode_rc->qp;
+ unsigned int qp_min = encode_rc->qp_min;
+ unsigned int qp_max = encode_rc->qp_max;
+ unsigned int qpmap_mode = 1;
+ int mb_target_bits_mul_16 = DIV_ROUND_UP((encode_rc->target_bits << 4), (mb_w * mb_h));
+ int mb_target_bits;
+ int negative_bits_thd;
+ int positive_bits_thd;
+
+ v4l2_info(&rkvpu->v4l2_dev, "bittarget %d qp [%d %d %d]\n",
+ encode_rc->target_bits, qp_min, qp_target, qp_max);
+
+	if (mb_target_bits_mul_16 >= 0x100000)
+		mb_target_bits_mul_16 = 0x50000;
+
+ mb_target_bits = (mb_target_bits_mul_16 * mb_w) >> 4;
+ negative_bits_thd = 0 - mb_target_bits / 4;
+ positive_bits_thd = mb_target_bits / 4;
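+	/*
+	 * Illustrative numbers (not from any hardware documentation): for a
+	 * 1920x1080 frame (120x68 macroblocks) and a 40000 bit frame target,
+	 * mb_target_bits_mul_16 = DIV_ROUND_UP(640000, 8160) = 79, one
+	 * macroblock row then targets (79 * 120) >> 4 = 592 bits and the
+	 * rate-control deviation thresholds become +/-148 bits.
+	 */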
+
+ regmap_field_write(h264_ctx->l1_fields[PIC_QP], qp_target);
+
+ regmap_field_write(h264_ctx->l1_fields[RC_EN], 1);
+ regmap_field_write(h264_ctx->l1_fields[AQ_EN], 1);
+ regmap_field_write(h264_ctx->l1_fields[AQ_MODE], 0);
+ regmap_field_write(h264_ctx->l1_fields[RC_CTU_NUM], mb_w);
+
+ regmap_field_write(h264_ctx->l1_fields[RC_QP_RANGE],
+ (encode_params->slice_type == V4L2_H264_SLICE_TYPE_I) ?
+ 0 : 2);
+ regmap_field_write(h264_ctx->l1_fields[RC_MAX_QP], qp_max);
+ regmap_field_write(h264_ctx->l1_fields[RC_MIN_QP], qp_min);
+
+ regmap_field_write(h264_ctx->l1_fields[CTU_EBIT], mb_target_bits_mul_16);
+
+ regmap_field_write(h264_ctx->l1_fields[QP_ADJ0], -1);
+ regmap_field_write(h264_ctx->l1_fields[QP_ADJ1], 0);
+ regmap_field_write(h264_ctx->l1_fields[QP_ADJ2], 0);
+ regmap_field_write(h264_ctx->l1_fields[QP_ADJ3], 0);
+ regmap_field_write(h264_ctx->l1_fields[QP_ADJ4], 0);
+ regmap_field_write(h264_ctx->l1_fields[QP_ADJ5], 0);
+ regmap_field_write(h264_ctx->l1_fields[QP_ADJ6], 0);
+ regmap_field_write(h264_ctx->l1_fields[QP_ADJ7], 0);
+ regmap_field_write(h264_ctx->l1_fields[QP_ADJ8], 1);
+
+ regmap_field_write(h264_ctx->l1_fields[RC_DTHD0], negative_bits_thd);
+ regmap_field_write(h264_ctx->l1_fields[RC_DTHD1], positive_bits_thd);
+ regmap_field_write(h264_ctx->l1_fields[RC_DTHD2], positive_bits_thd);
+ regmap_field_write(h264_ctx->l1_fields[RC_DTHD3], positive_bits_thd);
+ regmap_field_write(h264_ctx->l1_fields[RC_DTHD4], positive_bits_thd);
+ regmap_field_write(h264_ctx->l1_fields[RC_DTHD5], positive_bits_thd);
+ regmap_field_write(h264_ctx->l1_fields[RC_DTHD6], positive_bits_thd);
+ regmap_field_write(h264_ctx->l1_fields[RC_DTHD7], positive_bits_thd);
+ regmap_field_write(h264_ctx->l1_fields[RC_DTHD8], positive_bits_thd);
+
+ regmap_field_write(h264_ctx->l1_fields[QPMIN_AREA0], qp_min);
+ regmap_field_write(h264_ctx->l1_fields[QPMAX_AREA0], qp_max);
+ regmap_field_write(h264_ctx->l1_fields[QPMIN_AREA1], qp_min);
+ regmap_field_write(h264_ctx->l1_fields[QPMAX_AREA1], qp_max);
+ regmap_field_write(h264_ctx->l1_fields[QPMIN_AREA2], qp_min);
+
+ regmap_field_write(h264_ctx->l1_fields[QPMAX_AREA2], qp_max);
+ regmap_field_write(h264_ctx->l1_fields[QPMIN_AREA3], qp_min);
+ regmap_field_write(h264_ctx->l1_fields[QPMAX_AREA3], qp_max);
+ regmap_field_write(h264_ctx->l1_fields[QPMIN_AREA4], qp_min);
+ regmap_field_write(h264_ctx->l1_fields[QPMAX_AREA4], qp_max);
+
+ regmap_field_write(h264_ctx->l1_fields[QPMIN_AREA5], qp_min);
+ regmap_field_write(h264_ctx->l1_fields[QPMAX_AREA5], qp_max);
+ regmap_field_write(h264_ctx->l1_fields[QPMIN_AREA6], qp_min);
+ regmap_field_write(h264_ctx->l1_fields[QPMAX_AREA6], qp_max);
+ regmap_field_write(h264_ctx->l1_fields[QPMIN_AREA7], qp_min);
+
+ regmap_field_write(h264_ctx->l1_fields[QPMAX_AREA7], qp_max);
+ regmap_field_write(h264_ctx->l1_fields[QPMAP_MODE], qpmap_mode);
+}
+
+static void setup_vepu541_io_buf(struct rkvpu_ctx *ctx,
+ struct rkvenc_h264_run *run)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ const struct v4l2_pix_format_mplane *src_fmt;
+ const struct v4l2_format *f;
+ int ver_stride;
+ int hor_stride;
+ int src_plane2offset = 0;
+ int src_plane3offset = 0;
+ struct vb2_v4l2_buffer *src_buf = run->base.bufs.src;
+ struct vb2_v4l2_buffer *dst_buf = run->base.bufs.dst;
+
+ f = &ctx->src_fmt;
+ src_fmt = &f->fmt.pix_mp;
+
+	hor_stride = src_fmt->plane_fmt[0].bytesperline ?
+		     src_fmt->plane_fmt[0].bytesperline : src_fmt->width;
+	ver_stride = src_fmt->height;
+
+ /*
+ if (MPP_FRAME_FMT_IS_FBC(fmt)) {
+ off_in[0] = mpp_frame_get_fbc_offset(frm);
+ off_in[1] = 0;
+ } else
+ */
+ if (!rkvenc_fmt_is_rgb(src_fmt->pixelformat)) {
+ switch (src_fmt->pixelformat) {
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ src_plane2offset = hor_stride * ver_stride;
+ src_plane3offset = hor_stride * ver_stride;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ src_plane2offset = hor_stride * ver_stride;
+ src_plane3offset = hor_stride * ver_stride * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ src_plane2offset = hor_stride * ver_stride;
+ src_plane3offset = hor_stride * ver_stride * 5 / 4;
+ break;
+ default:
+ break;
+ }
+ }
+
+ regmap_field_write(h264_ctx->l1_fields[ADR_SRC0],
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0));
+
+ if (src_buf->vb2_buf.num_planes > 1)
+ regmap_field_write(h264_ctx->l1_fields[ADR_SRC1],
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 1) + src_plane2offset);
+ else
+ regmap_field_write(h264_ctx->l1_fields[ADR_SRC1], 0xbeefdead);
+
+ if (src_buf->vb2_buf.num_planes > 2)
+ regmap_field_write(h264_ctx->l1_fields[ADR_SRC2],
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 2) + src_plane3offset);
+ else
+ regmap_field_write(h264_ctx->l1_fields[ADR_SRC2], 0xdeadbeef);
+
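+	/*
+	 * Bitstream ring buffer registers: bottom, read and start pointers
+	 * all begin at the CAPTURE buffer base while the top is placed at
+	 * base + sizeimage, so the whole destination buffer is usable for
+	 * output.
+	 */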
+ f = &ctx->dst_fmt;
+ regmap_field_write(h264_ctx->l1_fields[BSBT_ADDR],
+ (vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0) + f->fmt.pix_mp.plane_fmt[0].sizeimage));
+ regmap_field_write(h264_ctx->l1_fields[BSBB_ADDR],
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0));
+ regmap_field_write(h264_ctx->l1_fields[BSBR_ADDR],
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0));
+ regmap_field_write(h264_ctx->l1_fields[ADR_BSBS],
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0) /* + mpp_packet_get_length(pkt) */ );
+}
+
+static void setup_vepu541_roi(struct rkvpu_ctx *ctx, struct rkvenc_h264_run *run)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+#if 0
+ const struct v4l2_ctrl_h264_sps *sps;
+ sps = run->sps;
+
+ if (ctx->roi_data2) {
+ MppEncROICfg2 *cfg = ( MppEncROICfg2 *)ctx->roi_data2;
+
+ regmap_field_write(h264_ctx->l1_fields[ROI_ENC], 1);
+ regmap_field_write(h264_ctx->l1_fields[ROI_ADDR],
+ mpp_buffer_get_fd(cfg->base_cfg_buf));
+ } else if (ctx->qpmap) {
+ regmap_field_write(h264_ctx->l1_fields[ROI_ENC], 1);
+ regmap_field_write(h264_ctx->l1_fields[ROI_ADDR],
+ mpp_buffer_get_fd(ctx->qpmap));
+ } else {
+ MppEncROICfg *roi = ctx->roi_data;
+ unsigned int w = sps->pic_width_in_mbs * 16;
+ unsigned int h = sps->pic_height_in_mbs * 16;
+
+ /* roi setup */
+ if (roi && roi->number && roi->regions) {
+ int roi_buf_size = vepu541_get_roi_buf_size(w, h);
+
+ if (!ctx->roi_buf || roi_buf_size != ctx->roi_buf_size) {
+ if (NULL == ctx->roi_grp)
+ mpp_buffer_group_get_internal(&ctx->roi_grp,
+ MPP_BUFFER_TYPE_ION);
+ else if (roi_buf_size != ctx->roi_buf_size) {
+ if (ctx->roi_buf) {
+ mpp_buffer_put(ctx->roi_buf);
+ ctx->roi_buf = NULL;
+ }
+ mpp_buffer_group_clear(ctx->roi_grp);
+ }
+
+ if (!ctx->roi_grp)
+ return;
+
+ if (NULL == ctx->roi_buf)
+ mpp_buffer_get(ctx->roi_grp,
+ &ctx->roi_buf, roi_buf_size);
+
+ ctx->roi_buf_size = roi_buf_size;
+ }
+
+ if (!ctx->roi_grp)
+ return;
+
+ int fd = mpp_buffer_get_fd(ctx->roi_buf);
+ void *buf = mpp_buffer_get_ptr(ctx->roi_buf);
+
+ regmap_field_write(h264_ctx->l1_fields[ROI_ENC], 1);
+ regmap_field_write(h264_ctx->l1_fields[ROI_ADDR], fd);
+
+ vepu541_set_roi(buf, roi, w, h);
+ } else {
+#endif
+ regmap_field_write(h264_ctx->l1_fields[ROI_ENC], 0);
+ regmap_field_write(h264_ctx->l1_fields[ROI_ADDR], 0);
+#if 0
+ }
+ }
+#endif
+}
+
+static unsigned int rec_luma_size(unsigned int width, unsigned int height)
+{
+ return round_up(width, MB_DIM) * round_up(height, MB_DIM);
+}
+
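+/*
+ * Reconstruction/reference buffer layout as used below: RFPW_H points at the
+ * buffer base and RFPW_B at base plus the rounded-up luma size (used here in
+ * place of the FBC header size), while DSPW/DSPR hold the downscaled picture
+ * that is read back as reference when encoding the next P slice.
+ */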
+
+static void setup_vepu541_recn_refr(struct rkvpu_ctx *ctx,
+ struct rkvenc_h264_run *run)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ const struct v4l2_format *f = &ctx->src_fmt;
+ const struct v4l2_pix_format_mplane *src_fmt = &f->fmt.pix_mp;
+ struct vb2_v4l2_buffer *dst_buf = run->base.bufs.dst;
+ const struct v4l2_ctrl_h264_encode_params *encode_params;
+ struct rkvpu_enc_buf *enc_buf;
+ struct rkvpu_aux_buf *rec_buf;
+ struct rkvpu_aux_buf *ds_buf;
+
+ enc_buf = rkvpu_get_enc_buf(dst_buf);
+ rec_buf = &enc_buf->rec_buf;
+ ds_buf = &enc_buf->ds_buf;
+
+ encode_params = run->encode_params;
+
+ regmap_field_write(h264_ctx->l1_fields[RFPW_H_ADDR], rec_buf->dma);
+ regmap_field_write(h264_ctx->l1_fields[RFPW_B_ADDR], rec_buf->dma +
+ rec_luma_size(src_fmt->width, src_fmt->height)); // fbc_hdr_size;
+ regmap_field_write(h264_ctx->l1_fields[DSPW_ADDR], ds_buf->dma);
+
+ if (encode_params->slice_type == V4L2_H264_SLICE_TYPE_P) {
+ struct vb2_queue *queue;
+ struct vb2_v4l2_buffer *ref_buf;
+ u64 reference_ts;
+ struct vb2_buffer *buf;
+
+ queue = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ reference_ts = encode_params->reference_ts;
+ buf = vb2_find_buffer(queue, reference_ts);
+ if (!buf)
+ return;
+
+ ref_buf = to_vb2_v4l2_buffer(buf);
+ enc_buf = rkvpu_get_enc_buf(ref_buf);
+ rec_buf = &enc_buf->rec_buf;
+ ds_buf = &enc_buf->ds_buf;
+
+ regmap_field_write(h264_ctx->l1_fields[RFPR_H_ADDR], rec_buf->dma);
+ regmap_field_write(h264_ctx->l1_fields[RFPR_B_ADDR], rec_buf->dma +
+ rec_luma_size(src_fmt->width, src_fmt->height)); // fbc_hdr_size;
+
+ regmap_field_write(h264_ctx->l1_fields[DSPR_ADDR], ds_buf->dma);
+ }
+}
+
+static void setup_vepu541_split(struct rkvpu_ctx *ctx)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+#if 0
+ switch (split_mode) {
+ case MPP_ENC_SPLIT_NONE:
+#endif
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_MODE], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_CPST], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLI_MAX_NUM_M1], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLI_FLSH], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_CNUM_M1], 0);
+
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_BYTE], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLEN_FIFO], 0);
+#if 0
+ break;
+ case MPP_ENC_SPLIT_BY_BYTE:
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT], 1);
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_MODE], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_CPST], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLI_MAX_NUM_M1], 500);
+ regmap_field_write(h264_ctx->l1_fields[SLI_FLSH], 1);
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_CNUM_M1], 0);
+
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_BYTE],
+ cfg->split_arg);
+ regmap_field_write(h264_ctx->l1_fields[SLEN_FIFO], 0);
+ break;
+ case MPP_ENC_SPLIT_BY_CTU:
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT], 1);
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_MODE], 1);
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_CPST], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLI_MAX_NUM_M1], 500);
+ regmap_field_write(h264_ctx->l1_fields[SLI_FLSH], 1);
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_CNUM_M1],
+ cfg->split_arg - 1);
+
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_BYTE], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLEN_FIFO], 0);
+ break;
+ default:
+		pr_info("invalid slice split mode\n");
+ break;
+ }
+#endif
+}
+
+static void setup_vepu540_force_slice_split(struct rkvpu_ctx *ctx, int width)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ int mb_w = ALIGN(width, 16) >> 4;
+
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT], 1);
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_MODE], 1);
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_CPST], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLI_MAX_NUM_M1], 500);
+ regmap_field_write(h264_ctx->l1_fields[SLI_FLSH], 1);
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_CNUM_M1], mb_w - 1);
+
+ regmap_field_write(h264_ctx->l1_fields[SLI_SPLT_BYTE], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLEN_FIFO], 0);
+ regmap_field_write(h264_ctx->l1_fields[SLI_CRS_EN], 0);
+}
+
+static void setup_vepu541_me(struct rkvpu_ctx *ctx, struct rkvenc_h264_run *run)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ const struct v4l2_ctrl_h264_encode_params *encode_params;
+ const struct v4l2_ctrl_h264_sps *sps;
+
+ int w_temp = 1296;
+ int h_temp = 1;
+ int h_val_0 = 1;
+ int h_val_1 = 18;
+ int temp0, temp1;
+ int swin_scope_wd16;
+ unsigned int pic_temp;
+ int cime_linebuf_w;
+ int pic_w, pic_wd64;
+ int cime_w = 176;
+ int cime_h = 112;
+ int cime_blk_w_max = 44;
+ int cime_blk_h_max = 28;
+ u16 pic_height_in_mbs;
+ u16 pic_width_in_mbs;
+ u32 pic_wd8_m1;
+ u32 cme_srch_h;
+ u32 cme_srch_v;
+ u32 cme_rama_h;
+
+ encode_params = run->encode_params;
+ sps = run->sps;
+
+ pic_height_in_mbs = sps->pic_height_in_map_units_minus1 + 1;
+ pic_width_in_mbs = sps->pic_width_in_mbs_minus1 + 1;
+
+ /*
+ * Step 1. limit the mv range by level_idc
+ * For level 1 and level 1b the vertical MV range is [-64,+63.75]
+ * For level 1.1, 1.2, 1.3 and 2 the vertical MV range is [-128,+127.75]
+ */
+	switch (sps->level_idc) {
+	case 9:		/* level 1b */
+	case 10:	/* level 1 */
+		cime_blk_h_max = 12;
+		break;
+	case 11:	/* level 1.1 */
+	case 12:	/* level 1.2 */
+	case 13:	/* level 1.3 */
+	case 20:	/* level 2 */
+		cime_blk_h_max = 28;
+		break;
+	default:
+		cime_blk_h_max = 28;
+		break;
+	}
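+	/*
+	 * The coarse ME search window defaults to 176x112 pixels, i.e. 44x28
+	 * blocks of 4 pixels; the level-dependent clamp above and the
+	 * picture-size clamp below only ever shrink it.
+	 */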
+
+ if (cime_w < cime_blk_w_max * 4)
+ cime_blk_w_max = cime_w / 4;
+
+ if (cime_h < cime_blk_h_max * 4)
+ cime_blk_h_max = cime_h / 4;
+
+ /*
+ * Step 2. limit the mv range by image size
+ */
+ if (cime_blk_w_max / 4 * 2 > (pic_width_in_mbs * 2 + 1) / 2)
+ cime_blk_w_max = (pic_width_in_mbs * 2 + 1) / 2 / 2 * 4;
+
+ if (cime_blk_h_max / 4 > ALIGN(pic_height_in_mbs * 16, 64) / 128 * 4)
+ cime_blk_h_max = ALIGN(pic_height_in_mbs * 16, 64) / 128 * 16;
+
+ regmap_field_write(h264_ctx->l1_fields[CME_SRCH_H], cime_blk_w_max / 4);
+ regmap_field_write(h264_ctx->l1_fields[CME_SRCH_V], cime_blk_h_max / 4);
+ regmap_field_write(h264_ctx->l1_fields[RME_SRCH_H], 7);
+ regmap_field_write(h264_ctx->l1_fields[RME_SRCH_V], 5);
+ regmap_field_write(h264_ctx->l1_fields[DLT_FRM_NUM], 0);
+
+ if (encode_params->slice_type == V4L2_H264_SLICE_TYPE_I) {
+ regmap_field_write(h264_ctx->l1_fields[PMV_MDST_H], 0);
+ regmap_field_write(h264_ctx->l1_fields[PMV_MDST_V], 0);
+ } else {
+ regmap_field_write(h264_ctx->l1_fields[PMV_MDST_H], 5);
+ regmap_field_write(h264_ctx->l1_fields[PMV_MDST_V], 5);
+ }
+ regmap_field_write(h264_ctx->l1_fields[MV_LIMIT], 2);
+ regmap_field_write(h264_ctx->l1_fields[PMV_NUM], 2);
+
+ // VEPU540 specific
+ regmap_field_read(h264_ctx->l1_fields[PIC_WD8_M1], &pic_wd8_m1);
+ pic_temp = ((pic_wd8_m1 + 1) * 8 + 63) / 64 * 64;
+ cime_linebuf_w = pic_temp / 64;
+
+ regmap_field_write(h264_ctx->l1_fields[CME_LINEBUF_W], cime_linebuf_w);
+
+ while ((w_temp > ((h_temp - h_val_0) * cime_linebuf_w * 4 + ((h_val_1 - h_temp) * 4 * 7)))
+ && (h_temp < 17))
+ h_temp = h_temp + h_val_0;
+
+ if (w_temp < ((h_temp - h_val_0) * cime_linebuf_w * 4 + ((h_val_1 - h_temp) * 4 * 7)))
+ h_temp = h_temp - h_val_0;
+
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_H], h_temp);
+
+ regmap_field_read(h264_ctx->l1_fields[CME_SRCH_H], &cme_srch_h);
+ swin_scope_wd16 = (cme_srch_h + 3) / 4 * 2 + 1;
+
+ regmap_field_read(h264_ctx->l1_fields[CME_SRCH_V], &cme_srch_v);
+ temp0 = 2 * cme_srch_v + 1;
+
+ regmap_field_read(h264_ctx->l1_fields[CME_RAMA_H], &cme_rama_h);
+ if (temp0 > cme_rama_h)
+ temp0 = cme_rama_h;
+
+ temp1 = 0;
+
+ pic_w = pic_width_in_mbs * 16;
+ pic_wd64 = (pic_w + 63) / 64;
+
+ if (pic_wd64 >= swin_scope_wd16)
+ temp1 = swin_scope_wd16;
+ else
+ temp1 = pic_wd64 * 2;
+
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_MAX],
+ pic_wd64 * (temp0 - 1) + temp1);
+
+ // TODO: VEPU541 (8K)
+#if 0
+ int swin_all_4_ver;
+ int swin_all_16_hor;
+
+ if (pic_w > 3584)
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_H], 8);
+ else if (pic_w > 3136)
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_H], 9);
+ else if (pic_w > 2816)
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_H], 10);
+ else if (pic_w > 2560)
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_H], 11);
+ else if (pic_w > 2368)
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_H], 12);
+ else if (pic_w > 2176)
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_H], 13);
+ else if (pic_w > 2048)
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_H], 14);
+ else if (pic_w > 1856)
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_H], 15);
+ else if (pic_w > 1792)
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_H], 16);
+ else
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_H], 17);
+
+ regmap_field_read(h264_ctx->l1_fields[CME_SRCH_V], &cme_srch_v);
+ swin_all_4_ver = 2 * cme_srch_v + 1;
+
+ regmap_field_read(h264_ctx->l1_fields[CME_SRCH_H], &cme_srch_h);
+ swin_all_16_hor = (cme_srch_h * 4 + 15) / 16 * 2 + 1;
+
+	regmap_field_read(h264_ctx->l1_fields[CME_RAMA_H], &cme_rama_h);
+ if (swin_all_4_ver < cme_rama_h)
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_MAX],
+ (swin_all_4_ver - 1) * pic_wd64 + swin_all_16_hor);
+ else
+ regmap_field_write(h264_ctx->l1_fields[CME_RAMA_MAX],
+ (cme_rama_h - 1) * pic_wd64 + swin_all_16_hor);
+#endif
+}
+
+#define H264E_LAMBDA_TAB_SIZE 52
+
+static unsigned int h264e_lambda_default[58] = {
+ 0x00000003, 0x00000005, 0x00000006, 0x00000007,
+ 0x00000009, 0x0000000b, 0x0000000e, 0x00000012,
+ 0x00000016, 0x0000001c, 0x00000024, 0x0000002d,
+ 0x00000039, 0x00000048, 0x0000005b, 0x00000073,
+ 0x00000091, 0x000000b6, 0x000000e6, 0x00000122,
+ 0x0000016d, 0x000001cc, 0x00000244, 0x000002db,
+ 0x00000399, 0x00000489, 0x000005b6, 0x00000733,
+ 0x00000912, 0x00000b6d, 0x00000e66, 0x00001224,
+ 0x000016db, 0x00001ccc, 0x00002449, 0x00002db7,
+ 0x00003999, 0x00004892, 0x00005b6f, 0x00007333,
+ 0x00009124, 0x0000b6de, 0x0000e666, 0x00012249,
+ 0x00016dbc, 0x0001cccc, 0x00024492, 0x0002db79,
+ 0x00039999, 0x00048924, 0x0005b6f2, 0x00073333,
+ 0x00091249, 0x000b6de5, 0x000e6666, 0x00122492,
+ 0x0016dbcb, 0x001ccccc,
+};
+
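+/*
+ * The lambda table has 58 entries so that the two 52-entry RDO weight tables
+ * (WGTA/WGTB) can be loaded from offsets 6 and 5 respectively in
+ * setup_vepu541_l2().
+ */
+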
+static void setup_vepu541_l2(struct rkvpu_ctx *ctx, struct rkvenc_h264_run *run)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ const struct v4l2_ctrl_h264_encode_params *encode_params;
+ int i;
+
+ encode_params = run->encode_params;
+
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHDY4_0], 1);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHDY4_1], 4);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHDY4_2], 9);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHDY4_3], 36);
+
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHDC8_0], 1);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHDC8_1], 4);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHDC8_2], 9);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHDC8_3], 36);
+
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHDY8_0], 1);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHDY8_1], 4);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHDY8_2], 9);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHDY8_3], 36);
+
+ regmap_field_write(h264_ctx->l2_fields[IPRD_TTHD_UL], 0x0);
+
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTY8_0], 0x30);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTY8_1], 0x3c);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTY8_2], 0x28);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTY8_3], 0x30);
+
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTY4_0], 0x30);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTY4_1], 0x3c);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTY4_2], 0x28);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTY4_3], 0x30);
+
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTY16_0], 0x30);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTY16_1], 0x3c);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTY16_2], 0x28);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTY16_3], 0x30);
+
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTC8_0], 0x24);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTC8_1], 0x2a);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTC8_2], 0x1c);
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGTC8_3], 0x20);
+
+ /* 000556ab */
+ regmap_field_write(h264_ctx->l2_fields[QNT_BIAS_P], 171);
+
+ regmap_field_write(h264_ctx->l2_fields[ATR_THD0], 1);
+ regmap_field_write(h264_ctx->l2_fields[ATR_THD1], 4);
+
+ if (encode_params->slice_type == V4L2_H264_SLICE_TYPE_I) {
+ regmap_field_write(h264_ctx->l2_fields[QNT_BIAS_I], 683);
+ regmap_field_write(h264_ctx->l2_fields[ATR_THD2], 36);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV16_WGT0], 16);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV16_WGT1], 16);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV16_WGT2], 16);
+
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV8_WGT0], 32);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV8_WGT1], 32);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV8_WGT2], 32);
+
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV4_WGT0], 20);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV4_WGT1], 18);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV4_WGT2], 16);
+ } else {
+ regmap_field_write(h264_ctx->l2_fields[QNT_BIAS_I], 583);
+ regmap_field_write(h264_ctx->l2_fields[ATR_THD2], 81);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV16_WGT0], 28);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV16_WGT1], 27);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV16_WGT2], 23);
+
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV8_WGT0], 32);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV8_WGT1], 32);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV8_WGT2], 32);
+
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV4_WGT0], 28);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV4_WGT1], 27);
+ regmap_field_write(h264_ctx->l2_fields[ATR_LV4_WGT2], 23);
+ }
+
+ regmap_field_write(h264_ctx->l2_fields[ATR_QP], 45);
+ regmap_field_write(h264_ctx->l2_fields[ATF_TTHD0], 0);
+ regmap_field_write(h264_ctx->l2_fields[ATF_TTHD1], 64);
+ regmap_field_write(h264_ctx->l2_fields[ATF_TTHD2], 144);
+ regmap_field_write(h264_ctx->l2_fields[ATF_TTHD3], 2500);
+
+ regmap_field_write(h264_ctx->l2_fields[ATF_STHD_10], 80);
+ regmap_field_write(h264_ctx->l2_fields[ATF_STHD_MAX], 280);
+
+ regmap_field_write(h264_ctx->l2_fields[ATF_STHD_11], 144);
+ regmap_field_write(h264_ctx->l2_fields[ATF_STHD_20], 192);
+
+ regmap_field_write(h264_ctx->l2_fields[ATF_WGT10], 26);
+ regmap_field_write(h264_ctx->l2_fields[ATF_WGT11], 24);
+
+ regmap_field_write(h264_ctx->l2_fields[ATF_WGT12], 19);
+ regmap_field_write(h264_ctx->l2_fields[ATF_WGT20], 22);
+
+ regmap_field_write(h264_ctx->l2_fields[ATF_WGT21], 19);
+ regmap_field_write(h264_ctx->l2_fields[ATF_WGT30], 19);
+
+ regmap_field_write(h264_ctx->l2_fields[ATF_OFST10], 3500);
+ regmap_field_write(h264_ctx->l2_fields[ATF_OFST11], 3500);
+
+ regmap_field_write(h264_ctx->l2_fields[ATF_OFST12], 0);
+ regmap_field_write(h264_ctx->l2_fields[ATF_OFST20], 3500);
+
+ regmap_field_write(h264_ctx->l2_fields[ATF_OFST21], 1000);
+ regmap_field_write(h264_ctx->l2_fields[ATF_OFST30], 0);
+
+ // HEVC specific
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGT_QP0], 0);
+ /* ~ */
+ regmap_field_write(h264_ctx->l2_fields[IPRD_WGT_QP51], 0);
+
+ regmap_bulk_write(h264_ctx->l2_regmap, RKVENC_VEPU540_L2_RDO_WGTA_QP(0),
+ &h264e_lambda_default[6], H264E_LAMBDA_TAB_SIZE);
+
+ regmap_bulk_write(h264_ctx->l2_regmap, RKVENC_VEPU540_L2_RDO_WGTB_QP(0),
+ &h264e_lambda_default[5], H264E_LAMBDA_TAB_SIZE);
+
+ regmap_field_write(h264_ctx->l2_fields[MADI_MODE], 0);
+
+	regmap_bulk_write(h264_ctx->l2_regmap, RKVENC_VEPU540_L2_AQ_TTHD(0),
+			  &h264_aq_tthd_default[0], 16);
+
+	if (encode_params->slice_type == V4L2_H264_SLICE_TYPE_I)
+		for (i = 0; i < ARRAY_SIZE(h264_I_aq_step_default); i++)
+			regmap_field_write(h264_ctx->l2_fields[AQ_STEP0 + i],
+					   h264_I_aq_step_default[i] & 0x3f);
+	else
+		for (i = 0; i < ARRAY_SIZE(h264_P_aq_step_default); i++)
+			regmap_field_write(h264_ctx->l2_fields[AQ_STEP0 + i],
+					   h264_P_aq_step_default[i] & 0x3f);
+
+ regmap_field_write(h264_ctx->l2_fields[MVD_PNLT_E], 1);
+ regmap_field_write(h264_ctx->l2_fields[MVD_PNLT_COEF], 1);
+ regmap_field_write(h264_ctx->l2_fields[MVD_PNLT_CNST], 16000);
+ regmap_field_write(h264_ctx->l2_fields[MVD_PNLT_LTHD], 0);
+ regmap_field_write(h264_ctx->l2_fields[MVD_PNLT_HTHD], 0);
+
+ regmap_field_write(h264_ctx->l2_fields[ATR1_THD0], 1);
+ regmap_field_write(h264_ctx->l2_fields[ATR1_THD1], 4);
+ regmap_field_write(h264_ctx->l2_fields[ATR1_THD2], 49);
+}
+
+static int h264e_vepu541_gen_regs(struct rkvpu_ctx *ctx, struct rkvenc_h264_run *run)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ struct rkvpu_dev *rkvpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf = run->base.bufs.src;
+ const struct v4l2_format *f = &ctx->src_fmt;
+ const struct v4l2_pix_format_mplane *src_fmt = &f->fmt.pix_mp;
+ struct vb2_v4l2_buffer *dst_buf = run->base.bufs.dst;
+ struct rkvpu_enc_buf *enc_buf;
+ struct rkvpu_aux_buf *me_buf;
+
+ enc_buf = rkvpu_get_enc_buf(dst_buf);
+ me_buf = &enc_buf->me_buf;
+ v4l2_info(&rkvpu->v4l2_dev, "frame %d generate regs now", src_buf->sequence);
+
+ setup_vepu541_l2(ctx, run);
+
+ setup_vepu541_normal(ctx);
+ setup_vepu541_prep(ctx);
+ setup_vepu541_codec(ctx, run);
+ setup_vepu541_rdo_pred(ctx, run);
+ setup_vepu541_rc_base(ctx, run);
+ setup_vepu541_io_buf(ctx, run);
+
+ setup_vepu541_recn_refr(ctx, run);
+
+ regmap_field_write(h264_ctx->l1_fields[MEIW_ADDR], me_buf->dma);
+ setup_vepu541_roi(ctx, run);
+
+#if 0
+ regmap_field_write(h264_ctx->l1_fields[PIC_OFST_Y],
+ mpp_frame_get_offset_y(task->frame));
+ regmap_field_write(h264_ctx->l1_fields[PIC_OFST_X],
+ mpp_frame_get_offset_x(task->frame));
+
+#endif
+ setup_vepu541_split(ctx);
+
+ /* only limitation on vepu540 */
+ if (src_fmt->width > 1920)
+ setup_vepu540_force_slice_split(ctx, src_fmt->width);
+
+ setup_vepu541_me(ctx, run /*, TODO: ctx->is_vepu540 */);
+
+ //FIXME
+// vepu540_set_osd(&ctx->osd_cfg);
+ //TODO: vepu541_set_osd(&ctx->osd_cfg);
+
+ return 0;
+}
+
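+/*
+ * The L2 (class 2) register file is not directly memory mapped; it is
+ * reached through an address/data window in the L1 block. The helpers below
+ * back the rkvenc_l2 regmap: write the L2 offset to L2CFG_ADDR, then access
+ * L2CFG_RDATA or L2CFG_WDATA.
+ */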
+static int rkvenc_l2_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct rkvpu_ctx *ctx = context;
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ int ret = 0;
+
+ ret = regmap_write(h264_ctx->l1_regmap,
+ RKVENC_VEPU540_L1_L2CFG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(h264_ctx->l1_regmap,
+ RKVENC_VEPU540_L1_L2CFG_RDATA, val);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+static int rkvenc_l2_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct rkvpu_ctx *ctx = context;
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ int ret = 0;
+
+ ret = regmap_write(h264_ctx->l1_regmap,
+ RKVENC_VEPU540_L1_L2CFG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(h264_ctx->l1_regmap,
+ RKVENC_VEPU540_L1_L2CFG_WDATA, val);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+static const struct regmap_bus regmap_l2_bus = {
+ .reg_read = rkvenc_l2_read,
+ .reg_write = rkvenc_l2_write,
+ .max_raw_read = 4,
+ .max_raw_write = 4,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static int rkvenc_h264_init_regmap(struct rkvpu_ctx *ctx)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ struct rkvpu_dev *rkvpu = ctx->dev;
+ struct device *dev = rkvpu->dev;
+ int ret;
+
+ h264_ctx->l1_regmap = devm_regmap_init_mmio(dev, rkvpu->regs,
+ &rkvenc_l1_regmap_cfg);
+ if (IS_ERR(h264_ctx->l1_regmap))
+ return dev_err_probe(dev, PTR_ERR(h264_ctx->l1_regmap),
+ "failed to initialize regmap\n");
+
+	ret = devm_regmap_field_bulk_alloc(dev, h264_ctx->l1_regmap,
+					   h264_ctx->l1_fields,
+					   rkvenc_vepu540_l1_fields,
+					   L1_MAX_FIELDS);
+	if (ret)
+		return ret;
+
+ h264_ctx->l2_regmap = devm_regmap_init(dev, &regmap_l2_bus, ctx,
+ &rkvenc_l2_regmap_cfg);
+ if (IS_ERR(h264_ctx->l2_regmap))
+ return dev_err_probe(dev, PTR_ERR(h264_ctx->l2_regmap),
+ "failed to initialize regmap\n");
+
+ ret = devm_regmap_field_bulk_alloc(dev, h264_ctx->l2_regmap,
+ h264_ctx->l2_fields,
+ rkvenc_vepu540_l2_fields,
+ L2_MAX_FIELDS);
+
+ return ret;
+}
+
+static int rkvenc_h264_start(struct rkvpu_ctx *ctx)
+{
+ struct rkvenc_h264_ctx *h264_ctx;
+ int ret;
+
+ h264_ctx = kzalloc(sizeof(*h264_ctx), GFP_KERNEL);
+ if (!h264_ctx)
+ return -ENOMEM;
+
+ ctx->priv = h264_ctx;
+
+	ret = rkvenc_h264_init_regmap(ctx);
+	if (ret) {
+		kfree(h264_ctx);
+		ctx->priv = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+static void rkvenc_h264_stop(struct rkvpu_ctx *ctx)
+{
+	kfree(ctx->priv);
+	ctx->priv = NULL;
+}
+
+static void rkvenc_h264_run_preamble(struct rkvpu_ctx *ctx,
+ struct rkvenc_h264_run *run)
+{
+ struct v4l2_ctrl *ctrl;
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
+ V4L2_CID_STATELESS_H264_ENCODE_PARAMS);
+ run->encode_params = ctrl ? ctrl->p_cur.p : NULL;
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
+ V4L2_CID_STATELESS_H264_ENCODE_RC);
+ run->encode_rc = ctrl ? ctrl->p_cur.p : NULL;
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
+ V4L2_CID_STATELESS_H264_SPS);
+ run->sps = ctrl ? ctrl->p_cur.p : NULL;
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
+ V4L2_CID_STATELESS_H264_PPS);
+ run->pps = ctrl ? ctrl->p_cur.p : NULL;
+
+ rkvpu_run_preamble(ctx, &run->base);
+}
+
+static int rkvenc_h264_run(struct rkvpu_ctx *ctx)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ struct rkvpu_dev *rkvpu = ctx->dev;
+ struct rkvenc_h264_run run;
+
+ rkvenc_h264_run_preamble(ctx, &run);
+
+ h264e_vepu541_gen_regs(ctx, &run);
+
+ rkvpu_run_postamble(ctx, &run.base);
+
+ schedule_delayed_work(&rkvpu->watchdog_work, msecs_to_jiffies(2000));
+
+ /* Start encoding! */
+ regmap_field_write(h264_ctx->l1_fields[LKT_NUM], 0);
+ regmap_field_write(h264_ctx->l1_fields[CLK_GATE_EN], 1);
+ regmap_field_write(h264_ctx->l1_fields[RESETN_HW_EN], 0);
+ regmap_field_write(h264_ctx->l1_fields[ENC_DONE_TMVP_EN], 1);
+ regmap_field_write(h264_ctx->l1_fields[RKVENC_CMD], 1);
+
+ return 0;
+}
+
+static int rkvenc_h264_irq(struct rkvpu_ctx *ctx)
+{
+ struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+ enum vb2_buffer_state state;
+ u32 status;
+ int ret;
+
+	ret = regmap_read(h264_ctx->l1_regmap,
+			  RKVENC_VEPU540_L1_INT_STA, &status);
+	if (ret || !status)
+		return IRQ_NONE;
+
+ state = (status & BIT(0)) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ ret = regmap_write(h264_ctx->l1_regmap,
+ RKVENC_VEPU540_L1_INT_MSK, 0x100);
+ if (ret)
+ return IRQ_NONE;
+
+ ret = regmap_write(h264_ctx->l1_regmap,
+ RKVENC_VEPU540_L1_INT_CLR, 0xffffffff);
+ if (ret)
+ return IRQ_NONE;
+
+ ret = regmap_write(h264_ctx->l1_regmap,
+ RKVENC_VEPU540_L1_INT_STA, 0);
+ if (ret)
+ return IRQ_NONE;
+
+ return state;
+}
+
+static void rkvenc_h264_done(struct rkvpu_ctx *ctx,
+			     struct vb2_v4l2_buffer *src_buf,
+			     struct vb2_v4l2_buffer *dst_buf,
+			     enum vb2_buffer_state result)
+{
+	struct rkvenc_h264_ctx *h264_ctx = ctx->priv;
+	u32 bsl;
+
+	regmap_field_read(h264_ctx->l1_fields[BS_LGTH], &bsl);
+	vb2_set_plane_payload(&dst_buf->vb2_buf, 0, bsl);
+}
+
+#if 0
+static int rkvenc_h264_watchdog(struct rkvpu_ctx *ctx)
+{
+ struct rkvpu_dev *rkvpu = ctx->dev;
+
+ /* TODO: Do all necessary things */
+
+ return 0;
+}
+
+static int rkvenc_h264_try_ctrl(struct rkvpu_ctx *ctx, struct v4l2_ctrl *ctrl)
+{
+ if (ctrl->id == V4L2_CID_STATELESS_H264_SPS)
+ return rkvdec_h264_validate_sps(ctx, ctrl->p_new.p_h264_sps);
+
+ return 0;
+}
+#endif
+
+const struct rkvpu_ops rkvenc_h264_fmt_ops = {
+// .adjust_fmt = rkvenc_h264_adjust_fmt,
+ .start = rkvenc_h264_start,
+ .stop = rkvenc_h264_stop,
+ .run = rkvenc_h264_run,
+ .irq = rkvenc_h264_irq,
+	.done = rkvenc_h264_done,
+#if 0
+ .watchdog = rkvenc_h264_watchdog,
+ .try_ctrl = rkvenc_h264_try_ctrl,
+#endif
+};
diff --git a/drivers/staging/media/rkvdec/rkvenc-vepu540.h b/drivers/staging/media/rkvdec/rkvenc-vepu540.h
new file mode 100644
index 00000000000000..765cd56fa8d45d
--- /dev/null
+++ b/drivers/staging/media/rkvdec/rkvenc-vepu540.h
@@ -0,0 +1,3381 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef RKVENC_VEPU540_REGS_H_
+#define RKVENC_VEPU540_REGS_H_
+
+#include <linux/regmap.h>
+
+/* L1 */
+#define RKVENC_VEPU540_L1_VERSION 0x0
+#define RKVENC_VEPU540_L1_STRT 0x4
+#define RKVENC_VEPU540_L1_CLR 0x8
+#define RKVENC_VEPU540_L1_LKT_ADDR 0xc
+#define RKVENC_VEPU540_L1_INT_EN 0x10
+#define RKVENC_VEPU540_L1_INT_MSK 0x14
+#define RKVENC_VEPU540_L1_INT_CLR 0x18
+#define RKVENC_VEPU540_L1_INT_STA 0x1c
+#define RKVENC_VEPU540_L1_ENC_IDLE_EN 0x20
+#define RKVENC_VEPU540_L1_ENC_RSL 0x30
+#define RKVENC_VEPU540_L1_ENC_PIC 0x34
+#define RKVENC_VEPU540_L1_ENC_WDG 0x38
+#define RKVENC_VEPU540_L1_DTRNS_MAP 0x3c
+#define RKVENC_VEPU540_L1_DTRNS_CFG 0x40
+#define RKVENC_VEPU540_L1_SRC_FMT 0x44
+#define RKVENC_VEPU540_L1_SRC_UDFY 0x48
+#define RKVENC_VEPU540_L1_SRC_UDFU 0x4c
+#define RKVENC_VEPU540_L1_SRC_UDFV 0x50
+#define RKVENC_VEPU540_L1_SRC_UDFO 0x54
+#define RKVENC_VEPU540_L1_SRC_PROC 0x58
+#define RKVENC_VEPU540_L1_SLI_CFG_H264 0x5c
+#define RKVENC_VEPU540_L1_TILE_CFG_HEVC 0x5c
+#define RKVENC_VEPU540_L1_TILE_POS 0x60
+#define RKVENC_VEPU540_L1_KLUT_OFST 0x64
+#define RKVENC_VEPU540_L1_KLUT_WGT(x) (0x68 + (x) * 0x4) // -> 0 - 23
+#define RKVENC_VEPU540_L1_RC_CFG 0xc8
+#define RKVENC_VEPU540_L1_RC_QP 0xcc
+#define RKVENC_VEPU540_L1_RC_TGT 0xd0
+#define RKVENC_VEPU540_L1_RC_ADJ(x) (0xd4 + (x) * 0x4) // -> 0 - 1
+#define RKVENC_VEPU540_L1_RC_DTHD(x) (0xdc + (x) * 0x4) // -> 0 - 8
+#define RKVENC_VEPU540_L1_ROI_QTHD(x) (0x100 + (x) * 0x4) // -> 0 - 3
+#define RKVENC_VEPU540_L1_PIC_OFST 0x110
+#define RKVENC_VEPU540_L1_SRC_STRD 0x114
+#define RKVENC_VEPU540_L1_ADR_SRC0 0x118
+#define RKVENC_VEPU540_L1_ADR_SRC1 0x11c
+#define RKVENC_VEPU540_L1_ADR_SRC2 0x120
+#define RKVENC_VEPU540_L1_ADR_ROI 0x124
+#define RKVENC_VEPU540_L1_ADR_RFPW_H 0x128
+#define RKVENC_VEPU540_L1_ADR_RFPW_B 0x12c
+#define RKVENC_VEPU540_L1_ADR_RFPR_H 0x130
+#define RKVENC_VEPU540_L1_ADR_RFPR_B 0x134
+#define RKVENC_VEPU540_L1_ADR_CMVW 0x138
+#define RKVENC_VEPU540_L1_ADR_CMVR 0x13c
+#define RKVENC_VEPU540_L1_ADR_DSPW 0x140
+#define RKVENC_VEPU540_L1_ADR_DSPR 0x144
+#define RKVENC_VEPU540_L1_ADR_MEIW 0x148
+#define RKVENC_VEPU540_L1_ADR_BSBT 0x14c
+#define RKVENC_VEPU540_L1_ADR_BSBB 0x150
+#define RKVENC_VEPU540_L1_ADR_BSBR 0x154
+#define RKVENC_VEPU540_L1_ADR_BSBS 0x158
+#define RKVENC_VEPU540_L1_SLI_SPLT 0x15c
+#define RKVENC_VEPU540_L1_SLI_BYTE 0x160
+#define RKVENC_VEPU540_L1_MR_RNGE 0x164
+#define RKVENC_VEPU540_L1_ME_CFG 0x168
+#define RKVENC_VEPU540_L1_ME_CACH 0x16c
+#define RKVENC_VEPU540_L1_SYNT_LONG_REFM0 0x170
+#define RKVENC_VEPU540_L1_SYNT_LONG_REFM1 0x174
+#define RKVENC_VEPU540_L1_OSD_INV_CFG 0x178
+#define RKVENC_VEPU540_L1_ADR_LPFW 0x17c
+#define RKVENC_VEPU540_L1_ADR_LPFR 0x180
+#define RKVENC_VEPU540_L1_IPRD_CSTS 0x194
+#define RKVENC_VEPU540_L1_RDO_CFG_H264 0x198
+#define RKVENC_VEPU540_L1_RDO_CFG_HEVC 0x198
+#define RKVENC_VEPU540_L1_SYNT_NAL 0x19c
+#define RKVENC_VEPU540_L1_SYNT_SPS 0x1a0
+#define RKVENC_VEPU540_L1_SYNT_PPS 0x1a4
+#define RKVENC_VEPU540_L1_SYNT_SLI(x)		(0x1a8 + (x) * 0x4) // -> 0 - 2
+#define RKVENC_VEPU540_L1_SYNT_REFM0 0x1b4
+#define RKVENC_VEPU540_L1_SYNT_REFM1 0x1b8
+#define RKVENC_VEPU540_L1_OSD_CFG 0x1c0
+#define RKVENC_VEPU540_L1_OSD_INV 0x1c4
+#define RKVENC_VEPU540_L1_SYNT_REFM2 0x1c8
+#define RKVENC_VEPU540_L1_SYNT_REFM3_HEVC 0x1cc
+#define RKVENC_VEPU540_L1_OSD_POS(x)		(0x1d0 + (x) * 0x4) // -> 0 - 7
+#define RKVENC_VEPU540_L1_ADR_OSD(x)		(0x1f0 + (x) * 0x4) // -> 0 - 7
+#define RKVENC_VEPU540_L1_ST_BSL 0x210
+#define RKVENC_VEPU540_L1_ST_SSE_LE32 0x214
+#define RKVENC_VEPU540_L1_ST_SSE_QP 0x218
+#define RKVENC_VEPU540_L1_ST_SAO 0x21c
+#define RKVENC_VEPU540_L1_ST_HEAD_BL 0x220
+#define RKVENC_VEPU540_L1_ST_RES_BL 0x224
+#define RKVENC_VEPU540_L1_ST_ENC 0x228
+#define RKVENC_VEPU540_L1_ST_LKT 0x22c
+#define RKVENC_VEPU540_L1_ST_NADR 0x230
+#define RKVENC_VEPU540_L1_ST_BSB 0x234
+#define RKVENC_VEPU540_L1_ST_BUS 0x238
+#define RKVENC_VEPU540_L1_ST_SNUM 0x23c
+#define RKVENC_VEPU540_L1_ST_SLEN 0x240
+#define RKVENC_VEPU540_L1_ST_PNUM_P64 0x244
+#define RKVENC_VEPU540_L1_ST_PNUM_P32 0x248
+#define RKVENC_VEPU540_L1_ST_PNUM_P16 0x24c
+#define RKVENC_VEPU540_L1_ST_PNUM_P8 0x250
+#define RKVENC_VEPU540_L1_ST_PNUM_I32 0x254
+#define RKVENC_VEPU540_L1_ST_PNUM_I16 0x258
+#define RKVENC_VEPU540_L1_ST_PNUM_I8 0x25c
+#define RKVENC_VEPU540_L1_ST_PNUM_I4 0x260
+#define RKVENC_VEPU540_L1_ST_BB_QP(x)		(0x264 + (x) * 0x4) // -> 0 - 51
+#define RKVENC_VEPU540_L1_ST_CPLX_TMP 0x334
+#define RKVENC_VEPU540_L1_ST_BNUM_CME 0x338
+#define RKVENC_VEPU540_L1_ST_CPLX_SPT 0x33c
+#define RKVENC_VEPU540_L1_ST_BNUM_B16 0x340
+#define RKVENC_VEPU540_L1_ST_CPLX_MAX_B16 0x344
+#define RKVENC_VEPU540_L1_L2CFG_ADDR 0x3f0
+#define RKVENC_VEPU540_L1_L2CFG_WDATA 0x3f4
+#define RKVENC_VEPU540_L1_L2CFG_RDATA 0x3f8
+#define RKVENC_VEPU540_L1_OSD_PLT(x)		(0x400 + (x) * 0x4) // -> 0 - 255
+#define RKVENC_VEPU540_L1_ST_WDG 0x85c
+#define RKVENC_VEPU540_L1_ST_PPL 0x860
+#define RKVENC_VEPU540_L1_ST_SLI_NUM 0x874
+#define RKVENC_VEPU540_L1_DBG_DMA_RFPR 0x8e4
+#define RKVENC_VEPU540_L1_DBG_DMA_CH_ST 0x8e8
+#define RKVENC_VEPU540_L1_MMU_ADDR(x)		(0xf00 + (x) * 0x40) // -> 0 - 1
+#define RKVENC_VEPU540_L1_MMU_ST(x)		(0xf04 + (x) * 0x40) // -> 0 - 1
+#define RKVENC_VEPU540_L1_MMU_CMD(x)		(0xf08 + (x) * 0x40) // -> 0 - 1
+#define RKVENC_VEPU540_L1_MMU_PFA(x)		(0xf0c + (x) * 0x40) // -> 0 - 1
+#define RKVENC_VEPU540_L1_MMU_ZAP(x)		(0xf10 + (x) * 0x40) // -> 0 - 1
+#define RKVENC_VEPU540_L1_MMU_ERR(x)		(0xf14 + (x) * 0x40) // -> 0 - 1
+#define RKVENC_VEPU540_L1_MMU_INT_CLR(x)	(0xf18 + (x) * 0x40) // -> 0 - 1
+#define RKVENC_VEPU540_L1_MMU_INT_MSK(x)	(0xf1c + (x) * 0x40) // -> 0 - 1
+#define RKVENC_VEPU540_L1_MMU_INT_STA(x)	(0xf20 + (x) * 0x40) // -> 0 - 1
+#define RKVENC_VEPU540_L1_MMU_ACKG(x)		(0xf24 + (x) * 0x40) // -> 0 - 1
+
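The indexed macros above expand to consecutive 32-bit registers, e.g. RKVENC_VEPU540_L1_KLUT_WGT(5) is 0x68 + 5 * 4 = 0x7c, and the MMU block repeats with a 0x40 stride, so RKVENC_VEPU540_L1_MMU_INT_STA(1) is 0xf20 + 0x40 = 0xf60. As a small illustration (not part of the patch), the VERSION register documented below could be decoded like this:

static void rkvenc_vepu540_dump_version(struct regmap *l1_regmap)
{
	unsigned int ver = 0;

	/* VERSION[31:24] holds the IP identifier (0x50), [7:0] the sub-version. */
	regmap_read(l1_regmap, RKVENC_VEPU540_L1_VERSION, &ver);
	pr_info("rkvenc: IP id %#x, sub-version %u\n", ver >> 24, ver & 0xff);
}
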
+enum rkvenc_vepu540_regfields_l1 {
+ /*
+ * VERSION
+ * Address: 0x0000 Access type: read only
+	 * VEPU version. It contains IP function summary and sub-version information.
+ */
+ /* Sub-version(version 1.1) */
+ SUB_VER,
+ /* Support H.264 encoding */
+ H264_ENC,
+ /* Support HEVC encoding */
+ H265_ENC,
+ /*
+ * The maximum resolution supported
+ * 4'd0: 4096x2304 pixels,
+ * 4'd1: 1920x1088 pixels,
+ * others: reserved
+ */
+ PIC_SIZE,
+ /*
+ * OSD capability.
+ * 2'd0: 8-area OSD, with 256-color palette
+ * 2'd3: no OSD
+ * others: reserved
+ */
+ OSD_CAP,
+ /*
+ * pre-process filter capability
+ * 2'd0: basic pre-process filter
+ * 2'd3: no pre-process filter
+ * others: reserved
+ */
+ FILTR_CAP,
+ /* B frame encoding capability */
+ BFRM_CAP,
+	/*
+	 * frame buffer compress capability
+	 * 2'd0: No FBC,
+	 * 2'd3: Support AFBC for video source and FBC
+	 * for reconstructed picture
+	 * others: reserved
+	 */
+ FBC_CAP,
+	/* IP identifier for RKVENC, default: 0x50 */
+ RKVENC_VER,
+ /*
+ * ENC_STRT
+ * Address: 0x0004 Access type: read and write/write only
+ * Start cmd register.(auto clock gating enable, auto reset enable and
+ * tmvp adjust enable when frame done are also allocated here.)
+ */
+ /*
+ * Number of new nodes in link table.
+ * It's valid only when rkvenc_cmd is 2 or 3.
+ */
+ LKT_NUM,
+ /*
+ * Rockchip video encoder command:
+ * 2'd0: N/A
+ * 2'd1: one frame encode by register configuration
+ * 2'd2: multi-frame encode start with link table
+ * 2'd3: multi_frame_encode link table update
+ */
+ RKVENC_CMD,
+ /* RKVENC encoder clock gating enable */
+ CLK_GATE_EN,
+ /* auto reset core clock domain when frame finished */
+ RESETN_HW_EN,
+ /* wait tmvp write done by dma */
+ ENC_DONE_TMVP_EN,
+ /*
+ * ENC_CLR
+ * Address offset: 0x0008 Access type: read and write
+ * ENC_CLR.safe_clr only clears RKVENC DMA and confirms the integrity of
+ * AXI transactions. To execute the global reset of RKVENC, user needs to
+ * configure SOC CRU register which controls RKVENC's asynchronous reset
+ */
+ /*
+	 * Safe clear. This field only clears the DMA module to confirm the
+ * integrity of AXI transactions
+ */
+ SAFE_CLR,
+ /*
+ * Force clear. Clear all the sub modules besides regfile and AHB data
+ * path.
+ */
+ FORCE_CLR,
+
+ /*
+ * LKT_ADDR
+ * Address offset: 0x000c Access type: read and write
+ * Link table
+ */
+ /*
+ * High 28 bits of the address for the first node in current link table
+ * (16bytes aligned)
+ */
+ LKT_ADDR,
+ /*
+ * INT_EN
+ * Address offset: 0x0010 Access type: read and write
+ * VEPU interrupt enable
+ */
+ /* One frame encode finish interrupt enable */
+ ENC_DONE_EN,
+ /* Link table finish interrupt enable */
+ LKT_DONE_EN,
+ /* Safe clear finish interrupt enable */
+ SCLR_DONE_EN,
+	/* One slice encode finish interrupt enable */
+ ENC_SLICE_DONE_EN,
+ /* Bit stream overflow interrupt enable */
+ OFLW_DONE_EN,
+ /* AXI write response fifo full interrupt enable */
+ BRSP_DONE_EN,
+ /* AXI write response channel error interrupt enable */
+ BERR_DONE_EN,
+ /* AXI read channel error interrupt enable */
+ RERR_DONE_EN,
+ /* timeout error interrupt enable */
+ WDG_DONE_EN,
+
+ /*
+ * INT_MSK
+ * Address offset: 0x0014 Access type: read and write
+ * VEPU interrupt mask
+ */
+ /* One frame encode finish interrupt mask */
+ ENC_DONE_MSK,
+ /* Link table finish interrupt mask */
+ LKT_DONE_MSK,
+ /* Safe clear finish interrupt mask */
+ SCLR_DONE_MSK,
+	/* One slice encode finish interrupt mask */
+ ENC_SLICE_DONE_MSK,
+ /* Bit stream overflow interrupt mask */
+ OFLW_DONE_MSK,
+ /* AXI write response fifo full interrupt mask */
+ BRSP_DONE_MSK,
+ /* AXI write response channel error interrupt mask */
+ BERR_DONE_MSK,
+ /* AXI read channel error interrupt mask */
+ RERR_DONE_MSK,
+ /* timeout error interrupt mask */
+ WDG_DONE_MSK,
+
+ /*
+ * INT_CLR
+ * Address offset: 0x0018 Access type: read and write, write one to clear
+ * VEPU interrupt clear
+ */
+ /* One frame encode finish interrupt clear */
+ ENC_DONE_CLR,
+ /* Link table finish interrupt clear */
+ LKT_DONE_CLR,
+ /* Safe clear finish interrupt clear */
+ SCLR_DONE_CLR,
+ /* One slice encode finish interrupt clear */
+ ENC_SLICE_DONE_CLR,
+ /* Bit stream overflow interrupt clear */
+ OFLW_DONE_CLR,
+ /* AXI write response fifo full interrupt clear */
+ BRSP_DONE_CLR,
+ /* AXI write response channel error interrupt clear */
+ BERR_DONE_CLR,
+ /* AXI read channel error interrupt clear */
+ RERR_DONE_CLR,
+ /* timeout error interrupt clear */
+ WDG_DONE_CLR,
+
+ /*
+ * INT_STA
+ * Address offset: 0x001c Access type: read and write, write one to clear
+ * VEPU interrupt status
+ */
+ /* One frame encode finish interrupt status */
+ ENC_DONE_STA,
+ /* Link table finish interrupt status */
+ LKT_DONE_STA,
+ /* Safe clear finish interrupt status */
+ SCLR_DONE_STA,
+ /* One slice encode finish interrupt status */
+ ENC_SLICE_DONE_STA,
+ /* Bit stream overflow interrupt status */
+ OFLW_DONE_STA,
+ /* AXI write response fifo full interrupt status */
+ BRSP_DONE_STA,
+ /* AXI write response channel error interrupt status */
+ BERR_DONE_STA,
+ /* AXI read channel error interrupt status */
+ RERR_DONE_STA,
+ /* timeout error interrupt status */
+ WDG_DONE_STA,
+
+ /* reg gap 008~011 */
+ //reg_008_011[4],
+
+ /*
+ * ENC_RSL
+ * Address offset: 0x0030 Access type: read and write
+ * Resolution
+ */
+ /* ceil(picture width/8) - 1 */
+ PIC_WD8_M1,
+ /* filling pixels to maintain picture width 8 pixels aligned */
+ PIC_WFILL,
+ /* Ceil(picture_height/8)-1 */
+ PIC_HD8_M1,
+ /* Filling pixels to maintain picture height 8 pixels aligned */
+ PIC_HFILL,
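	/*
	 * Illustrative example (not from the TRM): for a 1918x1080 source,
	 * pic_wd8_m1 = ceil(1918/8) - 1 = 239, pic_wfill = 240 * 8 - 1918 = 2,
	 * pic_hd8_m1 = ceil(1080/8) - 1 = 134, pic_hfill = 0.
	 */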
+
+ /*
+ * ENC_PIC
+ * Address offset: 0x0034 Access type: read and write
+ * VEPU common configuration
+ */
+ /* Video standard: 0->H.264 */
+ ENC_STND,
+ /* ROI encode enable */
+ ROI_ENC,
+	/* Current frame should be referred to in the future */
+ CUR_FRM_REF,
+ /* Output ME information */
+ MEI_STOR,
+ /* Output start code prefix */
+ BS_SCP,
+ /* 0: select table A, 1: select table B */
+ LAMB_MOD_SEL,
+ /* QP value for current frame encoding */
+ PIC_QP,
+ /* sum of reference pictures (indexed by difference POCs), HEVC only */
+ TOT_POC_NUM,
+	/* bit width to express the maximum ctu number in current picture, HEVC only */
+ LOG2_CTU_NUM,
+	/* 1'h0: Select atr_thd group0, 1'h1: Select atr_thd group1 */
+ ATR_THD_SEL,
+ /* Dual-core handshake Rx ID. */
+ DCHS_RXID,
+ /* Dual-core handshake tx ID. */
+ DCHS_TXID,
+ /* Dual-core handshake rx enable. */
+ DCHS_RXE,
+ /* RDO intra-prediction satd path bypass enable. */
+ SATD_BYPS_EN,
+ /* Slice length fifo enable. */
+ SLEN_FIFO,
+ /* Node interrupt enable (only for link table node configuration). */
+ NODE_INT,
+
+	/*
+ * ENC_WDG
+ * Address offset: 0x0038 Access type: read and write
+ * VEPU watch dog configure register
+ */
+ /*
+ * Video source loading timeout threshold.
+ * 24'h0: No time limit
+ * 24'hx: x*256 core clock cycles
+ */
+ VS_LOAD_THD,
+ /*
+ * Reference picture loading timeout threshold.
+ * 8'h0: No time limit
+ * 8'hx: x*256 core clock cycles
+ */
+ RFP_LOAD_THRD,
+
+ /*
+ * DTRNS_MAP
+ * Address offset: 0x003c Access type: read and write
+ * Data transaction mapping (endian and order)
+ */
+ /* swap the position of 64bits in 128bits for lpf write data between tiles */
+ LPFW_BUS_ORDR,
+ /* Swap the position of 64 bits in 128 bits for co-located Mv(HEVC only). */
+ CMVW_BUS_ORDR,
+ /* Swap the position of 64 bits in 128 bits for down-sampled picture. */
+ DSPW_BUS_ORDR,
+ /* Swap the position of 64 bits in 128 bits for reference picture. */
+ RFPW_BUS_ORDR,
+ /*
+ * Data swap for video source loading channel.
+ * [3]: Swap 64 bits in 128 bits
+ * [2]: Swap 32 bits in 64 bits
+ * [1]: Swap 16 bits in 32 bits
+ * [0]: Swap 8 bits in 16 bits
+ */
+ SRC_BUS_EDIN,
+ /*
+ * Data swap for ME information write channel.
+ * [3]: Swap 64 bits in 128 bits
+ * [2]: Swap 32 bits in 64 bits
+ * [1]: Swap 16 bits in 32 bits
+ * [0]: Swap 8 bits in 16 bits
+ */
+ MEIW_BUS_EDIN,
+ /*
+	 * Data swap for bit stream write channel.
+ * [2]: Swap 32 bits in 64 bits
+ * [1]: Swap 16 bits in 32 bits
+ * [0]: Swap 8 bits in 16 bits
+ */
+ BSW_BUS_EDIN,
+ /*
+ * Data swap for link table read channel.
+ * [3]: Swap 64 bits in 128 bits
+ * [2]: Swap 32 bits in 64 bits
+ * [1]: Swap 16 bits in 32 bits
+ * [0]: Swap 8 bits in 16 bits
+ */
+ LKTR_BUS_EDIN,
+ /*
+ * Data swap for ROI configuration read channel.
+ * [3]: Swap 64 bits in 128 bits
+ * [2]: Swap 32 bits in 64 bits
+ * [1]: Swap 16 bits in 32 bits
+ * [0]: Swap 8 bits in 16 bits
+ */
+ ROIR_BUS_EDIN,
+ /*
+ * Data swap for link table write channel.
+ * [3]: Swap 64 bits in 128 bits
+ * [2]: Swap 32 bits in 64 bits
+ * [1]: Swap 16 bits in 32 bits
+ * [0]: Swap 8 bits in 16 bits
+ */
+ LKTW_BUS_EDIN,
+ /*
+ * AFBC video source loading burst size.
+ * 1'h0: 32 bytes
+ * 1'h1: 64 bytes
+ */
+ AFBC_BSIZE,
+
+	/*
+ * DTRNS_CFG
+ * Address offset: 0x0040 Access type: read and write
+ * (AXI bus) Data transaction configuration
+ */
+ /*
+ * AXI write response channel check enable.
+ * [6]: Reconstructed picture write response check enable.
+ * [5]: ME information write response check enable.
+ * [4]: CTU information write response check enable.
+ * [3]: Down-sampled picture write response check enable.
+ * [2]: Bit stream write response check enable.
+	 * [1]: Link table mode write response check enable.
+ * [0]: Reserved for video preprocess.
+ */
+ VPU541_AXI_BRSP_CKE,
+ /*
+ * Down sampled reference picture read outstanding enable.
+ * 1'h0: No outstanding
+ * 1'h1: Outstanding read, which improves data transaction efficiency,
+	 * but core clock frequency should not be lower than bus clock frequency.
+ */
+ VPU541_DSPR_OTSD,
+
+ /*
+ * Down sampled reference picture read outstanding enable.
+ * 1'h0: No outstanding
+ * 1'h1: Outstanding read, which improves data transaction efficiency,
+	 * but core clock frequency should not be lower than bus clock frequency.
+ */
+ VPU540_DSPR_OTSD,
+ /*
+ * AXI write response channel check enable.
+ * [7]: lpf write response check enable
+ * [6]: Reconstructed picture write response check enable.
+ * [5]: ME information write response check enable.
+ * [4]: CTU information write response check enable.
+ * [3]: Down-sampled picture write response check enable.
+ * [2]: Bit stream write response check enable.
+	 * [1]: Link table mode write response check enable.
+ * [0]: Reserved for video preprocess.
+ */
+ VPU540_AXI_BRSP_CKE,
+
+ /*
+ * SRC_FMT
+ * Address offset: 0x0044 Access type: read and write
+ * Video source format
+ */
+ /*
+	 * Swap the position of alpha and RGB for ARGB8888.
+ * 1'h0: BGRA8888 or RGBA8888.
+ * 1'h1: ABGR8888 or ARGB8888.
+ */
+ ALPHA_SWAP,
+ /*
+	 * Swap the position of R and B for BGRA8888, RGB888, RGB565 format,
+	 * Swap the position of U and V for YUV422-SP, YUV420-SP, YUYV422 and UYVY422 format.
+ * 1'h0: RGB or YUYV or UYVY.
+ * 1'h1: BGR or YVYU or VYUY.
+ */
+ RBUV_SWAP,
+ /*
+ * Video source color format.
+ * 4'h0: BGRA8888
+ * 4'h1: RGB888
+ * 4'h2: RGB565
+ * 4'h4: YUV422 SP
+ * 4'h5: YUV422 P
+ * 4'h6: YUV420 SP
+ * 4'h7: YUV420 P
+ * 4'h8: YUYV422
+ * 4'h9: UYVY422
+ * Others: Reserved
+ */
+ SRC_CFMT,
+ /*
+ * Video source clip (low active).
+ * 1'h0: [16:235] for luma and [16:240] for chroma.
+ * 1'h1: [0:255] for both luma and chroma.
+ */
+ SRC_RANGE,
+ /*
+	 * Output reconstructed frame format
+ * 1'h0: yuv420
+ * 1'h1: yuv400
+ */
+ OUT_FMT_CFG,
+
+ /*
+ * SRC_UDFY
+ * Address offset: 0x0048 Access type: read and write
+	 * Weight of user defined formula for RGB to Y conversion
+	 */
+	/* Weight of BLUE in RGB to Y conversion formula. */
+	CSC_WGT_B2Y,
+	/* Weight of GREEN in RGB to Y conversion formula. */
+	CSC_WGT_G2Y,
+	/* Weight of RED in RGB to Y conversion formula. */
+ CSC_WGT_R2Y,
+
+ /*
+ * SRC_UDFU
+ * Address offset: 0x004c Access type: read and write
+	 * Weight of user defined formula for RGB to U conversion
+	 */
+	/* Weight of BLUE in RGB to U conversion formula. */
+	CSC_WGT_B2U,
+	/* Weight of GREEN in RGB to U conversion formula. */
+	CSC_WGT_G2U,
+	/* Weight of RED in RGB to U conversion formula. */
+ CSC_WGT_R2U,
+
+ /*
+ * SRC_UDFV
+ * Address offset: 0x0050 Access type: read and write
+	 * Weight of user defined formula for RGB to V conversion
+	 */
+	/* Weight of BLUE in RGB to V conversion formula. */
+	CSC_WGT_B2V,
+	/* Weight of GREEN in RGB to V conversion formula. */
+	CSC_WGT_G2V,
+	/* Weight of RED in RGB to V conversion formula. */
+ CSC_WGT_R2V,
+
+ /*
+ * SRC_UDFO
+ * Address offset: 0x0054 Access type: read and write
+	 * Offset of user defined formula for RGB to YUV conversion
+	 */
+	/* Offset of RGB to V conversion formula. */
+	CSC_OFST_V,
+	/* Offset of RGB to U conversion formula. */
+	CSC_OFST_U,
+	/* Offset of RGB to Y conversion formula. */
+ CSC_OFST_Y,
+
+ /*
+ * SRC_PROC
+ * Address offset: 0x0058 Access type: read and write
+ * Video source process
+ */
+ /* Video source mirror mode enable. */
+ SRC_MIRR,
+ /*
+ * Video source rotation mode.
+ * 2'h0: 0 degree
+ * 2'h1: Clockwise 90 degree
+ * 2'h2: Clockwise 180 degree
+	 * 2'h3: Clockwise 270 degree
+ */
+ SRC_ROT,
+ /* Video source texture analysis enable. */
+ TXA_EN,
+ /* AFBC decompress enable (for AFBC format video source). */
+ AFBCD_EN,
+
+ /*
+ * SLI_CFG_H264
+ * Address offset: 0x005C Access type: read and write
+ * Slice cross lines configuration, h264 only.
+ */
+ /*
+ * Slice cut cross lines enable,
+	 * used to break the resolution limit, H.264 only.
+ */
+ SLI_CRS_EN,
+
+ /* reg gap 024 */
+ REG_024,
+
+ /*
+ * KLUT_OFST
+ * Address offset: 0x0064 Access type: read and write
+ * Offset of (RDO) chroma cost weight table
+ */
+ /* Offset of (RDO) chroma cost weight table, values from 0 to 6. */
+ CHRM_KLUT_OFST,
+
+ /*
+ * KLUT_WGT0
+ * Address offset: 0x0068 Access type: read and write
+ * (RDO) Chroma weight table configure register0
+ */
+ /* Data0 in chroma cost weight table. */
+ CHRM_KLUT_WGT0,
+ /* Low 9 bits of data1 in chroma cost weight table. */
+ CHRM_KLUT_WGT1_L9,
+
+ /*
+ * KLUT_WGT1
+ * Address offset: 0x006C Access type: read and write
+ * (RDO) Chroma weight table configure register1
+ */
+ /* High 9 bits of data1 in chroma cost weight table. */
+ CHRM_KLUT_WGT1_H9,
+ /* Data2 in chroma cost weight table. */
+ CHRM_KLUT_WGT2,
+
+ /*
+ * KLUT_WGT2
+ * Address offset: 0x0070 Access type: read and write
+ * (RDO) Chroma weight table configure register2
+ */
+ /* Data3 in chroma cost weight table. */
+ CHRM_KLUT_WGT3,
+ /* Low 9 bits of data4 in chroma cost weight table. */
+ CHRM_KLUT_WGT4_L9,
+
+ /*
+ * KLUT_WGT3
+ * Address offset: 0x0074 Access type: read and write
+ * (RDO) Chroma weight table configure register3
+ */
+ /* High 9 bits of data4 in chroma cost weight table. */
+ CHRM_KLUT_WGT4_H9,
+ /* Data5 in chroma cost weight table. */
+ CHRM_KLUT_WGT5,
+
+ /*
+ * KLUT_WGT4
+ * Address offset: 0x0078 Access type: read and write
+ * (RDO) Chroma weight table configure register4
+ */
+ /* Data6 in chroma cost weight table. */
+ CHRM_KLUT_WGT6,
+ /* Low 9 bits of data7 in chroma cost weight table. */
+ CHRM_KLUT_WGT7_L9,
+
+ /*
+ * KLUT_WGT5
+ * Address offset: 0x007C Access type: read and write
+ * (RDO) Chroma weight table configure register5
+ */
+ /* High 9 bits of data7 in chroma cost weight table. */
+ CHRM_KLUT_WGT7_H9,
+ /* Data8 in chroma cost weight table. */
+ CHRM_KLUT_WGT8,
+
+ /*
+ * KLUT_WGT6
+ * Address offset: 0x0080 Access type: read and write
+ * (RDO) Chroma weight table configure register6
+ */
+ /* Data9 in chroma cost weight table. */
+ CHRM_KLUT_WGT9,
+ /* Low 9 bits of data10 in chroma cost weight table. */
+ CHRM_KLUT_WGT10_L9,
+
+ /*
+ * KLUT_WGT7
+ * Address offset: 0x0084 Access type: read and write
+ * (RDO) Chroma weight table configure register7
+ */
+ /* High 9 bits of data10 in chroma cost weight table. */
+ CHRM_KLUT_WGT10_H9,
+ /* Data11 in chroma cost weight table. */
+ CHRM_KLUT_WGT11,
+
+ /*
+ * KLUT_WGT8
+ * Address offset: 0x0088 Access type: read and write
+ * (RDO) Chroma weight table configure register8
+ */
+ /* Data12 in chroma cost weight table. */
+ CHRM_KLUT_WGT12,
+ /* Low 9 bits of data13 in chroma cost weight table. */
+ CHRM_KLUT_WGT13_L9,
+
+ /*
+ * KLUT_WGT9
+ * Address offset: 0x008C Access type: read and write
+ * (RDO) Chroma weight table configure register9
+ */
+ /* High 9 bits of data13 in chroma cost weight table. */
+ CHRM_KLUT_WGT13_H9,
+ /* Data14 in chroma cost weight table. */
+ CHRM_KLUT_WGT14,
+
+ /*
+ * KLUT_WGT10
+ * Address offset: 0x0090 Access type: read and write
+ * (RDO) Chroma weight table configure register10
+ */
+ /* Data15 in chroma cost weight table. */
+ CHRM_KLUT_WGT15,
+ /* Low 9 bits of data16 in chroma cost weight table. */
+ CHRM_KLUT_WGT16_L9,
+
+ /*
+ * KLUT_WGT11
+ * Address offset: 0x0094 Access type: read and write
+ * (RDO) Chroma weight table configure register11
+ */
+ /* High 9 bits of data16 in chroma cost weight table. */
+ CHRM_KLUT_WGT16_H9,
+ /* Data17 in chroma cost weight table. */
+ CHRM_KLUT_WGT17,
+
+ /*
+ * KLUT_WGT12
+ * Address offset: 0x0098 Access type: read and write
+ * (RDO) Chroma weight table configure register12
+ */
+ /* Data18 in chroma cost weight table. */
+ CHRM_KLUT_WGT18,
+ /* Low 9 bits of data19 in chroma cost weight table. */
+ CHRM_KLUT_WGT19_L9,
+
+ /*
+ * KLUT_WGT13
+ * Address offset: 0x009C Access type: read and write
+ * (RDO) Chroma weight table configure register13
+ */
+ /* High 9 bits of data19 in chroma cost weight table. */
+ CHRM_KLUT_WGT19_H9,
+	/* Data20 in chroma cost weight table. */
+ CHRM_KLUT_WGT20,
+
+ /*
+ * KLUT_WGT14
+ * Address offset: 0x00A0 Access type: read and write
+ * (RDO) Chroma weight table configure register14
+ */
+ /* Data21 in chroma cost weight table. */
+ CHRM_KLUT_WGT21,
+ /* Low 9 bits of data22 in chroma cost weight table. */
+ CHRM_KLUT_WGT22_L9,
+
+ /*
+ * KLUT_WGT15
+ * Address offset: 0x00A4 Access type: read and write
+ * (RDO) Chroma weight table configure register15
+ */
+ /* High 9 bits of data22 in chroma cost weight table. */
+ CHRM_KLUT_WGT22_H9,
+ /* Data23 in chroma cost weight table. */
+ CHRM_KLUT_WGT23,
+
+ /*
+ * KLUT_WGT16
+ * Address offset: 0x00A8 Access type: read and write
+ * (RDO) Chroma weight table configure register16
+ */
+ /* Data24 in chroma cost weight table. */
+ CHRM_KLUT_WGT24,
+ /* Low 9 bits of data25 in chroma cost weight table. */
+ CHRM_KLUT_WGT25_L9,
+
+ /*
+ * KLUT_WGT17
+ * Address offset: 0x00AC Access type: read and write
+ * (RDO) Chroma weight table configure register17
+ */
+ /* High 9 bits of data25 in chroma cost weight table. */
+ CHRM_KLUT_WGT25_H9,
+ /* Data26 in chroma cost weight table. */
+ CHRM_KLUT_WGT26,
+
+ /*
+ * KLUT_WGT18
+ * Address offset: 0x00B0 Access type: read and write
+ * (RDO) Chroma weight table configure register18
+ */
+ /* Data27 in chroma cost weight table. */
+ CHRM_KLUT_WGT27,
+ /* Low 9 bits of data28 in chroma cost weight table. */
+ CHRM_KLUT_WGT28_L9,
+
+ /*
+ * KLUT_WGT19
+ * Address offset: 0x00B4 Access type: read and write
+ * (RDO) Chroma weight table configure register19
+ */
+ /* High 9 bits of data28 in chroma cost weight table. */
+ CHRM_KLUT_WGT28_H9,
+ /* Data29 in chroma cost weight table. */
+ CHRM_KLUT_WGT29,
+
+ /*
+ * KLUT_WGT20
+ * Address offset: 0x00B8 Access type: read and write
+ * (RDO) Chroma weight table configure register20
+ */
+ /* Data30 in chroma cost weight table. */
+ CHRM_KLUT_WGT30,
+ /* Low 9 bits of data31 in chroma cost weight table. */
+ CHRM_KLUT_WGT31_L9,
+
+ /*
+ * KLUT_WGT21
+ * Address offset: 0x00BC Access type: read and write
+ * (RDO) Chroma weight table configure register21
+ */
+ /* High 9 bits of data31 in chroma cost weight table. */
+ CHRM_KLUT_WGT31_H9,
+ /* Data32 in chroma cost weight table. */
+ CHRM_KLUT_WGT32,
+
+ /*
+ * KLUT_WGT22
+ * Address offset: 0x00C0 Access type: read and write
+ * (RDO) Chroma weight table configure register22
+ */
+ /* Data33 in chroma cost weight table. */
+ CHRM_KLUT_WGT33,
+ /* Low 9 bits of data34 in chroma cost weight table. */
+ CHRM_KLUT_WGT34_L9,
+
+ /*
+ * KLUT_WGT23
+ * Address offset: 0x00C4 Access type: read and write
+ * (RDO) Chroma weight table configure register23
+ */
+ /* High 9 bits of data34 in chroma cost weight table. */
+ CHRM_KLUT_WGT34_H9,
+
+ /*
+ * RC_CFG
+ * Address offset: 0x00C8 Access type: read and write
+ * Rate control configuration
+ */
+ /* Rate control enable. */
+ RC_EN,
+ /* Adaptive quantization enable. */
+ AQ_EN,
+ /*
+ * Mode of aq_delta calculation for CU32 and CU64.
+ * 1'b0: aq_delta of CU32/CU64 is calculated by corresponding MADI32/64,
+ * 1'b1: aq_delta of CU32/CU64 is calculated by corresponding 4/16 CU16 qp_deltas.
+ */
+ AQ_MODE,
+	/* RC adjustment intervals, based on CTU number. */
+ RC_CTU_NUM,
+
+ /*
+ * RC_QP
+ * Address offset: 0x00CC Access type: read and write
+ * QP configuration for rate control
+ */
+ /*
+ * QP adjust range(delta_qp) in rate control.
+ * Delta_qp is constrained between -rc_qp_range to rc_qp_range.
+ */
+ RC_QP_RANGE,
+ /* Max QP for rate control and AQ mode. */
+ RC_MAX_QP,
+ /* Min QP for rate control and AQ mode. */
+ RC_MIN_QP,
+
+ /*
+ * RC_TGT
+ * Address offset: 0x00D0 Access type: read and write
+ * The target bit rate for rate control
+ */
+ /*
+ * Target bit num for one 64x64 CTU(for HEVC)
+ * or one 16x16 MB(for H.264), with 1/16 precision.
+ */
+ CTU_EBIT,
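	/*
	 * Illustrative example (not from the TRM): 4 Mbit/s at 30 fps for
	 * 1920x1080 H.264 is about 133333 bits per frame over 120 * 68 = 8160
	 * MBs, i.e. ~16.3 bits per MB, so ctu_ebit would be ~16.3 * 16 = 261.
	 */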
+
+ /*
+ * RC_ADJ0
+ * Address offset: 0x00D4 Access type: read and write
+ * QP adjust configuration for rate control
+ */
+ /* QP adjust step0 for rate control. */
+ QP_ADJ0,
+ /* QP adjust step1 for rate control. */
+ QP_ADJ1,
+ /* QP adjust step2 for rate control. */
+ QP_ADJ2,
+ /* QP adjust step3 for rate control. */
+ QP_ADJ3,
+ /* QP adjust step4 for rate control. */
+ QP_ADJ4,
+
+ /*
+ * RC_ADJ1
+ * Address offset: 0x00D8 Access type: read and write
+ * QP adjust configuration for rate control
+ */
+ /* QP adjust step5 for rate control. */
+ QP_ADJ5,
+ /* QP adjust step6 for rate control. */
+ QP_ADJ6,
+ /* QP adjust step7 for rate control. */
+ QP_ADJ7,
+ /* QP adjust step8 for rate control. */
+ QP_ADJ8,
+
+ /*
+ * RC_DTHD0~8
+ * Address offset: 0x00DC~0x00FC Access type: read and write
+	 * Bit rate deviation thresholds 0~8
+	 */
+	/* Bit rate deviation thresholds 0~8. */
+ RC_DTHD0,
+ RC_DTHD1,
+ RC_DTHD2,
+ RC_DTHD3,
+ RC_DTHD4,
+ RC_DTHD5,
+ RC_DTHD6,
+ RC_DTHD7,
+ RC_DTHD8,
+ RC_DTHD9,
+
+ /*
+ * ROI_QTHD0
+ * Address offset: 0x0100 Access type: read and write
+ * ROI QP threshold configuration0
+ */
+ /* Min QP for 16x16 CU inside ROI area0. */
+ QPMIN_AREA0,
+ /* Max QP for 16x16 CU inside ROI area0. */
+ QPMAX_AREA0,
+ /* Min QP for 16x16 CU inside ROI area1. */
+ QPMIN_AREA1,
+ /* Max QP for 16x16 CU inside ROI area1. */
+ QPMAX_AREA1,
+ /* Min QP for 16x16 CU inside ROI area2. */
+ QPMIN_AREA2,
+
+ /*
+ * ROI_QTHD1
+ * Address offset: 0x0104 Access type: read and write
+ * ROI QP threshold configuration1
+ */
+ /* Max QP for 16x16 CU inside ROI area2. */
+ QPMAX_AREA2,
+ /* Min QP for 16x16 CU inside ROI area3. */
+ QPMIN_AREA3,
+ /* Max QP for 16x16 CU inside ROI area3. */
+ QPMAX_AREA3,
+ /* Min QP for 16x16 CU inside ROI area4. */
+ QPMIN_AREA4,
+	/* Max QP for 16x16 CU inside ROI area4. */
+ QPMAX_AREA4,
+
+ /*
+ * ROI_QTHD2
+ * Address offset: 0x0108 Access type: read and write
+ * ROI QP threshold configuration2
+ */
+ /* Min QP for 16x16 CU inside ROI area5. */
+ QPMIN_AREA5,
+ /* Max QP for 16x16 CU inside ROI area5. */
+ QPMAX_AREA5,
+ /* Min QP for 16x16 CU inside ROI area6. */
+ QPMIN_AREA6,
+ /* Max QP for 16x16 CU inside ROI area6. */
+ QPMAX_AREA6,
+ /* Min QP for 16x16 CU inside ROI area7. */
+ QPMIN_AREA7,
+
+ /*
+ * ROI_QTHD3
+ * Address offset: 0x010C Access type: read and write
+ * ROI QP threshold configuration3
+ */
+ /* Max QP for 16x16 CU inside ROI area7. */
+ QPMAX_AREA7,
+ /*
+	 * QP threshold generation for the CUs whose size is bigger than 16x16.
+	 * 2'h0: Mean value of 16x16 CU QP thresholds
+	 * 2'h1: Max value of 16x16 CU QP thresholds
+	 * 2'h2: Min value of 16x16 CU QP thresholds
+	 * 2'h3: Reserved
+ */
+ QPMAP_MODE,
+
+ /*
+ * PIC_OFST
+ * Address offset: 0x0110 Access type: read and write
+ * Encoding picture offset
+ */
+ /* Vertical offset for encoding picture. */
+ PIC_OFST_Y,
+ /* Horizontal offset for encoding picture. */
+ PIC_OFST_X,
+
+ /*
+	 * SRC_STRD
+	 * Address offset: 0x0114 Access type: read and write
+	 * Video source stride
+	 */
+	/*
+	 * Video source stride0, based on pixel (byte).
+	 * Note that if the video format is YUV, src_strd0 is the LUMA component
+	 * stride while src_strd1 is the CHROMA component stride.
+ */
+ SRC_STRD0,
+ /*
+ * CHROMA stride of video source, only for YUV format.
+ * Note that U and V stride must be the same when color format is YUV
+ * planar.
+ */
+ SRC_STRD1,
+
+ /*
+ * ADR_SRC0
+ * Address offset: 0x0118 Access type: read and write
+ * Base address of the 1st storage area for video source
+ */
+ /*
+ * Base address of the 1st storage area for video source.
+ * ARGB8888, BGR888, RGB565, YUYV422 and UYUV422 have only one storage
+ * area, while adr_src0 is configured as the base address of video
+ * source frame buffer.
+ * YUV422/420 semi-planar have 2 storage area, while adr_src0 is
+ * configured as the base address of Y frame buffer.
+ * YUV422/420 planar have 3 storage area, while adr_src0 is configured
+ * as the base address of Y frame buffer.
+ * Note that if the video source is compressed by AFBC, adr_src0 is
+ * configured as the base address of compressed frame buffer.
+ */
+ ADR_SRC0,
+
+ /*
+ * ADR_SRC1
+ * Address offset: 0x011C Access type: read and write
+ * Base address of the 2nd storage area for video source
+ */
+ /*
+	 * Base address of the U frame buffer when the video source is uncompressed
+	 * and the color format is YUV422/420 planar.
+ */
+ ADR_SRC1,
+
+ /*
+ * ADR_SRC2
+ * Address offset: 0x0120 Access type: read and write
+ * Base address of the 3rd storage area for video source
+ */
+ /*
+	 * Base address of the V frame buffer when the video source is uncompressed
+	 * and the color format is YUV422/420 planar.
+ */
+ ADR_SRC2,
+
+ /*
+ * ADR_ROI
+ * Address offset: 0x0124 Access type: read and write
+ * Base address for ROI configuration, 16 bytes aligned
+ */
+ /* High 28 bits of base address for ROI configuration. */
+ ROI_ADDR,
+
+ /*
+ * ADR_RFPW_H
+ * Address offset: 0x0128 Access type: read and write
+ * Base address of header_block for compressed reference frame write,
+ * 4K bytes aligned
+ */
+ /*
+ * High 20 bits of the header_block base address for compressed
+ * reference frame write.
+ */
+ RFPW_H_ADDR,
+
+ /*
+ * ADR_RFPW_B
+ * Address offset: 0x012C Access type: read and write
+ * Base address of body_block for compressed reference frame write,
+ * 4K bytes aligned
+ */
+ /*
+ * High 20 bits of the body_block base address for compressed
+ * reference frame write.
+ */
+ RFPW_B_ADDR,
+
+ /*
+ * ADR_RFPR_H
+ * Address offset: 0x0130 Access type: read and write
+ * Base address of header_block for compressed reference frame read,
+ * 4K bytes aligned
+ */
+ /*
+ * High 20 bits of the header_block base address for compressed
+ * reference frame read.
+ */
+ RFPR_H_ADDR,
+
+ /*
+ * ADR_RFPR_B
+ * Address offset: 0x0134 Access type: read and write
+ * Base address of body_block for compressed reference frame read,
+ * 4K bytes aligned
+ */
+ /*
+ * High 20 bits of the body_block base address for compressed
+ * reference frame read.
+ */
+ RFPR_B_ADDR,
+
+ /*
+ * ADR_CMVW
+ * Address offset: 0x0138 Access type: read and write
+	 * Base address for co-located Mv write, 1KB aligned, HEVC only
+	 */
+	/* High 22 bits of base address for co-located Mv write, HEVC only. */
+ CMVW_ADDR,
+
+ /*
+ * ADR_CMVR
+ * Address offset: 0x013C Access type: read and write
+	 * Base address for co-located Mv read, 1KB aligned, HEVC only
+	 */
+	/* High 22 bits of base address for co-located Mv read, HEVC only. */
+ CMVR_ADDR,
+
+ /*
+ * ADR_DSPW
+ * Address offset: 0x0140 Access type: read and write
+ * Base address for down-sampled reference frame write, 1KB aligned
+ */
+ /* High 22 bits of base address for down-sampled reference frame write. */
+ DSPW_ADDR,
+
+ /*
+ * ADR_DSPR
+ * Address offset: 0x0144 Access type: read and write
+ * Base address for down-sampled reference frame read, 1KB aligned
+ */
+ /* High 22 bits of base address for down-sampled reference frame read. */
+ DSPR_ADDR,
+
+ /*
+ * ADR_MEIW
+ * Address offset: 0x0148 Access type: read and write
+ * Base address for ME information write, 1KB aligned
+ */
+ /* High 22 bits of base address for ME information write. */
+ MEIW_ADDR,
+
+ /*
+ * ADR_BSBT
+ * Address offset: 0x014C Access type: read and write
+ * Top address of bit stream buffer, 128B aligned
+ */
+ /* High 25 bits of the top address of bit stream buffer. */
+ BSBT_ADDR,
+
+ /*
+ * ADR_BSBB
+ * Address offset: 0x0150 Access type: read and write
+ * Bottom address of bit stream buffer, 128B aligned
+ */
+ /* High 25 bits of the bottom address of bit stream buffer. */
+ BSBB_ADDR,
+
+ /*
+ * ADR_BSBR
+ * Address offset: 0x0154 Access type: read and write
+ * Read address of bit stream buffer, 128B aligned
+ */
+ /*
+ * Read address of bit stream buffer, 128B aligned.
+ * VEPU will pause when write address meets read address and then send
+ * an interrupt. SW should move some data out from bit stream buffer
+ * and change this register accordingly.
+ * After that VEPU will continue processing automatically.
+ */
+ BSBR_ADDR,
+
+ /*
+ * ADR_BSBS
+ * Address offset: 0x0158 Access type: read and write
+ * Start address of bit stream buffer
+ */
+ /*
+ * Start address of bit stream buffer.
+ * VEPU begins to write bit stream from this address and increase
+ * address automatically.
+ * Note that the VEPU's real-time write address is marked in BSB_STUS.
+ */
+ ADR_BSBS,
+
+ /*
+ * SLI_SPLT
+ * Address offset: 0x015C Access type: read and write
+ * Slice split configuration
+ */
+ /* Slice split enable. */
+ SLI_SPLT,
+ /*
+	 * Slice split mode.
+	 * 1'h0: Slice split by byte.
+	 * 1'h1: Slice split by number of MB(H.264)/CTU(HEVC).
+	 */
+	SLI_SPLT_MODE,
+	/*
+	 * Slice split compensation when the slice is split by byte.
+	 * Byte distortion of current slice will be compensated in the next slice.
+	 */
+	SLI_SPLT_CPST,
+	/* Max slice num in one frame. */
+	SLI_MAX_NUM_M1,
+	/* Slice flush. Flush all the bit stream after each slice finished. */
+	SLI_FLSH,
+	/* Number of CTU/MB for slice split. Valid when the slice is split by CTU/MB. */
+ SLI_SPLT_CNUM_M1,
+
+ /*
+ * SLI_BYTE
+ * Address offset: 0x0160 Access type: read and write
+ * Number of bytes for slice split
+ */
+	/* Byte number for each slice when the slice is split by byte. */
+ SLI_SPLT_BYTE,
+
+ /*
+ * ME_RNGE
+ * Address offset: 0x0164 Access type: read and write
+ * Motion estimation range
+ */
+	/* CME horizontal search range, based on 16 pixels. */
+	CME_SRCH_H,
+	/* CME vertical search range, based on 16 pixels. */
+ CME_SRCH_V,
+ /* RME horizontal search range, values from 3 to 7. */
+ RME_SRCH_H,
+ /* RME vertical search range, values from 4 to 5. */
+ RME_SRCH_V,
+ /* Frame number difference value between current and reference frame, HEVC only. */
+ DLT_FRM_NUM,
+
+ /*
+ * ME_CNST
+ * Address offset: 0x0168 Access type: read and write
+ * Motion estimation configuration
+ */
+ /* Min horizontal distance for PMV selection. */
+ PMV_MDST_H,
+ /* Min vertical distance for PMV selection. */
+ PMV_MDST_V,
+ /*
+	 * Motion vector limit (by level), H.264 only.
+ * 2'h0: Mvy is limited to [-64,63].
+ * Others: Mvy is limited to [-128,127].
+ */
+ MV_LIMIT,
+ /* PMV number (should be constant2). */
+ PMV_NUM,
+	/* Store co-located Mv information to external memory, HEVC only. */
+	COLMV_STOR,
+	/* Load co-located Mvs as predicted Mv candidates, HEVC only. */
+ COLMV_LOAD,
+ /*
+ * [4]: Disable 64x64 block RME.
+ * [3]: Disable 32x32 block RME.
+ * [2]: Disable 16x16 block RME.
+ * [1]: Disable 8x8 block RME.
+ * [0]: Disable 4x4 block RME.
+ */
+ RME_DIS,
+ /*
+ * [4]: Disable 64x64 block FME.
+ * [3]: Disable 32x32 block FME.
+ * [2]: Disable 16x16 block FME.
+ * [1]: Disable 8x8 block FME.
+ * [0]: Disable 4x4 block FME.
+ */
+ FME_DIS,
+
+ /*
+ * ME_RAM
+ * Address offset: 0x016C Access type: read and write
+ * ME cache configuration
+ */
+ /* CME's max RAM address. */
+ CME_RAMA_MAX,
+	/* Height of CME RAMA district, based on 4 pixels. */
+	CME_RAMA_H,
+	/*
+	 * L2 cache mapping, based on pixels.
+	 * 2'h0: 32x512
+	 * 2'h1: 16x1024
+	 * 2'h2: 8x2048
+	 * 2'h3: 4x4096
+	 */
+	CACH_L2_MAP,
+	/* The width of CIME down-sample recon data linebuf, based on 64 pixels. */
+ CME_LINEBUF_W,
+
+ /*
+ * SYNT_LONG_REFM0
+ * Address offset: 0x0170 Access type: read and write
+ * Long term reference frame mark0 for HEVC
+ */
+ /* Poc_lsb_lt[1] */
+ POC_LSB_LT1,
+ /* Poc_lsb_lt[2] */
+ POC_LSB_LT2,
+
+ /*
+ * SYNT_LONG_REFM1
+ * Address offset: 0x0174 Access type: read and write
+ * Long term reference frame mark1 for HEVC
+ */
+ /* Delta_poc_msb_cycle_lt[1] */
+ DLT_POC_MSB_CYCL1,
+ /* Delta_poc_msb_cycle_lt[2] */
+ DLT_POC_MSB_CYCL2,
+
+ /*
+ * OSD_INV_CFG
+ * Address offset: 0x0178 Access type: read and write
+ * OSD color inverse configuration
+ *
+ * Added in vepu540
+ */
+ /*
+ * OSD color inverse enable of chroma component,
+ * each bit controls corresponding region.
+ */
+ OSD_CH_INV_EN,
+ /*
+ * OSD color inverse expression type
+ * each bit controls corresponding region.
+ * 1'h0: AND,
+ * 1'h1: OR
+ */
+ OSD_ITYPE,
+ /*
+ * OSD color inverse expression switch for luma component
+ * each bit controls corresponding region.
+	 * 1'h0: Expression needs to determine the condition,
+	 * 1'h1: Expression doesn't need to determine the condition,
+ */
+ OSD_LU_INV_MSK,
+ /*
+ * OSD color inverse expression switch for chroma component
+ * each bit controls corresponding region.
+	 * 1'h0: Expression needs to determine the condition,
+	 * 1'h1: Expression doesn't need to determine the condition,
+ */
+ OSD_CH_INV_MSK,
+
+ /*
+ * IPRD_CSTS
+ * Address offset: 0x0194 Access type: read and write
+ * Cost function configuration for intra prediction
+ */
+ /* LUMA variance threshold to select intra prediction cost function. */
+ VTHD_Y,
+ /* CHROMA variance threshold to select intra prediction cost function. */
+ VTHD_C,
+
+ /*
+ * RDO_CFG_H264
+ * Address offset: 0x0198 Access type: read and write
+ * H.264 RDO configuration
+ */
+ /* Limit sub_mb_rect_size for low level. */
+ RECT_SIZE,
+ /* 4x4 sub MB enable. */
+ INTER_4X4,
+ /* Reserved */
+ ARB_SEL,
+ /* CAVLC syntax limit. */
+ VLC_LMT,
+ /* Chroma special candidates enable. */
+ CHRM_SPCL,
+ /*
+ * [7]: Disable intra4x4.
+ * [6]: Disable intra8x8.
+ * [5]: Disable intra16x16.
+ * [4]: Disable inter8x8 with T4.
+ * [3]: Disable inter8x8 with T8.
+ * [2]: Disable inter16x16 with T4.
+ * [1]: Disable inter16x16 with T8.
+ * [0]: Disable skip mode.
+ */
+ RDO_MASK,
+ /* Chroma cost weight adjustment(KLUT) enable. */
+ CCWA_E,
+ /*
+ * Scale list selection.
+ * 1'h0: Flat scale list.
+ * 1'h1: Default scale list.
+ */
+ SCL_LST_SEL,
+ /* Anti-ring enable. */
+ ATR_E,
+	/* Edge of anti-flicker, based on MB. The MBs inside the edge are not influenced. */
+ ATF_EDG,
+ /* Block level anti-flicker enable. */
+ ATF_LVL_E,
+ /* Intra mode anti-flicker enable. */
+ ATF_INTRA_E,
+ /*
+ * Scale list selection. (for vepu540)
+ * 2'h0: Flat scale list.
+ * 2'h1: Default scale list.
+ * 2'h2: User defined.
+ * 2'h3: Reserved.
+ */
+ SCL_LST_SEL_,
+ /*
+	 * RDO cost calculation expression for intra, using SAD or SATD.
+ * 1'h0: SATD,
+ * 1'h1: SAD,
+ */
+ SATD_BYPS_FLG,
+
+ /*
+ * SYNT_NAL_H264
+ * Address offset: 0x019C Access type: read and write
+ * NAL configuration for H.264
+ */
+	/* nal_ref_idc */
+	NAL_REF_IDC,
+	/* nal_unit_type */
+	NAL_UNIT_TYPE,
+	NAL_UNIT_TYPE_HEVC,
+
+ /*
+ * SYNT_SPS_H264
+ * Address offset: 0x01A0 Access type: read and write
+ * Sequence parameter set syntax configuration for H.264
+ */
+ /* log2_max_frame_num_minus4 */
+ MAX_FNUM,
+ /* direct_8x8_inference_flag */
+ DRCT_8X8,
+ /* log2_max_pic_order_cnt_lsb_minus4 */
+ MPOC_LM4,
+
+ /*
+ * SYNT_PPS_H264
+ * Address offset: 0x01A4 Access type: read and write
+ * Picture parameter set configuration for H.264
+ */
+ /* entropy_coding_mode_flag */
+ ETPY_MODE,
+ /* transform_8x8_mode_flag */
+ TRNS_8X8,
+ /* constrained_intra_pred_flag */
+ CSIP_FLAG,
+ /* num_ref_idx_l0_active_minus1 */
+ NUM_REF0_IDX,
+ /* num_ref_idx_l1_active_minus1 */
+ NUM_REF1_IDX,
+ /* pic_init_qp_minus26 + 26 */
+ PIC_INIT_QP,
+ /* chroma_qp_index_offset */
+ CB_OFST,
+ /* second_chroma_qp_index_offset */
+ CR_OFST,
+ /* weight_pred_flag */
+ WGHT_PRED,
+ /* deblocking_filter_control_present_flag */
+ DBF_CP_FLG,
+
+ /*
+ * SYNT_SLI0_H264
+ * Address offset: 0x01A8 Access type: read and write
+ * Slice header configuration0 for H.264
+ */
+ /* slice_type: 0->P, 1->B, 2->I. */
+ SLI_TYPE,
+ /* pic_parameter_set_id */
+ PPS_ID,
+ /* direct_spatial_mv_pred_flag */
+ DRCT_SMVP,
+ /* num_ref_idx_active_override_flag */
+ NUM_REF_OVRD,
+ /* cabac_init_idc */
+ CBC_INIT_IDC,
+ /* frame_num */
+ FRM_NUM,
+
+ /*
+ * SYNT_SLI1_H264
+ * Address offset: 0x01AC Access type: read and write
+ * Slice header configuration1 for H.264
+ */
+ /* idr_pid */
+ IDR_PIC_ID,
+ /* pic_order_cnt_lsb */
+ POC_LSB,
+
+ /*
+ * SYNT_SLI2_H264
+ * Address offset: 0x01B0 Access type: read and write
+ * Slice header configuration2 for H.264
+ */
+ /* reordering_of_pic_nums_idc */
+ RODR_PIC_IDX,
+ /* ref_pic_list_reordering_flag_l0 */
+ REF_LIST0_RODR,
+ /* slice_beta_offset_div2 */
+ SLI_BETA_OFST,
+ /* slice_alpha_c0_offset_div2 */
+ SLI_ALPH_OFST,
+ /* disable_deblocking_filter_idc */
+ DIS_DBLK_IDC,
+ /* abs_diff_pic_num_minus1/long_term_pic_num */
+ RODR_PIC_NUM,
+
+ /*
+ * SYNT_REFM0_H264
+ * Address offset: 0x01B4 Access type: read and write
+ * Reference frame mark0 for H.264
+ */
+ /* no_output_of_prior_pics_flag */
+ NOPP_FLG,
+ /* long_term_reference_flag */
+ LTRF_FLG,
+ /* adaptive_ref_pic_marking_mode_flag */
+ ARPM_FLG,
+	/* A No.4 MMCO should be executed first if mmco4_pre is 1 */
+ MMCO4_PRE,
+ /* memory_management_control_operation */
+ MMCO_TYPE0,
+ /*
+	 * MMCO parameters which have different meanings according to different mmco_parm0 values.
+ * difference_of_pic_nums_minus1 for mmco_parm0 equals 0 or 3.
+ * long_term_pic_num for mmco_parm0 equals 2.
+ * long_term_frame_idx for mmco_parm0 equals 6.
+ * max_long_term_frame_idx_plus1 for mmco_parm0 equals 4.
+ */
+ MMCO_PARM0,
+ /* memory_management_control_operation[1] */
+ MMCO_TYPE1,
+ /* memory_management_control_operation[2] */
+ MMCO_TYPE2,
+
+ /*
+ * SYNT_REFM1_H264
+ * Address offset: 0x01B8 Access type: read and write
+ * Reference frame mark1 for H.264
+ */
+ /*
+	 * MMCO parameters which have different meanings according to different mmco_parm1 values.
+ * difference_of_pic_nums_minus1 for mmco_parm1 equals 0 or 3.
+ * long_term_pic_num for mmco_parm1 equals 2.
+ * long_term_frame_idx for mmco_parm1 equals 6.
+ * max_long_term_frame_idx_plus1 for mmco_parm1 equals 4.
+ */
+ MMCO_PARM1,
+ /*
+	 * MMCO parameters which have different meanings according to different mmco_parm2 values.
+ * difference_of_pic_nums_minus1 for mmco_parm2 equals 0 or 3.
+ * long_term_pic_num for mmco_parm2 equals 2.
+ * long_term_frame_idx for mmco_parm2 equals 6.
+ * max_long_term_frame_idx_plus1 for mmco_parm2 equals 4.
+ */
+ MMCO_PARM2,
+
+ /*
+ * OSD_CFG
+ * Address offset: 0x01C0 Access type: read and write
+ * OSD configuration
+ */
+ /* OSD region enable, each bit controls corresponding OSD region. */
+ OSD_E,
+ /* OSD inverse color enable, each bit controls corresponding region. */
+ OSD_INV_E,
+ /*
+ * OSD palette clock selection.
+ * 1'h0: Configure bus clock domain.
+ * 1'h1: Core clock domain.
+ */
+ OSD_PLT_CKS,
+ /*
+ * OSD palette type.
+ * 1'h1: Default type.
+ * 1'h0: User defined type.
+ */
+ OSD_PLT_TYP,
+
+ /*
+ * OSD_INV
+ * Address offset: 0x01C4 Access type: read and write
+ * OSD color inverse configuration
+ */
+	/* Color inverse threshold for OSD region0. */
+	OSD_ITHD_R0,
+	/* Color inverse threshold for OSD region1. */
+	OSD_ITHD_R1,
+	/* Color inverse threshold for OSD region2. */
+	OSD_ITHD_R2,
+	/* Color inverse threshold for OSD region3. */
+	OSD_ITHD_R3,
+	/* Color inverse threshold for OSD region4. */
+	OSD_ITHD_R4,
+	/* Color inverse threshold for OSD region5. */
+	OSD_ITHD_R5,
+	/* Color inverse threshold for OSD region6. */
+	OSD_ITHD_R6,
+	/* Color inverse threshold for OSD region7. */
+ OSD_ITHD_R7,
+
+ /*
+ * SYNT_REFM2_H264
+ * Address offset: 0x01C8 Access type: read and write
+ * Reference frame mark2 for H.264
+ */
+ /* long_term_frame_idx[0] (when mmco equal 3) */
+ LONG_TERM_FRAME_IDX0,
+ /* long_term_frame_idx[1] (when mmco equal 3) */
+ LONG_TERM_FRAME_IDX1,
+ /* long_term_frame_idx[2] (when mmco equal 3) */
+ LONG_TERM_FRAME_IDX2,
+
+ /*
+ * SYNT_REFM3
+ * Address offset: 0x01CC Access type: read and write
+ * Reference frame mark3 for HEVC
+ */
+ REG115,
+
+ /*
+ * OSD_POS
+ * Address offset: 0x01D0~0x01EC Access type: read and write
+ * OSD region position
+ */
+ OSD0_POS_LB_X,
+ OSD0_POS_LB_Y,
+ OSD0_POS_RB_X,
+ OSD0_POS_RB_Y,
+
+ OSD1_POS_LB_X,
+ OSD1_POS_LB_Y,
+ OSD1_POS_RB_X,
+ OSD1_POS_RB_Y,
+
+ OSD2_POS_LB_X,
+ OSD2_POS_LB_Y,
+ OSD2_POS_RB_X,
+ OSD2_POS_RB_Y,
+
+ OSD3_POS_LB_X,
+ OSD3_POS_LB_Y,
+ OSD3_POS_RB_X,
+ OSD3_POS_RB_Y,
+
+ OSD4_POS_LB_X,
+ OSD4_POS_LB_Y,
+ OSD4_POS_RB_X,
+ OSD4_POS_RB_Y,
+
+ OSD5_POS_LB_X,
+ OSD5_POS_LB_Y,
+ OSD5_POS_RB_X,
+ OSD5_POS_RB_Y,
+
+ OSD6_POS_LB_X,
+ OSD6_POS_LB_Y,
+ OSD6_POS_RB_X,
+ OSD6_POS_RB_Y,
+
+ OSD7_POS_LB_X,
+ OSD7_POS_LB_Y,
+ OSD7_POS_RB_X,
+ OSD7_POS_RB_Y,
+
+ /*
+ * ADR_OSD
+ * Address offset: 0x01F0~0x20C Access type: read and write
+ * Base address for OSD region, 16B aligned
+ */
+ OSD_ADDR0,
+ OSD_ADDR1,
+ OSD_ADDR2,
+ OSD_ADDR3,
+ OSD_ADDR4,
+ OSD_ADDR5,
+ OSD_ADDR6,
+ OSD_ADDR7,
+
+ /*
+ * ST_BSL
+ * Address offset: 0x210 Access type: read only
+ * Bit stream length for current frame
+ */
+ /* Bit stream length for current frame. */
+ BS_LGTH,
+
+ /*
+ * ST_SSE_L32
+ * Address offset: 0x214 Access type: read only
+ * Low 32 bits of encoding distortion (SSE)
+ */
+ SSE_L32,
+
+ /*
+ * ST_SSE_QP
+ * Address offset: 0x218 Access type: read only
+ * High 8 bits of encoding distortion (SSE) and sum of QP for the encoded frame
+ */
+ /* Sum of QP for the encoded frame. */
+ QP_SUM,
+ /* High bits of encoding distortion(SSE). */
+ SSE_H8,
+
+ /*
+ * ST_SAO
+ * Address offset: 0x21C Access type: read only
+	 * Number of CTUs which are adjusted by SAO
+	 */
+	/* Number of CTUs whose CHROMA component is adjusted by SAO. */
+	SAO_CNUM,
+	/* Number of CTUs whose LUMA component is adjusted by SAO. */
+ SAO_YNUM,
+
+ /*
+ * ST_ENC
+ * Address offset: 0x228 Access type: read only
+ * VEPU working status
+ */
+ /*
+ * VEPU working status.
+ * 2'h0: Idle.
+	 * 2'h1: Working in register configuration mode.
+ * 2'h2: Working in link table configuration mode.
+ */
+ ST_ENC,
+ /*
+ * Status of safe clear.
+ * 1'h0: Safe clear is finished or not started.
+ * 1'h1: VEPU is performing safe clear.
+ */
+ ST_SCLR,
+
+ /*
+ * ST_LKT
+ * Address offset: 0x22C Access type: read only
+ * Status of link table mode encoding
+ */
+	/* Number of frames that have been encoded since link table mode started. */
+	FNUM_ENC,
+	/* Number of frames that have been configured since link table mode started. */
+	FNUM_CFG,
+	/*
+	 * Number of frames that have been encoded since link table mode started,
+ * updated only when corresponding link table node send interrupt
+ * (VEPU_ENC_PIC_node_int==1).
+ */
+ FNUM_INT,
+
+ /*
+ * ST_NADR
+ * Address offset: 0x230 Access type: read only
+ * Address of the processing link table node
+ */
+	/* High 28 bits of the address for the processing link table node. */
+ NODE_ADDR,
+
+ /*
+ * ST_BSB
+ * Address offset: 0x234 Access type: read only
+ * Status of bit stream buffer
+ */
+ /* High 28 bits of bit stream buffer write address. */
+ BSBW_ADDR,
+
+ /*
+ * ST_BUS
+ * Address offset: 0x238 Access type: read only
+ * VEPU bus status
+ */
+ /*
+ * AXI write response idle.
+ * [6]: Reconstructed picture channel (AXI0_WID==5)
+ * [5]: ME information channel (AXI0_WID==4)
+ * [4]: Co-located Mv channel (AXI0_WID==3)
+ * [3]: Down-sampled picture channel (AXI0_WID==2)
+ * [2]: Bit stream channel (AXI0_WID==1)
+ * [1]: Link table node channel (AXI0_WID==0)
+ * [0]: Reserved
+ */
+ AXIB_IDL,
+ /*
+ * AXI write response outstanding overflow.
+ * [6]: Reconstructed picture channel (AXI0_WID==5)
+ * [5]: ME information channel (AXI0_WID==4)
+ * [4]: Co-located Mv channel (AXI0_WID==3)
+ * [3]: Down-sampled picture channel (AXI0_WID==2)
+ * [2]: Bit stream channel (AXI0_WID==1)
+ * [1]: Link table node channel (AXI0_WID==0)
+ * [0]: Reserved.
+ */
+ AXIB_OVFL,
+ /*
+ * AXI write response error.
+ * [6]: Reconstructed picture channel (AXI0_WID==5)
+ * [5]: ME information channel (AXI0_WID==4)
+ * [4]: Co-located Mv channel (AXI0_WID==3)
+ * [3]: Down-sampled picture channel (AXI0_WID==2)
+ * [2]: Bit stream channel (AXI0_WID==1)
+ * [1]: Link table node channel (AXI0_WID==0)
+ * [0]: Reserved.
+ */
+ AXIB_ERR,
+ /*
+ * AXI read error.
+ * [5]: ROI configuration (AXI0_ARID==7)
+ * [4]: Down-sampled picture (AXI0_ARID==6)
+ * [3]: Co-located Mv (AXI0_ARID==5)
+ * [2]: Link table (AXI0_ARID==4)
+ * [1]: Reference picture (AXI0_ARID==1,2,3,8)
+ * [0]: Video source load (AXI1)
+ */
+ AXIR_ERR,
+
+ /*
+ * ST_SNUM
+ * Address offset: 0x23C Access type: read only
+ * Slice number status
+ */
+	/* Number of slices that have been encoded and not yet read out (by reading ST_SLEN). */
+ SLI_NUM,
+
+ /*
+ * ST_SLEN
+ * Address offset: 0x240 Access type: read only
+ * Status of slice length
+ */
+	/* Byte length of the earliest encoded slice which has not been read out (by reading VEPU_ST_SLEN). */
+ SLI_LEN,
+
+ /*
+ * ST_PNUM_P64
+ * Address offset: 0x244 Access type: read only
+ * Number of 64x64 inter predicted blocks
+ */
+ /* Number of 64x64 inter predicted blocks. */
+ PNUM_P64,
+
+ /*
+ * ST_PNUM_P32
+ * Address offset: 0x248 Access type: read only
+ * Number of 32x32 inter predicted blocks
+ */
+ /* Number of 32x32 inter predicted blocks. */
+ PNUM_P32,
+
+ /*
+ * ST_PNUM_P16
+ * Address offset: 0x24C Access type: read only
+ * Number of 16x16 inter predicted blocks
+ */
+ /* Number of 16x16 inter predicted blocks. */
+ PNUM_P16,
+
+ /*
+ * ST_PNUM_P8
+ * Address offset: 0x250 Access type: read only
+ * Number of 8x8 inter predicted blocks
+ */
+ /* Number of 8x8 inter predicted blocks. */
+ PNUM_P8,
+
+ /*
+ * ST_PNUM_I32
+ * Address offset: 0x254 Access type: read only
+ * Number of 32x32 intra predicted blocks
+ */
+ /* Number of 32x32 intra predicted blocks. */
+ PNUM_I32,
+
+ /*
+ * ST_PNUM_I16
+ * Address offset: 0x258 Access type: read only
+ * Number of 16x16 intra predicted blocks
+ */
+ /* Number of 16x16 intra predicted blocks. */
+ PNUM_I16,
+
+ /*
+ * ST_PNUM_I8
+ * Address offset: 0x25C Access type: read only
+ * Number of 8x8 intra predicted blocks
+ */
+ /* Number of 8x8 intra predicted blocks. */
+ PNUM_I8,
+
+ /*
+ * ST_PNUM_I4
+ * Address offset: 0x260 Access type: read only
+ * Number of 4x4 intra predicted blocks
+ */
+ /* Number of 4x4 intra predicted blocks. */
+ PNUM_I4,
+
+ /*
+ * ST_B8_QP0~51
+ * Address offset: 0x264~0x330 Access type: read only
+ * Number of block8x8s with QP=0~51
+ */
+ /*
+	 * Number of block8x8s with the corresponding QP value.
+	 * HEVC CUs whose size is bigger than 8x8 are counted as
+	 * (CU_size/8)*(CU_size/8) block8x8s,
+	 * while an H.264 MB is counted as 4 block8x8s.
+ */
+ //NUM_QP[52],
+
+ /*
+ * ST_CPLX_TMP
+ * Address offset: 0x334 Access type: read only
+ * Temporal complexity(MADP) for current encoding and reference frame
+ */
+ /* Mean absolute differences between current encoding and reference frame. */
+ MADP,
+
+ /*
+ * ST_BNUM_CME
+ * Address offset: 0x338 Access type: read only
+ * Number of CME blocks in frame.
+	 * H.264: number of CME blocks (4 MBs) in the 16x64 aligned extended frame,
+	 * except for the CME blocks configured as force intra.
+	 * HEVC: number of CME blocks (CTU) in the 64x64 aligned extended frame,
+ * except for the CME blocks configured as force intra.
+ */
+ /* Number of CTU (HEVC: 64x64, H.264: 64x16) for CME inter-frame prediction. */
+ NUM_CTU,
+
+ /*
+ * ST_CPLX_SPT
+ * Address offset: 0x33C Access type: read only
+ * Spatial complexity(MADI) for current encoding frame
+ */
+ /* Mean absolute differences for current encoding frame. */
+ MADI,
+
+ /*
+ * ST_BNUM_B16
+ * Address offset: 0x340 Access type: read only
+ * Number of valid 16x16 blocks for one frame.
+ */
+ /* Number of valid 16x16 blocks for one frame. */
+ NUM_B16,
+ /* sentinel */
+ L1_MAX_FIELDS
+};
+
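The TODO entry about allocating the regmap fields maps onto the rkvenc_vepu540_l1_fields[] table that follows. A minimal sketch, assuming the table eventually covers every enumerator and that devm-managed allocation is wanted:

static int rkvenc_vepu540_alloc_l1_fields(struct device *dev,
					  struct rkvenc_h264_ctx *h264_ctx)
{
	/* Bulk-allocate one regmap_field per enumerator into l1_fields[]. */
	return devm_regmap_field_bulk_alloc(dev, h264_ctx->l1_regmap,
					    h264_ctx->l1_fields,
					    rkvenc_vepu540_l1_fields,
					    L1_MAX_FIELDS);
}
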
+static const struct reg_field rkvenc_vepu540_l1_fields[] = {
+ [SUB_VER] = REG_FIELD(RKVENC_VEPU540_L1_VERSION, 0, 7),
+ [H264_ENC] = REG_FIELD(RKVENC_VEPU540_L1_VERSION, 8, 8),
+ [H265_ENC] = REG_FIELD(RKVENC_VEPU540_L1_VERSION, 9, 9),
+
+ [PIC_SIZE] = REG_FIELD(RKVENC_VEPU540_L1_VERSION, 12, 15), /* RES_CAP in Datasheet TRM Part2 Page748 */
+ [OSD_CAP] = REG_FIELD(RKVENC_VEPU540_L1_VERSION, 16, 17),
+ [FILTR_CAP] = REG_FIELD(RKVENC_VEPU540_L1_VERSION, 18, 19),
+ [BFRM_CAP] = REG_FIELD(RKVENC_VEPU540_L1_VERSION, 20, 20),
+ [FBC_CAP] = REG_FIELD(RKVENC_VEPU540_L1_VERSION, 21, 22),
+ [RKVENC_VER] = REG_FIELD(RKVENC_VEPU540_L1_VERSION, 24, 31), /* IP_ID in Datasheet TRM Part 2 Page 747 */
+ [LKT_NUM] = REG_FIELD(RKVENC_VEPU540_L1_STRT, 0, 7),
+ [RKVENC_CMD] = REG_FIELD(RKVENC_VEPU540_L1_STRT, 8, 9), /* VEPU_CMD in TRM */
+ [CLK_GATE_EN] = REG_FIELD(RKVENC_VEPU540_L1_STRT, 16, 16),
+ [RESETN_HW_EN] = REG_FIELD(RKVENC_VEPU540_L1_STRT, 17, 17),
+ [ENC_DONE_TMVP_EN] = REG_FIELD(RKVENC_VEPU540_L1_STRT, 18, 18),
+
+ [SAFE_CLR] = REG_FIELD(RKVENC_VEPU540_L1_CLR, 0, 0),
+ [FORCE_CLR] = REG_FIELD(RKVENC_VEPU540_L1_CLR, 1, 1),
+
+ [LKT_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_LKT_ADDR, 4, 31),
+
+ [ENC_DONE_EN] = REG_FIELD(RKVENC_VEPU540_L1_INT_EN, 0, 0),
+ [LKT_DONE_EN] = REG_FIELD(RKVENC_VEPU540_L1_INT_EN, 1, 1),
+ [SCLR_DONE_EN] = REG_FIELD(RKVENC_VEPU540_L1_INT_EN, 2, 2),
+ [ENC_SLICE_DONE_EN] = REG_FIELD(RKVENC_VEPU540_L1_INT_EN, 3, 3),
+ [OFLW_DONE_EN] = REG_FIELD(RKVENC_VEPU540_L1_INT_EN, 4, 4),
+ [BRSP_DONE_EN] = REG_FIELD(RKVENC_VEPU540_L1_INT_EN, 5, 5),
+ [BERR_DONE_EN] = REG_FIELD(RKVENC_VEPU540_L1_INT_EN, 6, 6),
+ [RERR_DONE_EN] = REG_FIELD(RKVENC_VEPU540_L1_INT_EN, 7, 7),
+ [WDG_DONE_EN] = REG_FIELD(RKVENC_VEPU540_L1_INT_EN, 8, 8),
+
+ [ENC_DONE_MSK] = REG_FIELD(RKVENC_VEPU540_L1_INT_MSK, 0, 0),
+ [LKT_DONE_MSK] = REG_FIELD(RKVENC_VEPU540_L1_INT_MSK, 1, 1),
+ [SCLR_DONE_MSK] = REG_FIELD(RKVENC_VEPU540_L1_INT_MSK, 2, 2),
+ [ENC_SLICE_DONE_MSK] = REG_FIELD(RKVENC_VEPU540_L1_INT_MSK, 3, 3),
+ [OFLW_DONE_MSK] = REG_FIELD(RKVENC_VEPU540_L1_INT_MSK, 4, 4),
+ [BRSP_DONE_MSK] = REG_FIELD(RKVENC_VEPU540_L1_INT_MSK, 5, 5),
+ [BERR_DONE_MSK] = REG_FIELD(RKVENC_VEPU540_L1_INT_MSK, 6, 6),
+ [RERR_DONE_MSK] = REG_FIELD(RKVENC_VEPU540_L1_INT_MSK, 7, 7),
+ [WDG_DONE_MSK] = REG_FIELD(RKVENC_VEPU540_L1_INT_MSK, 8, 8),
+
+ [ENC_DONE_CLR] = REG_FIELD(RKVENC_VEPU540_L1_INT_CLR, 0, 0),
+ [LKT_DONE_CLR] = REG_FIELD(RKVENC_VEPU540_L1_INT_CLR, 1, 1),
+ [SCLR_DONE_CLR] = REG_FIELD(RKVENC_VEPU540_L1_INT_CLR, 2, 2),
+ [ENC_SLICE_DONE_CLR] = REG_FIELD(RKVENC_VEPU540_L1_INT_CLR, 3, 3),
+ [OFLW_DONE_CLR] = REG_FIELD(RKVENC_VEPU540_L1_INT_CLR, 4, 4),
+ [BRSP_DONE_CLR] = REG_FIELD(RKVENC_VEPU540_L1_INT_CLR, 5, 5),
+ [BERR_DONE_CLR] = REG_FIELD(RKVENC_VEPU540_L1_INT_CLR, 6, 6),
+ [RERR_DONE_CLR] = REG_FIELD(RKVENC_VEPU540_L1_INT_CLR, 7, 7),
+ [WDG_DONE_CLR] = REG_FIELD(RKVENC_VEPU540_L1_INT_CLR, 8, 8),
+
+ [ENC_DONE_STA] = REG_FIELD(RKVENC_VEPU540_L1_INT_STA, 0, 0),
+ [LKT_DONE_STA] = REG_FIELD(RKVENC_VEPU540_L1_INT_STA, 1, 1),
+ [SCLR_DONE_STA] = REG_FIELD(RKVENC_VEPU540_L1_INT_STA, 2, 2),
+ [ENC_SLICE_DONE_STA] = REG_FIELD(RKVENC_VEPU540_L1_INT_STA, 3, 3),
+ [OFLW_DONE_STA] = REG_FIELD(RKVENC_VEPU540_L1_INT_STA, 4, 4),
+ [BRSP_DONE_STA] = REG_FIELD(RKVENC_VEPU540_L1_INT_STA, 5, 5),
+ [BERR_DONE_STA] = REG_FIELD(RKVENC_VEPU540_L1_INT_STA, 6, 6),
+ [RERR_DONE_STA] = REG_FIELD(RKVENC_VEPU540_L1_INT_STA, 7, 7),
+ [WDG_DONE_STA] = REG_FIELD(RKVENC_VEPU540_L1_INT_STA, 8, 8),
+
+ [PIC_WD8_M1] = REG_FIELD(RKVENC_VEPU540_L1_ENC_RSL, 0, 8),
+ [PIC_WFILL] = REG_FIELD(RKVENC_VEPU540_L1_ENC_RSL, 10, 15),
+ [PIC_HD8_M1] = REG_FIELD(RKVENC_VEPU540_L1_ENC_RSL, 16, 24),
+ [PIC_HFILL] = REG_FIELD(RKVENC_VEPU540_L1_ENC_RSL, 26, 31),
+
+ [ENC_STND] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 0, 0),
+ [ROI_ENC] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 1, 1),
+ [CUR_FRM_REF] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 2, 2),
+ [MEI_STOR] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 3, 3),
+ [BS_SCP] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 4, 4),
+ [LAMB_MOD_SEL] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 5, 5),
+ [PIC_QP] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 8, 13),
+ [TOT_POC_NUM] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 14, 18),
+ [LOG2_CTU_NUM] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 19, 22),
+ [ATR_THD_SEL] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 23, 23),
+ [DCHS_RXID] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 24, 25),
+ [DCHS_TXID] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 26, 27),
+ [DCHS_RXE] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 28, 28),
+ [SATD_BYPS_EN] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 29, 29),
+ [SLEN_FIFO] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 30, 30),
+ [NODE_INT] = REG_FIELD(RKVENC_VEPU540_L1_ENC_PIC, 31, 31),
+
+ [VS_LOAD_THD] = REG_FIELD(RKVENC_VEPU540_L1_ENC_WDG, 0, 23),
+ [RFP_LOAD_THRD] = REG_FIELD(RKVENC_VEPU540_L1_ENC_WDG, 24, 31),
+
+ [LPFW_BUS_ORDR] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_MAP, 0, 0),
+ [CMVW_BUS_ORDR] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_MAP, 1, 1),
+ [DSPW_BUS_ORDR] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_MAP, 2, 2),
+ [RFPW_BUS_ORDR] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_MAP, 3, 3),
+ [SRC_BUS_EDIN] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_MAP, 4, 7),
+ [MEIW_BUS_EDIN] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_MAP, 8, 11),
+ [BSW_BUS_EDIN] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_MAP, 12, 14),
+ [LKTR_BUS_EDIN] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_MAP, 15, 18),
+ [ROIR_BUS_EDIN] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_MAP, 19, 22),
+ [LKTW_BUS_EDIN] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_MAP, 23, 26),
+ [AFBC_BSIZE] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_MAP, 27, 27),
+
+ [VPU541_AXI_BRSP_CKE] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_CFG, 0, 6),
+ [VPU541_DSPR_OTSD] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_CFG, 7, 7),
+
+ [VPU540_DSPR_OTSD] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_CFG, 7, 7),
+ [VPU540_AXI_BRSP_CKE] = REG_FIELD(RKVENC_VEPU540_L1_DTRNS_CFG, 16, 23),
+
+ [ALPHA_SWAP] = REG_FIELD(RKVENC_VEPU540_L1_SRC_FMT, 0, 0),
+ [RBUV_SWAP] = REG_FIELD(RKVENC_VEPU540_L1_SRC_FMT, 1, 1),
+ [SRC_CFMT] = REG_FIELD(RKVENC_VEPU540_L1_SRC_FMT, 2, 5),
+ [SRC_RANGE] = REG_FIELD(RKVENC_VEPU540_L1_SRC_FMT, 6, 6),
+ [OUT_FMT_CFG] = REG_FIELD(RKVENC_VEPU540_L1_SRC_FMT, 7, 7),
+
+ [CSC_WGT_B2Y] = REG_FIELD(RKVENC_VEPU540_L1_SRC_UDFY, 0, 8),
+ [CSC_WGT_G2Y] = REG_FIELD(RKVENC_VEPU540_L1_SRC_UDFY, 9, 17),
+ [CSC_WGT_R2Y] = REG_FIELD(RKVENC_VEPU540_L1_SRC_UDFY, 18, 26),
+
+ [CSC_WGT_B2U] = REG_FIELD(RKVENC_VEPU540_L1_SRC_UDFU, 0, 8),
+ [CSC_WGT_G2U] = REG_FIELD(RKVENC_VEPU540_L1_SRC_UDFU, 9, 17),
+ [CSC_WGT_R2U] = REG_FIELD(RKVENC_VEPU540_L1_SRC_UDFU, 18, 26),
+
+ [CSC_WGT_B2V] = REG_FIELD(RKVENC_VEPU540_L1_SRC_UDFV, 0, 8),
+ [CSC_WGT_G2V] = REG_FIELD(RKVENC_VEPU540_L1_SRC_UDFV, 9, 17),
+ [CSC_WGT_R2V] = REG_FIELD(RKVENC_VEPU540_L1_SRC_UDFV, 18, 26),
+
+ [CSC_OFST_V] = REG_FIELD(RKVENC_VEPU540_L1_SRC_UDFO, 0, 7),
+ [CSC_OFST_U] = REG_FIELD(RKVENC_VEPU540_L1_SRC_UDFO, 8, 15),
+ [CSC_OFST_Y] = REG_FIELD(RKVENC_VEPU540_L1_SRC_UDFO, 16, 20),
+
+ [SRC_MIRR] = REG_FIELD(RKVENC_VEPU540_L1_SRC_PROC, 26, 26),
+ [SRC_ROT] = REG_FIELD(RKVENC_VEPU540_L1_SRC_PROC, 27, 28),
+ [TXA_EN] = REG_FIELD(RKVENC_VEPU540_L1_SRC_PROC, 29, 29),
+ [AFBCD_EN] = REG_FIELD(RKVENC_VEPU540_L1_SRC_PROC, 30, 30),
+
+ [SLI_CRS_EN] = REG_FIELD(RKVENC_VEPU540_L1_SLI_CFG_H264, 31, 31),
+
+ [CHRM_KLUT_OFST] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_OFST, 0, 2),
+
+ [CHRM_KLUT_WGT0] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(0), 0, 17),
+ [CHRM_KLUT_WGT1_L9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(0), 23, 31),
+
+ [CHRM_KLUT_WGT1_H9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(1), 0, 8),
+ [CHRM_KLUT_WGT2] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(1), 14, 31),
+
+ [CHRM_KLUT_WGT3] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(2), 0, 17),
+ [CHRM_KLUT_WGT4_L9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(2), 23, 31),
+
+ [CHRM_KLUT_WGT4_H9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(3), 0, 8),
+ [CHRM_KLUT_WGT5] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(3), 14, 31),
+
+ [CHRM_KLUT_WGT6] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(4), 0, 17),
+ [CHRM_KLUT_WGT7_L9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(4), 23, 31),
+
+ [CHRM_KLUT_WGT7_H9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(5), 0, 8),
+ [CHRM_KLUT_WGT8] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(5), 14, 31),
+
+ [CHRM_KLUT_WGT9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(6), 0, 17),
+ [CHRM_KLUT_WGT10_L9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(6), 23, 31),
+
+ [CHRM_KLUT_WGT10_H9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(7), 0, 8),
+ [CHRM_KLUT_WGT11] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(7), 14, 31),
+
+ [CHRM_KLUT_WGT12] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(8), 0, 17),
+ [CHRM_KLUT_WGT13_L9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(8), 23, 31),
+
+ [CHRM_KLUT_WGT13_H9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(9), 0, 8),
+ [CHRM_KLUT_WGT14] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(9), 14, 31),
+
+ [CHRM_KLUT_WGT15] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(10), 0, 17),
+ [CHRM_KLUT_WGT16_L9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(10), 23, 31),
+
+ [CHRM_KLUT_WGT16_H9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(11), 0, 8),
+ [CHRM_KLUT_WGT17] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(11), 14, 31),
+
+ [CHRM_KLUT_WGT18] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(12), 0, 17),
+ [CHRM_KLUT_WGT19_L9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(12), 23, 31),
+
+ [CHRM_KLUT_WGT19_H9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(13), 0, 8),
+ [CHRM_KLUT_WGT20] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(13), 14, 31),
+
+ [CHRM_KLUT_WGT21] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(14), 0, 17),
+ [CHRM_KLUT_WGT22_L9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(14), 23, 31),
+
+ [CHRM_KLUT_WGT22_H9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(15), 0, 8),
+ [CHRM_KLUT_WGT23] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(15), 14, 31),
+
+ [CHRM_KLUT_WGT24] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(16), 0, 17),
+ [CHRM_KLUT_WGT25_L9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(16), 23, 31),
+
+ [CHRM_KLUT_WGT25_H9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(17), 0, 8),
+ [CHRM_KLUT_WGT26] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(17), 14, 31),
+
+ [CHRM_KLUT_WGT27] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(18), 0, 17),
+ [CHRM_KLUT_WGT28_L9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(18), 23, 31),
+
+ [CHRM_KLUT_WGT28_H9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(19), 0, 8),
+ [CHRM_KLUT_WGT29] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(19), 14, 31),
+
+ [CHRM_KLUT_WGT30] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(20), 0, 17),
+ [CHRM_KLUT_WGT31_L9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(20), 23, 31),
+
+ [CHRM_KLUT_WGT31_H9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(21), 0, 8),
+ [CHRM_KLUT_WGT32] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(21), 14, 31),
+
+ [CHRM_KLUT_WGT33] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(22), 0, 17),
+ [CHRM_KLUT_WGT34_L9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(22), 23, 31),
+
+ [CHRM_KLUT_WGT34_H9] = REG_FIELD(RKVENC_VEPU540_L1_KLUT_WGT(23), 0, 8),
+
+ [RC_EN] = REG_FIELD(RKVENC_VEPU540_L1_RC_CFG, 0, 0),
+ [AQ_EN] = REG_FIELD(RKVENC_VEPU540_L1_RC_CFG, 1, 1),
+ [AQ_MODE] = REG_FIELD(RKVENC_VEPU540_L1_RC_CFG, 2, 2),
+ [RC_CTU_NUM] = REG_FIELD(RKVENC_VEPU540_L1_RC_CFG, 16, 31),
+
+ [RC_QP_RANGE] = REG_FIELD(RKVENC_VEPU540_L1_RC_QP, 16, 19),
+ [RC_MAX_QP] = REG_FIELD(RKVENC_VEPU540_L1_RC_QP, 20, 25),
+ [RC_MIN_QP] = REG_FIELD(RKVENC_VEPU540_L1_RC_QP, 26, 31),
+
+ [CTU_EBIT] = REG_FIELD(RKVENC_VEPU540_L1_RC_TGT, 0, 19),
+
+ [QP_ADJ0] = REG_FIELD(RKVENC_VEPU540_L1_RC_ADJ(0), 0, 4),
+ [QP_ADJ1] = REG_FIELD(RKVENC_VEPU540_L1_RC_ADJ(0), 5, 9),
+ [QP_ADJ2] = REG_FIELD(RKVENC_VEPU540_L1_RC_ADJ(0), 10, 14),
+ [QP_ADJ3] = REG_FIELD(RKVENC_VEPU540_L1_RC_ADJ(0), 15, 19),
+ [QP_ADJ4] = REG_FIELD(RKVENC_VEPU540_L1_RC_ADJ(0), 20, 24),
+
+ [QP_ADJ5] = REG_FIELD(RKVENC_VEPU540_L1_RC_ADJ(1), 0, 4),
+ [QP_ADJ6] = REG_FIELD(RKVENC_VEPU540_L1_RC_ADJ(1), 5, 9),
+ [QP_ADJ7] = REG_FIELD(RKVENC_VEPU540_L1_RC_ADJ(1), 10, 14),
+ [QP_ADJ8] = REG_FIELD(RKVENC_VEPU540_L1_RC_ADJ(1), 15, 19),
+
+ [RC_DTHD0] = REG_FIELD(RKVENC_VEPU540_L1_RC_DTHD(0), 0, 31),
+ [RC_DTHD1] = REG_FIELD(RKVENC_VEPU540_L1_RC_DTHD(1), 0, 31),
+ [RC_DTHD2] = REG_FIELD(RKVENC_VEPU540_L1_RC_DTHD(2), 0, 31),
+ [RC_DTHD3] = REG_FIELD(RKVENC_VEPU540_L1_RC_DTHD(3), 0, 31),
+ [RC_DTHD4] = REG_FIELD(RKVENC_VEPU540_L1_RC_DTHD(4), 0, 31),
+ [RC_DTHD5] = REG_FIELD(RKVENC_VEPU540_L1_RC_DTHD(5), 0, 31),
+ [RC_DTHD6] = REG_FIELD(RKVENC_VEPU540_L1_RC_DTHD(6), 0, 31),
+ [RC_DTHD7] = REG_FIELD(RKVENC_VEPU540_L1_RC_DTHD(7), 0, 31),
+ [RC_DTHD8] = REG_FIELD(RKVENC_VEPU540_L1_RC_DTHD(8), 0, 31),
+
+ [QPMIN_AREA0] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(0), 0, 5),
+ [QPMAX_AREA0] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(0), 6, 11),
+ [QPMIN_AREA1] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(0), 12, 17),
+ [QPMAX_AREA1] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(0), 18, 23),
+ [QPMIN_AREA2] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(0), 24, 29),
+
+ [QPMAX_AREA2] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(1), 0, 5),
+ [QPMIN_AREA3] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(1), 6, 11),
+ [QPMAX_AREA3] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(1), 12, 17),
+ [QPMIN_AREA4] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(1), 18, 23),
+ [QPMAX_AREA4] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(1), 24, 29),
+
+ [QPMIN_AREA5] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(2), 0, 5),
+ [QPMAX_AREA5] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(2), 6, 11),
+ [QPMIN_AREA6] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(2), 12, 17),
+ [QPMAX_AREA6] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(2), 18, 23),
+ [QPMIN_AREA7] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(2), 24, 29),
+
+ [QPMAX_AREA7] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(3), 0, 5),
+ [QPMAP_MODE] = REG_FIELD(RKVENC_VEPU540_L1_ROI_QTHD(3), 30, 31),
+
+ [PIC_OFST_Y] = REG_FIELD(RKVENC_VEPU540_L1_PIC_OFST, 0, 12),
+ [PIC_OFST_X] = REG_FIELD(RKVENC_VEPU540_L1_PIC_OFST, 16, 28),
+
+ [SRC_STRD0] = REG_FIELD(RKVENC_VEPU540_L1_SRC_STRD, 0, 15),
+ [SRC_STRD1] = REG_FIELD(RKVENC_VEPU540_L1_SRC_STRD, 16, 31),
+
+ [ADR_SRC0] = REG_FIELD(RKVENC_VEPU540_L1_ADR_SRC0, 0, 31),
+
+ [ADR_SRC1] = REG_FIELD(RKVENC_VEPU540_L1_ADR_SRC1, 0, 31),
+
+ [ADR_SRC2] = REG_FIELD(RKVENC_VEPU540_L1_ADR_SRC2, 0, 31),
+
+ [ROI_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_ROI, 4, 31),
+
+ [RFPW_H_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_RFPW_H, 12, 31),
+
+ [RFPW_B_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_RFPW_B, 12, 31),
+
+ [RFPR_H_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_RFPR_H, 12, 31),
+
+ [RFPR_B_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_RFPR_B, 12, 31),
+
+ [CMVW_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_CMVW, 10, 31),
+
+ [CMVR_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_CMVR, 10, 31),
+
+ [DSPW_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_DSPW, 10, 31),
+
+ [DSPR_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_DSPR, 10, 31),
+
+ [MEIW_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_MEIW, 10, 31),
+
+ [BSBT_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_BSBT, 7, 31),
+
+ [BSBB_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_BSBB, 7, 31),
+
+ [BSBR_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ADR_BSBR, 7, 31),
+
+ [ADR_BSBS] = REG_FIELD(RKVENC_VEPU540_L1_ADR_BSBS, 0, 31),
+
+ [SLI_SPLT] = REG_FIELD(RKVENC_VEPU540_L1_SLI_SPLT, 0, 0),
+ [SLI_SPLT_MODE] = REG_FIELD(RKVENC_VEPU540_L1_SLI_SPLT, 1, 1),
+ [SLI_SPLT_CPST] = REG_FIELD(RKVENC_VEPU540_L1_SLI_SPLT, 2, 2),
+ [SLI_MAX_NUM_M1] = REG_FIELD(RKVENC_VEPU540_L1_SLI_SPLT, 3, 12),
+ [SLI_FLSH] = REG_FIELD(RKVENC_VEPU540_L1_SLI_SPLT, 13, 13),
+ [SLI_SPLT_CNUM_M1] = REG_FIELD(RKVENC_VEPU540_L1_SLI_SPLT, 16, 31),
+
+ [SLI_SPLT_BYTE] = REG_FIELD(RKVENC_VEPU540_L1_SLI_BYTE, 0, 17),
+
+ [CME_SRCH_H] = REG_FIELD(RKVENC_VEPU540_L1_MR_RNGE, 0, 3),
+ [CME_SRCH_V] = REG_FIELD(RKVENC_VEPU540_L1_MR_RNGE, 4, 7),
+ [RME_SRCH_H] = REG_FIELD(RKVENC_VEPU540_L1_MR_RNGE, 8, 10),
+ [RME_SRCH_V] = REG_FIELD(RKVENC_VEPU540_L1_MR_RNGE, 11, 13),
+ [DLT_FRM_NUM] = REG_FIELD(RKVENC_VEPU540_L1_MR_RNGE, 16, 31),
+
+ [PMV_MDST_H] = REG_FIELD(RKVENC_VEPU540_L1_ME_CFG, 0, 7),
+ [PMV_MDST_V] = REG_FIELD(RKVENC_VEPU540_L1_ME_CFG, 8, 15),
+ [MV_LIMIT] = REG_FIELD(RKVENC_VEPU540_L1_ME_CFG, 16, 17),
+ [PMV_NUM] = REG_FIELD(RKVENC_VEPU540_L1_ME_CFG, 18, 19),
+ [COLMV_STOR] = REG_FIELD(RKVENC_VEPU540_L1_ME_CFG, 20, 20),
+ [COLMV_LOAD] = REG_FIELD(RKVENC_VEPU540_L1_ME_CFG, 21, 21),
+ [RME_DIS] = REG_FIELD(RKVENC_VEPU540_L1_ME_CFG, 22, 26),
+ [FME_DIS] = REG_FIELD(RKVENC_VEPU540_L1_ME_CFG, 27, 31),
+
+ [CME_RAMA_MAX] = REG_FIELD(RKVENC_VEPU540_L1_ME_CACH, 0, 10),
+ [CME_RAMA_H] = REG_FIELD(RKVENC_VEPU540_L1_ME_CACH, 11, 15),
+ [CACH_L2_MAP] = REG_FIELD(RKVENC_VEPU540_L1_ME_CACH, 16, 17),
+ [CME_LINEBUF_W] = REG_FIELD(RKVENC_VEPU540_L1_ME_CACH, 18, 25),
+
+ [POC_LSB_LT1] = REG_FIELD(RKVENC_VEPU540_L1_ME_CFG, 0, 15),
+ [POC_LSB_LT2] = REG_FIELD(RKVENC_VEPU540_L1_ME_CFG, 16, 31),
+
+ [DLT_POC_MSB_CYCL1] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_LONG_REFM0, 0, 15),
+ [DLT_POC_MSB_CYCL2] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_LONG_REFM0, 16, 31),
+
+ [OSD_CH_INV_EN] = REG_FIELD(RKVENC_VEPU540_L1_OSD_INV_CFG, 0, 7),
+ [OSD_ITYPE] = REG_FIELD(RKVENC_VEPU540_L1_OSD_INV_CFG, 8, 15),
+ [OSD_LU_INV_MSK] = REG_FIELD(RKVENC_VEPU540_L1_OSD_INV_CFG, 16, 23),
+ [OSD_CH_INV_MSK] = REG_FIELD(RKVENC_VEPU540_L1_OSD_INV_CFG, 24, 31),
+
+ [VTHD_Y] = REG_FIELD(RKVENC_VEPU540_L1_IPRD_CSTS, 0, 11),
+ [VTHD_C] = REG_FIELD(RKVENC_VEPU540_L1_IPRD_CSTS, 16, 27),
+
+ [RECT_SIZE] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 0, 0),
+ [INTER_4X4] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 1, 1),
+ [ARB_SEL] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 2, 2),
+ [VLC_LMT] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 3, 3),
+ [CHRM_SPCL] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 4, 4),
+ [RDO_MASK] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 5, 12),
+ [CCWA_E] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 13, 13),
+ [SCL_LST_SEL] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 14, 14),
+ [ATR_E] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 15, 15),
+ [ATF_EDG] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 16, 17),
+ [ATF_LVL_E] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 18, 18),
+ [ATF_INTRA_E] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 19, 19),
+ [SCL_LST_SEL_] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 20, 21),
+ [SATD_BYPS_FLG] = REG_FIELD(RKVENC_VEPU540_L1_RDO_CFG_H264, 31, 31),
+
+ [NAL_REF_IDC] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_NAL, 0, 1),
+ [NAL_UNIT_TYPE] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_NAL, 2, 6),
+
+ [MAX_FNUM] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SPS, 0, 3),
+ [DRCT_8X8] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SPS, 4, 4),
+ [MPOC_LM4] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SPS, 5, 8),
+
+ [ETPY_MODE] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_PPS, 0, 0),
+ [TRNS_8X8] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_PPS, 1, 1),
+ [CSIP_FLAG] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_PPS, 2, 2),
+ [NUM_REF0_IDX] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_PPS, 3, 4),
+ [NUM_REF1_IDX] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_PPS, 5, 6),
+ [PIC_INIT_QP] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_PPS, 7, 12),
+ [CB_OFST] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_PPS, 13, 17),
+ [CR_OFST] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_PPS, 18, 22),
+ [WGHT_PRED] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_PPS, 23, 23),
+ [DBF_CP_FLG] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_PPS, 24, 24),
+
+ [SLI_TYPE] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(0), 0, 1),
+ [PPS_ID] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(0), 2, 9),
+ [DRCT_SMVP] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(0), 10, 10),
+ [NUM_REF_OVRD] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(0), 11, 11),
+ [CBC_INIT_IDC] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(0), 12, 13),
+ [FRM_NUM] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(0), 16, 31),
+
+ [IDR_PIC_ID] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(1), 0, 15),
+ [POC_LSB] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(1), 16, 31),
+
+ [RODR_PIC_IDX] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(2), 0, 1),
+ [REF_LIST0_RODR] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(2), 2, 2),
+ [SLI_BETA_OFST] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(2), 3, 6),
+ [SLI_ALPH_OFST] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(2), 7, 10),
+ [DIS_DBLK_IDC] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(2), 11, 12),
+ [RODR_PIC_NUM] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_SLI(2), 16, 31),
+
+ [NOPP_FLG] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM0, 0, 0),
+ [LTRF_FLG] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM0, 1, 1),
+ [ARPM_FLG] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM0, 2, 2),
+ [MMCO4_PRE] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM0, 3, 3),
+ [MMCO_TYPE0] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM0, 4, 6),
+ [MMCO_PARM0] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM0, 7, 22),
+ [MMCO_TYPE1] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM0, 23, 25),
+ [MMCO_TYPE2] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM0, 26, 28),
+
+ [MMCO_PARM1] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM1, 0, 15),
+ [MMCO_PARM2] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM1, 16, 31),
+
+ [OSD_E] = REG_FIELD(RKVENC_VEPU540_L1_OSD_CFG, 0, 7),
+ [OSD_INV_E] = REG_FIELD(RKVENC_VEPU540_L1_OSD_CFG, 8, 15),
+ [OSD_PLT_CKS] = REG_FIELD(RKVENC_VEPU540_L1_OSD_CFG, 16, 16),
+ [OSD_PLT_TYP] = REG_FIELD(RKVENC_VEPU540_L1_OSD_CFG, 17, 17),
+
+ [OSD_ITHD_R0] = REG_FIELD(RKVENC_VEPU540_L1_OSD_INV, 0, 3),
+ [OSD_ITHD_R1] = REG_FIELD(RKVENC_VEPU540_L1_OSD_INV, 4, 7),
+ [OSD_ITHD_R2] = REG_FIELD(RKVENC_VEPU540_L1_OSD_INV, 8, 11),
+ [OSD_ITHD_R3] = REG_FIELD(RKVENC_VEPU540_L1_OSD_INV, 12, 15),
+ [OSD_ITHD_R4] = REG_FIELD(RKVENC_VEPU540_L1_OSD_INV, 16, 19),
+ [OSD_ITHD_R5] = REG_FIELD(RKVENC_VEPU540_L1_OSD_INV, 20, 23),
+ [OSD_ITHD_R6] = REG_FIELD(RKVENC_VEPU540_L1_OSD_INV, 24, 27),
+ [OSD_ITHD_R7] = REG_FIELD(RKVENC_VEPU540_L1_OSD_INV, 28, 31),
+
+ [LONG_TERM_FRAME_IDX0] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM2, 0, 3),
+ [LONG_TERM_FRAME_IDX1] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM2, 4, 7),
+ [LONG_TERM_FRAME_IDX2] = REG_FIELD(RKVENC_VEPU540_L1_SYNT_REFM2, 8, 11),
+
+ [OSD0_POS_LB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(0), 0, 7),
+ [OSD0_POS_LB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(0), 8, 15),
+ [OSD0_POS_RB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(0), 16, 23),
+ [OSD0_POS_RB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(0), 24, 31),
+
+ [OSD1_POS_LB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(1), 0, 7),
+ [OSD1_POS_LB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(1), 8, 15),
+ [OSD1_POS_RB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(1), 16, 23),
+ [OSD1_POS_RB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(1), 24, 31),
+
+ [OSD2_POS_LB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(2), 0, 7),
+ [OSD2_POS_LB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(2), 8, 15),
+ [OSD2_POS_RB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(2), 16, 23),
+ [OSD2_POS_RB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(2), 24, 31),
+
+ [OSD3_POS_LB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(3), 0, 7),
+ [OSD3_POS_LB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(3), 8, 15),
+ [OSD3_POS_RB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(3), 16, 23),
+ [OSD3_POS_RB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(3), 24, 31),
+
+ [OSD4_POS_LB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(4), 0, 7),
+ [OSD4_POS_LB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(4), 8, 15),
+ [OSD4_POS_RB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(4), 16, 23),
+ [OSD4_POS_RB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(4), 24, 31),
+
+ [OSD5_POS_LB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(5), 0, 7),
+ [OSD5_POS_LB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(5), 8, 15),
+ [OSD5_POS_RB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(5), 16, 23),
+ [OSD5_POS_RB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(5), 24, 31),
+
+ [OSD6_POS_LB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(6), 0, 7),
+ [OSD6_POS_LB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(6), 8, 15),
+ [OSD6_POS_RB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(6), 16, 23),
+ [OSD6_POS_RB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(6), 24, 31),
+
+ [OSD7_POS_LB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(7), 0, 7),
+ [OSD7_POS_LB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(7), 8, 15),
+ [OSD7_POS_RB_X] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(7), 16, 23),
+ [OSD7_POS_RB_Y] = REG_FIELD(RKVENC_VEPU540_L1_OSD_POS(7), 24, 31),
+
+ [OSD_ADDR0] = REG_FIELD(RKVENC_VEPU540_L1_ADR_OSD(0), 4, 31),
+ [OSD_ADDR1] = REG_FIELD(RKVENC_VEPU540_L1_ADR_OSD(1), 4, 31),
+ [OSD_ADDR2] = REG_FIELD(RKVENC_VEPU540_L1_ADR_OSD(2), 4, 31),
+ [OSD_ADDR3] = REG_FIELD(RKVENC_VEPU540_L1_ADR_OSD(3), 4, 31),
+ [OSD_ADDR4] = REG_FIELD(RKVENC_VEPU540_L1_ADR_OSD(4), 4, 31),
+ [OSD_ADDR5] = REG_FIELD(RKVENC_VEPU540_L1_ADR_OSD(5), 4, 31),
+ [OSD_ADDR6] = REG_FIELD(RKVENC_VEPU540_L1_ADR_OSD(6), 4, 31),
+ [OSD_ADDR7] = REG_FIELD(RKVENC_VEPU540_L1_ADR_OSD(7), 4, 31),
+
+ [BS_LGTH] = REG_FIELD(RKVENC_VEPU540_L1_ST_BSL, 0, 26),
+
+ [SSE_L32] = REG_FIELD(RKVENC_VEPU540_L1_ST_SSE_LE32, 0, 31),
+
+ [QP_SUM] = REG_FIELD(RKVENC_VEPU540_L1_ST_SSE_QP, 24, 31),
+ [SSE_H8] = REG_FIELD(RKVENC_VEPU540_L1_ST_SSE_QP, 0, 21),
+
+ [SAO_CNUM] = REG_FIELD(RKVENC_VEPU540_L1_ST_SAO, 0, 11),
+ [SAO_YNUM] = REG_FIELD(RKVENC_VEPU540_L1_ST_SAO, 12, 23),
+
+ [ST_ENC] = REG_FIELD(RKVENC_VEPU540_L1_ST_ENC, 0, 1),
+ [ST_SCLR] = REG_FIELD(RKVENC_VEPU540_L1_ST_ENC, 2, 2),
+
+ [FNUM_ENC] = REG_FIELD(RKVENC_VEPU540_L1_ST_LKT, 0, 7),
+ [FNUM_CFG] = REG_FIELD(RKVENC_VEPU540_L1_ST_LKT, 8, 15),
+ [FNUM_INT] = REG_FIELD(RKVENC_VEPU540_L1_ST_LKT, 16, 23),
+
+ [NODE_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ST_NADR, 4, 31),
+
+ [BSBW_ADDR] = REG_FIELD(RKVENC_VEPU540_L1_ST_BSB, 4, 31),
+
+ [AXIB_IDL] = REG_FIELD(RKVENC_VEPU540_L1_ST_BUS, 0, 7),
+ [AXIB_OVFL] = REG_FIELD(RKVENC_VEPU540_L1_ST_BUS, 8, 15),
+ [AXIB_ERR] = REG_FIELD(RKVENC_VEPU540_L1_ST_BUS, 16, 23),
+ [AXIR_ERR] = REG_FIELD(RKVENC_VEPU540_L1_ST_BUS, 24, 30),
+
+ [SLI_NUM] = REG_FIELD(RKVENC_VEPU540_L1_ST_SNUM, 0, 5),
+
+ [SLI_LEN] = REG_FIELD(RKVENC_VEPU540_L1_ST_SLEN, 0, 24),
+
+ [PNUM_P64] = REG_FIELD(RKVENC_VEPU540_L1_ST_PNUM_P64, 0, 11),
+
+ [PNUM_P32] = REG_FIELD(RKVENC_VEPU540_L1_ST_PNUM_P32, 0, 13),
+
+ [PNUM_P16] = REG_FIELD(RKVENC_VEPU540_L1_ST_PNUM_P16, 0, 15),
+
+ [PNUM_P8] = REG_FIELD(RKVENC_VEPU540_L1_ST_PNUM_P8, 0, 17),
+
+ [PNUM_I32] = REG_FIELD(RKVENC_VEPU540_L1_ST_PNUM_I32, 0, 13),
+
+ [PNUM_I16] = REG_FIELD(RKVENC_VEPU540_L1_ST_PNUM_I16, 0, 15),
+
+ [PNUM_I8] = REG_FIELD(RKVENC_VEPU540_L1_ST_PNUM_I8, 0, 17),
+
+ [PNUM_I4] = REG_FIELD(RKVENC_VEPU540_L1_ST_PNUM_I4, 0, 19),
+
+ /* TODO: NUM_QP (52 entries) is not mapped yet. */
+
+ [MADP] = REG_FIELD(RKVENC_VEPU540_L1_ST_CPLX_TMP, 0, 17),
+
+ [NUM_CTU] = REG_FIELD(RKVENC_VEPU540_L1_ST_BNUM_CME, 0, 15),
+
+ [MADI] = REG_FIELD(RKVENC_VEPU540_L1_ST_CPLX_SPT, 0, 31),
+
+ [NUM_B16] = REG_FIELD(RKVENC_VEPU540_L1_ST_BNUM_B16, 0, 16),
+};
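+
+/*
+ * Note on the split chroma klut weights above: each CHRM_KLUT_WGTn value is
+ * 18 bits wide, and every weight whose name carries an _L9/_H9 suffix
+ * straddles a register boundary. Assuming the suffixes mean "low 9 bits" and
+ * "high 9 bits" (an interpretation of the field layout, not confirmed by a
+ * datasheet here), such a weight would be programmed roughly as:
+ *
+ *  regmap_field_write(l1_fields[CHRM_KLUT_WGT1_L9], wgt & GENMASK(8, 0));
+ *  regmap_field_write(l1_fields[CHRM_KLUT_WGT1_H9], wgt >> 9);
+ */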
+
+#define RKVENC_VEPU540_L2_IPRD_TTHDY4_(x) (0x4 + x * 0x4) // -> 0 - 1
+#define RKVENC_VEPU540_L2_IPRD_TTHDC8_(x) (0xc + x * 0x4) // -> 0 - 1
+#define RKVENC_VEPU540_L2_IPRD_TTHDY8_(x) (0x14 + x * 0x4) // -> 0 - 1
+#define RKVENC_VEPU540_L2_IPRD_TTHD_UL 0x1c
+#define RKVENC_VEPU540_L2_IPRD_WGTY8 0x20
+#define RKVENC_VEPU540_L2_IPRD_WGTY4 0x24
+#define RKVENC_VEPU540_L2_IPRD_WGTY16 0x28
+#define RKVENC_VEPU540_L2_IPRD_WGTC8 0x2c
+#define RKVENC_VEPU540_L2_QNT_BIAS_COMB 0x30
+#define RKVENC_VEPU540_L2_ATR_THD(x) (0x34 + x * 0x4) // -> 0 - 1
+#define RKVENC_VEPU540_L2_ATR_WGT16 0x3c
+#define RKVENC_VEPU540_L2_ATR_WGT8 0x40
+#define RKVENC_VEPU540_L2_ATR_WGT4 0x44
+#define RKVENC_VEPU540_L2_ATF_TTHD(x) (0x48 + x * 0x4) // -> 0 - 1
+#define RKVENC_VEPU540_L2_ATF_STHD(x) (0x50 + x * 0x4) // -> 0 - 1
+#define RKVENC_VEPU540_L2_ATF_WGT(x) (0x58 + x * 0x4) // -> 0 - 2
+#define RKVENC_VEPU540_L2_ATF_OFST(x) (0x64 + x * 0x4) // -> 0 - 2
+#define RKVENC_VEPU540_L2_IPRD_WGT_QP(x) (0x70 + x * 0x4) // -> 0 - 51
+#define RKVENC_VEPU540_L2_RDO_WGTA_QP(x) (0x140 + x * 0x4) // -> 0 - 51
+#define RKVENC_VEPU540_L2_RDO_WGTB_QP(x) (0x210 + x * 0x4) // -> 0 - 51
+#define RKVENC_VEPU540_L2_MADI_CFG 0x2e0
+#define RKVENC_VEPU540_L2_AQ_TTHD(x) (0x2e4 + x * 0x4) // -> 0 - 3
+#define RKVENC_VEPU540_L2_AQ_STP(x) (0x2f4 + x * 0x4) // -> 0 - 3
+#define RKVENC_VEPU540_L2_RME_MVD_PNSH 0x304
+#define RKVENC_VEPU540_L2_ATR1_THD(x) (0x308 + x * 0x4) // -> 0 - 1
+#define RKVENC_VEPU540_L2_PREI_DIF_IDX_L(x) (0x310 + x * 0x8) // -> 0 - 16
+#define RKVENC_VEPU540_L2_PREI_DIF_IDX_H(x) (0x314 + x * 0x8) // -> 0 - 16
+#define RKVENC_VEPU540_L2_RDO_CKG 0x400
+#define RKVENC_VEPU540_L2_I16_SOBEL_T 0x410
+#define RKVENC_VEPU540_L2_I16_SOBEL_A(x) (0x414 + x * 0x4) // -> 0 - 1
+#define RKVENC_VEPU540_L2_I16_SOBEL_B(x) (0x41c + x * 0x4) // -> 0 - 4
+#define RKVENC_VEPU540_L2_I16_SOBEL_C(x) (0x430 + x * 0x4) // -> 0 - 1
+#define RKVENC_VEPU540_L2_I16_SOBEL_D(x) (0x438 + x * 0x4) // -> 0 - 4
+#define RKVENC_VEPU540_L2_I16_SOBEL_E(x) (0x44c + x * 0x4) // -> 0 - 16
+#define RKVENC_VEPU540_L2_I32_SOBEL_T(x) (0x494 + x * 0x4) // -> 0 - 2
+#define RKVENC_VEPU540_L2_I32_SOBEL_A 0x4a0
+#define RKVENC_VEPU540_L2_I32_SOBEL_B(x) (0x4a4 + x * 0x4) // -> 0 - 2
+#define RKVENC_VEPU540_L2_I32_SOBEL_C 0x4b0
+#define RKVENC_VEPU540_L2_I32_SOBEL_D(x) (0x4b4 + x * 0x4) // -> 0 - 2
+#define RKVENC_VEPU540_L2_I32_SOBEL_E(x) (0x4c0 + x * 0x4) // -> 0 - 9
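+
+/*
+ * The trailing "// -> a - b" comments above give the valid index range of
+ * each parameterized register macro. For example,
+ * RKVENC_VEPU540_L2_IPRD_WGT_QP(x) is valid for x = 0..51, and
+ * RKVENC_VEPU540_L2_IPRD_WGT_QP(51) expands to 0x70 + 51 * 0x4 = 0x13c,
+ * matching the 0x0070 ~ 0x013C range documented for IPRD_WGT_QP below.
+ */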
+
+enum rkvenc_vepu540_regfields_l2 {
+ /*
+ * IPRD_TTHDY4_0_H264 ~ IPRD_TTHDY4_1_H264
+ * Address: 0x0004~0x0008 Access type: read and write
+ * The texture thresholds for H.264 LUMA 4x4 intra prediction.
+ */
+ IPRD_TTHDY4_0,
+ IPRD_TTHDY4_1,
+ IPRD_TTHDY4_2,
+ IPRD_TTHDY4_3,
+
+ /*
+ * IPRD_TTHDC8_0_H264 ~ IPRD_TTHDC8_1_H264
+ * Address: 0x000C~0x0010 Access type: read and write
+ * The texture threshold for H.264 CHROMA 8x8 intra prediction.
+ */
+ IPRD_TTHDC8_0,
+ IPRD_TTHDC8_1,
+ IPRD_TTHDC8_2,
+ IPRD_TTHDC8_3,
+
+ /*
+ * IPRD_TTHDY8_0_H264 ~ IPRD_TTHDY8_1_H264
+ * Address: 0x0014~0x0018 Access type: read and write
+ * The texture thresholds for H.264 LUMA 8x8 intra prediction.
+ */
+ IPRD_TTHDY8_0,
+ IPRD_TTHDY8_1,
+ IPRD_TTHDY8_2,
+ IPRD_TTHDY8_3,
+
+ /*
+ * IPRD_TTHD_UL_H264
+ * Address: 0x001C Access type: read and write
+ * Texture thresholds of the up and left MBs for H.264 LUMA intra prediction.
+ */
+ IPRD_TTHD_UL,
+
+ /*
+ * IPRD_WGTY8_H264
+ * Address: 0x0020 Access type: read and write
+ * Weights of the cost for H.264 LUMA 8x8 intra prediction
+ */
+ IPRD_WGTY8_0,
+ IPRD_WGTY8_1,
+ IPRD_WGTY8_2,
+ IPRD_WGTY8_3,
+
+ /*
+ * IPRD_WGTY4_H264
+ * Address: 0x0024 Access type: read and write
+ * Weights of the cost for H.264 LUMA 4x4 intra prediction
+ */
+ IPRD_WGTY4_0,
+ IPRD_WGTY4_1,
+ IPRD_WGTY4_2,
+ IPRD_WGTY4_3,
+
+ /*
+ * IPRD_WGTY16_H264
+ * Address: 0x0028 Access type: read and write
+ * Weights of the cost for H.264 LUMA 16x16 intra prediction
+ */
+ IPRD_WGTY16_0,
+ IPRD_WGTY16_1,
+ IPRD_WGTY16_2,
+ IPRD_WGTY16_3,
+
+ /*
+ * IPRD_WGTC8_H264
+ * Address: 0x002C Access type: read and write
+ * Weights of the cost for H.264 CHROMA 8x8 intra prediction
+ */
+ IPRD_WGTC8_0,
+ IPRD_WGTC8_1,
+ IPRD_WGTC8_2,
+ IPRD_WGTC8_3,
+
+ /*
+ * QNT_BIAS_COMB
+ * Address: 0x0030 Access type: read and write
+ * Quantization bias for H.264 and HEVC.
+ */
+ /* Quantization bias for HEVC and H.264 I frame. */
+ QNT_BIAS_I,
+ /* Quantization bias for HEVC and H.264 P frame. */
+ QNT_BIAS_P,
+
+ /*
+ * ATR_THD0_H264
+ * Address: 0x0034 Access type: read and write
+ * H.264 anti ringing noise threshold configuration0.
+ */
+ /* The 1st threshold for H.264 anti-ringing-noise. */
+ ATR_THD0,
+ /* The 2nd threshold for H.264 anti-ringing-noise. */
+ ATR_THD1,
+
+ /*
+ * ATR_THD1_H264
+ * Address: 0x0038 Access type: read and write
+ * H.264 anti ringing noise threshold configuration1.
+ */
+ /* The 3rd threshold for H.264 anti-ringing-noise. */
+ ATR_THD2,
+ /* QP threshold of P frame for H.264 anti-ringing-noise. */
+ ATR_QP,
+
+ /*
+ * ATR_WGT16_H264
+ * Address: 0x003C Access type: read and write
+ * Weights of 16x16 cost for H.264 anti ringing noise.
+ */
+ /* The 1st weight for H.264 16x16 anti-ringing-noise. */
+ ATR_LV16_WGT0,
+ /* The 2nd weight for H.264 16x16 anti-ringing-noise. */
+ ATR_LV16_WGT1,
+ /* The 3rd weight for H.264 16x16 anti-ringing-noise. */
+ ATR_LV16_WGT2,
+
+ /*
+ * ATR_WGT8_H264
+ * Address: 0x0040 Access type: read and write
+ * Weights of 8x8 cost for H.264 anti ringing noise.
+ */
+ /* The 1st weight for H.264 8x8 anti-ringing-noise. */
+ ATR_LV8_WGT0,
+ /* The 2nd weight for H.264 8x8 anti-ringing-noise. */
+ ATR_LV8_WGT1,
+ /* The 3rd weight for H.264 8x8 anti-ringing-noise. */
+ ATR_LV8_WGT2,
+
+ /*
+ * ATR_WGT4_H264
+ * Address: 0x0044 Access type: read and write
+ * Weights of 4x4 cost for H.264 anti ringing noise.
+ */
+ /* The 1st weight for H.264 4x4 anti-ringing-noise. */
+ ATR_LV4_WGT0,
+ /* The 2nd weight for H.264 4x4 anti-ringing-noise. */
+ ATR_LV4_WGT1,
+ /* The 3rd weight for H.264 4x4 anti-ringing-noise. */
+ ATR_LV4_WGT2,
+
+ /*
+ * ATF_TTHD0_H264 ~ ATF_TTHD1_H264
+ * Address: 0x0048~0x004C Access type: read and write
+ * Texture threshold configuration for H.264 anti-flicker
+ */
+ ATF_TTHD0,
+ ATF_TTHD1,
+ ATF_TTHD2,
+ ATF_TTHD3,
+
+ /*
+ * ATF_STHD0_H264
+ * Address: 0x0050 Access type: read and write
+ * (CME) SAD threshold configuration0 for H.264 anti-flicker.
+ */
+ /* (CME) SAD threshold0 of texture interval1 for H.264 anti-flicker. */
+ ATF_STHD_10,
+ /* Max (CME) SAD threshold for H.264 anti-flicker. */
+ ATF_STHD_MAX,
+
+ /*
+ * ATF_STHD1_H264
+ * Address: 0x0054 Access type: read and write
+ * (CME) SAD threshold configuration1 for H.264 anti-flicker.
+ */
+ /* (CME) SAD threshold1 of texture interval1 for H.264 anti-flicker. */
+ ATF_STHD_11,
+ /* (CME) SAD threshold0 of texture interval2 for H.264 anti-flicker. */
+ ATF_STHD_20,
+
+ /*
+ * ATF_WGT0_H264
+ * Address: 0x0058 Access type: read and write
+ * Weight configuration0 for H.264 anti-flicker.
+ */
+ /* The 1st weight in texture interval1 for H.264 anti-flicker. */
+ ATF_WGT10,
+ /* The 2nd weight in texture interval1 for H.264 anti-flicker. */
+ ATF_WGT11,
+
+ /*
+ * ATF_WGT1_H264
+ * Address: 0x005C Access type: read and write
+ * Weight configuration1 for H.264 anti-flicker.
+ */
+ /* The 3rd weight in texture interval1 for H.264 anti-flicker. */
+ ATF_WGT12,
+ /* The 1st weight in texture interval2 for H.264 anti-flicker. */
+ ATF_WGT20,
+
+ /*
+ * ATF_WGT2_H264
+ * Address: 0x0060 Access type: read and write
+ * Weight configuration2 for H.264 anti-flicker.
+ */
+ /* The 2nd weight in texture interval2 for H.264 anti-flicker. */
+ ATF_WGT21,
+ /* The weight in texture interval3 for H.264 anti-flicker. */
+ ATF_WGT30,
+
+ /*
+ * ATF_OFST0_H264
+ * Address: 0x0064 Access type: read and write
+ * Offset configuration0 for H.264 anti-flicker.
+ */
+ /* The 1st offset in texture interval1 for H.264 anti-flicker. */
+ ATF_OFST10,
+ /* The 2nd offset in texture interval1 for H.264 anti-flicker. */
+ ATF_OFST11,
+
+ /*
+ * ATF_OFST1_H264
+ * Address: 0x0068 Access type: read and write
+ * Offset configuration1 for H.264 anti-flicker.
+ */
+ /* The 3rd offset in texture interval1 for H.264 anti-flicker. */
+ ATF_OFST12,
+ /* The 1st offset in texture interval2 for H.264 anti-flicker. */
+ ATF_OFST20,
+
+ /*
+ * ATF_OFST2_H264
+ * Address: 0x006C Access type: read and write
+ * Offset configuration2 for H.264 anti-flicker.
+ */
+ /* The 2nd offset in texture interval2 for H.264 anti-flicker. */
+ ATF_OFST21,
+ /* The offset in texture interval3 for H.264 anti-flicker. */
+ ATF_OFST30,
+
+ /*
+ * IPRD_WGT_QP0_HEVC ~ IPRD_WGT_QP51_HEVC
+ * Address: 0x0070 ~ 0x013C Access type: read and write
+ * Weight of SATD cost when QP is 0~51 for HEVC intra prediction.
+ */
+ IPRD_WGT_QP0,
+ IPRD_WGT_QP1,
+ IPRD_WGT_QP2,
+ IPRD_WGT_QP3,
+ IPRD_WGT_QP4,
+ IPRD_WGT_QP5,
+ IPRD_WGT_QP6,
+ IPRD_WGT_QP7,
+ IPRD_WGT_QP8,
+ IPRD_WGT_QP9,
+ IPRD_WGT_QP10,
+ IPRD_WGT_QP11,
+ IPRD_WGT_QP12,
+ IPRD_WGT_QP13,
+ IPRD_WGT_QP14,
+ IPRD_WGT_QP15,
+ IPRD_WGT_QP16,
+ IPRD_WGT_QP17,
+ IPRD_WGT_QP18,
+ IPRD_WGT_QP19,
+ IPRD_WGT_QP20,
+ IPRD_WGT_QP21,
+ IPRD_WGT_QP22,
+ IPRD_WGT_QP23,
+ IPRD_WGT_QP24,
+ IPRD_WGT_QP25,
+ IPRD_WGT_QP26,
+ IPRD_WGT_QP27,
+ IPRD_WGT_QP28,
+ IPRD_WGT_QP29,
+ IPRD_WGT_QP30,
+ IPRD_WGT_QP31,
+ IPRD_WGT_QP32,
+ IPRD_WGT_QP33,
+ IPRD_WGT_QP34,
+ IPRD_WGT_QP35,
+ IPRD_WGT_QP36,
+ IPRD_WGT_QP37,
+ IPRD_WGT_QP38,
+ IPRD_WGT_QP39,
+ IPRD_WGT_QP40,
+ IPRD_WGT_QP41,
+ IPRD_WGT_QP42,
+ IPRD_WGT_QP43,
+ IPRD_WGT_QP44,
+ IPRD_WGT_QP45,
+ IPRD_WGT_QP46,
+ IPRD_WGT_QP47,
+ IPRD_WGT_QP48,
+ IPRD_WGT_QP49,
+ IPRD_WGT_QP50,
+ IPRD_WGT_QP51,
+
+ /*
+ * RDO_WGTA_QP0_COMB ~ RDO_WGTA_QP51_COMB
+ * Address: 0x0140 ~ 0x020C Access type: read and write
+ * Weight of group A for HEVC and H.264 RDO mode decision when QP is 0~51.
+ */
+ WGT_QP_GRPA0,
+ WGT_QP_GRPA1,
+ WGT_QP_GRPA2,
+ WGT_QP_GRPA3,
+ WGT_QP_GRPA4,
+ WGT_QP_GRPA5,
+ WGT_QP_GRPA6,
+ WGT_QP_GRPA7,
+ WGT_QP_GRPA8,
+ WGT_QP_GRPA9,
+ WGT_QP_GRPA10,
+ WGT_QP_GRPA11,
+ WGT_QP_GRPA12,
+ WGT_QP_GRPA13,
+ WGT_QP_GRPA14,
+ WGT_QP_GRPA15,
+ WGT_QP_GRPA16,
+ WGT_QP_GRPA17,
+ WGT_QP_GRPA18,
+ WGT_QP_GRPA19,
+ WGT_QP_GRPA20,
+ WGT_QP_GRPA21,
+ WGT_QP_GRPA22,
+ WGT_QP_GRPA23,
+ WGT_QP_GRPA24,
+ WGT_QP_GRPA25,
+ WGT_QP_GRPA26,
+ WGT_QP_GRPA27,
+ WGT_QP_GRPA28,
+ WGT_QP_GRPA29,
+ WGT_QP_GRPA30,
+ WGT_QP_GRPA31,
+ WGT_QP_GRPA32,
+ WGT_QP_GRPA33,
+ WGT_QP_GRPA34,
+ WGT_QP_GRPA35,
+ WGT_QP_GRPA36,
+ WGT_QP_GRPA37,
+ WGT_QP_GRPA38,
+ WGT_QP_GRPA39,
+ WGT_QP_GRPA40,
+ WGT_QP_GRPA41,
+ WGT_QP_GRPA42,
+ WGT_QP_GRPA43,
+ WGT_QP_GRPA44,
+ WGT_QP_GRPA45,
+ WGT_QP_GRPA46,
+ WGT_QP_GRPA47,
+ WGT_QP_GRPA48,
+ WGT_QP_GRPA49,
+ WGT_QP_GRPA50,
+ WGT_QP_GRPA51,
+
+ /*
+ * RDO_WGTB_QP0_COMB ~ RDO_WGTB_QP51_COMB
+ * Address: 0x0210 ~ 0x02DC Access type: read and write
+ * Weight of group B for HEVC and H.264 RDO mode decision when QP is 0~51.
+ */
+ WGT_QP_GRPB0,
+ WGT_QP_GRPB1,
+ WGT_QP_GRPB2,
+ WGT_QP_GRPB3,
+ WGT_QP_GRPB4,
+ WGT_QP_GRPB5,
+ WGT_QP_GRPB6,
+ WGT_QP_GRPB7,
+ WGT_QP_GRPB8,
+ WGT_QP_GRPB9,
+ WGT_QP_GRPB10,
+ WGT_QP_GRPB11,
+ WGT_QP_GRPB12,
+ WGT_QP_GRPB13,
+ WGT_QP_GRPB14,
+ WGT_QP_GRPB15,
+ WGT_QP_GRPB16,
+ WGT_QP_GRPB17,
+ WGT_QP_GRPB18,
+ WGT_QP_GRPB19,
+ WGT_QP_GRPB20,
+ WGT_QP_GRPB21,
+ WGT_QP_GRPB22,
+ WGT_QP_GRPB23,
+ WGT_QP_GRPB24,
+ WGT_QP_GRPB25,
+ WGT_QP_GRPB26,
+ WGT_QP_GRPB27,
+ WGT_QP_GRPB28,
+ WGT_QP_GRPB29,
+ WGT_QP_GRPB30,
+ WGT_QP_GRPB31,
+ WGT_QP_GRPB32,
+ WGT_QP_GRPB33,
+ WGT_QP_GRPB34,
+ WGT_QP_GRPB35,
+ WGT_QP_GRPB36,
+ WGT_QP_GRPB37,
+ WGT_QP_GRPB38,
+ WGT_QP_GRPB39,
+ WGT_QP_GRPB40,
+ WGT_QP_GRPB41,
+ WGT_QP_GRPB42,
+ WGT_QP_GRPB43,
+ WGT_QP_GRPB44,
+ WGT_QP_GRPB45,
+ WGT_QP_GRPB46,
+ WGT_QP_GRPB47,
+ WGT_QP_GRPB48,
+ WGT_QP_GRPB49,
+ WGT_QP_GRPB50,
+ WGT_QP_GRPB51,
+
+ /*
+ * MADI_CFG
+ * Address: 0x02E0 Access type: read and write
+ * MADI configuration for CU32 and CU64.
+ */
+ /*
+ * MADI generation mode for CU32 and CU64.
+ * 1'h0: Follow 32x32 and 64x64 MADI functions.
+ * 1'h1: Calculated by the mean of corresponding CU16 MADIs.
+ */
+ MADI_MODE,
+
+ /*
+ * AQ_TTHD0 ~ AQ_TTHD3
+ * Address: 0x02E4 ~ 0x02F0 Access type: read and write
+ * Texture threshold configuration for adaptive QP adjustment.
+ */
+ /* Texture threshold for adaptive QP adjustment. */
+ AQ_TTHD0,
+ AQ_TTHD1,
+ AQ_TTHD2,
+ AQ_TTHD3,
+ AQ_TTHD4,
+ AQ_TTHD5,
+ AQ_TTHD6,
+ AQ_TTHD7,
+ AQ_TTHD8,
+ AQ_TTHD9,
+ AQ_TTHD10,
+ AQ_TTHD11,
+ AQ_TTHD12,
+ AQ_TTHD13,
+ AQ_TTHD14,
+ AQ_TTHD15,
+
+ /*
+ * AQ_STP0 ~ AQ_STP3
+ * Address: 0x02F4 ~ 0x0300 Access type: read and write
+ * Adjustment step configuration for adaptive QP adjustment.
+ */
+ /* QP adjustment step when the texture strength is between thresholds n-1 and n. */
+ AQ_STEP0,
+ AQ_STEP1,
+ AQ_STEP2,
+ AQ_STEP3,
+ AQ_STEP4,
+ AQ_STEP5,
+ AQ_STEP6,
+ AQ_STEP7,
+ AQ_STEP8,
+ AQ_STEP9,
+ AQ_STEP10,
+ AQ_STEP11,
+ AQ_STEP12,
+ AQ_STEP13,
+ AQ_STEP14,
+ AQ_STEP15,
+
+ /*
+ * RME_MVD_PNSH_H264
+ * Address: 0x0304 Access type: read and write
+ * RME MVD (motion vector difference) cost penalty, H.264 only.
+ */
+ /* MVD cost penalty enable. */
+ MVD_PNLT_E,
+ /* MVD cost penalty coefficient. */
+ MVD_PNLT_COEF,
+ /* MVD cost penalty constant. */
+ MVD_PNLT_CNST,
+ /* Low threshold of the MVs which should be punished. */
+ MVD_PNLT_LTHD,
+ /* High threshold of the MVs which should be punished. */
+ MVD_PNLT_HTHD,
+
+ /*
+ * ATR1_THD0_H264
+ * Address: 0x0308 Access type: read and write
+ * H.264 anti ringing noise threshold configuration0 of group1.
+ */
+ /* The 1st threshold for H.264 anti-ringing-noise of group1. */
+ ATR1_THD0,
+ /* The 2nd threshold for H.264 anti-ringing-noise of group1. */
+ ATR1_THD1,
+
+ /*
+ * ATR1_THD1_H264
+ * Address: 0x030C Access type: read and write
+ * H.264 anti ringing noise threshold configuration1 of group1.
+ */
+ /* The 3rd threshold for H.264 anti-ringing-noise of group1. */
+ ATR1_THD2,
+
+ /* sentinel */
+ L2_MAX_FIELDS
+};
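+
+/*
+ * The per-QP enumerators above (IPRD_WGT_QP0..IPRD_WGT_QP51,
+ * WGT_QP_GRPA0..WGT_QP_GRPA51, WGT_QP_GRPB0..WGT_QP_GRPB51) are consecutive,
+ * so a 52-entry tuning table can be loaded with a loop instead of 52
+ * explicit writes. Hypothetical example, with a caller-provided "wgt" table
+ * and a regmap_field array "l2":
+ *
+ *  for (i = 0; i < 52; i++)
+ *   regmap_field_write(l2[IPRD_WGT_QP0 + i], wgt[i]);
+ */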
+
+static const struct reg_field rkvenc_vepu540_l2_fields[] = {
+ [IPRD_TTHDY4_0] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHDY4_(0), 0, 11),
+ [IPRD_TTHDY4_1] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHDY4_(0), 16, 27),
+ [IPRD_TTHDY4_2] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHDY4_(1), 0, 11),
+ [IPRD_TTHDY4_3] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHDY4_(1), 16, 27),
+
+ [IPRD_TTHDC8_0] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHDC8_(0), 0, 11),
+ [IPRD_TTHDC8_1] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHDC8_(0), 16, 27),
+ [IPRD_TTHDC8_2] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHDC8_(1), 0, 11),
+ [IPRD_TTHDC8_3] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHDC8_(1), 16, 27),
+
+ [IPRD_TTHDY8_0] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHDY8_(0), 0, 11),
+ [IPRD_TTHDY8_1] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHDY8_(0), 16, 27),
+ [IPRD_TTHDY8_2] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHDY8_(1), 0, 11),
+ [IPRD_TTHDY8_3] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHDY8_(1), 16, 27),
+
+ [IPRD_TTHD_UL] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_TTHD_UL, 0, 11),
+
+ [IPRD_WGTY8_0] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTY8, 0, 7),
+ [IPRD_WGTY8_1] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTY8, 8, 15),
+ [IPRD_WGTY8_2] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTY8, 16, 23),
+ [IPRD_WGTY8_3] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTY8, 24, 31),
+
+ [IPRD_WGTY4_0] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTY4, 0, 7),
+ [IPRD_WGTY4_1] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTY4, 8, 15),
+ [IPRD_WGTY4_2] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTY4, 16, 23),
+ [IPRD_WGTY4_3] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTY4, 24, 31),
+
+ [IPRD_WGTY16_0] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTY16, 0, 7),
+ [IPRD_WGTY16_1] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTY16, 8, 15),
+ [IPRD_WGTY16_2] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTY16, 16, 23),
+ [IPRD_WGTY16_3] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTY16, 24, 31),
+
+ [IPRD_WGTC8_0] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTC8, 0, 7),
+ [IPRD_WGTC8_1] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTC8, 8, 15),
+ [IPRD_WGTC8_2] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTC8, 16, 23),
+ [IPRD_WGTC8_3] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGTC8, 24, 31),
+
+ [QNT_BIAS_I] = REG_FIELD(RKVENC_VEPU540_L2_QNT_BIAS_COMB, 0, 9),
+ [QNT_BIAS_P] = REG_FIELD(RKVENC_VEPU540_L2_QNT_BIAS_COMB, 10, 19),
+
+ [ATR_THD0] = REG_FIELD(RKVENC_VEPU540_L2_ATR_THD(0), 0, 11),
+ [ATR_THD1] = REG_FIELD(RKVENC_VEPU540_L2_ATR_THD(0), 16, 27),
+
+ [ATR_THD2] = REG_FIELD(RKVENC_VEPU540_L2_ATR_THD(1), 0, 11),
+ [ATR_QP] = REG_FIELD(RKVENC_VEPU540_L2_ATR_THD(1), 16, 21),
+
+ [ATR_LV16_WGT0] = REG_FIELD(RKVENC_VEPU540_L2_ATR_WGT16, 0, 7),
+ [ATR_LV16_WGT1] = REG_FIELD(RKVENC_VEPU540_L2_ATR_WGT16, 8, 15),
+ [ATR_LV16_WGT2] = REG_FIELD(RKVENC_VEPU540_L2_ATR_WGT16, 16, 23),
+
+ [ATR_LV8_WGT0] = REG_FIELD(RKVENC_VEPU540_L2_ATR_WGT8, 0, 7),
+ [ATR_LV8_WGT1] = REG_FIELD(RKVENC_VEPU540_L2_ATR_WGT8, 8, 15),
+ [ATR_LV8_WGT2] = REG_FIELD(RKVENC_VEPU540_L2_ATR_WGT8, 16, 23),
+
+ [ATR_LV4_WGT0] = REG_FIELD(RKVENC_VEPU540_L2_ATR_WGT4, 0, 7),
+ [ATR_LV4_WGT1] = REG_FIELD(RKVENC_VEPU540_L2_ATR_WGT4, 8, 15),
+ [ATR_LV4_WGT2] = REG_FIELD(RKVENC_VEPU540_L2_ATR_WGT4, 16, 23),
+
+ [ATF_TTHD0] = REG_FIELD(RKVENC_VEPU540_L2_ATF_TTHD(0), 0, 11),
+ [ATF_TTHD1] = REG_FIELD(RKVENC_VEPU540_L2_ATF_TTHD(0), 16, 27),
+ [ATF_TTHD2] = REG_FIELD(RKVENC_VEPU540_L2_ATF_TTHD(1), 0, 11),
+ [ATF_TTHD3] = REG_FIELD(RKVENC_VEPU540_L2_ATF_TTHD(1), 16, 27),
+
+ [ATF_STHD_10] = REG_FIELD(RKVENC_VEPU540_L2_ATF_STHD(0), 0, 13),
+ [ATF_STHD_MAX] = REG_FIELD(RKVENC_VEPU540_L2_ATF_STHD(0), 16, 29),
+
+ [ATF_STHD_11] = REG_FIELD(RKVENC_VEPU540_L2_ATF_STHD(1), 0, 13),
+ [ATF_STHD_20] = REG_FIELD(RKVENC_VEPU540_L2_ATF_STHD(1), 16, 29),
+
+ [ATF_WGT10] = REG_FIELD(RKVENC_VEPU540_L2_ATF_WGT(0), 0, 8),
+ [ATF_WGT11] = REG_FIELD(RKVENC_VEPU540_L2_ATF_WGT(0), 16, 24),
+
+ [ATF_WGT12] = REG_FIELD(RKVENC_VEPU540_L2_ATF_WGT(1), 0, 8),
+ [ATF_WGT20] = REG_FIELD(RKVENC_VEPU540_L2_ATF_WGT(1), 16, 24),
+
+ [ATF_WGT21] = REG_FIELD(RKVENC_VEPU540_L2_ATF_WGT(2), 0, 8),
+ [ATF_WGT30] = REG_FIELD(RKVENC_VEPU540_L2_ATF_WGT(2), 16, 24),
+
+ [ATF_OFST10] = REG_FIELD(RKVENC_VEPU540_L2_ATF_OFST(0), 0, 13),
+ [ATF_OFST11] = REG_FIELD(RKVENC_VEPU540_L2_ATF_OFST(0), 16, 29),
+
+ [ATF_OFST12] = REG_FIELD(RKVENC_VEPU540_L2_ATF_OFST(1), 0, 13),
+ [ATF_OFST20] = REG_FIELD(RKVENC_VEPU540_L2_ATF_OFST(1), 16, 29),
+
+ [ATF_OFST21] = REG_FIELD(RKVENC_VEPU540_L2_ATF_OFST(2), 0, 13),
+ [ATF_OFST30] = REG_FIELD(RKVENC_VEPU540_L2_ATF_OFST(2), 16, 29),
+
+ [IPRD_WGT_QP0] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(0), 0, 19),
+ [IPRD_WGT_QP1] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(1), 0, 19),
+ [IPRD_WGT_QP2] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(2), 0, 19),
+ [IPRD_WGT_QP3] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(3), 0, 19),
+ [IPRD_WGT_QP4] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(4), 0, 19),
+ [IPRD_WGT_QP5] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(5), 0, 19),
+ [IPRD_WGT_QP6] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(6), 0, 19),
+ [IPRD_WGT_QP7] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(7), 0, 19),
+ [IPRD_WGT_QP8] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(8), 0, 19),
+ [IPRD_WGT_QP9] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(9), 0, 19),
+ [IPRD_WGT_QP10] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(10), 0, 19),
+ [IPRD_WGT_QP11] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(11), 0, 19),
+ [IPRD_WGT_QP12] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(12), 0, 19),
+ [IPRD_WGT_QP13] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(13), 0, 19),
+ [IPRD_WGT_QP14] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(14), 0, 19),
+ [IPRD_WGT_QP15] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(15), 0, 19),
+ [IPRD_WGT_QP16] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(16), 0, 19),
+ [IPRD_WGT_QP17] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(17), 0, 19),
+ [IPRD_WGT_QP18] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(18), 0, 19),
+ [IPRD_WGT_QP19] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(19), 0, 19),
+ [IPRD_WGT_QP20] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(20), 0, 19),
+ [IPRD_WGT_QP21] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(21), 0, 19),
+ [IPRD_WGT_QP22] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(22), 0, 19),
+ [IPRD_WGT_QP23] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(23), 0, 19),
+ [IPRD_WGT_QP24] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(24), 0, 19),
+ [IPRD_WGT_QP25] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(25), 0, 19),
+ [IPRD_WGT_QP26] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(26), 0, 19),
+ [IPRD_WGT_QP27] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(27), 0, 19),
+ [IPRD_WGT_QP28] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(28), 0, 19),
+ [IPRD_WGT_QP29] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(29), 0, 19),
+ [IPRD_WGT_QP30] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(30), 0, 19),
+ [IPRD_WGT_QP31] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(31), 0, 19),
+ [IPRD_WGT_QP32] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(32), 0, 19),
+ [IPRD_WGT_QP33] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(33), 0, 19),
+ [IPRD_WGT_QP34] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(34), 0, 19),
+ [IPRD_WGT_QP35] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(35), 0, 19),
+ [IPRD_WGT_QP36] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(36), 0, 19),
+ [IPRD_WGT_QP37] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(37), 0, 19),
+ [IPRD_WGT_QP38] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(38), 0, 19),
+ [IPRD_WGT_QP39] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(39), 0, 19),
+ [IPRD_WGT_QP40] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(40), 0, 19),
+ [IPRD_WGT_QP41] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(41), 0, 19),
+ [IPRD_WGT_QP42] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(42), 0, 19),
+ [IPRD_WGT_QP43] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(43), 0, 19),
+ [IPRD_WGT_QP44] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(44), 0, 19),
+ [IPRD_WGT_QP45] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(45), 0, 19),
+ [IPRD_WGT_QP46] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(46), 0, 19),
+ [IPRD_WGT_QP47] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(47), 0, 19),
+ [IPRD_WGT_QP48] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(48), 0, 19),
+ [IPRD_WGT_QP49] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(49), 0, 19),
+ [IPRD_WGT_QP50] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(50), 0, 19),
+ [IPRD_WGT_QP51] = REG_FIELD(RKVENC_VEPU540_L2_IPRD_WGT_QP(51), 0, 19),
+
+ [WGT_QP_GRPA0] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(0), 0, 23),
+ [WGT_QP_GRPA1] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(1), 0, 23),
+ [WGT_QP_GRPA2] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(2), 0, 23),
+ [WGT_QP_GRPA3] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(3), 0, 23),
+ [WGT_QP_GRPA4] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(4), 0, 23),
+ [WGT_QP_GRPA5] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(5), 0, 23),
+ [WGT_QP_GRPA6] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(6), 0, 23),
+ [WGT_QP_GRPA7] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(7), 0, 23),
+ [WGT_QP_GRPA8] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(8), 0, 23),
+ [WGT_QP_GRPA9] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(9), 0, 23),
+ [WGT_QP_GRPA10] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(10), 0, 23),
+ [WGT_QP_GRPA11] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(11), 0, 23),
+ [WGT_QP_GRPA12] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(12), 0, 23),
+ [WGT_QP_GRPA13] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(13), 0, 23),
+ [WGT_QP_GRPA14] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(14), 0, 23),
+ [WGT_QP_GRPA15] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(15), 0, 23),
+ [WGT_QP_GRPA16] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(16), 0, 23),
+ [WGT_QP_GRPA17] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(17), 0, 23),
+ [WGT_QP_GRPA18] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(18), 0, 23),
+ [WGT_QP_GRPA19] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(19), 0, 23),
+ [WGT_QP_GRPA20] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(20), 0, 23),
+ [WGT_QP_GRPA21] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(21), 0, 23),
+ [WGT_QP_GRPA22] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(22), 0, 23),
+ [WGT_QP_GRPA23] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(23), 0, 23),
+ [WGT_QP_GRPA24] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(24), 0, 23),
+ [WGT_QP_GRPA25] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(25), 0, 23),
+ [WGT_QP_GRPA26] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(26), 0, 23),
+ [WGT_QP_GRPA27] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(27), 0, 23),
+ [WGT_QP_GRPA28] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(28), 0, 23),
+ [WGT_QP_GRPA29] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(29), 0, 23),
+ [WGT_QP_GRPA30] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(30), 0, 23),
+ [WGT_QP_GRPA31] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(31), 0, 23),
+ [WGT_QP_GRPA32] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(32), 0, 23),
+ [WGT_QP_GRPA33] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(33), 0, 23),
+ [WGT_QP_GRPA34] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(34), 0, 23),
+ [WGT_QP_GRPA35] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(35), 0, 23),
+ [WGT_QP_GRPA36] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(36), 0, 23),
+ [WGT_QP_GRPA37] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(37), 0, 23),
+ [WGT_QP_GRPA38] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(38), 0, 23),
+ [WGT_QP_GRPA39] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(39), 0, 23),
+ [WGT_QP_GRPA40] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(40), 0, 23),
+ [WGT_QP_GRPA41] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(41), 0, 23),
+ [WGT_QP_GRPA42] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(42), 0, 23),
+ [WGT_QP_GRPA43] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(43), 0, 23),
+ [WGT_QP_GRPA44] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(44), 0, 23),
+ [WGT_QP_GRPA45] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(45), 0, 23),
+ [WGT_QP_GRPA46] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(46), 0, 23),
+ [WGT_QP_GRPA47] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(47), 0, 23),
+ [WGT_QP_GRPA48] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(48), 0, 23),
+ [WGT_QP_GRPA49] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(49), 0, 23),
+ [WGT_QP_GRPA50] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(50), 0, 23),
+ [WGT_QP_GRPA51] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTA_QP(51), 0, 23),
+
+ [WGT_QP_GRPB0] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(0), 0, 23),
+ [WGT_QP_GRPB1] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(1), 0, 23),
+ [WGT_QP_GRPB2] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(2), 0, 23),
+ [WGT_QP_GRPB3] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(3), 0, 23),
+ [WGT_QP_GRPB4] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(4), 0, 23),
+ [WGT_QP_GRPB5] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(5), 0, 23),
+ [WGT_QP_GRPB6] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(6), 0, 23),
+ [WGT_QP_GRPB7] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(7), 0, 23),
+ [WGT_QP_GRPB8] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(8), 0, 23),
+ [WGT_QP_GRPB9] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(9), 0, 23),
+ [WGT_QP_GRPB10] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(10), 0, 23),
+ [WGT_QP_GRPB11] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(11), 0, 23),
+ [WGT_QP_GRPB12] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(12), 0, 23),
+ [WGT_QP_GRPB13] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(13), 0, 23),
+ [WGT_QP_GRPB14] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(14), 0, 23),
+ [WGT_QP_GRPB15] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(15), 0, 23),
+ [WGT_QP_GRPB16] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(16), 0, 23),
+ [WGT_QP_GRPB17] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(17), 0, 23),
+ [WGT_QP_GRPB18] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(18), 0, 23),
+ [WGT_QP_GRPB19] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(19), 0, 23),
+ [WGT_QP_GRPB20] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(20), 0, 23),
+ [WGT_QP_GRPB21] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(21), 0, 23),
+ [WGT_QP_GRPB22] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(22), 0, 23),
+ [WGT_QP_GRPB23] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(23), 0, 23),
+ [WGT_QP_GRPB24] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(24), 0, 23),
+ [WGT_QP_GRPB25] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(25), 0, 23),
+ [WGT_QP_GRPB26] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(26), 0, 23),
+ [WGT_QP_GRPB27] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(27), 0, 23),
+ [WGT_QP_GRPB28] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(28), 0, 23),
+ [WGT_QP_GRPB29] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(29), 0, 23),
+ [WGT_QP_GRPB30] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(30), 0, 23),
+ [WGT_QP_GRPB31] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(31), 0, 23),
+ [WGT_QP_GRPB32] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(32), 0, 23),
+ [WGT_QP_GRPB33] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(33), 0, 23),
+ [WGT_QP_GRPB34] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(34), 0, 23),
+ [WGT_QP_GRPB35] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(35), 0, 23),
+ [WGT_QP_GRPB36] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(36), 0, 23),
+ [WGT_QP_GRPB37] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(37), 0, 23),
+ [WGT_QP_GRPB38] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(38), 0, 23),
+ [WGT_QP_GRPB39] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(39), 0, 23),
+ [WGT_QP_GRPB40] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(40), 0, 23),
+ [WGT_QP_GRPB41] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(41), 0, 23),
+ [WGT_QP_GRPB42] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(42), 0, 23),
+ [WGT_QP_GRPB43] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(43), 0, 23),
+ [WGT_QP_GRPB44] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(44), 0, 23),
+ [WGT_QP_GRPB45] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(45), 0, 23),
+ [WGT_QP_GRPB46] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(46), 0, 23),
+ [WGT_QP_GRPB47] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(47), 0, 23),
+ [WGT_QP_GRPB48] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(48), 0, 23),
+ [WGT_QP_GRPB49] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(49), 0, 23),
+ [WGT_QP_GRPB50] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(50), 0, 23),
+ [WGT_QP_GRPB51] = REG_FIELD(RKVENC_VEPU540_L2_RDO_WGTB_QP(51), 0, 23),
+
+ [MADI_MODE] = REG_FIELD(RKVENC_VEPU540_L2_MADI_CFG, 0, 0),
+
+ [AQ_TTHD0] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(0), 0, 7),
+ [AQ_TTHD1] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(0), 8, 15),
+ [AQ_TTHD2] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(0), 16, 23),
+ [AQ_TTHD3] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(0), 24, 31),
+ [AQ_TTHD4] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(1), 0, 7),
+ [AQ_TTHD5] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(1), 8, 15),
+ [AQ_TTHD6] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(1), 16, 23),
+ [AQ_TTHD7] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(1), 24, 31),
+ [AQ_TTHD8] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(2), 0, 7),
+ [AQ_TTHD9] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(2), 8, 15),
+ [AQ_TTHD10] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(2), 16, 23),
+ [AQ_TTHD11] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(2), 24, 31),
+ [AQ_TTHD12] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(3), 0, 7),
+ [AQ_TTHD13] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(3), 8, 15),
+ [AQ_TTHD14] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(3), 16, 23),
+ [AQ_TTHD15] = REG_FIELD(RKVENC_VEPU540_L2_AQ_TTHD(3), 24, 31),
+
+ [AQ_STEP0] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(0), 0, 5),
+ [AQ_STEP1] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(0), 8, 13),
+ [AQ_STEP2] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(0), 16, 21),
+ [AQ_STEP3] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(0), 24, 29),
+ [AQ_STEP4] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(1), 0, 5),
+ [AQ_STEP5] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(1), 8, 13),
+ [AQ_STEP6] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(1), 16, 21),
+ [AQ_STEP7] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(1), 24, 29),
+ [AQ_STEP8] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(2), 0, 5),
+ [AQ_STEP9] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(2), 8, 13),
+ [AQ_STEP10] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(2), 16, 21),
+ [AQ_STEP11] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(2), 24, 29),
+ [AQ_STEP12] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(3), 0, 5),
+ [AQ_STEP13] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(3), 8, 13),
+ [AQ_STEP14] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(3), 16, 21),
+ [AQ_STEP15] = REG_FIELD(RKVENC_VEPU540_L2_AQ_STP(3), 24, 29),
+
+ [MVD_PNLT_E] = REG_FIELD(RKVENC_VEPU540_L2_RME_MVD_PNSH, 0, 0),
+ [MVD_PNLT_COEF] = REG_FIELD(RKVENC_VEPU540_L2_RME_MVD_PNSH, 1, 5),
+ [MVD_PNLT_CNST] = REG_FIELD(RKVENC_VEPU540_L2_RME_MVD_PNSH, 6, 19),
+ [MVD_PNLT_LTHD] = REG_FIELD(RKVENC_VEPU540_L2_RME_MVD_PNSH, 20, 23),
+ [MVD_PNLT_HTHD] = REG_FIELD(RKVENC_VEPU540_L2_RME_MVD_PNSH, 24, 27),
+
+ [ATR1_THD0] = REG_FIELD(RKVENC_VEPU540_L2_ATR1_THD(0), 0, 11),
+ [ATR1_THD1] = REG_FIELD(RKVENC_VEPU540_L2_ATR1_THD(0), 16, 27),
+
+ [ATR1_THD2] = REG_FIELD(RKVENC_VEPU540_L2_ATR1_THD(1), 0, 11),
+};
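+
+/*
+ * Minimal sketch, not called anywhere yet: one way to turn the reg_field
+ * tables in this header into live regmap_field handles. All-zero gaps in a
+ * table (such as the unmapped NUM_QP slots) still allocate fine but must not
+ * be written. A caller would do something like
+ * rkvenc_vepu540_alloc_fields(dev, l2_regmap, rkvenc_vepu540_l2_fields,
+ * l2_fields, L2_MAX_FIELDS), where l2_regmap and l2_fields are owned by the
+ * calling context.
+ */
+static inline int rkvenc_vepu540_alloc_fields(struct device *dev,
+ struct regmap *map, const struct reg_field *tbl,
+ struct regmap_field **fields, unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+  fields[i] = devm_regmap_field_alloc(dev, map, tbl[i]);
+  if (IS_ERR(fields[i]))
+   return PTR_ERR(fields[i]);
+ }
+
+ return 0;
+}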
+
+#endif /* RKVENC_VEPU540_REGS_H_ */
diff --git a/drivers/staging/media/rkvdec/rkvpu.c b/drivers/staging/media/rkvdec/rkvpu.c
index a525289e4245f8..d4e64e569f0665 100644
--- a/drivers/staging/media/rkvdec/rkvpu.c
+++ b/drivers/staging/media/rkvdec/rkvpu.c
@@ -41,33 +41,63 @@ static const struct v4l2_ctrl_ops rkvpu_ctrl_ops = {
.try_ctrl = rkvpu_try_ctrl,
};
-static const struct rkvpu_ctrl_desc rkvdec_h264_ctrl_descs[] = {
+static const struct rkvpu_ctrl controls[] = {
{
+ .codec = RKVPU_H264_ENCODER,
+ .cfg.id = V4L2_CID_STATELESS_H264_ENCODE_PARAMS,
+ },
+ {
+ .codec = RKVPU_H264_ENCODER,
+ .cfg.id = V4L2_CID_STATELESS_H264_ENCODE_RC,
+ },
+ {
+ .codec = RKVPU_H264_ENCODER,
+ .cfg.id = V4L2_CID_STATELESS_H264_SPS,
+ .cfg.ops = &rkvpu_ctrl_ops,
+ },
+ {
+ .codec = RKVPU_H264_ENCODER,
+ .cfg.id = V4L2_CID_STATELESS_H264_PPS,
+ },
+ {
+ .codec = RKVPU_H264_ENCODER,
+ .cfg.id = V4L2_CID_STATELESS_H264_ENCODE_FEEDBACK,
+ /* XXX: Check whether V4L2_CTRL_FLAG_READ_ONLY below can be set again. */
+// .flags = V4L2_CTRL_FLAG_READ_ONLY,
+ },
+ {
+ .codec = RKVPU_H264_DECODER,
.cfg.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
},
{
+ .codec = RKVPU_H264_DECODER,
.cfg.id = V4L2_CID_STATELESS_H264_SPS,
.cfg.ops = &rkvpu_ctrl_ops,
},
{
+ .codec = RKVPU_H264_DECODER,
.cfg.id = V4L2_CID_STATELESS_H264_PPS,
},
{
+ .codec = RKVPU_H264_DECODER,
.cfg.id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
},
{
+ .codec = RKVPU_H264_DECODER,
.cfg.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
.cfg.min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
.cfg.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
.cfg.def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
},
{
+ .codec = RKVPU_H264_DECODER,
.cfg.id = V4L2_CID_STATELESS_H264_START_CODE,
.cfg.min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
.cfg.def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
.cfg.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
},
{
+ .codec = RKVPU_H264_DECODER,
.cfg.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
.cfg.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
.cfg.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
@@ -76,29 +106,21 @@ static const struct rkvpu_ctrl_desc rkvdec_h264_ctrl_descs[] = {
.cfg.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
},
{
+ .codec = RKVPU_H264_DECODER,
.cfg.id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
.cfg.min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
.cfg.max = V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
},
-};
-
-static const struct rkvpu_ctrls rkvdec_h264_ctrls = {
- .ctrls = rkvdec_h264_ctrl_descs,
- .num_ctrls = ARRAY_SIZE(rkvdec_h264_ctrl_descs),
-};
-
-static const u32 rkvpu_h264_vp9_dst_fmts[] = {
- V4L2_PIX_FMT_NV12,
-};
-
-static const struct rkvpu_ctrl_desc rkvdec_vp9_ctrl_descs[] = {
{
+ .codec = RKVPU_VP9_DECODER,
.cfg.id = V4L2_CID_STATELESS_VP9_FRAME,
},
{
+ .codec = RKVPU_VP9_DECODER,
.cfg.id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
},
{
+ .codec = RKVPU_VP9_DECODER,
.cfg.id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
.cfg.min = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
.cfg.max = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
@@ -106,12 +128,13 @@ static const struct rkvpu_ctrl_desc rkvdec_vp9_ctrl_descs[] = {
},
};
-static const struct rkvpu_ctrls rkvdec_vp9_ctrls = {
- .ctrls = rkvdec_vp9_ctrl_descs,
- .num_ctrls = ARRAY_SIZE(rkvdec_vp9_ctrl_descs),
+static const struct rkvpu_hw_fmt rkvpu_h264_vp9_hw_dst_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ },
};
-static const struct rkvpu_fmt_desc rkvpu_fmts[] = {
+static const struct rkvpu_fmt_desc rkvpu_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_H264_SLICE,
.frmsize = {
@@ -122,11 +145,11 @@ static const struct rkvpu_fmt_desc rkvpu_fmts[] = {
.max_height = 2304,
.step_height = 16,
},
- .ctrls = &rkvdec_h264_ctrls,
.ops = &rkvdec_h264_fmt_ops,
- .num_dst_fmts = ARRAY_SIZE(rkvpu_h264_vp9_dst_fmts),
- .dst_fmts = rkvpu_h264_vp9_dst_fmts,
+ .num_peer_fmts = ARRAY_SIZE(rkvpu_h264_vp9_hw_dst_fmts),
+ .peer_fmts = rkvpu_h264_vp9_hw_dst_fmts,
.subsystem_flags = VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF,
+ .codec = RKVPU_H264_DECODER,
},
{
.fourcc = V4L2_PIX_FMT_VP9_FRAME,
@@ -138,26 +161,94 @@ static const struct rkvpu_fmt_desc rkvpu_fmts[] = {
.max_height = 2304,
.step_height = 64,
},
- .ctrls = &rkvdec_vp9_ctrls,
.ops = &rkvdec_vp9_fmt_ops,
- .num_dst_fmts = ARRAY_SIZE(rkvpu_h264_vp9_dst_fmts),
- .dst_fmts = rkvpu_h264_vp9_dst_fmts,
+ .num_peer_fmts = ARRAY_SIZE(rkvpu_h264_vp9_hw_dst_fmts),
+ .peer_fmts = rkvpu_h264_vp9_hw_dst_fmts,
+ .codec = RKVPU_VP9_DECODER,
}
};
+static const struct rkvpu_hw_fmt rkvpu_hw_src_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .enc_fmt = RKVPU_ENC_FMT_YUV420P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .enc_fmt = RKVPU_ENC_FMT_YUV420SP,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .enc_fmt = RKVPU_ENC_FMT_YUYV422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .enc_fmt = RKVPU_ENC_FMT_UYVY422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .enc_fmt = RKVPU_ENC_FMT_RGB565,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB444,
+ .enc_fmt = RKVPU_ENC_FMT_RGB444,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .enc_fmt = RKVPU_ENC_FMT_RGB888,
+ },
+};
+
+static const struct rkvpu_fmt_desc rkvpu_enc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 96,
+ .max_width = 4096,
+ .step_width = MB_DIM,
+ .min_height = 32,
+ .max_height = 4096,
+ .step_height = MB_DIM,
+ },
+ .ops = &rkvenc_h264_fmt_ops,
+ .subsystem_flags = VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF,
+ .num_peer_fmts = ARRAY_SIZE(rkvpu_hw_src_fmts),
+ .peer_fmts = rkvpu_hw_src_fmts,
+ .codec = RKVPU_H264_ENCODER,
+ },
+};
+
static const struct rkvpu_fmt_desc *
-rkvpu_find_fmt_desc(u32 fourcc)
+rkvpu_find_fmt_desc_supports_peer(const struct rkvpu_ctx *ctx, u32 fourcc)
{
- unsigned int i;
+ struct rkvpu_dev *rkvpu = ctx->dev;
+ const struct rkvpu_variant *variant = rkvpu->variant;
+ int i, j;
- for (i = 0; i < ARRAY_SIZE(rkvpu_fmts); i++) {
- if (rkvpu_fmts[i].fourcc == fourcc)
- return &rkvpu_fmts[i];
+ for (i = 0; i < variant->num_fmts; i++) {
+ for (j = 0; j < variant->fmts[i].num_peer_fmts; j++)
+ if (variant->fmts[i].peer_fmts[j].fourcc == fourcc)
+ return &variant->fmts[i];
}
return NULL;
}
+static const struct rkvpu_fmt_desc *
+rkvpu_find_fmt_desc(const struct rkvpu_ctx *ctx, u32 fourcc)
+{
+ struct rkvpu_dev *rkvpu = ctx->dev;
+ const struct rkvpu_variant *variant = rkvpu->variant;
+ int i;
+
+ for (i = 0; i < variant->num_fmts; i++)
+ if (variant->fmts[i].fourcc == fourcc)
+ return &variant->fmts[i];
+
+ return NULL;
+}
+
static void rkvpu_reset_fmt(struct rkvpu_ctx *ctx, struct v4l2_format *f,
u32 fourcc)
{
@@ -173,26 +264,30 @@ static void rkvpu_reset_fmt(struct rkvpu_ctx *ctx, struct v4l2_format *f,
static void rkvpu_reset_src_fmt(struct rkvpu_ctx *ctx)
{
struct v4l2_format *f = &ctx->src_fmt;
+ struct rkvpu_dev *rkvpu = ctx->dev;
+ const struct rkvpu_variant *variant = rkvpu->variant;
+ const struct rkvpu_fmt_desc *desc;
- ctx->fmt_desc = &rkvpu_fmts[0];
- rkvpu_reset_fmt(ctx, f, ctx->fmt_desc->fourcc);
+ desc = ctx->fmt_desc = &variant->fmts[0];
+ rkvpu_reset_fmt(ctx, f, desc->fourcc);
f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
f->fmt.pix_mp.width = ctx->fmt_desc->frmsize.min_width;
f->fmt.pix_mp.height = ctx->fmt_desc->frmsize.min_height;
- if (ctx->fmt_desc->ops->adjust_fmt)
- ctx->fmt_desc->ops->adjust_fmt(ctx, f);
+ if (desc->ops->adjust_fmt)
+ desc->ops->adjust_fmt(ctx, f);
}
static void rkvpu_reset_dst_fmt(struct rkvpu_ctx *ctx)
{
struct v4l2_format *f = &ctx->dst_fmt;
+ const struct rkvpu_fmt_desc *desc = ctx->fmt_desc;
- rkvpu_reset_fmt(ctx, f, ctx->fmt_desc->dst_fmts[0]);
+ rkvpu_reset_fmt(ctx, f, desc->peer_fmts[0].fourcc);
f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
v4l2_fill_pixfmt_mp(&f->fmt.pix_mp,
- ctx->fmt_desc->dst_fmts[0],
+ desc->peer_fmts[0].fourcc,
ctx->src_fmt.fmt.pix_mp.width,
ctx->src_fmt.fmt.pix_mp.height);
f->fmt.pix_mp.plane_fmt[0].sizeimage += 128 *
@@ -203,12 +298,13 @@ static void rkvpu_reset_dst_fmt(struct rkvpu_ctx *ctx)
static int rkvpu_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
+ struct rkvpu_ctx *ctx = fh_to_rkvpu_ctx(priv);
const struct rkvpu_fmt_desc *fmt;
if (fsize->index != 0)
return -EINVAL;
- fmt = rkvpu_find_fmt_desc(fsize->pixel_format);
+ fmt = rkvpu_find_fmt_desc(ctx, fsize->pixel_format);
if (!fmt)
return -EINVAL;
@@ -236,7 +332,9 @@ static int rkvpu_try_capture_fmt(struct file *file, void *priv,
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct rkvpu_ctx *ctx = fh_to_rkvpu_ctx(priv);
- const struct rkvpu_fmt_desc *src_desc;
+ struct rkvpu_dev *rkvpu = ctx->dev;
+ const struct rkvpu_variant *variant = rkvpu->variant;
+ const struct rkvpu_fmt_desc *desc;
unsigned int i;
/*
@@ -244,31 +342,57 @@ static int rkvpu_try_capture_fmt(struct file *file, void *priv,
* on the src end has not been set yet, it should point to the
* default value.
*/
- src_desc = ctx->fmt_desc;
- if (WARN_ON(!src_desc))
- return -EINVAL;
+ desc = ctx->fmt_desc;
+ if (!ctx->is_encoder) {
+ if (WARN_ON(!desc))
+ return -EINVAL;
- for (i = 0; i < src_desc->num_dst_fmts; i++) {
- if (src_desc->dst_fmts[i] == pix_mp->pixelformat)
- break;
+ for (i = 0; i < desc->num_peer_fmts; i++) {
+ if (desc->peer_fmts[i].fourcc == pix_mp->pixelformat)
+ break;
+ }
+
+ if (i == desc->num_peer_fmts)
+ pix_mp->pixelformat = desc->peer_fmts[0].fourcc;
+
+ v4l2_info(&rkvpu->v4l2_dev, "setting in %s found %c%c%c%c\n",
+ __func__,
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
+ /* Always apply the frmsize constraint of the src end. */
+ pix_mp->width = max(pix_mp->width, ctx->src_fmt.fmt.pix_mp.width);
+ pix_mp->height = max(pix_mp->height, ctx->src_fmt.fmt.pix_mp.height);
+ } else if (!desc) {
+ pix_mp->pixelformat = variant->fmts[0].fourcc;
+ desc = &variant->fmts[0];
}
- if (i == src_desc->num_dst_fmts)
- pix_mp->pixelformat = src_desc->dst_fmts[0];
+ if (ctx->is_encoder)
+ pix_mp->num_planes = 1;
- /* Always apply the frmsize constraint of the src end. */
- pix_mp->width = max(pix_mp->width, ctx->src_fmt.fmt.pix_mp.width);
- pix_mp->height = max(pix_mp->height, ctx->src_fmt.fmt.pix_mp.height);
v4l2_apply_frmsize_constraints(&pix_mp->width,
&pix_mp->height,
- &src_desc->frmsize);
+ &desc->frmsize);
- v4l2_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat,
- pix_mp->width, pix_mp->height);
- pix_mp->plane_fmt[0].sizeimage +=
- 128 *
- DIV_ROUND_UP(pix_mp->width, 16) *
- DIV_ROUND_UP(pix_mp->height, 16);
+ if (!ctx->is_encoder) {
+ v4l2_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat,
+ pix_mp->width, pix_mp->height);
+ pix_mp->plane_fmt[0].sizeimage +=
+ 128 *
+ DIV_ROUND_UP(pix_mp->width, 16) *
+ DIV_ROUND_UP(pix_mp->height, 16);
+ } else if (!pix_mp->plane_fmt[0].sizeimage) {
+ /*
+ * For coded formats the application can specify
+ * sizeimage. If the application passes a zero sizeimage,
+ * let's default to the maximum frame size.
+ */
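+ /* e.g. roughly 4 MiB for a 1920x1080 stream with max_depth == 2 */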
+ pix_mp->plane_fmt[0].sizeimage = desc->header_size +
+ pix_mp->width * pix_mp->height * desc->max_depth;
+ }
pix_mp->field = V4L2_FIELD_NONE;
return 0;
@@ -279,12 +403,50 @@ static int rkvpu_try_output_fmt(struct file *file, void *priv,
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct rkvpu_ctx *ctx = fh_to_rkvpu_ctx(priv);
+ struct rkvpu_dev *rkvpu = ctx->dev;
const struct rkvpu_fmt_desc *desc;
+ const struct rkvpu_variant *variant = rkvpu->variant;
+ unsigned int i;
+
+ /*
+ * The codec context should point to a src format desc, if the format
+ * on the src end has not been set yet, it should point to the
+ * default value.
+ */
+ desc = ctx->fmt_desc;
+
+ if (ctx->is_encoder) {
+ bool found = false;
+
+ if (WARN_ON(!desc))
+ return -EINVAL;
- desc = rkvpu_find_fmt_desc(pix_mp->pixelformat);
- if (!desc) {
- pix_mp->pixelformat = rkvpu_fmts[0].fourcc;
- desc = &rkvpu_fmts[0];
+ v4l2_info(&rkvpu->v4l2_dev, "trying in %s found %c%c%c%c\n",
+ __func__,
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
+ for (i = 0; i < desc->num_peer_fmts; i++) {
+ if (desc->peer_fmts[i].fourcc == pix_mp->pixelformat) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ pix_mp->pixelformat = desc->peer_fmts[0].fourcc;
+
+ v4l2_info(&rkvpu->v4l2_dev, "setting in %s %d %c%c%c%c\n",
+ __func__, i,
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
+ } else if (!desc) {
+ pix_mp->pixelformat = variant->fmts[0].fourcc;
+ desc = &variant->fmts[0];
}
v4l2_apply_frmsize_constraints(&pix_mp->width,
@@ -292,13 +454,17 @@ static int rkvpu_try_output_fmt(struct file *file, void *priv,
&desc->frmsize);
pix_mp->field = V4L2_FIELD_NONE;
- /* All src formats are considered single planar for now. */
- pix_mp->num_planes = 1;
+ /* All decoder src formats are considered single planar for now. */
+ if (!ctx->is_encoder)
+ pix_mp->num_planes = 1;
+
+ v4l2_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat,
+ pix_mp->width, pix_mp->height);
- if (desc->ops->adjust_fmt) {
+ if (ctx->fmt_desc->ops->adjust_fmt) {
int ret;
- ret = desc->ops->adjust_fmt(ctx, f);
+ ret = ctx->fmt_desc->ops->adjust_fmt(ctx, f);
if (ret)
return ret;
}
@@ -310,20 +476,56 @@ static int rkvpu_s_capture_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct rkvpu_ctx *ctx = fh_to_rkvpu_ctx(priv);
+ struct rkvpu_dev *rkvpu = ctx->dev;
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
struct vb2_queue *vq;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct v4l2_format *out_fmt;
int ret;
/* Change not allowed if queue is busy */
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
- V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (vb2_is_busy(vq))
return -EBUSY;
+ if (ctx->is_encoder) {
+ struct vb2_queue *peer_vq;
+
+ /*
+ * Since format change on the CAPTURE queue will reset
+ * the OUTPUT queue, we can't allow doing so
+ * when the OUTPUT queue has buffers allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (vb2_is_busy(peer_vq) &&
+ (pix_mp->pixelformat != ctx->dst_fmt.fmt.pix_mp.pixelformat ||
+ pix_mp->height != ctx->dst_fmt.fmt.pix_mp.height ||
+ pix_mp->width != ctx->dst_fmt.fmt.pix_mp.width))
+ return -EBUSY;
+ }
+
ret = rkvpu_try_capture_fmt(file, priv, f);
if (ret)
return ret;
ctx->dst_fmt = *f;
+
+ v4l2_info(&rkvpu->v4l2_dev, "setting in %s dst_fmt %c%c%c%c\n",
+ __func__,
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
+ /* Colorimetry information is always propagated to the OUTPUT side. */
+ out_fmt = &ctx->src_fmt;
+ out_fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
+ out_fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
+ out_fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ out_fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;
+
return 0;
}
@@ -331,44 +533,66 @@ static int rkvpu_s_output_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct rkvpu_ctx *ctx = fh_to_rkvpu_ctx(priv);
+ struct rkvpu_dev *rkvpu = ctx->dev;
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
const struct rkvpu_fmt_desc *desc;
struct v4l2_format *cap_fmt;
- struct vb2_queue *peer_vq, *vq;
+ struct vb2_queue *vq;
+ struct v4l2_pix_format_mplane *pix_mp;
int ret;
- /*
- * In order to support dynamic resolution change, the decoder admits
- * a resolution change, as long as the pixelformat remains. Can't be
- * done if streaming.
- */
vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- if (vb2_is_streaming(vq) ||
- (vb2_is_busy(vq) &&
- f->fmt.pix_mp.pixelformat != ctx->src_fmt.fmt.pix_mp.pixelformat))
- return -EBUSY;
-
- /*
- * Since format change on the OUTPUT queue will reset the CAPTURE
- * queue, we can't allow doing so when the CAPTURE queue has buffers
- * allocated.
- */
- peer_vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- if (vb2_is_busy(peer_vq))
- return -EBUSY;
-
ret = rkvpu_try_output_fmt(file, priv, f);
if (ret)
return ret;
- desc = rkvpu_find_fmt_desc(f->fmt.pix_mp.pixelformat);
+ if (!ctx->is_encoder) {
+ struct vb2_queue *peer_vq;
+ /*
+ * In order to support dynamic resolution change, the decoder admits
+ * a resolution change, as long as the pixelformat remains. Can't be
+ * done if streaming.
+ */
+ if (vb2_is_streaming(vq) ||
+ (vb2_is_busy(vq) &&
+ f->fmt.pix_mp.pixelformat != ctx->src_fmt.fmt.pix_mp.pixelformat))
+ return -EBUSY;
+
+ /*
+ * Since format change on the OUTPUT queue will reset the CAPTURE
+ * queue, we can't allow doing so when the CAPTURE queue has buffers
+ * allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (vb2_is_busy(peer_vq))
+ return -EBUSY;
+ } else {
+ /*
+ * The encoder doesn't admit a format change if
+ * there are OUTPUT buffers allocated.
+ */
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+ }
+
+ if (ctx->is_encoder)
+ desc = rkvpu_find_fmt_desc_supports_peer(ctx, f->fmt.pix_mp.pixelformat);
+ else
+ desc = rkvpu_find_fmt_desc(ctx, f->fmt.pix_mp.pixelformat);
if (!desc)
return -EINVAL;
ctx->fmt_desc = desc;
ctx->src_fmt = *f;
+ pix_mp = &f->fmt.pix_mp;
+
+ v4l2_info(&rkvpu->v4l2_dev, "setting in %s src_fmt %c%c%c%c\n",
+ __func__,
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
/*
- * Current desrc format might have become invalid with newly
+ * Current dst format might have become invalid with newly
* selected codec, so reset it to default just to be safe and
* keep internal driver state sane. User is mandated to set
* the dst format again after we return, so we don't need
@@ -376,7 +600,8 @@ static int rkvpu_s_output_fmt(struct file *file, void *priv,
*
* Note that this will propagate any size changes to the dst format.
*/
- rkvpu_reset_dst_fmt(ctx);
+ if (!ctx->is_encoder)
+ rkvpu_reset_dst_fmt(ctx);
/* Propagate colorspace information to capture. */
cap_fmt = &ctx->dst_fmt;
@@ -409,28 +634,33 @@ static int rkvpu_g_capture_fmt(struct file *file, void *priv,
return 0;
}
-static int rkvpu_enum_output_fmt(struct file *file, void *priv,
+static int rkvpu_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (f->index >= ARRAY_SIZE(rkvpu_fmts))
- return -EINVAL;
+ struct rkvpu_ctx *ctx = fh_to_rkvpu_ctx(priv);
+ struct rkvpu_dev *rkvpu = ctx->dev;
+ bool capture = V4L2_TYPE_IS_CAPTURE(f->type);
+ bool coded = capture == ctx->is_encoder;
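+ /* The coded end is CAPTURE for the encoder and OUTPUT for the decoder. */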
- f->pixelformat = rkvpu_fmts[f->index].fourcc;
- return 0;
-}
+ if (coded) {
+ const struct rkvpu_variant *variant = rkvpu->variant;
-static int rkvpu_enum_capture_fmt(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct rkvpu_ctx *ctx = fh_to_rkvpu_ctx(priv);
+ if (f->index >= variant->num_fmts)
+ return -EINVAL;
- if (WARN_ON(!ctx->fmt_desc))
- return -EINVAL;
+ f->pixelformat = variant->fmts[f->index].fourcc;
+ } else {
+ const struct rkvpu_fmt_desc *desc = ctx->fmt_desc;
- if (f->index >= ctx->fmt_desc->num_dst_fmts)
- return -EINVAL;
+ if (WARN_ON(!desc))
+ return -EINVAL;
+
+ if (f->index >= desc->num_peer_fmts)
+ return -EINVAL;
+
+ f->pixelformat = desc->peer_fmts[f->index].fourcc;
+ }
- f->pixelformat = ctx->fmt_desc->dst_fmts[f->index];
return 0;
}
@@ -444,8 +674,8 @@ static const struct v4l2_ioctl_ops rkvpu_ioctl_ops = {
.vidioc_s_fmt_vid_cap_mplane = rkvpu_s_capture_fmt,
.vidioc_g_fmt_vid_out_mplane = rkvpu_g_output_fmt,
.vidioc_g_fmt_vid_cap_mplane = rkvpu_g_capture_fmt,
- .vidioc_enum_fmt_vid_out = rkvpu_enum_output_fmt,
- .vidioc_enum_fmt_vid_cap = rkvpu_enum_capture_fmt,
+ .vidioc_enum_fmt_vid_out = rkvpu_enum_fmt,
+ .vidioc_enum_fmt_vid_cap = rkvpu_enum_fmt,
.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
@@ -467,7 +697,9 @@ static int rkvpu_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
struct device *alloc_devs[])
{
struct rkvpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct rkvpu_dev *rkvpu = ctx->dev;
struct v4l2_format *f;
+ struct v4l2_pix_format_mplane *pix_mp;
unsigned int i;
if (V4L2_TYPE_IS_OUTPUT(vq->type))
@@ -475,13 +707,23 @@ static int rkvpu_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
else
f = &ctx->dst_fmt;
+ pix_mp = &f->fmt.pix_mp;
+
+ v4l2_info(&rkvpu->v4l2_dev, "trying format %c%c%c%c\n",
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
if (*num_planes) {
- if (*num_planes != f->fmt.pix_mp.num_planes)
+ if (*num_planes != f->fmt.pix_mp.num_planes) {
return -EINVAL;
+ }
for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
- if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage)
+ if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage) {
return -EINVAL;
+ }
}
} else {
*num_planes = f->fmt.pix_mp.num_planes;
@@ -492,11 +734,69 @@ static int rkvpu_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
return 0;
}
+unsigned int rkvpu_h264_enc_rec_luma_size(unsigned int width,
+ unsigned int height)
+{
+ return round_up(width, MB_DIM) * round_up(height, MB_DIM);
+}
+
+unsigned int rkvpu_h264_enc_rec_image_size(unsigned int width,
+ unsigned int height)
+{
+ /* Reconstructed image is YUV 4:2:0 with 1.5 bpp. */
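+ /* e.g. 1920x1080 rounds up to 1920x1088: 2088960 luma + 1044480 chroma bytes */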
+ return rkvpu_h264_enc_rec_luma_size(width, height) * 3 / 2;
+}
+
+static int rkvpu_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkvpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct rkvpu_dev *vpu = ctx->dev;
+ struct rkvpu_enc_buf *enc_buf;
+ struct rkvpu_aux_buf *rec_buf;
+ struct rkvpu_aux_buf *ds_buf;
+ struct rkvpu_aux_buf *me_buf;
+
+ if (!ctx->is_encoder || V4L2_TYPE_IS_OUTPUT(vq->type))
+ return 0;
+
+ enc_buf = rkvpu_get_enc_buf(vbuf);
+ rec_buf = &enc_buf->rec_buf;
+ ds_buf = &enc_buf->ds_buf;
+ me_buf = &enc_buf->me_buf;
+
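+ /*
+ * Allocate per-buffer auxiliary memory: rec_buf holds the reconstructed
+ * reference picture, while ds_buf and me_buf presumably carry the
+ * downscaled picture and motion-estimation data; all three are sized
+ * like a full reconstructed frame for now.
+ */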
+ rec_buf->size = rkvpu_h264_enc_rec_image_size(ctx->src_fmt.fmt.pix_mp.width,
+ ctx->src_fmt.fmt.pix_mp.height);
+ rec_buf->cpu = dma_alloc_coherent(vpu->dev, rec_buf->size,
+ &rec_buf->dma, GFP_KERNEL);
+ if (!rec_buf->cpu)
+ return -ENOMEM;
+
+ ds_buf->size = rkvpu_h264_enc_rec_image_size(ctx->src_fmt.fmt.pix_mp.width,
+ ctx->src_fmt.fmt.pix_mp.height);
+ ds_buf->cpu = dma_alloc_coherent(vpu->dev, ds_buf->size,
+ &ds_buf->dma, GFP_KERNEL);
+ if (!ds_buf->cpu)
+ goto err_free_rec;
+
+ me_buf->size = rkvpu_h264_enc_rec_image_size(ctx->src_fmt.fmt.pix_mp.width,
+ ctx->src_fmt.fmt.pix_mp.height);
+ me_buf->cpu = dma_alloc_coherent(vpu->dev, me_buf->size,
+ &me_buf->dma, GFP_KERNEL);
+ if (!me_buf->cpu)
+ goto err_free_ds;
+
+ return 0;
+
+err_free_ds:
+ dma_free_coherent(vpu->dev, ds_buf->size, ds_buf->cpu, ds_buf->dma);
+err_free_rec:
+ dma_free_coherent(vpu->dev, rec_buf->size, rec_buf->cpu, rec_buf->dma);
+ return -ENOMEM;
+}
+
static int rkvpu_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
struct rkvpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct rkvpu_dev *rkvpu = ctx->dev;
struct v4l2_format *f;
+ struct v4l2_pix_format_mplane *pix_mp;
unsigned int i;
if (V4L2_TYPE_IS_OUTPUT(vq->type))
@@ -504,6 +804,14 @@ static int rkvpu_buf_prepare(struct vb2_buffer *vb)
else
f = &ctx->dst_fmt;
+ pix_mp = &f->fmt.pix_mp;
+
+ v4l2_info(&rkvpu->v4l2_dev, "trying format %c%c%c%c\n",
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
for (i = 0; i < f->fmt.pix_mp.num_planes; ++i) {
u32 sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;
@@ -516,8 +824,12 @@ static int rkvpu_buf_prepare(struct vb2_buffer *vb)
* (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
* it to buffer length).
*/
- if (V4L2_TYPE_IS_CAPTURE(vq->type))
- vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
+ if (V4L2_TYPE_IS_CAPTURE(vq->type)) {
+ if (ctx->is_encoder)
+ vb2_set_plane_payload(vb, 0, 0);
+ else
+ vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
+ }
return 0;
}
@@ -530,6 +842,30 @@ static void rkvpu_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
+static void rkvpu_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkvpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct rkvpu_dev *vpu = ctx->dev;
+ struct rkvpu_enc_buf *enc_buf;
+ struct rkvpu_aux_buf *rec_buf;
+ struct rkvpu_aux_buf *ds_buf;
+ struct rkvpu_aux_buf *me_buf;
+
+ if (!ctx->is_encoder || V4L2_TYPE_IS_OUTPUT(vq->type))
+ return;
+
+ enc_buf = rkvpu_get_enc_buf(vbuf);
+ rec_buf = &enc_buf->rec_buf;
+ ds_buf = &enc_buf->ds_buf;
+ me_buf = &enc_buf->me_buf;
+
+ dma_free_coherent(vpu->dev, rec_buf->size, rec_buf->cpu, rec_buf->dma);
+ dma_free_coherent(vpu->dev, ds_buf->size, ds_buf->cpu, ds_buf->dma);
+ dma_free_coherent(vpu->dev, me_buf->size, me_buf->cpu, me_buf->dma);
+}
+
static int rkvpu_buf_out_validate(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -548,13 +884,13 @@ static void rkvpu_buf_request_complete(struct vb2_buffer *vb)
static int rkvpu_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct rkvpu_ctx *ctx = vb2_get_drv_priv(q);
- const struct rkvpu_fmt_desc *desc;
+ const struct rkvpu_fmt_desc *desc = ctx->fmt_desc;
+
int ret;
if (V4L2_TYPE_IS_CAPTURE(q->type))
return 0;
- desc = ctx->fmt_desc;
if (WARN_ON(!desc))
return -EINVAL;
@@ -607,10 +943,12 @@ static void rkvpu_stop_streaming(struct vb2_queue *q)
static const struct vb2_ops rkvpu_queue_ops = {
.queue_setup = rkvpu_queue_setup,
+ .buf_init = rkvpu_buf_init,
.buf_prepare = rkvpu_buf_prepare,
.buf_queue = rkvpu_buf_queue,
.buf_out_validate = rkvpu_buf_out_validate,
.buf_request_complete = rkvpu_buf_request_complete,
+ .buf_cleanup = rkvpu_buf_cleanup,
.start_streaming = rkvpu_start_streaming,
.stop_streaming = rkvpu_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
@@ -635,15 +973,261 @@ static const struct media_device_ops rkvpu_media_ops = {
.req_queue = v4l2_m2m_request_queue,
};
+static int rkvpu_register_entity(struct media_device *mdev,
+ struct media_entity *entity,
+ const char *entity_name,
+ struct media_pad *pads, int num_pads,
+ int function, struct video_device *vdev)
+{
+ char *name;
+ int ret;
+
+ entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
+ if (function == MEDIA_ENT_F_IO_V4L) {
+ entity->info.dev.major = VIDEO_MAJOR;
+ entity->info.dev.minor = vdev->minor;
+ }
+
+ name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
+ entity_name);
+ if (!name)
+ return -ENOMEM;
+
+ entity->name = name;
+ entity->function = function;
+
+ ret = media_entity_pads_init(entity, num_pads, pads);
+ if (ret)
+ return ret;
+
+ ret = media_device_register_entity(mdev, entity);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rkvpu_attach_func(struct rkvpu_dev *vpu,
+ struct rkvpu_func *func)
+{
+ struct media_device *mdev = &vpu->mdev;
+ struct media_link *link;
+ int ret;
+
+ /* Create the three entities with their pads */
+ func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = rkvpu_register_entity(mdev, &func->vdev.entity, "source",
+ &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
+ &func->vdev);
+ if (ret)
+ return ret;
+
+ func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
+ func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ ret = rkvpu_register_entity(mdev, &func->proc, "proc",
+ func->proc_pads, 2, func->id,
+ &func->vdev);
+ if (ret)
+ goto err_rel_entity0;
+
+ func->sink_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = rkvpu_register_entity(mdev, &func->sink, "sink",
+ &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
+ &func->vdev);
+ if (ret)
+ goto err_rel_entity1;
+
+ /* Connect the three entities */
+ ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rel_entity2;
+
+ ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rm_links0;
+
+ /* Create video interface */
+ func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
+ 0, VIDEO_MAJOR,
+ func->vdev.minor);
+ if (!func->intf_devnode) {
+ ret = -ENOMEM;
+ goto err_rm_links1;
+ }
+
+ /* Connect the two DMA engines to the interface */
+ link = media_create_intf_link(&func->vdev.entity,
+ &func->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+
+ link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+ return 0;
+
+err_rm_devnode:
+ media_devnode_remove(func->intf_devnode);
+
+err_rm_links1:
+ media_entity_remove_links(&func->sink);
+
+err_rm_links0:
+ media_entity_remove_links(&func->proc);
+ media_entity_remove_links(&func->vdev.entity);
+
+err_rel_entity2:
+ media_device_unregister_entity(&func->sink);
+
+err_rel_entity1:
+ media_device_unregister_entity(&func->proc);
+
+err_rel_entity0:
+ media_device_unregister_entity(&func->vdev.entity);
+ return ret;
+}
+
+static void rkvpu_detach_func(struct rkvpu_func *func)
+{
+ media_devnode_remove(func->intf_devnode);
+ media_entity_remove_links(&func->sink);
+ media_entity_remove_links(&func->proc);
+ media_entity_remove_links(&func->vdev.entity);
+ media_device_unregister_entity(&func->sink);
+ media_device_unregister_entity(&func->proc);
+ media_device_unregister_entity(&func->vdev.entity);
+}
+
+static const struct v4l2_file_operations rkvpu_fops;
+static const struct of_device_id of_rkvpu_match[];
+
+static int rkvpu_add_func(struct rkvpu_dev *rkvpu, unsigned int funcid)
+{
+ const struct of_device_id *match;
+ struct rkvpu_func *func;
+ struct video_device *vfd;
+ int ret;
+
+ match = of_match_node(of_rkvpu_match, rkvpu->dev->of_node);
+ func = devm_kzalloc(rkvpu->dev, sizeof(*func), GFP_KERNEL);
+ if (!func) {
+ v4l2_err(&rkvpu->v4l2_dev, "Failed to allocate video device\n");
+ return -ENOMEM;
+ }
+
+ func->id = funcid;
+
+ vfd = &func->vdev;
+ vfd->fops = &rkvpu_fops;
+ vfd->release = video_device_release_empty;
+ vfd->lock = &rkvpu->vdev_lock;
+ vfd->v4l2_dev = &rkvpu->v4l2_dev;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->device_caps = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
+ vfd->ioctl_ops = &rkvpu_ioctl_ops;
+ snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
+ funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");
+
+ if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
+ rkvpu->encoder = func;
+ } else {
+ rkvpu->decoder = func;
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
+ }
+
+ video_set_drvdata(vfd, rkvpu);
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ v4l2_err(&rkvpu->v4l2_dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ ret = rkvpu_attach_func(rkvpu, func);
+ if (ret) {
+ v4l2_err(&rkvpu->v4l2_dev,
+ "Failed to attach functionality to the media device\n");
+ goto err_unreg_dev;
+ }
+
+ v4l2_info(&rkvpu->v4l2_dev, "registered %s as /dev/video%d\n",
+ vfd->name, vfd->num);
+
+ return 0;
+
+err_unreg_dev:
+ video_unregister_device(vfd);
+ return ret;
+}
+
+static int rkvpu_add_enc_func(struct rkvpu_dev *rkvpu)
+{
+ if (!(rkvpu->variant->codec & RKVPU_ENCODERS))
+ return 0;
+
+ return rkvpu_add_func(rkvpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+}
+
+static int rkvpu_add_dec_func(struct rkvpu_dev *rkvpu)
+{
+ if (!(rkvpu->variant->codec & RKVPU_DECODERS))
+ return 0;
+
+ return rkvpu_add_func(rkvpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+}
+
+static void rkvpu_remove_func(struct rkvpu_dev *rkvpu,
+ unsigned int funcid)
+{
+ struct rkvpu_func *func;
+
+ if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
+ func = rkvpu->encoder;
+ else
+ func = rkvpu->decoder;
+
+ if (!func)
+ return;
+
+ rkvpu_detach_func(func);
+ video_unregister_device(&func->vdev);
+}
+
+static void rkvpu_remove_enc_func(struct rkvpu_dev *rkvpu)
+{
+ rkvpu_remove_func(rkvpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+}
+
+static void rkvpu_remove_dec_func(struct rkvpu_dev *rkvpu)
+{
+ rkvpu_remove_func(rkvpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+}
+
static void rkvpu_job_finish_no_pm(struct rkvpu_ctx *ctx,
enum vb2_buffer_state result)
{
- if (ctx->fmt_desc->ops->done) {
+ const struct rkvpu_fmt_desc *desc = ctx->fmt_desc;
+
+ if (desc->ops->done) {
struct vb2_v4l2_buffer *src_buf, *dst_buf;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- ctx->fmt_desc->ops->done(ctx, src_buf, dst_buf, result);
+ desc->ops->done(ctx, src_buf, dst_buf, result);
}
v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
@@ -724,6 +1308,8 @@ static int rkvpu_queue_init(void *priv,
src_vq->ops = &rkvpu_queue_ops;
src_vq->mem_ops = &vb2_dma_contig_memops;
/*
* Driver does mostly sequential access, so sacrifice TLB efficiency
* for faster allocation. Also, no CPU access on the source queue,
@@ -738,14 +1324,18 @@ static int rkvpu_queue_init(void *priv,
src_vq->supports_requests = true;
src_vq->requires_requests = true;
+ if (ctx->is_encoder)
+ src_vq->buf_struct_size = sizeof(struct rkvpu_enc_buf);
+
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
dst_vq->bidirectional = true;
dst_vq->mem_ops = &vb2_dma_contig_memops;
- dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
- DMA_ATTR_NO_KERNEL_MAPPING;
+ dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
+ if (!ctx->is_encoder)
+ dst_vq->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
@@ -758,36 +1348,22 @@ static int rkvpu_queue_init(void *priv,
return vb2_queue_init(dst_vq);
}
-static int rkvpu_add_ctrls(struct rkvpu_ctx *ctx,
- const struct rkvpu_ctrls *ctrls)
-{
- unsigned int i;
-
- for (i = 0; i < ctrls->num_ctrls; i++) {
- const struct v4l2_ctrl_config *cfg = &ctrls->ctrls[i].cfg;
-
- v4l2_ctrl_new_custom(&ctx->ctrl_hdl, cfg, ctx);
- if (ctx->ctrl_hdl.error)
- return ctx->ctrl_hdl.error;
- }
-
- return 0;
-}
-
-static int rkvpu_init_ctrls(struct rkvpu_ctx *ctx)
+static int rkvpu_init_ctrls(struct rkvpu_ctx *ctx, int allowed_codecs)
{
- unsigned int i, nctrls = 0;
+ unsigned int i, nctrls = ARRAY_SIZE(controls);
int ret;
- for (i = 0; i < ARRAY_SIZE(rkvpu_fmts); i++)
- nctrls += rkvpu_fmts[i].ctrls->num_ctrls;
-
v4l2_ctrl_handler_init(&ctx->ctrl_hdl, nctrls);
- for (i = 0; i < ARRAY_SIZE(rkvpu_fmts); i++) {
- ret = rkvpu_add_ctrls(ctx, rkvpu_fmts[i].ctrls);
- if (ret)
- goto err_free_handler;
+ for (i = 0; i < nctrls; i++) {
+ if (!(allowed_codecs & controls[i].codec))
+ continue;
+
+ v4l2_ctrl_new_custom(&ctx->ctrl_hdl,
+ &controls[i].cfg, ctx);
+ if (ctx->ctrl_hdl.error) {
+ ret = ctx->ctrl_hdl.error;
+ goto err_free_handler;
+ }
}
ret = v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
@@ -805,21 +1381,34 @@ err_free_handler:
static int rkvpu_open(struct file *filp)
{
struct rkvpu_dev *rkvpu = video_drvdata(filp);
+ struct video_device *vdev = video_devdata(filp);
+ struct rkvpu_func *func = rkvpu_vdev_to_func(vdev);
struct rkvpu_ctx *ctx;
- int ret;
+ int allowed_codecs, ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = rkvpu;
+ if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
+ allowed_codecs = rkvpu->variant->codec & RKVPU_ENCODERS;
+ ctx->is_encoder = true;
+ } else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
+ allowed_codecs = rkvpu->variant->codec & RKVPU_DECODERS;
+ ctx->is_encoder = false;
+ } else {
+ ret = -ENODEV;
+ goto err_free_ctx;
+ }
+
rkvpu_reset_src_fmt(ctx);
rkvpu_reset_dst_fmt(ctx);
v4l2_fh_init(&ctx->fh, video_devdata(filp));
- ret = rkvpu_init_ctrls(ctx);
+ ret = rkvpu_init_ctrls(ctx, allowed_codecs);
if (ret)
- goto err_free_ctx;
+ goto err_free_fh;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rkvpu->m2m_dev, ctx,
rkvpu_queue_init);
@@ -836,6 +1425,9 @@ static int rkvpu_open(struct file *filp)
err_cleanup_ctrls:
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+err_free_fh:
+ v4l2_fh_exit(&ctx->fh);
+
err_free_ctx:
kfree(ctx);
return ret;
@@ -888,44 +1480,32 @@ static int rkvpu_v4l2_init(struct rkvpu_dev *rkvpu)
rkvpu->mdev.ops = &rkvpu_media_ops;
rkvpu->v4l2_dev.mdev = &rkvpu->mdev;
- rkvpu->vdev.lock = &rkvpu->vdev_lock;
- rkvpu->vdev.v4l2_dev = &rkvpu->v4l2_dev;
- rkvpu->vdev.fops = &rkvpu_fops;
- rkvpu->vdev.release = video_device_release_empty;
- rkvpu->vdev.vfl_dir = VFL_DIR_M2M;
- rkvpu->vdev.device_caps = V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_M2M_MPLANE;
- rkvpu->vdev.ioctl_ops = &rkvpu_ioctl_ops;
- video_set_drvdata(&rkvpu->vdev, rkvpu);
- strscpy(rkvpu->vdev.name, "rkvpu", sizeof(rkvpu->vdev.name));
-
- ret = video_register_device(&rkvpu->vdev, VFL_TYPE_VIDEO, -1);
+ ret = rkvpu_add_enc_func(rkvpu);
if (ret) {
- v4l2_err(&rkvpu->v4l2_dev, "Failed to register video device\n");
+ dev_err(rkvpu->dev, "Failed to register encoder\n");
goto err_cleanup_mc;
}
- ret = v4l2_m2m_register_media_controller(rkvpu->m2m_dev, &rkvpu->vdev,
- MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ ret = rkvpu_add_dec_func(rkvpu);
if (ret) {
- v4l2_err(&rkvpu->v4l2_dev,
- "Failed to initialize V4L2 M2M media controller\n");
- goto err_unregister_vdev;
+ dev_err(rkvpu->dev, "Failed to register decoder\n");
+ goto err_rm_enc_func;
}
ret = media_device_register(&rkvpu->mdev);
if (ret) {
v4l2_err(&rkvpu->v4l2_dev, "Failed to register media device\n");
- goto err_unregister_mc;
+ goto err_rm_dec_func;
}
return 0;
-err_unregister_mc:
- v4l2_m2m_unregister_media_controller(rkvpu->m2m_dev);
-err_unregister_vdev:
- video_unregister_device(&rkvpu->vdev);
+err_rm_dec_func:
+ rkvpu_remove_dec_func(rkvpu);
+
+err_rm_enc_func:
+ rkvpu_remove_enc_func(rkvpu);
err_cleanup_mc:
media_device_cleanup(&rkvpu->mdev);
@@ -939,8 +1519,8 @@ err_unregister_v4l2:
static void rkvpu_v4l2_cleanup(struct rkvpu_dev *rkvpu)
{
media_device_unregister(&rkvpu->mdev);
- v4l2_m2m_unregister_media_controller(rkvpu->m2m_dev);
- video_unregister_device(&rkvpu->vdev);
+ rkvpu_remove_dec_func(rkvpu);
+ rkvpu_remove_enc_func(rkvpu);
media_device_cleanup(&rkvpu->mdev);
v4l2_m2m_release(rkvpu->m2m_dev);
v4l2_device_unregister(&rkvpu->v4l2_dev);
@@ -950,7 +1530,7 @@ static irqreturn_t rkvpu_irq_handler(int irq, void *priv)
{
struct rkvpu_dev *rkvpu = priv;
struct rkvpu_ctx *ctx = v4l2_m2m_get_curr_priv(rkvpu->m2m_dev);
- const struct rkvpu_desc *desc = ctx->fmt_desc;
+ const struct rkvpu_fmt_desc *desc = ctx->fmt_desc;
enum vb2_buffer_state state = VB2_BUF_STATE_ERROR;
if (WARN_ON(!desc))
@@ -974,7 +1554,7 @@ static void rkvpu_watchdog_func(struct work_struct *work)
struct rkvpu_dev *rkvpu = container_of(to_delayed_work(work),
struct rkvpu_dev, watchdog_work);
struct rkvpu_ctx *ctx = v4l2_m2m_get_curr_priv(rkvpu->m2m_dev);
- const struct rkvpu_desc *desc = ctx->fmt_desc;
+ const struct rkvpu_fmt_desc *desc = ctx->fmt_desc;
dev_err(rkvpu->dev, "Frame processing timed out!\n");
@@ -987,8 +1567,21 @@ static void rkvpu_watchdog_func(struct work_struct *work)
rkvpu_job_finish(ctx, VB2_BUF_STATE_ERROR);
}
+const struct rkvpu_variant rkvdpu_vpu_variant = {
+ .fmts = rkvpu_dec_fmts,
+ .num_fmts = ARRAY_SIZE(rkvpu_dec_fmts),
+ .codec = RKVPU_H264_DECODER | RKVPU_VP9_DECODER,
+};
+
+const struct rkvpu_variant rkvepu540_vpu_variant = {
+ .fmts = rkvpu_enc_fmts,
+ .num_fmts = ARRAY_SIZE(rkvpu_enc_fmts),
+ .codec = RKVPU_H264_ENCODER,
+};
+
static const struct of_device_id of_rkvpu_match[] = {
- { .compatible = "rockchip,rk3399-vdec" },
+ { .compatible = "rockchip,rk3399-vdec", .data = &rkvdpu_vpu_variant },
+ { .compatible = "rockchip,rk356x-venc", .data = &rkvepu540_vpu_variant },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rkvpu_match);
@@ -999,6 +1592,7 @@ static const char * const rkvpu_clk_names[] = {
static int rkvpu_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct rkvpu_dev *rkvpu;
unsigned int i;
int ret, irq;
@@ -1012,6 +1606,9 @@ static int rkvpu_probe(struct platform_device *pdev)
mutex_init(&rkvpu->vdev_lock);
INIT_DELAYED_WORK(&rkvpu->watchdog_work, rkvpu_watchdog_func);
+ match = of_match_node(of_rkvpu_match, pdev->dev.of_node);
+ rkvpu->variant = match->data;
+
rkvpu->clocks = devm_kcalloc(&pdev->dev, ARRAY_SIZE(rkvpu_clk_names),
sizeof(*rkvpu->clocks), GFP_KERNEL);
if (!rkvpu->clocks)
@@ -1020,7 +1617,8 @@ static int rkvpu_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(rkvpu_clk_names); i++)
rkvpu->clocks[i].id = rkvpu_clk_names[i];
- ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(rkvpu_clk_names),
+ ret = devm_clk_bulk_get_optional(&pdev->dev,
+ ARRAY_SIZE(rkvpu_clk_names),
rkvpu->clocks);
if (ret)
return ret;
diff --git a/drivers/staging/media/rkvdec/rkvpu.h b/drivers/staging/media/rkvdec/rkvpu.h
index e53551d26cdc90..f1e2f3df59e878 100644
--- a/drivers/staging/media/rkvdec/rkvpu.h
+++ b/drivers/staging/media/rkvdec/rkvpu.h
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <linux/wait.h>
+#include <linux/regmap.h>
#include <linux/clk.h>
#include <media/v4l2-ctrls.h>
@@ -22,15 +23,22 @@
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
+#define MB_DIM 16
+#define TILE_MB_DIM 4
+#define MB_WIDTH(w) DIV_ROUND_UP(w, MB_DIM)
+#define MB_HEIGHT(h) DIV_ROUND_UP(h, MB_DIM)
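+/* e.g. MB_WIDTH(1920) == 120 and MB_HEIGHT(1080) == 68 macroblocks */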
+
struct rkvpu_ctx;
-struct rkvpu_ctrl_desc {
- struct v4l2_ctrl_config cfg;
-};
+#define RKVPU_H264_ENCODER BIT(1)
+#define RKVPU_ENCODERS 0x0000ffff
+#define RKVPU_H264_DECODER BIT(18)
+#define RKVPU_VP9_DECODER BIT(20)
+#define RKVPU_DECODERS 0xffff0000
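+/* Encoder codec bits occupy the low 16 bits of the codec mask, decoder bits the high 16. */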
-struct rkvpu_ctrls {
- const struct rkvpu_ctrl_desc *ctrls;
- unsigned int num_ctrls;
+struct rkvpu_ctrl {
+ unsigned int codec;
+ struct v4l2_ctrl_config cfg;
};
struct rkvpu_run {
@@ -77,30 +85,88 @@ struct rkvpu_ops {
int (*try_ctrl)(struct rkvpu_ctx *ctx, struct v4l2_ctrl *ctrl);
};
+/**
+ * enum rkvpu_enc_fmt - source format ID for hardware registers.
+ *
+ * @RKVPU_ENC_FMT_YUV420P: Y/CbCr 4:2:0 planar format
+ * @RKVPU_ENC_FMT_YUV420SP: Y/CbCr 4:2:0 semi-planar format
+ * @RKVPU_ENC_FMT_YUYV422: YUV 4:2:2 packed format (YUYV)
+ * @RKVPU_ENC_FMT_UYVY422: YUV 4:2:2 packed format (UYVY)
+ * @RKVPU_ENC_FMT_RGB565: RGB 5:6:5 packed format
+ * @RKVPU_ENC_FMT_RGB444: RGB 4:4:4 packed format
+ * @RKVPU_ENC_FMT_RGB888: RGB 8:8:8 packed format
+ * @RKVPU_ENC_FMT_RGB101010: RGB 10:10:10 packed format
+ */
+enum rkvpu_enc_fmt {
+ RKVPU_ENC_FMT_YUV420P = 0,
+ RKVPU_ENC_FMT_YUV420SP = 1,
+ RKVPU_ENC_FMT_YUYV422 = 2,
+ RKVPU_ENC_FMT_UYVY422 = 3,
+ RKVPU_ENC_FMT_RGB565 = 4,
+ RKVPU_ENC_FMT_RGB444 = 5,
+ RKVPU_ENC_FMT_RGB888 = 6,
+ RKVPU_ENC_FMT_RGB101010 = 7,
+};
+
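+/*
+ * A raw format on the uncompressed side of a codec. For encoder sources the
+ * enc_fmt member carries the hardware source-format ID (enum rkvpu_enc_fmt).
+ */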
+struct rkvpu_hw_fmt {
+ enum rkvpu_enc_fmt enc_fmt;
+ struct v4l2_frmsize_stepwise frmsize;
+ u32 fourcc;
+};
+
struct rkvpu_fmt_desc {
+ char *name;
+ unsigned int codec;
+ int header_size;
u32 fourcc;
- struct v4l2_frmsize_stepwise frmsize;
- const struct rkvpu_ctrls *ctrls;
+ int max_depth;
const struct rkvpu_ops *ops;
- unsigned int num_dst_fmts;
- const u32 *dst_fmts;
+ unsigned int num_peer_fmts;
+ const struct rkvpu_hw_fmt *peer_fmts;
+ struct v4l2_frmsize_stepwise frmsize;
u32 subsystem_flags;
};
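+/*
+ * Per-compatible hardware description: the codec bits the IP implements and
+ * the coded-format descriptors it exposes, selected through of_device_id data.
+ */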
+struct rkvpu_variant {
+ unsigned int codec;
+ unsigned int num_fmts;
+ const struct rkvpu_fmt_desc *fmts;
+};
+
+struct rkvpu_func {
+ unsigned int id;
+ struct video_device vdev;
+ struct media_pad source_pad;
+ struct media_entity sink;
+ struct media_pad sink_pad;
+ struct media_entity proc;
+ struct media_pad proc_pads[2];
+ struct media_intf_devnode *intf_devnode;
+};
+
+static inline struct rkvpu_func *
+rkvpu_vdev_to_func(struct video_device *vdev)
+{
+ return container_of(vdev, struct rkvpu_func, vdev);
+}
+
struct rkvpu_dev {
struct v4l2_device v4l2_dev;
struct media_device mdev;
- struct video_device vdev;
+ struct rkvpu_func *encoder;
+ struct rkvpu_func *decoder;
struct v4l2_m2m_dev *m2m_dev;
struct device *dev;
struct clk_bulk_data *clocks;
void __iomem *regs;
struct mutex vdev_lock; /* serializes ioctls */
struct delayed_work watchdog_work;
+
+ const struct rkvpu_ops *ops;
+
+ const struct rkvpu_variant *variant;
};
struct rkvpu_ctx {
struct v4l2_fh fh;
+ bool is_encoder;
struct v4l2_format src_fmt;
struct v4l2_format dst_fmt;
const struct rkvpu_fmt_desc *fmt_desc;
@@ -118,11 +184,32 @@ struct rkvpu_aux_buf {
void *cpu;
dma_addr_t dma;
size_t size;
+ unsigned long attrs;
+};
+
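+/*
+ * Encoder capture-buffer state: the m2m buffer plus the auxiliary rec/ds/me
+ * allocations set up in rkvpu_buf_init() and released in rkvpu_buf_cleanup().
+ */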
+struct rkvpu_enc_buf {
+ struct v4l2_m2m_buffer m2m_buf;
+ struct rkvpu_aux_buf rec_buf;
+ struct rkvpu_aux_buf ds_buf;
+ struct rkvpu_aux_buf me_buf;
};
void rkvpu_run_preamble(struct rkvpu_ctx *ctx, struct rkvpu_run *run);
void rkvpu_run_postamble(struct rkvpu_ctx *ctx, struct rkvpu_run *run);
+static inline struct rkvpu_enc_buf *
+rkvpu_get_enc_buf(struct vb2_v4l2_buffer *v4l2_buf)
+{
+ struct v4l2_m2m_buffer *m2m_buf;
+ struct rkvpu_enc_buf *enc_buf;
+
+ m2m_buf = container_of(v4l2_buf, struct v4l2_m2m_buffer, vb);
+ enc_buf = container_of(m2m_buf, struct rkvpu_enc_buf, m2m_buf);
+
+ return enc_buf;
+}
+
+extern const struct rkvpu_ops rkvenc_h264_fmt_ops;
extern const struct rkvpu_ops rkvdec_h264_fmt_ops;
extern const struct rkvpu_ops rkvdec_vp9_fmt_ops;
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index b908e82864bd7a..711448b046a4d5 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1617,6 +1617,17 @@ struct v4l2_ctrl_h264_encode_params {
__u32 flags; /* V4L2_H264_ENCODE_FLAG_ */
+ /* NAL unit parameters */
+ __u8 nal_reference_idc; /* 2 bits */
+ __u8 nalu_type; /* 5 bits */
+
+ /* Unchanged parameters */
+ __u8 num_ref_idx_override; /* boolean */
+
+ /* IDR dec_ref_pic_marking() flags */
+ __u8 no_output_of_prior_pics; /* boolean */
+ __u8 long_term_reference_flag; /* boolean */
+
/* Reference */
__u64 reference_ts;
@@ -1639,6 +1650,8 @@ struct v4l2_ctrl_h264_encode_rc {
__u32 cp_target[10];
__s32 cp_target_error[6];
__s32 cp_qp_delta[7];
+
+ __u32 target_bits;
};
#define V4L2_CID_STATELESS_H264_ENCODE_FEEDBACK (V4L2_CID_CODEC_STATELESS_BASE + 10)